{
"source": "JianmingGuo/gateway-IDS",
"score": 2
}
#### File: siov/siov1/view.py
```python
from django.shortcuts import render
# Create your views here.
def tohome(request):
return render(request,'index.html')
def about(request):
return render(request,'about.html')
def contact(request):
return render(request,'contact.html')
def canids(request):
return render(request,'canids.html')
def index(request):
return render(request,'index.html')
def gateway(request):
return render(request,'gateway.html')
def blog(request):
return render(request,'blog.html')
```
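For reference, a minimal `urls.py` sketch of how these views might be routed; the paths and import are assumptions for illustration and not part of the repository:
```python
# Hypothetical URL configuration for the views above (route paths are assumptions).
from django.urls import path
from siov1 import view

urlpatterns = [
    path('', view.tohome),
    path('about/', view.about),
    path('contact/', view.contact),
    path('canids/', view.canids),
    path('index/', view.index),
    path('gateway/', view.gateway),
    path('blog/', view.blog),
]
```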
#### File: sourcecode/ANN/ann.py
```python
import tensorflow.compat.v1 as tf
tf.disable_eager_execution() # turn off eager execution
tf.disable_v2_behavior() # disable TensorFlow 2.x behavior
import numpy as np
# number of training steps
training_steps = 30000
# build a synthetic toy dataset (left commented out below)
'''data=[]
label=[]
for i in range(200):
x1=np.random.uniform(-1,1)
x2=np.random.uniform(0,2)
if x1**2 +x2**2 <= 1:
data.append([np.random.normal(x1,0.1),np.random.normal(x2,0.1)])
label.append(0)
else:
data.append([np.random.normal(x1,0.1),np.random.normal(x2,0.1)])
label.append(1)
#reshape into arrays
data = np.hstack(data).reshape(-1,2)
label =np.hstack(label).reshape(-1,1)
#reader = csv.reader(open('f://dos1.csv'))'''
# read the contents of the CSV files
filename_queue1 = tf.train.string_input_producer(["f://spoofing1.csv"])
reader1 = tf.TextLineReader()
key1, value1 = reader1.read(filename_queue1)
filename_queue2 = tf.train.string_input_producer(["f://spoofing2.csv"])
reader2 = tf.TextLineReader()
key2, value2 = reader2.read(filename_queue2)
record_defaults = [[1.0],[1.0],[1.0],[1.0],[1.0],[1.0],[1.0],[1.0],[1.0],[1.0]]
col1,col2,col3,col4,col5,col6,col7,col8,col9,col10= tf.decode_csv(value1, record_defaults=record_defaults)
features = tf.concat([[col1],[col2],[col3],[col4],[col5],[col6],[col7],[col8],[col9]],0)
init_op = tf.global_variables_initializer()
local_init_op = tf.local_variables_initializer()
data=[]
label=[]
with tf.Session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
for i in range(10000):
d,l=sess.run([features,col10])
data.append(d)
label.append(l)
coord.request_stop()
coord.join(threads)
col1,col2,col3,col4,col5,col6,col7,col8,col9,col10= tf.decode_csv(value2, record_defaults=record_defaults)
features = tf.concat([[col1],[col2],[col3],[col4],[col5],[col6],[col7],[col8],[col9]],0)
with tf.Session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
for i in range(10000):
d,l=sess.run([features,col10])
data.append(d)
label.append(l)
coord.request_stop()
coord.join(threads)
data = np.hstack(data).reshape(-1,9)
label =np.hstack(label).reshape(-1,1)
def hidden_layer(input_tensor,weight1,bias1,weight2,bias2,weight3,bias3):
layer1=tf.nn.relu(tf.matmul(input_tensor,weight1)+bias1)
layer2=tf.nn.relu(tf.matmul(layer1,weight2)+bias2)
return tf.matmul(layer2,weight3)+bias3
x = tf.placeholder(tf.float32,shape=(None,9),name="x-input")
y_= tf.placeholder(tf.float32,shape=(None,1),name="y-output")
weight1 = tf.Variable(tf.truncated_normal([9,50],stddev=0.1))
bias1 =tf.Variable(tf.constant(0.1,shape=[50]))
weight2 = tf.Variable(tf.truncated_normal([50,50],stddev=0.1))
bias2 =tf.Variable(tf.constant(0.1,shape=[50]))
weight3 = tf.Variable(tf.truncated_normal([50,1],stddev=0.1))
bias3 =tf.Variable(tf.constant(0.1,shape=[1]))
sample_size = len(data)
# network output y
y = hidden_layer(x,weight1,bias1,weight2,bias2,weight3,bias3)
# loss function (mean squared error)
error_loss = tf.reduce_sum(tf.pow(y_-y,2))/sample_size
tf.add_to_collection("losses",error_loss)
# add L2 regularization
#regularizer = tf.contrib.layers.l2_regularizer(0.01)
regularizer=tf.keras.regularizers.l2(0.001)
regularization = regularizer(weight1)+regularizer(weight2)+regularizer(weight3)
tf.add_to_collection("losses",regularization)
loss = tf.add_n(tf.get_collection("losses"))
# define the optimizer
train_op = tf.train.AdamOptimizer(0.05).minimize(loss)
#train_op = tf.train.GradientDescentOptimizer(0.05).minimize(loss)
# train the network (the accuracy evaluation below is left commented out)
with tf.Session() as sess:
tf.global_variables_initializer().run()
for i in range(training_steps):
sess.run(train_op,feed_dict={x:data,y_:label})
if i%2000 ==0:
loss_value = sess.run(loss,feed_dict={x:data,y_:label})
print("After %d steps, losses:%f" %(i,loss_value))
# validation
#test_accuracy = sess.run(accuracy,feed_dict={x:data1,y_:label1})
#print(test_accuracy*100)
```
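The evaluation step is left commented out above; a minimal sketch of what it could look like, assuming the labels in the tenth CSV column are 0/1 and reusing the in-memory arrays (a held-out set such as `data1`/`label1` would normally be used):
```python
# Sketch only: threshold the regression output at 0.5 and compare with the labels.
predictions = tf.cast(tf.greater(y, 0.5), tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predictions, y_), tf.float32))
# Inside the training session above, after the loop:
#   test_accuracy = sess.run(accuracy, feed_dict={x: data, y_: label})
#   print(test_accuracy * 100)
```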
#### File: sourcecode/Entropy-HTM/idEncoder.py
```python
import os
import random
import sys
import zipfile
from operator import itemgetter
import numpy
import numpy as np
import prettytable
from prettytable import PrettyTable
from nupic.frameworks.opf.model_factory import ModelFactory
import csv
import matplotlib.pyplot as plt
PAGE_CATEGORIES = [
'04f2', '00a0', '0370', '05a2', '0690', '0110', '04b0', '02b0', '05a0', '0165', '0081', '018f', '02a0', '0220', '0080', '01f1', '0120', '0153', '0260', '0545', '04f1', '0043', '059b', '0587', '05e4', '05f0', '04b1', '0329', '0382', '043f', '0510', '02c0', '051a', '0050', '00a1', '0018', '0034', '0044', '0440', '0042', '04f0', '0517', '0164', '0350', '0316'
]
# print(PAGE_CATEGORIES)
# Configure the sensor/input region using the "SDRCategoryEncoder" to encode
# the page category into SDRs suitable for processing directly by the TM
SENSOR_PARAMS = {
"verbosity": 0,
"encoders": {
"page": {
"fieldname": "page",
"name": "page",
"type": "SDRCategoryEncoder",
# The output of this encoder will be passed directly to the TM region,
# therefore the number of bits should match TM's "inputWidth" parameter
"n": 512,
# Use ~2% sparsity
"w": 12
},
},
}
# Configure the temporal memory to learn a sequence of page SDRs and make
# predictions on the next page of the sequence.
TM_PARAMS = {
"seed": 1960,
# Use "nupic.bindings.algorithms.TemporalMemoryCPP" algorithm
"temporalImp": "tm_cpp",
# Should match the encoder output
"inputWidth": 512,
"columnCount": 2048,
# Use 1 cell per column for first order prediction.
# Use more cells per column for variable order predictions.
"cellsPerColumn": 6,
}
# Configure the output region with a classifier used to decode TM SDRs back
# into pages
CL_PARAMS = {
"implementation": "cpp",
"regionName": "SDRClassifierRegion",
# alpha parameter controls how fast the classifier learns/forgets. Higher
# values make it adapt faster and forget older patterns faster.
"alpha": 0.1,
"steps": 1,
}
# Create a simple HTM network that will receive the current page as input, pass
# the encoded page SDR to the temporal memory to learn the sequences and
# interpret the output SDRs from the temporary memory using the SDRClassifier
# whose output will be a list of predicted next pages and their probabilities.
#
# page => [encoder] => [TM] => [classifier] => prediction
#
MODEL_PARAMS = {
"version": 1,
"model": "HTMPrediction",
"modelParams": {
# 'anomalyParams': { u'anomalyCacheRecords': None,
# u'autoDetectThreshold': None,
# u'autoDetectWaitRecords': None},
"inferenceType": "TemporalAnomaly",
"sensorParams": SENSOR_PARAMS,
# The purpose of the spatial pooler is to create a stable representation of
# the input SDRs. In our case the category encoder output is already a
# stable representation of the category therefore adding the spatial pooler
# to this network would not help and could potentially slow down the
# learning process
"spEnable": False,
"spParams": {},
"tmEnable": True,
"tmParams": TM_PARAMS,
"clParams": CL_PARAMS,
},
}
def main():
# Create HTM prediction model and enable inference on the page field
model = ModelFactory.create(MODEL_PARAMS)
model.enableInference({"predictedField": "page"})
# Use the model encoder to display the encoded SDRs the model will learn
sdr_table = PrettyTable(field_names=["Page Category",
"Encoded SDR (on bit indices)"],
sortby="Page Category")
sdr_table.align = "l"
encoder = model._getEncoder()
sdrout = np.zeros(encoder.getWidth(), dtype=np.bool)
for page in PAGE_CATEGORIES:
encoder.encodeIntoArray({"page": page}, sdrout)
sdr_table.add_row([page, sdrout.nonzero()[0]])
sdrlist = encoder.getsdrs()
numpy.save("idEn.npy", sdrlist)
if __name__ == "__main__":
random.seed(1)
np.random.seed(1)
main()
```
#### File: sourcecode/Entropy-HTM/webdata.py
```python
import os
import random
import sys
import zipfile
from operator import itemgetter
import numpy as np
import prettytable
from prettytable import PrettyTable
from nupic.frameworks.opf.model_factory import ModelFactory
import csv
import matplotlib.pyplot as plt
# List of page categories used in the dataset
# PAGE_CATEGORIES = [
# "frontpage", "news", "tech", "local", "opinion", "on-air", "misc", "weather",
# "msn-news", "health", "living", "business", "msn-sports", "sports", "summary",
# "bbs", "travel"
# ]
PAGE_CATEGORIES_1 = [
'04f2', '00a0', '0370', '05a2', '0690', '0110', '04b0', '02b0', '05a0', '0165', '0081', '018f', '02a0', '0220', '0080', '01f1', '0120', '0153', '0260', '0545', '04f1', '0043', '059b', '0587', '05e4', '05f0', '04b1', '0329', '0382', '043f', '0510', '02c0', '051a', '0050', '00a1', '0018', '0034', '0044', '0440', '0042', '04f0', '0517', '0164', '0350', '0316'
]
PAGE_CATEGORIES = [
'02A0', '0130', '0131', '0690', '00A1', '00A0', '02C0', '043F', '0545', '0329', '0140', '0440', '04B1', '0260', '0002', '04F0', '05F0', '018F', '02B0', '01F1', '0350', '0430', '0370', '0153', '0316', '05A2', '05A0'
]
# PAGE_CATEGORIES_PRE = [
# '04f2', '00a0', '0370', '05a2', '0690', '0110', '04b0', '02b0', '05a0', '0165', '0081', '018f', '02a0', '0220', '0080', '01f1', '0120', '0153', '0260', '0545', '04f1', '0043', '059b', '0587', '05e4', '05f0', '04b1', '0329', '0382', '043f', '0510', '02c0', '051a', '0050', '00a1', '0018', '0034', '0044', '0440', '0042', '04f0', '0517', '0164', '0350', '0316'
# ]
# PAGE_CATEGORIES = []
# for i1 in PAGE_CATEGORIES_PRE:
# for i2 in range(3):
# i3 = i1+'_'+str(i2)
# PAGE_CATEGORIES.append(i3)
# print(PAGE_CATEGORIES)
# Configure the sensor/input region using the "SDRCategoryEncoder" to encode
# the page category into SDRs suitable for processing directly by the TM
SENSOR_PARAMS = {
"verbosity": 0,
"encoders": {
"page": {
"fieldname": "page",
"name": "page",
"type": "SDRCategoryEncoder",
# The output of this encoder will be passed directly to the TM region,
# therefore the number of bits should match TM's "inputWidth" parameter
"n": 1024,
# Use ~2% sparsity
"w": 21
},
},
}
# Configure the temporal memory to learn a sequence of page SDRs and make
# predictions on the next page of the sequence.
TM_PARAMS = {
"seed": 1960,
# Use "nupic.bindings.algorithms.TemporalMemoryCPP" algorithm
"temporalImp": "tm_cpp",
# Should match the encoder output
"inputWidth": 1024,
"columnCount": 2048,
# Use 1 cell per column for first order prediction.
# Use more cells per column for variable order predictions.
"cellsPerColumn": 8,
}
# Configure the output region with a classifier used to decode TM SDRs back
# into pages
CL_PARAMS = {
"implementation": "cpp",
"regionName": "SDRClassifierRegion",
# alpha parameter controls how fast the classifier learns/forgets. Higher
# values make it adapt faster and forget older patterns faster.
"alpha": 0.1,
"steps": 1,
}
# Create a simple HTM network that will receive the current page as input, pass
# the encoded page SDR to the temporal memory to learn the sequences and
# interpret the output SDRs from the temporary memory using the SDRClassifier
# whose output will be a list of predicted next pages and their probabilities.
#
# page => [encoder] => [TM] => [classifier] => prediction
#
MODEL_PARAMS = {
"version": 1,
"model": "HTMPrediction",
"modelParams": {
# 'anomalyParams': { u'anomalyCacheRecords': None,
# u'autoDetectThreshold': None,
# u'autoDetectWaitRecords': None},
"inferenceType": "TemporalAnomaly",
"sensorParams": SENSOR_PARAMS,
# The purpose of the spatial pooler is to create a stable representation of
# the input SDRs. In our case the category encoder output is already a
# stable representation of the category therefore adding the spatial pooler
# to this network would not help and could potentially slow down the
# learning process
"spEnable": False,
"spParams": {},
"tmEnable": True,
"tmParams": TM_PARAMS,
"clParams": CL_PARAMS,
},
}
# Learn page sequences from the first 10,000 user sessions.
# We chose 10,000 because it gives results that are good enough for this example
# Use more records for learning to improve the prediction accuracy
LEARNING_RECORDS = 11000
def calc(i,win,copy):
p = win - 1
re = 0
while (p >= 0):
re += copy[i - p]
p -= 1
return re
def computeAccuracy(model, size, top):
"""
Compute prediction accuracy by checking if the next page in the sequence is
within the top N predictions calculated by the model
Args:
model: HTM model
size: Sample size
top: top N predictions to use
Returns: Probability the next page in the sequence is within the top N
predicted pages
"""
accuracy = []
# Load MSNBC web data file
with open("raw.csv") as datafile:
# Skip header lines (first 7 lines)
next(datafile)
reader = csv.reader(datafile)
pages = [(row[1:2][0]) for row in reader][50000:60000]
for i in range(len(pages)-1):
result = model.run({"page": pages[i]})
inferences = result.inferences["multiStepPredictions"][1]
# Get top N predictions for the next page
predicted = sorted(inferences.items(), key=itemgetter(1), reverse=True)[:top]
real = pages[i + 1]
real_part = real[:4]
cp_predicted = []
for i1 in predicted:
cp_predicted.append(i1[0][:4])
# Check if the next page is within the predicted pages
if real in zip(*predicted)[0]:
accuracy.append(1)
elif real_part in cp_predicted:
accuracy.append(0.5)
else:
accuracy.append(0)
copy = []
win = 100
for i1 in accuracy:
copy.append(i1) # copy is a duplicate of the accuracy list
if win:
top = len(copy)
i = win
while (i < top):
accuracy[i] = calc(i, win, copy)
i = i + 1
# plotting section below
X1 = [i for i in range(len(accuracy))]
plt.figure(figsize=(16, 6), dpi=80)
plt.figure(1)
ax1 = plt.subplot(211)
plt.plot(X1, accuracy)
plt.show()
return np.mean(accuracy)
def anomaly_output(model):
anomaly_list = []
win = 100
attack_point = 0
attack_end = 0
attack_dic = {}
switch = 0
attack_prob = 0
# Load MSNBC web data file
with open("raw.csv") as datafile:
# csvfile = file('anomaly.csv', 'w')
# writer = csv.writer(csvfile)
next(datafile)
reader = csv.reader(datafile)
pages = [(row[1]) for row in reader][50000:170000]
scores_list = []
for i in range(len(pages)-1):
result = model.run({"page": pages[i]})
inferences = result.inferences["multiStepPredictions"][1]
# predicted = sorted(inferences.items(), key=itemgetter(1), reverse=True)[:3]
# real = pages[i+1]
anomalyScore = result.inferences["anomalyScore"]
anomaly_list.append(anomalyScore)
# writer.writerow([pages[i], anomalyScore])
l1 = len(anomaly_list)
scores = 0
if l1 > win:
for j in range(win):
scores += anomaly_list[l1-j-1]
scores_list.append(scores)
if len(scores_list) > 1500:
if not switch:
history = np.mean(scores_list[-1500:-500])
if scores_list[-1] > 1.5 * history:
if attack_point == 0:
attack_point = len(scores_list)
attack_prob = 0
switch = 1
else:
attack_prob += 1
else:
attack_point = 0
switch = 1
attack_end = 0
attack_prob = 0
if attack_prob >= 1000:
attack_end = len(scores_list)
if attack_end:
attack_dic[attack_point] = attack_end
sys.stdout.write("\rInferenced {} Sessions".format(i + 1))
sys.stdout.flush()
# plotting section below
X1 = [i for i in range(len(scores_list))]
plt.figure(figsize=(16, 6), dpi=80)
plt.figure(1)
ax1 = plt.subplot(211)
plt.plot(X1, scores_list)
plt.show()
print
print(np.mean(scores_list))
print(np.var(scores_list))
print(np.std(scores_list))
maxindex = scores_list.index(max(scores_list))
print ("Intrude point: ",maxindex)
print ("Anomaly scores: ",max(scores_list))
print(attack_dic)
def readUserSession(datafile):
"""
Reads the user session record from the file's cursor position
Args:
datafile: Data file whose cursor points at the beginning of the record
Returns:
list of pages in the order clicked by the user
"""
for line in datafile:
pages = line.split()
total = len(pages)
# Select user sessions with 2 or more pages
if total < 2:
continue
# Exclude outliers by removing extreme long sessions
if total > 500:
continue
return [PAGE_CATEGORIES[int(i) - 1] for i in pages]
return []
def main():
# Create HTM prediction model and enable inference on the page field
model = ModelFactory.create(MODEL_PARAMS)
model.enableInference({"predictedField": "page"})
# Use the model encoder to display the encoded SDRs the model will learn
sdr_table = PrettyTable(field_names=["Page Category",
"Encoded SDR (on bit indices)"],
sortby="Page Category")
sdr_table.align = "l"
with open("raw.csv") as datafile:
# Skip header lines (first 7 lines)
next(datafile)
reader = csv.reader(datafile)
pages = []
for row in reader:
try:
pages.append(row[1])
except:
pass
pages_set = set(pages)
print pages_set
print
print "Start learning page sequences using the first {} user " \
"sessions".format(LEARNING_RECORDS)
model.enableLearning()
for count in range(LEARNING_RECORDS):
model.run({"page": pages[count]})
# Simple progress status
sys.stdout.write("\rLearned {} Sessions".format(count + 1))
sys.stdout.flush()
print "\nFinished learning"
model.disableLearning()
# accuracy = computeAccuracy(model, 100, 3)
anomaly_output(model)
# Use the new HTM model to predict next user session
# The test data starts right after the learning data
# print
# print "Start Inference using a new user session from the dataset"
# prediction_table = PrettyTable(field_names=["Page", "Prediction"],
# hrules=prettytable.ALL)
# prediction_table.align["Prediction"] = "l"
#
# # Infer one page of the sequence at the time
# model.resetSequenceStates()
# # session = readUserSession(datafile)
# for page in pages[LEARNING_RECORDS+1:]:
# result = model.run({"page": page})
# inferences = result.inferences["multiStepPredictions"][1]
#
# # Print predictions ordered by probabilities
# predicted = sorted(inferences.items(),
# key=itemgetter(1),
# reverse=True)
# prediction_table.add_row([page, zip(*predicted)[0]])
# predicted = sorted(inferences.items(),
# key=itemgetter(1),
# reverse=True)
# prediction_table.add_row([page, zip(*predicted)[0]])
# print "User Session to Predict: ", session
# print prediction_table
# print
# print "Compute prediction accuracy by checking if the next page in the " \
# "sequence is within the predicted pages calculated by the model:"
# accuracy = computeAccuracy(model, 100, 1)
# print " - Prediction Accuracy:", accuracy
# print accuracy
# print " - Accuracy Predicting Top 3 Pages:", accuracy
if __name__ == "__main__":
random.seed(1)
np.random.seed(1)
main()
```
{
"source": "JianmingGuo/SJTUsousou",
"score": 2
}
#### File: SJTUsoso/blog/blog_gen2.py
```python
import csv
import codecs
from datetime import datetime, timedelta
import random
import json
import sys
import os
import django
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASE_DIR)
os.environ['DJANGO_SETTINGS_MODULE'] = 'SJTUsoso.settings'
django.setup()
from blog.models import *
with open ('./data/prose.json','r',encoding='utf-8') as fp:
json_data = json.load(fp)
test = json_data[0]['content']
def context(test):
test1 = test.replace('\n','</p><p>')
test2 = test1[4:]+'</p>'
return test2
author = ['MekAkUActOR','<PASSWORD>','superuser','10','11','1','6']
for i in range(50):
title = json_data[i]['title']
text = json_data[i]['content']
content = context(text)
au = random.choice(author)
obj = Blog(author=au, title=title, blog_type='散文', content=content, created_time='2020-06-19 04:34:06.183597', last_updated_time='2020-06-19 04:34:06.183597', create_month='June', like_num=0)
obj.save()
```
#### File: SJTUsoso/blog/genBlog.py
```python
import random
import json
from generate_class import genModel
class myGenModel(genModel):
def post_process(self, para_word):
last_end = -1
for i, item in enumerate(para_word):
# Find where to end
if item in ['[SEP]', '[CLS]', '。', ',', ';', '.']:
last_end = i
# Replace words
if item == '[MASK]' or item == '[UNK]':
para_word[i] = ''
elif item == '[CLS]' or item == '[SEP]':
para_word[i] = '\n'
# End paragraph at last_end
if para_word[last_end-1] != '。':
para_word[last_end] = '。'
para_text = ''.join(para_word[:last_end+1]).strip()
return para_text
def gen_word(lst, empty=0.):
if random.random() < empty:
return ''
return random.choice(lst)
def gen_title():
lst_ver = ['论', '论', '论', '关于', '关于', '关于', '谈谈', '谈', '谈一谈',
'说说', '说一说', '聊聊', '聊一聊', '讲讲', '讲一讲',
'记', '记', '记', '记下', '写写', '写一写', ]
lst_pre = ['我', '我', '我那', '这', '那', '那', '一些', ]
lst_adj = ['美丽的', '可爱的', '蓬勃的', '感人的', '动人的', '璀璨的',
'轻快的', '晶莹的', '淡淡的', '匆匆的', '碌碌的', '茫茫的',
'逝去的', '失去的', '怅然的', '迷惘的', '昔日的', '冷冽的',
'经历的', '忘却的', '听闻的', '所知道的', '所了解的', '难以置信的',
'想象中的', '希冀中的', '希望里的', '盼望着的', '热望着的', ]
lst_nou = ['时光', '流年', '记忆', '年华', '青春', '往年', '岁月',
'春光', '秋日', '盛夏', '寒冬', '春草', '冬雪', '春夏秋冬',
'思考', '感想', '想法', '感触', '体悟', '滋味', '随想',
'母亲', '友人', '爱情', '亲情', '友谊', '倩影', '印象',
'天空', '大地', '生命', '万物', '永恒', '人间', '人和事',
'城市', '小城', '乡野', '老屋', '空气', '景色', '景物', ]
title = gen_word(lst_ver, 0.35) + gen_word(lst_pre, 0.55) + gen_word(lst_adj, 0.35) + gen_word(lst_nou)
return title
def gen_content(model, title):
n_ph = random.randint(1, 4)
temp = random.uniform(0.5, 1.5)
for i in range(n_ph):
length = random.randint(80, 350)
if i == 0:
intitle = title + '。'
cont = model.gen_ph(intitle, length=length, temperature=temp-0.3)
cont = cont[len(intitle):]
else:
cont += model.gen_ph(length=length, temperature=temp) # append the extra paragraph
model.clear()
return cont
def gen_dict(model):
title = gen_title()
content = gen_content(model, title)
return {'title': title, 'content': content}
def file_w(lst, fpath):
jsn = json.dumps(lst, ensure_ascii=False)
with open(fpath, 'w', encoding='utf-8') as tf:
tf.write(jsn)
def file_r(fpath):
with open(fpath, 'r', encoding='utf-8') as tf:
jsn = tf.read()
lst = json.loads(jsn)
return lst
if __name__ == '__main__':
# import sys
# import os
# from tqdm import tqdm
# import django
# BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# sys.path.append(BASE_DIR)
#
# os.environ['DJANGO_SETTINGS_MODULE'] = 'SJTUsoso.settings'
# django.setup()
#
# from blog.models import MessageBoard
model = myGenModel(model_path='model/prose_pretrain',
tokenizer_path='model/prose_pretrain/vocab.txt', verbose=1)
lst = []
for i in range(3):
dct = gen_dict(model)
print(dct['title'])
print(dct['content'])
lst.append(dct)
file_w(lst, 'generated/prose.json')
# nlst = file_r('generated/prose.json')
# print(nlst)
```
#### File: SJTUsoso/blog/icf.py
```python
import math
import pymysql
from blog.models import Rate
class ItemBasedCF:
def __init__(self):
# 读取文件,并生成用户-物品的评分表和测试集
self.train = dict()
result= Rate.objects.values("mark", "user_id", "video_id")
for i in result:
score = i["mark"]
user = i["user_id"]
item = i["video_id"]
self.train.setdefault(user, {})
self.train[user][item] = int(float(score))
#print(self.train)
def ItemSimilarity(self):
# build the item-item co-occurrence matrix
cooccur = dict() # item-item co-occurrence matrix
buy = dict() # number of distinct users who rated each item (N)
for user, items in self.train.items():
for i in items.keys():
buy.setdefault(i, 0)
buy[i] += 1
cooccur.setdefault(i, {})
for j in items.keys():
if i == j: continue
cooccur[i].setdefault(j, 0)
cooccur[i][j] += 1
# compute the similarity matrix
self.similar = dict()
for i, related_items in cooccur.items():
self.similar.setdefault(i, {})
for j, cij in related_items.items():
self.similar[i][j] = cij / (math.sqrt(buy[i] * buy[j]))
#print(self.similar)
return self.similar
# recommend items for the given user: use the K most similar items, return the top N items
def Recommend(self, user, K=10, N=10):
rank = dict()
action_item = self.train[user]
# items the user has already rated, with their scores
for item, score in action_item.items():
sortedItems = sorted(self.similar[item].items(), key=lambda x: x[1], reverse=True)[0:K]
for j, wj in sortedItems:
if j in action_item.keys():
continue
rank.setdefault(j, 0)
rank[j] += score * wj
return dict(sorted(rank.items(), key=lambda x: x[1], reverse=True)[0:N])
```
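A minimal usage sketch of the recommender above, assuming Django settings are configured and the `Rate` table has data (the user id is a placeholder):
```python
# Hypothetical driver code for ItemBasedCF.
cf = ItemBasedCF()                   # loads the user-item ratings from Rate
cf.ItemSimilarity()                  # builds the item-item similarity matrix
print(cf.Recommend(1, K=10, N=10))   # top-10 recommended video ids with predicted scores
```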
#### File: SJTUsoso/soso/csv2art.py
```python
import csv
import codecs
from datetime import datetime, timedelta
import random
import sys
import os
import django
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASE_DIR)
os.environ['DJANGO_SETTINGS_MODULE'] = 'SJTUsoso.settings'
django.setup()
from soso.models import *
from soso.genKw import get_kw, get_view
# qs = SosoSitearticle.objects.filter(date__range=["2020-01-01", "2020-12-31"]).order_by('-view')[:10]
# print(qs.count())
thedate = datetime(2020, 2, 1)
def get_date():
global thedate
if random.random() < 0.9:
thedate += timedelta(days=-1)
return thedate
if __name__ == '__main__':
with codecs.open('article/jwc_mxxstz.csv', 'r', encoding='gb18030') as f:
reader = csv.reader(f)
for row in reader:
id = row[0]
title = row[1]
url = row[2]
cont = row[3].split(',')[:-1]
cont = " ".join(cont)
if len(cont) > 1:
cont = cont[1:]
content = title + ' ' + cont
(kw1, kw2, kw3) = get_kw(content)
view = get_view()
date = get_date()
cat = "面向学生通知"
print(title, date)
art = SosoSitearticle(title=title, url=url, text=cont, date=date, view=view, category=cat, kw1=kw1[:19], kw2=kw2[:19], kw3=kw3[:19])
art.save()
# print(art)
```
#### File: SJTUsoso/soso/models.py
```python
from django.db import models
# Create your models here.
from django.db import models
from ckeditor_uploader.fields import RichTextUploadingField
from django.contrib.auth.models import User
import time
import calendar
import random
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db.models.fields import exceptions
from ckeditor_uploader.fields import RichTextUploadingField
import os
from mdeditor.fields import MDTextField
from django.db.models.fields import exceptions
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
# Create your models here.
class SosoSitearticle(models.Model):
id = models.AutoField(primary_key=True)
title = models.CharField(max_length=200, blank=True, null=True)
url = models.CharField(max_length=200, blank=True, null=True)
text = models.TextField(blank=True, null=True)
date = models.DateField() # web date
view = models.IntegerField() # web views times
category = models.CharField(max_length=20, blank=True, null=True)
kw1 = models.CharField(max_length=20, blank=True, null=True)
kw2 = models.CharField(max_length=20, blank=True, null=True)
kw3 = models.CharField(max_length=20, blank=True, null=True)
img_url = models.ImageField(upload_to='images', blank=True)
sml = models.IntegerField(blank=True, null=True, default=0)
def getImage(self):
if self.img_url:
return self.img_url.url
else:
return os.path.join("/media/images","soso-"+str(random.randint(1,10))+".jpg")
class Meta:
#managed = False
db_table = 'soso_siteArticle'
```
{
"source": "JianmingXia/StudyTest",
"score": 3
}
#### File: KnowledgeQuizTool/MillionHeroAssistant/main.py
```python
import time
from argparse import ArgumentParser
import operator
from functools import partial
from terminaltables import SingleTable
from config import api_version
from config import app_id
from config import app_key
from config import app_secret
from config import data_directory
from config import image_compress_level
from core.android import analyze_current_screen_text, save_screen
from core.nearby import calculate_relation
from core.nlp.word_analyze import analyze_keyword_from_question
from core.ocr.baiduocr import get_text_from_image as bai_get_text
from core.utils import save_question_answers_to_file, number_normalize
import sys
import webbrowser
def parse_args():
parser = ArgumentParser(description="Million Hero Assistant")
parser.add_argument(
"-t", "--timeout",
type=int,
default=5,
help="default http request timeout"
)
return parser.parse_args()
def parse_question_and_answer(text_list, answer_num):
question = ""
start = 0
length = len(text_list)
# assume the question spans at most three lines
print ("text_list")
if length > answer_num + 3:
length = answer_num + 3
text_list = text_list[:length]
print (text_list)
for i, keyword in enumerate(text_list):
question += keyword
if ("?" in keyword) or (i + answer_num >= length - 1):
start = i + 1
break
question = question.split(".")[-1]
return question, text_list[start:]
def main():
args = parse_args()
timeout = args.timeout
get_text_from_image = partial(
bai_get_text,
app_id=app_id,
app_key=app_key,
app_secret=app_secret,
api_version=api_version,
timeout=timeout)
def __inner_job(answer_num):
start = time.time()
text_binary = analyze_current_screen_text(
answer_num,
directory=data_directory,
compress_level=image_compress_level[0]
)
keywords = get_text_from_image(
image_data=text_binary,
)
if not keywords:
print("text not recognize")
return
question, answers = parse_question_and_answer(keywords, answer_num)
answers = answers[:3]
webbrowser.open('https://baidu.com/s?wd='+question)
print('-' * 72)
print(question)
print('-' * 72)
print("\n".join(answers))
search_question = analyze_keyword_from_question(question)
weight_li, final, index = calculate_relation(search_question, answers)
min_member = min(weight_li)
max_member = max(weight_li)
normalize = partial(number_normalize,
max_member=max_member,
min_member=min_member,
c=100)
summary = {
a: b
for a, b in
zip(answers, weight_li)
}
summary_li = sorted(summary.items(), key=operator.itemgetter(1), reverse=True)
data = [("选项", "同比")]
for a, w in summary_li:
data.append((a, "{:.3f}".format(normalize(w) if max_member > min_member else w)))
table = SingleTable(data)
print(table.table)
print("*" * 72)
print("肯定回答: ", summary_li[0][0])
print("否定回答: ", summary_li[-1][0])
print("*" * 72)
end = time.time()
print("use {0} 秒".format(end - start))
save_screen(directory=data_directory)
save_question_answers_to_file(question, answers, directory=data_directory)
while True:
print("""
请在答题开始前就运行程序,
答题开始的时候按Enter预测答案(输入2-4位选项,默认3个选项)
""")
enter = input("按Enter键开始,按ESC键退出...")
if enter == chr(27):
break
answer_num = 3
if enter.isdigit() and 2 <= int(enter) <= 4:
answer_num = int(enter)
try:
__inner_job(answer_num)
except Exception as e:
print(str(e))
print("欢迎下次使用")
if __name__ == "__main__":
main()
```
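To make the splitting in `parse_question_and_answer` concrete, a small worked example with a made-up OCR result:
```python
# Hypothetical OCR output: one question line followed by three options.
text_list = ["1.Which planet is closest to the sun?", "Venus", "Mercury", "Mars"]
question, answers = parse_question_and_answer(text_list, answer_num=3)
# question -> "Which planet is closest to the sun?"  (the leading "1." is dropped by split("."))
# answers  -> ["Venus", "Mercury", "Mars"]
```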
#### File: MillionHeroes/test/hero.py
```python
import urllib.request, sys,base64,json,os,time,string,re
from PIL import Image
from aip import AipOcr
from aitext import Ai
start = time.time()
os.system("adb shell /system/bin/screencap -p /sdcard/screenshot.png")
os.system("adb pull /sdcard/screenshot.png ./screenshot.png")
'''
Hanvon OCR - the price has gone up...
host = 'http://text.aliapi.hanvon.com'
path = '/rt/ws/v1/ocr/text/recg'
method = 'POST'
appcode = 'a962e94260ee4043b824d2f40c126d8e' # Hanvon OCR appcode (fill in your own)
querys = 'code=74e51a88-41ec-413e-b162-bd031fe0407e'
bodys = {}
url = host + path + '?' + querys
'''
""" (百度ocr)你的 APPID AK SK """
APP_ID = '10670003'
API_KEY = '<KEY>'
SECRET_KEY = '<KEY>'
client = AipOcr(APP_ID, API_KEY, SECRET_KEY)
im = Image.open(r"./screenshot.png")
img_size = im.size
w = im.size[0]
h = im.size[1]
print("xx:{}".format(img_size))
region = im.crop((70,200, w-70,1200)) # crop region
region.save(r"./crop_test1.png")
""" 读取图片 """
def get_file_content(filePath):
with open(filePath, 'rb') as fp:
return fp.read()
image = get_file_content(r"./crop_test1.png")
respon = client.basicGeneral(image) # after the 500 free calls are used up, switch to respon = client.basicAccurate(image), which allows another 50 calls
titles = respon['words_result'] # get the recognized question text
issue = ''
answer = ['','','','','','']
countone = 0
answercount = 0
for title in titles:
countone+=1
if(countone >=len(titles)-2):
answer[answercount] = title['words']
answercount+=1
else:
issue = issue +title['words']
tissue = issue[1:2]
if str.isdigit(tissue): # strip the question index
issue = issue[3:]
else:
issue = issue[2:]
print(issue) # print the question
print(' A:'+answer[0]+' B:'+answer[1]+' C:'+answer[2]) # print the answers
keyword = issue # the recognized question text
ai=Ai(issue,answer)
ai.search()
'''
convey = 'n'
if convey == 'y' or convey == 'Y':
results = baiduSearch.search(keyword, convey=True)
elif convey == 'n' or convey == 'N' or not convey:
results = baiduSearch.search(keyword)
else:
print('输入错误')
exit(0)
count = 0
for result in results:
#print('{0} {1} {2} {3} {4}'.format(result.index, result.title, result.abstract, result.show_url, result.url)) # formatted output would go here
print('{0}'.format(result.abstract)) # formatted output would go here
count=count+1
if(count == 2): # only the first 2 results are shown; adjust as needed
break
'''
end = time.time()
print('程序用时:'+str(end-start)+'秒')
```
#### File: KnowledgeQuizTool/SummitMeeting/TitleBaidu.py
```python
import io
import urllib.parse
import webbrowser
import requests
import base64
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import os
def pull_screenshot():
os.system('adb shell screencap -p /sdcard/screenshot.png')
os.system('adb pull /sdcard/screenshot.png .')
pull_screenshot()
img = Image.open("./screenshot.png")
# use matplotlib to check the screenshot resolution and choose the crop box
region = img.crop((50, 350, 1000, 560)) # Smartisan Nut Pro 1
region.save('./crop.png')
#region = img.crop((75, 315, 1167, 789)) # iPhone 7P
#im = plt.imshow(img, animated=True)
#im2 = plt.imshow(region, animated=True)
#plt.show()
# Baidu OCR API; register a new application at https://cloud.baidu.com/product/ocr
api_key = 'oZokCbcX3unqb4CpGvD873Co'
api_secret = '<KEY>'
# get the access token
host = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id='+api_key+'&client_secret='+api_secret
headers = {
'Content-Type':'application/json;charset=UTF-8'
}
res = requests.get(url=host,headers=headers).json()
token = res['access_token']
imgByteArr = io.BytesIO()
region.save(imgByteArr, format='PNG')
image_data = imgByteArr.getvalue()
base64_data = base64.b64encode(image_data)
r = requests.post('https://aip.baidubce.com/rest/2.0/ocr/v1/general_basic',
params={'access_token': token}, data={'image': base64_data})
result = ''
for i in r.json()['words_result']:
result += i['words']
result = urllib.parse.quote(result)
webbrowser.open('https://baidu.com/s?wd='+result)
```
#### File: PythonPro/demo/exception.py
```python
class Networkerror(RuntimeError):
def __init__(self, arg):
self.args = arg
try:
raise Networkerror("Bad hostname")
except Networkerror,e:
print e.args
# Define a function
# def mye( level ):
# if level < 1:
# raise Exception("Invalid level!", level)
# # once the exception is raised, the following code will not run
#
# try:
# mye(0) # triggers the exception
# except "Invalid level!":
# print 1
# else:
# print 2
# Define a function
# def temp_convert(var):
# try:
# return int(var)
# except ValueError, Argument:
# print "no number\n", Argument
#
# # call the function
# temp_convert("xyz");
# try:
# fh = open("testfile", "r")
# try:
# fh.write("这是一个测试文件,用于测试异常!!")
# finally:
# print "close"
# fh.close()
# except IOError:
# print "Error"
# try:
# fh = open("testfile", "w")
# fh.write("这是一个测试文件,用于测试异常!!")
# finally:
# print "Error"
# try:
# fh = open("testfile", "r")
# fh.write("这是一个测试文件,用于测试异常!!")
# except IOError:
# print "Error"
# else:
# print "ok"
# fh.close()
# try:
# fh = open("testfile", "w")
# fh.write("这是一个测试文件,用于测试异常!!")
# except IOError:
# print "Error: 没有找到文件或读取文件失败"
# else:
# print "内容写入文件成功"
# fh.close()
```
#### File: demo/package_runoob/runoob2.py
```python
def runoob2():
print "I'm in runoob2"
```
{
"source": "JianMingZhuo/WSGIServer",
"score": 2
}
#### File: WSGIServer/server/err_code.py
```python
ERR_SUCCESS = 0
ERR_INTERNAL_EXCEPTION = 100
ERR_NULL_REQUEST = 101
ERR_100_CONTINUE_REQUEST = 102
ERR_MSG = {
ERR_SUCCESS: 'Success',
ERR_NULL_REQUEST: 'Blank request',
ERR_INTERNAL_EXCEPTION: 'Server internal exception',
ERR_100_CONTINUE_REQUEST: 'This request has Expect: 100-continue header',
}
def get_err_msg(err):
return ERR_MSG.get(err, None)
```
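A minimal usage sketch of the lookup helper:
```python
# Look up human-readable messages for error codes.
print(get_err_msg(ERR_NULL_REQUEST))  # 'Blank request'
print(get_err_msg(999))               # None for unknown codes
```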
#### File: WSGIServer/server/io_multiplex.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import with_statement
from __future__ import nested_scopes
from __future__ import generators
import select
from server.log import logging
EPOLLIN = 0x001
EPOLLPRI = 0x002
EPOLLOUT = 0x004
EPOLLRDNORM = 0x040
EPOLLRDBAND = 0x080
EPOLLWRNORM = 0x100
EPOLLWRBAND = 0x200
EPOLLMSG = 0x400
EPOLLERR = 0x008
EPOLLHUP = 0x010
EPOLLONESHOT = (1 << 30)
EPOLLET = (1 << 31)
class IOMultiplex(object):
__multiplex = None
READ = EPOLLIN | EPOLLPRI | EPOLLRDNORM
WRITE = EPOLLOUT | EPOLLWRNORM
ERROR = EPOLLERR | EPOLLHUP | EPOLLMSG
@classmethod
def __initialized(cls):
cls.__multiplex = cls.__multiplex if cls.__multiplex is not None else IOMultiplex()
return cls.__multiplex
@classmethod
def initialized(cls):
return cls.__initialized()
def __init__(self):
self.loop = _epoll()
self.__events = {}
self.__handler = {}
self.running = False
self.timeout = 1
def add_handler(self, fd, handler, eventmask):
self.__handler[fd] = handler
self.loop.register(fd, eventmask)
def remove_handler(self, fd):
del self.__handler[fd]
self.loop.unregister(fd)
def start(self):
self.running = True
while self.running:
events = dict(self.loop.poll(self.timeout)) # epoll/poll return (fd, eventmask) pairs; normalize to a dict
self.__events = events
for fd, event in self.__events.items():
try:
# if fd not in self.__handler:
# continue
self.__handler[fd](fd, event)
except Exception as ex:
logging.exception(ex)
def stop(self):
self.running = False
class _Select(object):
def __init__(self):
self.read_set = set()
self.write_set = set()
self.error_set = set()
def register(self, fd, eventmask):
if eventmask & IOMultiplex.READ:
self.read_set.add(fd)
elif eventmask & IOMultiplex.WRITE:
self.write_set.add(fd)
elif eventmask & IOMultiplex.ERROR:
self.error_set.add(fd)
def modify(self, fd, eventmask):
if fd in self.read_set and (eventmask & IOMultiplex.READ) is False:
self.read_set.remove(fd)
if fd in self.write_set and (eventmask & IOMultiplex.WRITE) is False:
self.read_set.remove(fd)
if fd in self.error_set and (eventmask & IOMultiplex.ERROR) is False:
self.read_set.remove(fd)
self.register(fd, eventmask)
def unregister(self, fd):
if fd in self.read_set:
self.read_set.remove(fd)
if fd in self.write_set:
self.write_set.remove(fd)
if fd in self.error_set:
self.error_set.remove(fd)
def poll(self, timeout):
read_list, write_list, error_list = select.select(self.read_set, self.write_set, self.error_set, timeout)
events = {}
for fd in read_list:
events[fd] = events.get(fd, 0) | IOMultiplex.READ
for fd in write_list:
events[fd] = events.get(fd, 0) | IOMultiplex.WRITE
for fd in error_list:
events[fd] = events.get(fd, 0) | IOMultiplex.ERROR
return events
if hasattr(select, "epoll"):
_epoll = select.epoll
elif hasattr(select, "poll"):
_epoll = select.poll
else:
_epoll = _Select
if __name__ == "__main__":
m = IOMultiplex.initialized()
m.start()
```
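A hypothetical usage sketch of `IOMultiplex`: register a read handler for a non-blocking listening socket and start the loop (the address, port, and handler body are assumptions):
```python
import socket

def on_accept(fd, event):
    # Accept and immediately close the connection; a real server would register
    # a handler for the new connection as well.
    conn, addr = server.accept()
    print("connection from", addr)
    conn.close()

server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(("127.0.0.1", 8000))
server.listen(128)
server.setblocking(False)

mux = IOMultiplex.initialized()
mux.add_handler(server.fileno(), on_accept, IOMultiplex.READ)
mux.start()
```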
{
"source": "jianminLee/jianminLee",
"score": 3
}
#### File: jianminLee/jianminLee/update.py
```python
import feedparser
import pathlib
import re
import datetime
root = pathlib.Path(__file__).parent.resolve()
def replace_chunk(content, marker, chunk, inline=False):
r = re.compile(
r"<!\-\- {} starts \-\->.*<!\-\- {} ends \-\->".format(marker, marker),
re.DOTALL,
)
if not inline:
chunk = "\n{}\n".format(chunk)
chunk = "<!-- {} starts -->{}<!-- {} ends -->".format(marker, chunk, marker)
return r.sub(chunk, content)
def formatGMTime(timestamp):
GMT_FORMAT = '%a, %d %b %Y %H:%M:%S'
dateStr = datetime.datetime.strptime(timestamp, GMT_FORMAT) + datetime.timedelta(hours=8)
return dateStr.date()
def fetch_blog_entries():
entries = feedparser.parse("https://www.orzlee.com/feed")["entries"]
return [
{
"title": entry["title"],
"url": entry["link"].split("#")[0],
"published": formatGMTime(entry["published"].split(" +")[0]),
}
for entry in entries
]
if __name__ == "__main__":
readme = root / "README.md"
readme_contents = readme.open().read()
entries = fetch_blog_entries()[:5]
entries_md = "\n".join(
["* <a href='{url}' target='_blank'>{title}</a> - {published}".format(**entry) for entry in entries]
)
rewritten = replace_chunk(readme_contents, "blog", entries_md)
try:
readme.open("w").write(rewritten)
except Exception as e:
print(e)
```
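For reference, a sketch of the README fragment that `replace_chunk` operates on; the `<!-- blog starts -->` / `<!-- blog ends -->` markers are what the regex matches, while the surrounding text is made up:
```python
# Hypothetical README.md fragment targeted by replace_chunk(content, "blog", ...).
readme_fragment = """
### Recent blog posts
<!-- blog starts -->
* <a href='https://www.orzlee.com/example-post' target='_blank'>Example post</a> - 2020-06-19
<!-- blog ends -->
"""
```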
{
"source": "jianminzhu/chinaese6",
"score": 3
}
#### File: chinaese6/docs/a.py
```python
import threading
import time
import urllib2
# SELECT "bmember" as type, COUNT(*) AS bmember FROM bmember WHERE isDownPics=0
# UNION ALL SELECT "memberby " as type, COUNT(*) AS memberby FROM memberby
# UNION ALL SELECT "membercontact" as type, COUNT(*) AS membercontact FROM membercontact
# UNION ALL SELECT "memberlevel" as type, COUNT(*) AS memberlevel FROM memberlevel
# UNION ALL SELECT "member " as type, COUNT(*) AS member FROM member
# cd ~/tt
# nohup python a.py> output10.html 2>&1 &
def getHtml(url):
header = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:48.0) Gecko/20100101 Firefox/48.0"}
request = urllib2.Request(url=url, headers=header) # pretend to be a browser when making the request
response = urllib2.urlopen(request)
text = response.read()
return text
def spider():
while True:
html = getHtml("http://travelling.chinesecompanion.com/index.php/index/spby/pics?isShowPic=no&limit=1")
print(html)
time.sleep(1)
if __name__ == '__main__':
t = []
for index in range(10):
t.append(threading.Thread(target=spider))
for index in range(len(t)):
t[index].start()
for index in range(len(t)):
t[index].join()
```
#### File: chinaese6/docs/MutiltheardUtil.py
```python
import queue
import threading
Thread_id = 1
class myThread(threading.Thread):
def __init__(self, q,dealFunc):
global Thread_id
threading.Thread.__init__(self)
self.q = q
self.dealFunc=dealFunc
self.Thread_id = Thread_id
Thread_id = Thread_id + 1
def run(self):
while True:
try:
task = self.q.get(block = True, timeout = 1) # without blocking it would keep polling the queue for work
except queue.Empty:
print ('Thread' , self.Thread_id , 'end')
break
print ("Starting " , self.Thread_id)
try:
self.dealFunc(**task)
except:
pass
self.q.task_done()
print ("Ending " , self.Thread_id)
def startThread(startThreadNum, fun, dataArry=[]):
q = queue.Queue(len(dataArry))
# put the task arguments into the queue
for i in range(len(dataArry)):
q.put(dataArry[i])
for i in range(0, startThreadNum):
worker = myThread(q,fun)
worker.start()
q.join() # wait until every queued task has been consumed
print ("Exiting Main Thread")
def f(a):
print ("ddddddd",a)
if __name__ == '__main__':
startThread(3, f,[1,2,3,4,5])
```
{
"source": "jiannanWang/DEEPSEC",
"score": 2
}
#### File: Attacks/AttackMethods/AttackUtils.py
```python
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch.autograd import Variable
def tensor2variable(x=None, device=None, requires_grad=False):
"""
:param x:
:param device:
:param requires_grad:
:return:
"""
x = x.to(device)
return Variable(x, requires_grad=requires_grad)
def predict(model=None, samples=None, device=None):
"""
:param model:
:param samples:
:param device:
:return:
"""
model.eval()
model = model.to(device)
copy_samples = np.copy(samples)
var_samples = tensor2variable(torch.from_numpy(copy_samples), device=device, requires_grad=True)
predictions = model(var_samples.float())
return predictions
```
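A short usage sketch of `predict`, mirroring how the generation scripts below call it (the trained model and the numpy sample array are assumed to exist):
```python
# Hypothetical usage: raw_model is a trained torch model, samples an N x C x H x W numpy array.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
logits = predict(model=raw_model, samples=samples, device=device)
pred_labels = torch.max(logits, 1)[1].cpu().numpy()
```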
#### File: Attacks/AttackMethods/JSMA.py
```python
import numpy as np
import torch
from torch.autograd.gradcheck import zero_gradients
from Attacks.AttackMethods.AttackUtils import tensor2variable
from Attacks.AttackMethods.attacks import Attack
class JSMAAttack(Attack):
def __init__(self, model=None, theta=1.0, gamma=0.1):
"""
:param model:
:param theta:
:param gamma:
"""
super(JSMAAttack, self).__init__(model=model)
self.model = model.eval()
self.theta = theta
self.gamma = gamma
def compute_jacobian(self, input, device):
"""
computing the derivative of model with respect to the input features (jacobian matrix)
:param input: input with 1 X C X H X W size
:param device: specified device
:return: jacobian matrix (10 X [H*W])
"""
self.model.eval()
output = self.model(input)
num_features = int(np.prod(input.shape[1:]))
jacobian = torch.zeros([output.size()[1], num_features])
mask = torch.zeros(output.size()).to(device) # chooses the derivative to be calculated
for i in range(output.size()[1]):
mask[:, i] = 1
zero_gradients(input)
output.backward(mask, retain_graph=True)
# copy the derivative to the target place
jacobian[i] = input._grad.squeeze().view(-1, num_features).clone()
mask[:, i] = 0 # reset
return jacobian.to(device)
def saliency_map(self, jacobian, target_index, increasing, search_space, nb_features, device):
"""
:param jacobian: the forward derivative (jacobian matrix)
:param target_index: target class
:param increasing: to increase tor decrease pixel intensities
:param search_space: the features indicate the perturbation search space
:param nb_features: total number of feature
:param device: specified device
:return: a pair of pixel
"""
domain = torch.eq(search_space, 1).float() # The search domain
# the sum of all features' derivative with respect to each class
all_sum = torch.sum(jacobian, dim=0, keepdim=True)
target_grad = jacobian[target_index] # The forward derivative of the target class
others_grad = all_sum - target_grad # The sum of forward derivative of other classes
# this list blanks out those that are not in the search domain
if increasing:
increase_coef = 2 * (torch.eq(domain, 0)).float().to(device)
else:
increase_coef = -1 * 2 * (torch.eq(domain, 0)).float().to(device)
increase_coef = increase_coef.view(-1, nb_features)
# calculate sum of target forward derivative of any 2 features.
target_tmp = target_grad.clone()
target_tmp -= increase_coef * torch.max(torch.abs(target_grad))
alpha = target_tmp.view(-1, 1, nb_features) + target_tmp.view(-1, nb_features, 1) # PyTorch will automatically extend the dimensions
# calculate sum of other forward derivative of any 2 features.
others_tmp = others_grad.clone()
others_tmp += increase_coef * torch.max(torch.abs(others_grad))
beta = others_tmp.view(-1, 1, nb_features) + others_tmp.view(-1, nb_features, 1)
# zero out the situation where a feature sums with itself
tmp = np.ones((nb_features, nb_features), int)
np.fill_diagonal(tmp, 0)
zero_diagonal = torch.from_numpy(tmp).byte().to(device)
# According to the definition of saliency map in the paper (formulas 8 and 9),
# those elements in the saliency map that doesn't satisfy the requirement will be blanked out.
if increasing:
mask1 = torch.gt(alpha, 0.0)
mask2 = torch.lt(beta, 0.0)
else:
mask1 = torch.lt(alpha, 0.0)
mask2 = torch.gt(beta, 0.0)
# apply the mask to the saliency map
mask = torch.mul(torch.mul(mask1, mask2), zero_diagonal.view_as(mask1))
# do the multiplication according to formula 10 in the paper
saliency_map = torch.mul(torch.mul(alpha, torch.abs(beta)), mask.float())
# get the most significant two pixels
max_value, max_idx = torch.max(saliency_map.view(-1, nb_features * nb_features), dim=1)
p = max_idx // nb_features
q = max_idx % nb_features
return p, q
def perturbation_single(self, sample, ys_target, device):
"""
:param sample:
:param ys_target:
:param device:
:return:
"""
copy_sample = np.copy(sample)
var_sample = tensor2variable(torch.from_numpy(copy_sample), device=device, requires_grad=True)
var_target = tensor2variable(torch.LongTensor(ys_target), device=device)
if self.theta > 0:
increasing = True
else:
increasing = False
num_features = int(np.prod(copy_sample.shape[1:]))
shape = var_sample.size()
# perturb two pixels in one iteration, thus max_iters is divided by 2.0
max_iters = int(np.ceil(num_features * self.gamma / 2.0))
# masked search domain, if the pixel has already reached the top or bottom, we don't bother to modify it.
if increasing:
search_domain = torch.lt(var_sample, 0.99).to(device)
else:
search_domain = torch.gt(var_sample, 0.01).to(device)
search_domain = search_domain.view(num_features)
self.model.eval().to(device)
output = self.model(var_sample)
current = torch.max(output.data, 1)[1].cpu().numpy()
iter = 0
while (iter < max_iters) and (current[0] != ys_target[0]) and (search_domain.sum() != 0):
# calculate Jacobian matrix of forward derivative
jacobian = self.compute_jacobian(input=var_sample, device=device)
# get the saliency map and calculate the two pixels that have the greatest influence
p1, p2 = self.saliency_map(jacobian, var_target, increasing, search_domain, num_features, device)
# apply modifications
var_sample_flatten = var_sample.view(-1, num_features)
var_sample_flatten[0, p1] += self.theta
var_sample_flatten[0, p2] += self.theta
new_sample = torch.clamp(var_sample_flatten, min=0.0, max=1.0)
new_sample = new_sample.view(shape)
search_domain[p1] = 0
search_domain[p2] = 0
var_sample = tensor2variable(torch.tensor(new_sample), device=device, requires_grad=True)
output = self.model(var_sample)
current = torch.max(output.data, 1)[1].cpu().numpy()
iter += 1
adv_samples = var_sample.data.cpu().numpy()
return adv_samples
def perturbation(self, xs, ys_target, device):
"""
:param xs:
:param ys_target:
:param device:
:return:
"""
assert len(xs) == len(ys_target), "The lengths of samples and its ys should be equal"
print('The JSMA attack perturbs the samples one by one ...... ')
adv_samples = []
for iter in range(len(xs)):
adv_image = self.perturbation_single(sample=xs[iter: iter + 1], ys_target=ys_target[iter: iter + 1], device=device)
adv_samples.extend(adv_image)
return np.array(adv_samples)
```
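A usage sketch of `JSMAAttack`, following the calling convention of the generation scripts elsewhere in the repository (the model, samples, and target labels are assumed to exist):
```python
# Hypothetical driver: xs is an N x C x H x W numpy array in [0, 1],
# ys_target an array of N target class indices.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
attacker = JSMAAttack(model=raw_model, theta=1.0, gamma=0.1)
adv_xs = attacker.perturbation(xs=xs, ys_target=ys_target, device=device)
```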
#### File: DEEPSEC/Attacks/ILLC_Generation.py
```python
import argparse
import os
import random
import sys
import numpy as np
import torch
sys.path.append('%s/../' % os.path.dirname(os.path.realpath(__file__)))
from Attacks.Generation import Generation
from Attacks.AttackMethods.ILLC import ILLCAttack
from Attacks.AttackMethods.AttackUtils import predict
class ILLCGeneration(Generation):
def __init__(self, dataset, attack_name, targeted, raw_model_location, clean_data_location, adv_examples_dir, device, eps, eps_iter,
num_steps, attack_batch_size):
super(ILLCGeneration, self).__init__(dataset, attack_name, targeted, raw_model_location, clean_data_location, adv_examples_dir, device)
self.attack_batch_size = attack_batch_size
self.epsilon = eps
self.epsilon_iter = eps_iter
self.num_steps = num_steps
def generate(self):
attacker = ILLCAttack(model=self.raw_model, epsilon=self.epsilon, eps_iter=self.epsilon_iter, num_steps=self.num_steps)
# prepare the Least Likely Class labels
llc_labels = np.argmax(self.targets_samples, 1)
# generating
adv_samples = attacker.batch_perturbation(xs=self.nature_samples, ys_target=llc_labels, batch_size=self.attack_batch_size,
device=self.device)
adv_labels = predict(model=self.raw_model, samples=adv_samples, device=self.device)
adv_labels = torch.max(adv_labels, 1)[1]
adv_labels = adv_labels.cpu().numpy()
np.save('{}{}_AdvExamples.npy'.format(self.adv_examples_dir, self.attack_name), adv_samples)
np.save('{}{}_AdvLabels.npy'.format(self.adv_examples_dir, self.attack_name), adv_labels)
np.save('{}{}_TrueLabels.npy'.format(self.adv_examples_dir, self.attack_name), self.labels_samples)
mis_target = 0
for i in range(len(adv_samples)):
if llc_labels[i] == adv_labels[i]:
mis_target += 1
print('\nFor the **{}** (targeted attack) on **{}**, the misclassified rate = {}/{} = {:.1f}%\n'.format(
self.attack_name, self.dataset, mis_target, len(adv_samples), mis_target / len(adv_samples) * 100))
def main(args):
# Device configuration
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Set the random seed manually for reproducibility.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
name = 'ILLC'
targeted = True
illc = ILLCGeneration(dataset=args.dataset, attack_name=name, targeted=targeted, raw_model_location=args.modelDir,
clean_data_location=args.cleanDir, adv_examples_dir=args.adv_saver, device=device,
eps=args.epsilon, attack_batch_size=args.attack_batch_size, eps_iter=args.epsilon_iter, num_steps=args.num_steps)
illc.generate()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='The ILLC Attack Generation')
# common arguments
parser.add_argument('--dataset', type=str, default='MNIST', help='the dataset should be MNIST or CIFAR10')
parser.add_argument('--modelDir', type=str, default='../RawModels/', help='the directory for the raw model')
parser.add_argument('--cleanDir', type=str, default='../CleanDatasets/', help='the directory for the clean dataset that will be attacked')
parser.add_argument('--adv_saver', type=str, default='../AdversarialExampleDatasets/',
help='the directory used to save the generated adversarial examples')
parser.add_argument('--seed', type=int, default=100, help='the default random seed for numpy and torch')
parser.add_argument('--gpu_index', type=str, default='0', help="gpu index to use")
# arguments for the particular attack
parser.add_argument('--epsilon', type=float, default=0.3, help='the max epsilon value that is allowed to be perturbed')
parser.add_argument('--epsilon_iter', type=float, default=0.05, help='the one iterative eps of ILLC')
parser.add_argument('--num_steps', type=int, default=10, help='the number of perturbation steps')
parser.add_argument('--attack_batch_size', type=int, default=100, help='the default batch size for adversarial example generation')
arguments = parser.parse_args()
main(arguments)
```
#### File: DEEPSEC/Attacks/UAP_Generation.py
```python
import argparse
import os
import random
import sys
import numpy as np
import torch
sys.path.append('%s/../' % os.path.dirname(os.path.realpath(__file__)))
from Attacks.AttackMethods.AttackUtils import predict
from Attacks.AttackMethods.UAP import UniversalAttack
from Attacks.Generation import Generation
from RawModels.Utils.dataset import get_cifar10_train_validate_loader, get_mnist_train_validate_loader
class UAPGeneration(Generation):
def __init__(self, dataset, attack_name, targeted, raw_model_location, clean_data_location, adv_examples_dir, device, max_iter_uni, frate,
epsilon, overshoot, max_iter_df):
super(UAPGeneration, self).__init__(dataset, attack_name, targeted, raw_model_location, clean_data_location, adv_examples_dir, device)
self.max_iter_uni = max_iter_uni
self.fooling_rate = frate
self.epsilon = epsilon
self.overshoot = overshoot
self.max_iter_df = max_iter_df
def generate(self):
attacker = UniversalAttack(model=self.raw_model, fooling_rate=self.fooling_rate, max_iter_universal=self.max_iter_uni,
epsilon=self.epsilon, overshoot=self.overshoot, max_iter_deepfool=self.max_iter_df)
assert self.dataset.upper() == 'MNIST' or self.dataset.upper() == 'CIFAR10', "dataset should be MNIST or CIFAR10!"
if self.dataset.upper() == 'MNIST':
samples_loader, valid_loader = get_mnist_train_validate_loader(dir_name='../RawModels/MNIST/', batch_size=1, valid_size=0.9,
shuffle=True)
else: # 'CIFAR10':
samples_loader, valid_loader = get_cifar10_train_validate_loader(dir_name='../RawModels/CIFAR10/', batch_size=1, valid_size=0.9,
augment=False, shuffle=True)
universal_perturbation = attacker.universal_perturbation(dataset=samples_loader, validation=valid_loader, device=self.device)
universal_perturbation = universal_perturbation.cpu().numpy()
np.save('{}{}_{}_universal_perturbation'.format(self.adv_examples_dir, self.attack_name, self.dataset), universal_perturbation)
adv_samples = attacker.perturbation(xs=self.nature_samples, uni_pert=universal_perturbation, device=self.device)
adv_labels = predict(model=self.raw_model, samples=adv_samples, device=self.device)
adv_labels = torch.max(adv_labels, 1)[1]
adv_labels = adv_labels.cpu().numpy()
np.save('{}{}_AdvExamples.npy'.format(self.adv_examples_dir, self.attack_name), adv_samples)
np.save('{}{}_AdvLabels.npy'.format(self.adv_examples_dir, self.attack_name), adv_labels)
np.save('{}{}_TrueLabels.npy'.format(self.adv_examples_dir, self.attack_name), self.labels_samples)
mis = 0
for i in range(len(adv_samples)):
if self.labels_samples[i].argmax(axis=0) != adv_labels[i]:
mis = mis + 1
print('\nFor **{}** on **{}**: misclassification ratio is {}/{}={:.1f}%\n'.format(self.attack_name, self.dataset, mis, len(adv_samples),
mis / len(adv_labels) * 100))
def main(args):
# Device configuration
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Set the random seed manually for reproducibility.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
name = 'UAP'
targeted = False
df = UAPGeneration(dataset=args.dataset, attack_name=name, targeted=targeted, raw_model_location=args.modelDir,
clean_data_location=args.cleanDir, adv_examples_dir=args.adv_saver, device=device, max_iter_uni=args.max_iter_universal,
frate=args.fool_rate, epsilon=args.epsilon, overshoot=args.overshoot, max_iter_df=args.max_iter_deepfool)
df.generate()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='The UAP Attack Generation')
# common arguments
parser.add_argument('--dataset', type=str, default='CIFAR10', help='the dataset should be MNIST or CIFAR10')
parser.add_argument('--modelDir', type=str, default='../RawModels/', help='the directory for the raw model')
parser.add_argument('--cleanDir', type=str, default='../CleanDatasets/', help='the directory for the clean dataset that will be attacked')
parser.add_argument('--adv_saver', type=str, default='../AdversarialExampleDatasets/',
help='the directory used to save the generated adversarial examples')
parser.add_argument('--seed', type=int, default=100, help='the default random seed for numpy and torch')
parser.add_argument('--gpu_index', type=str, default='0', help="gpu index to use")
# arguments for the particular attack
parser.add_argument('--fool_rate', type=float, default=1.0, help="the fooling rate")
parser.add_argument('--epsilon', type=float, default=0.1, help='controls the magnitude of the perturbation')
parser.add_argument('--max_iter_universal', type=int, default=20, help="the maximum iterations for UAP")
parser.add_argument('--overshoot', type=float, default=0.02, help='the overshoot parameter for DeepFool')
parser.add_argument('--max_iter_deepfool', type=int, default=10, help='the maximum iterations for DeepFool')
arguments = parser.parse_args()
main(arguments)
```
#### File: DEEPSEC/CleanDatasets/CandidatesSelection.py
```python
import argparse
import os
import random
import shutil
import sys
import numpy as np
import torch
sys.path.append('%s/../' % os.path.dirname(os.path.realpath(__file__)))
from RawModels.MNISTConv import MNISTConvNet
from RawModels.ResNet import resnet20_cifar
from RawModels.Utils.dataset import get_cifar10_test_loader, get_mnist_test_loader
def main(args):
# Device configuration
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Set the random seed manually for reproducibility.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
# prepare the dataset name, candidate num, dataset location and raw model location
dataset = args.dataset.upper()
num = args.number
dataset_location = '../RawModels/{}/'.format(dataset)
raw_model_location = '../RawModels/{}/model/{}_raw.pt'.format(dataset, dataset)
print("\nStarting to select {} {} Candidates Example, which are correctly classified by the Raw Model from {}\n".format(num, dataset,
raw_model_location))
# load the raw model and testing dataset
assert args.dataset == 'MNIST' or args.dataset == 'CIFAR10'
if dataset == 'MNIST':
raw_model = MNISTConvNet().to(device)
raw_model.load(path=raw_model_location, device=device)
test_loader = get_mnist_test_loader(dir_name=dataset_location, batch_size=1, shuffle=False)
else:
raw_model = resnet20_cifar().to(device)
raw_model.load(path=raw_model_location, device=device)
test_loader = get_cifar10_test_loader(dir_name=dataset_location, batch_size=1, shuffle=False)
# get the successfully classified examples
successful = []
raw_model.eval()
with torch.no_grad():
for image, label in test_loader:
image = image.to(device)
label = label.to(device)
output = raw_model(image)
_, predicted = torch.max(output.data, 1)
if predicted == label:
_, least_likely_class = torch.min(output.data, 1)
successful.append([image, label, least_likely_class])
print(len(successful))
candidates = random.sample(successful, num)
candidate_images = []
candidate_labels = []
candidates_llc = []
candidate_targets = []
for index in range(len(candidates)):
image = candidates[index][0].cpu().numpy()
image = np.squeeze(image, axis=0)
candidate_images.append(image)
label = candidates[index][1].cpu().numpy()[0]
llc = candidates[index][2].cpu().numpy()[0]
# selection for the targeted label
classes = [i for i in range(10)]
classes.remove(label)
target = random.sample(classes, 1)[0]
one_hot_label = [0 for i in range(10)]
one_hot_label[label] = 1
one_hot_llc = [0 for i in range(10)]
one_hot_llc[llc] = 1
one_hot_target = [0 for i in range(10)]
one_hot_target[target] = 1
candidate_labels.append(one_hot_label)
candidates_llc.append(one_hot_llc)
candidate_targets.append(one_hot_target)
candidate_images = np.array(candidate_images)
candidate_labels = np.array(candidate_labels)
candidates_llc = np.array(candidates_llc)
candidate_targets = np.array(candidate_targets)
if dataset not in os.listdir('./'):
os.mkdir('./{}/'.format(dataset))
else:
shutil.rmtree('{}'.format(dataset))
os.mkdir('./{}/'.format(dataset))
np.save('./{}/{}_inputs.npy'.format(dataset, dataset), candidate_images)
np.save('./{}/{}_labels.npy'.format(dataset, dataset), candidate_labels)
np.save('./{}/{}_llc.npy'.format(dataset, dataset), candidates_llc)
np.save('./{}/{}_targets.npy'.format(dataset, dataset), candidate_targets)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Candidate Selection for Clean Data set')
parser.add_argument('--dataset', type=str, default='CIFAR10', help='the dataset (MNIST or CIFAR10)')
parser.add_argument('--seed', type=int, default=100, help='the default random seed for numpy and torch')
parser.add_argument('--gpu_index', type=str, default='0', help="gpu index to use")
parser.add_argument('--number', type=int, default=1000, help='the total number of candidate samples that will be randomly selected')
arguments = parser.parse_args()
main(arguments)
```
#### File: Defenses/DefenseMethods/EAT.py
```python
import os
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
# import external model architectures
from Defenses.DefenseMethods.EAT_External_Models import CIFAR10_A, CIFAR10_B, CIFAR10_C, CIFAR10_D
from Defenses.DefenseMethods.EAT_External_Models import MNIST_A, MNIST_B, MNIST_C, MNIST_D
from Defenses.DefenseMethods.defenses import Defense
from RawModels.ResNet import adjust_learning_rate
from RawModels.Utils.TrainTest import testing_evaluation, train_one_epoch, validation_evaluation
class EATDefense(Defense):
def __init__(self, model=None, defense_name=None, dataset=None, training_parameters=None, device=None, **kwargs):
"""
:param model:
:param defense_name:
:param dataset:
:param training_parameters:
:param device:
:param kwargs:
"""
super(EATDefense, self).__init__(model=model, defense_name=defense_name)
self.model = model
self.defense_name = defense_name
self.device = device
self.training_parameters = training_parameters
self.Dataset = dataset.upper()
assert self.Dataset in ['MNIST', 'CIFAR10'], "The data set must be MNIST or CIFAR10"
# make sure to parse the parameters for the defense
assert self._parsing_parameters(**kwargs)
# get the training_parameters, the same as the settings of RawModels
self.num_epochs = training_parameters['num_epochs']
self.batch_size = training_parameters['batch_size']
# prepare the optimizers
if self.Dataset == "MNIST":
self.optimizer_adv = optim.SGD(self.model.parameters(), lr=training_parameters['learning_rate'],
momentum=training_parameters['momentum'], weight_decay=training_parameters['decay'], nesterov=True)
else:
self.optimizer_adv = optim.Adam(self.model.parameters(), lr=training_parameters['lr'])
def _parsing_parameters(self, **kwargs):
"""
:param kwargs:
:return:
"""
assert kwargs is not None, "the parameters should be specified"
print("\nUser configurations for the {} defense".format(self.defense_name))
for key in kwargs:
print('\t{} = {}'.format(key, kwargs[key]))
self.epsilon = kwargs['eps']
self.alpha = kwargs['alpha']
return True
def train_external_model_group(self, train_loader=None, validation_loader=None):
"""
:param train_loader:
:param validation_loader:
:return:
"""
# Set up the model group with 4 static external models
if self.Dataset == 'MNIST':
model_group = [MNIST_A(), MNIST_B(), MNIST_C(), MNIST_D()]
else:
model_group = [CIFAR10_A(), CIFAR10_B(), CIFAR10_C(), CIFAR10_D()]
model_group = [model.to(self.device) for model in model_group]
# training the models in model_group one by one
for i in range(len(model_group)):
# prepare the optimizer for MNIST
if self.Dataset == "MNIST":
optimizer_external = optim.SGD(model_group[i].parameters(), lr=self.training_parameters['learning_rate'],
momentum=self.training_parameters['momentum'], weight_decay=self.training_parameters['decay'],
nesterov=True)
# prepare the optimizer for CIFAR10
else:
if i == 3:
optimizer_external = optim.SGD(model_group[i].parameters(), lr=0.001, momentum=0.9, weight_decay=1e-6)
else:
optimizer_external = optim.Adam(model_group[i].parameters(), lr=self.training_parameters['lr'])
print('\nwe are training the {}-th static external model ......'.format(i))
best_val_acc = None
for index_epoch in range(self.num_epochs):
train_one_epoch(model=model_group[i], train_loader=train_loader, optimizer=optimizer_external, epoch=index_epoch,
device=self.device)
val_acc = validation_evaluation(model=model_group[i], validation_loader=validation_loader, device=self.device)
if self.Dataset == 'CIFAR10':
adjust_learning_rate(epoch=index_epoch, optimizer=optimizer_external)
assert os.path.exists('../DefenseEnhancedModels/{}'.format(self.defense_name))
defense_external_saver = '../DefenseEnhancedModels/{}/{}_EAT_{}.pt'.format(self.defense_name, self.Dataset, str(i))
if not best_val_acc or round(val_acc, 4) >= round(best_val_acc, 4):
if best_val_acc is not None:
os.remove(defense_external_saver)
best_val_acc = val_acc
model_group[i].save(name=defense_external_saver)
else:
print('Train Epoch {:>3}: validation dataset accuracy did not improve from {:.4f}\n'.format(index_epoch, best_val_acc))
def load_external_model_group(self, model_dir='../DefenseEnhancedModels/EAT/', test_loader=None):
"""
:param model_dir:
:param test_loader:
:return:
"""
print("\n!!! Loading static external models ...")
# Set up 4 static external models
if self.Dataset == 'MNIST':
model_group = [MNIST_A(), MNIST_B(), MNIST_C(), MNIST_D()]
else:
model_group = [CIFAR10_A(), CIFAR10_B(), CIFAR10_C(), CIFAR10_D()]
model_group = [model.to(self.device) for model in model_group]
for i in range(len(model_group)):
print('loading the {}-th static external model'.format(i))
model_path = '{}{}_EAT_{}.pt'.format(model_dir, self.Dataset, str(i))
assert os.path.exists(model_path), "please train the external model first!!!"
model_group[i].load(path=model_path, device=self.device)
testing_evaluation(model=model_group[i], test_loader=test_loader, device=self.device)
return model_group
def random_fgsm_generation(self, model=None, natural_images=None):
"""
A new randomized single step attack (RFGSM)
:param model:
:param natural_images:
:return:
"""
attack_model = model.to(self.device)
attack_model.eval()
with torch.no_grad():
random_sign = torch.sign(torch.randn(*natural_images.size())).to(self.device)
new_images = torch.clamp(natural_images + self.alpha * random_sign, min=0.0, max=1.0)
new_images.requires_grad = True
logits_attack = attack_model(new_images)
# To avoid label leaking, we use the model's output instead of the true labels
labels_attack = torch.max(logits_attack, dim=1)[1]
loss_attack = F.cross_entropy(logits_attack, labels_attack)
gradient = torch.autograd.grad(loss_attack, new_images)[0]
new_images.requires_grad = False
# generation of adversarial examples
with torch.no_grad():
xs_adv = new_images + (self.epsilon - self.alpha) * torch.sign(gradient)
xs_adv = torch.clamp(xs_adv, min=0.0, max=1.0)
return xs_adv
def train_one_epoch_with_adv_from_external_models(self, pre_trained_models=None, train_loader=None, epoch=None):
"""
:param pre_trained_models:
:param train_loader:
:param epoch:
:return:
"""
for index, (images, labels) in enumerate(train_loader):
nat_images = images.to(self.device)
nat_labels = labels.to(self.device)
            # in each mini-batch, we randomly choose the attack model on which the adversarial examples are generated
idx = np.random.randint(5)
if idx == 0:
attacking_model = self.model
else:
attacking_model = pre_trained_models[idx - 1]
# get corresponding adversarial examples via RFGSM attack on the attack model
adv_images = self.random_fgsm_generation(model=attacking_model, natural_images=nat_images)
# set the model in the training mode
self.model.train()
logits_nat = self.model(nat_images)
loss_nat = F.cross_entropy(logits_nat, nat_labels)
logits_adv = self.model(adv_images)
loss_adv = F.cross_entropy(logits_adv, nat_labels)
loss = 0.5 * (loss_nat + loss_adv)
self.optimizer_adv.zero_grad()
loss.backward()
self.optimizer_adv.step()
print('\rTrain Epoch {:>3}: [{:>5}/{:>5}] \tloss_nat={:.4f}, loss_adv={:.4f}, total_loss={:.4f} ===> '. \
format(epoch, (index + 1) * len(images), len(train_loader) * len(images), loss_nat, loss_adv, loss), end=' ')
def defense(self, pre_trained_models=None, train_loader=None, validation_loader=None):
best_val_acc = None
for epoch in range(self.num_epochs):
# training the model with natural examples and corresponding adversarial examples from external models
self.train_one_epoch_with_adv_from_external_models(pre_trained_models=pre_trained_models, train_loader=train_loader, epoch=epoch)
val_acc = validation_evaluation(model=self.model, validation_loader=validation_loader, device=self.device)
# adjust the learning rate for CIFAR10
if self.Dataset == 'CIFAR10':
adjust_learning_rate(epoch=epoch, optimizer=self.optimizer_adv)
# save the re-trained defense-enhanced model
assert os.path.exists('../DefenseEnhancedModels/{}'.format(self.defense_name))
defense_enhanced_saver = '../DefenseEnhancedModels/{}/{}_{}_enhanced.pt'.format(self.defense_name, self.Dataset, self.defense_name)
if not best_val_acc or round(val_acc, 4) >= round(best_val_acc, 4):
if best_val_acc is not None:
os.remove(defense_enhanced_saver)
best_val_acc = val_acc
self.model.save(name=defense_enhanced_saver)
else:
print('Train Epoch {:>3}: validation dataset accuracy did not improve from {:.4f}\n'.format(epoch, best_val_acc))
```
#### File: Defenses/DefenseMethods/IGR.py
```python
import os
import torch
import torch.nn.functional as F
import torch.optim as optim
from Defenses.DefenseMethods.defenses import Defense
from RawModels.Utils.TrainTest import validation_evaluation
class IGRDefense(Defense):
def __init__(self, model=None, defense_name=None, dataset=None, lambda_r=None, training_parameters=None, device=None):
"""
:param model:
:param defense_name:
:param dataset:
:param lambda_r:
:param training_parameters:
:param device:
"""
super(IGRDefense, self).__init__(model=model, defense_name=defense_name)
self.model = model
self.defense_name = defense_name
self.device = device
self.Dataset = dataset.upper()
assert self.Dataset in ['MNIST', 'CIFAR10'], "The data set must be MNIST or CIFAR10"
self.lam_r = lambda_r
self.num_epochs = training_parameters['num_epochs']
        # keep using the Adam optimizer for both datasets, as in the original paper
self.optimizer = optim.Adam(self.model.parameters(), lr=0.0002, eps=1e-4)
def train_one_epoch_with_lambda_regularization(self, train_loader, epoch):
"""
train the model using input gradient regularization
ensure that if any input changes slightly, the KL divergence between predictions and the labels will not change significantly
:param train_loader:
:param epoch:
:return: None
"""
# set the model in the training mode
self.model.train()
for index, (images, labels) in enumerate(train_loader):
images.requires_grad = True
images = images.to(self.device)
labels = labels.to(self.device)
logits = self.model(images)
# calculate the loss1
l1 = F.cross_entropy(logits, labels)
# calculate the loss2
grads = torch.autograd.grad(l1, images, create_graph=True)[0]
l2 = torch.Tensor.norm(grads, p=2) ** 2
# add the two losses
loss = l1 + self.lam_r * l2
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
print('\rTrain Epoch{:>3}: [batch:{:>4}/{:>4}] \tloss1:{:.4f} + lambda:{} * loss2:{:.4f} = {:.4f} ===> '. \
format(epoch, index, len(train_loader), l1, self.lam_r, l2, loss), end=' ')
def defense(self, train_loader=None, validation_loader=None):
best_val_acc = None
for epoch in range(self.num_epochs):
# training the model using input gradient regularization
self.train_one_epoch_with_lambda_regularization(train_loader=train_loader, epoch=epoch)
val_acc = validation_evaluation(model=self.model, validation_loader=validation_loader, device=self.device)
            # save the retrained defense-enhanced model
assert os.path.exists('../DefenseEnhancedModels/{}'.format(self.defense_name))
defense_enhanced_saver = '../DefenseEnhancedModels/{}/{}_{}_enhanced.pt'.format(self.defense_name, self.Dataset, self.defense_name)
if not best_val_acc or round(val_acc, 4) >= round(best_val_acc, 4):
if best_val_acc is not None:
os.remove(defense_enhanced_saver)
best_val_acc = val_acc
self.model.save(name=defense_enhanced_saver)
else:
                print('Train Epoch{:>3}: validation dataset accuracy did not improve from {:.4f}\n'.format(epoch, best_val_acc))
```
#### File: Defenses/DefenseMethods/PD.py
```python
import math
import os
import numpy as np
import torch
import torch.nn as nn
from Defenses.DefenseMethods.defenses import Defense
try:
from Defenses.DefenseMethods.External.pixel_cnn_pp.model import PixelCNN
from Defenses.DefenseMethods.External.pixel_cnn_pp.utils import decode, load_part_of_model
except:
print('please git clone the repo [] and train the generative PixelCNN model first')
raise ImportError
rescaling = lambda x: (x - 0.5) * 2
inv_rescaling = lambda x: x * 0.5 + 0.5
res_1_to_255 = lambda x: x * 127.5 + 127.5
res_255_to_1 = lambda x: (x - 127.5) / 127.5
class PixelDefend(Defense):
def __init__(self, model=None, defense_name=None, dataset=None, pixel_cnn_dir=None, device=None):
super(PixelDefend, self).__init__(model=model, defense_name=defense_name)
self.model = model
self.defense_name = defense_name
self.device = device
self.Dataset = dataset.upper()
assert self.Dataset in ['MNIST', 'CIFAR10'], "The data set must be MNIST or CIFAR10"
# load the trained PixelCNN model
# The structure of PixelCNN is fixed as follows in this implementation, the same as https://github.com/SaizhuoWang/pixel-cnn-pp
self.pixel_cnn_model = PixelCNN(nr_resnet=5, nr_filters=160, nr_logistic_mix=10, resnet_nonlinearity='concat_elu',
input_channels=3 if self.Dataset == 'CIFAR10' else 1).to(self.device)
self.pixel_cnn_model = nn.DataParallel(self.pixel_cnn_model)
self.load_pixel_cnn_model(dir=pixel_cnn_dir)
def load_pixel_cnn_model(self, dir=None):
pixel_cnn_model_location = '{}DefenseMethods/External/pixel_cnn_pp/models/{}_pixel_cnn.pth'.format(dir, self.Dataset)
print('\nstarting to load the pixel cnn model from ', pixel_cnn_model_location)
        assert os.path.exists(pixel_cnn_model_location), "the pixel cnn model in {} does not exist, please train the model first!".format(
pixel_cnn_model_location)
load_part_of_model(model=self.pixel_cnn_model, path=pixel_cnn_model_location)
def de_noising_samples(self, samples=None, batch_size=20, eps=None):
"""
:param samples:
:param eps:
:return:
"""
# samples.shape = (B, C, W, H)
assert len(samples.shape) == 4 and isinstance(samples, (np.ndarray, np.generic)), \
"input samples should be type of numpy with 4 dimensions"
        assert samples.shape[0] == batch_size, 'make sure the first dimension equals the batch_size'
channel = samples.shape[1]
assert channel == 1 or channel == 3, "the second dimension should be the channel"
copy_samples = np.copy(samples)
copy_samples = torch.from_numpy(copy_samples).to(self.device).float()
copy_samples = rescaling(copy_samples) # [0, 1] ==> [-1, 1]
assert eps < 1.0 and eps > 0.0
int_epsilon = int(round(eps * 255.0, 0))
width, height = samples.shape[2], samples.shape[3]
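        # Purification loop (summary): pixels are visited in raster order; at
        # each location the PixelCNN gives a 256-way distribution per channel,
        # logits of values outside [x - eps, x + eps] (in 0..255 space) are
        # suppressed via the template mask, and the constrained argmax replaces
        # the pixel before moving on.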
for i in range(width):
for j in range(height):
output = self.pixel_cnn_model(copy_samples, sample=True)
out = decode(copy_samples, output, self.Dataset, self.device)
copy_sample_de_norm = res_1_to_255(copy_samples) # [-1, 1] ==> [0, 255]
copy_sample_int = copy_sample_de_norm.clone().int()
lb = torch.clamp(copy_sample_int - int_epsilon, min=0)
ub = torch.clamp(copy_sample_int + int_epsilon, max=255)
template = (torch.range(0, 255, step=1, dtype=torch.int).to(self.device) + torch.zeros_like(copy_sample_int, dtype=torch.int)[
..., None]).to(self.device)
lb = lb[..., None] + torch.zeros_like(template, dtype=torch.int)
ub = ub[..., None] + torch.zeros_like(template, dtype=torch.int)
template = torch.clamp((torch.lt(template, lb) + torch.gt(template, ub)), max=1, min=0).float()
template = template.permute(0, 2, 3, 1, 4)
out = out - template * 1e10 # out.shape = (B, W, H, C, 256)
out = res_255_to_1(torch.argmax(out, dim=4).permute(0, 3, 1, 2).float()) # [0, 255] -> [-1, 1]
# out.shape = (B, C, W, H)
copy_samples[:, :, i, j] = out.data[:, :, i, j]
copy_sample = inv_rescaling(copy_samples)
return copy_sample.data.cpu().numpy()
def de_noising_samples_batch(self, samples=None, batch_size=20, eps=None):
purified_images = []
number_batch = int(math.ceil(len(samples) / batch_size))
for index in range(number_batch):
start = index * batch_size
end = min((index + 1) * batch_size, len(samples))
print('\r===> in batch {:>2}, {:>4} ({:>4} in total) samples are purified ... '.format(index, end - start, end), end=' ')
rtn = self.de_noising_samples(samples=samples[start:end], batch_size=batch_size, eps=eps)
purified_images.extend(rtn)
return np.array(purified_images)
def defense(self):
print('As the defense of PixelDefend does not retrain the model, we do not implement this method')
```
#### File: Defenses/DefenseMethods/RC.py
```python
import numpy as np
import torch
from Defenses.DefenseMethods.defenses import Defense
from RawModels.Utils.TrainTest import validation_evaluation
class RCDefense(Defense):
def __init__(self, model=None, defense_name='RC', dataset=None, device=None, num_points=100):
"""
:param model:
:param defense_name:
:param dataset:
:param device:
:param num_points:
"""
super(RCDefense, self).__init__(model=model, defense_name=defense_name)
self.model = model
self.defense_name = defense_name
self.device = device
self.Dataset = dataset.upper()
assert self.Dataset in ['MNIST', 'CIFAR10'], "The data set must be MNIST or CIFAR10"
# parameters for the region-based classification defense
self.num_points = num_points
def search_best_radius(self, validation_loader=None, radius_min=0.0, radius_max=1.0, radius_step=0.01):
"""
:param validation_loader:
:param radius_min:
:param radius_max:
:param radius_step:
:return:
"""
self.model.eval()
with torch.no_grad():
# compute the original classification accuracy on validation dataset
val_acc = validation_evaluation(model=self.model, validation_loader=validation_loader, device=self.device)
print('<--- original classification accuracy on validation dataset is {:.4f} --->'.format(val_acc))
# learn the radius through a search process
total_step = int((radius_max - radius_min) / radius_step)
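            # Search rule (summary): radii are tried from small to large; the
            # first radius whose region-based accuracy drops more than 1% below
            # the plain validation accuracy ends the search and the previous
            # radius is returned (or radius_max if no such drop is observed).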
for index in range(total_step):
# update the radius
tmp_radius = radius_min + radius_step * (index + 1)
# calculate the accuracy of region-based classification on validation dataset
total = 0.0
correct = 0.0
for images, labels in validation_loader:
rc_preds = self.region_based_classification(samples=images, radius=tmp_radius)
rc_labels = torch.from_numpy(rc_preds)
correct += (rc_labels == labels).sum().item()
total += labels.size(0)
rc_acc = correct / total
print('\tcurrent radius is {:.2f}, validation accuracy is {:.1f}/{:.1f}={:.5f}'.format(tmp_radius, correct, total, rc_acc))
if (val_acc - rc_acc) > 1e-2:
return round(tmp_radius - radius_step, 2)
return radius_max
def region_based_classification_single(self, sample, radius):
"""
:param sample: one sample (1*channel*H*W)
:param radius:
:return:
"""
self.model.eval()
assert sample.shape[0] == 1, "the sample parameter should be one example in numpy format"
copy_sample = np.copy(sample)
with torch.no_grad():
copy_sample = torch.from_numpy(copy_sample).to(self.device)
# prepare the hypercube samples (size=num_points) for the sample (size=1)
hypercube_samples = copy_sample.repeat(self.num_points, 1, 1, 1).to(self.device).float()
random_space = torch.Tensor(*hypercube_samples.size()).to(self.device).float()
random_space.uniform_(-radius, radius)
hypercube_samples = torch.clamp(hypercube_samples + random_space, min=0.0, max=1.0)
# predicting for hypercube samples
hypercube_preds = self.model(hypercube_samples)
hypercube_labels = torch.max(hypercube_preds, dim=1)[1]
# voting for predicted labels
bin_count = torch.bincount(hypercube_labels)
rc_label = torch.max(bin_count, dim=0)[1]
return rc_label.cpu().numpy()
def region_based_classification(self, samples, radius):
"""
:param samples: batch samples (batch_size*channel*H*W)
:param radius:
:return:
"""
self.model.eval()
rc_labels = []
for i in range(samples.shape[0]):
x = samples[i: i + 1]
label = self.region_based_classification_single(sample=x, radius=radius)
rc_labels.append(label)
return np.array(rc_labels)
def defense(self):
        print('As the defense of RC does not retrain the model, we do not implement this method')
```
#### File: DEEPSEC/Defenses/TE_Test.py
```python
import argparse
import os
import random
import sys
import numpy as np
import torch
sys.path.append('%s/../' % os.path.dirname(os.path.realpath(__file__)))
from RawModels.MNISTConv import MNISTConvNet, MNIST_Training_Parameters
from RawModels.ResNet import resnet20_cifar, CIFAR10_Training_Parameters
from RawModels.Utils.dataset import get_mnist_train_validate_loader
from RawModels.Utils.dataset import get_cifar10_train_validate_loader
from Defenses.DefenseMethods.TE import TEDefense
def main(args):
# Device configuration
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Set the random seed manually for reproducibility.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
# Get training parameters, set up model frameworks and then get the train_loader and test_loader
dataset = args.dataset.upper()
assert dataset == 'MNIST' or dataset == 'CIFAR10'
if dataset == 'MNIST':
training_parameters = MNIST_Training_Parameters
model_framework = MNISTConvNet(thermometer=True, level=args.level).to(device)
batch_size = training_parameters['batch_size']
train_loader, valid_loader = get_mnist_train_validate_loader(dir_name='../RawModels/MNIST/', batch_size=batch_size, valid_size=0.1,
shuffle=True)
else:
training_parameters = CIFAR10_Training_Parameters
model_framework = resnet20_cifar(thermometer=True, level=args.level).to(device)
batch_size = training_parameters['batch_size']
train_loader, valid_loader = get_cifar10_train_validate_loader(dir_name='../RawModels/CIFAR10/', batch_size=batch_size, valid_size=0.1,
shuffle=True)
defense_name = 'TE'
te_params = {
'level': args.level,
'steps': args.steps,
'attack_eps': args.attack_eps,
'attack_step_size': args.attack_step_size
}
te = TEDefense(model=model_framework, defense_name=defense_name, dataset=dataset, training_parameters=training_parameters, device=device,
**te_params)
te.defense(train_loader=train_loader, validation_loader=valid_loader)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='The TE Defenses')
parser.add_argument('--dataset', type=str, default='MNIST', help='the dataset (MNIST or CIFAR10)')
parser.add_argument('--seed', type=int, default=100, help='the default random seed for numpy and torch')
parser.add_argument('--gpu_index', help="gpu index to use", default='0', type=str)
# parameters for the TE Defense
parser.add_argument('--level', type=int, default=16, help='the discretization level of pixel value')
parser.add_argument('--steps', type=int, default=40, help='the total attack steps to perform')
parser.add_argument('--attack_eps', type=float, default=0.3, help='the amplitude of perturbation')
parser.add_argument('--attack_step_size', type=float, default=0.01, help='the step size of each attack iteration')
arguments = parser.parse_args()
main(arguments)
```
#### File: DEEPSEC/RawModels/ResNet.py
```python
import math
import os
import sys
import torch
import torch.nn as nn
sys.path.append('%s/../' % os.path.dirname(os.path.realpath(__file__)))
from RawModels.basic_module import BasicModule
# Training parameters for CIFAR10
# global CIFAR10_Training_Parameters
CIFAR10_Training_Parameters = {
'num_epochs': 200,
'batch_size': 32,
'lr': 1e-3
}
# adjust the learning rate for CIFAR10 training according to the number of epoch
def adjust_learning_rate(epoch, optimizer):
minimum_learning_rate = 0.5e-6
for param_group in optimizer.param_groups:
lr_temp = param_group["lr"]
if epoch == 80 or epoch == 120 or epoch == 160:
lr_temp = lr_temp * 1e-1
elif epoch == 180:
lr_temp = lr_temp * 5e-1
param_group["lr"] = max(lr_temp, minimum_learning_rate)
print('The **learning rate** of the {} epoch is {}'.format(epoch, param_group["lr"]))
def conv3x3(in_planes, out_planes, stride=1):
# 3x3 convolution with padding
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False)
class BasicBlock(BasicModule):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet_Cifar(BasicModule):
def __init__(self, block, layers, num_classes=10, thermometer=False, level=1):
super(ResNet_Cifar, self).__init__()
if thermometer is True:
input_channels = 3 * level
else:
input_channels = 3
self.inplanes = 16
self.conv1 = nn.Conv2d(input_channels, 16, kernel_size=3, stride=1, padding=1, bias=True)
self.bn1 = nn.BatchNorm2d(16)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, 16, layers[0])
self.layer2 = self._make_layer(block, 32, layers[1], stride=2)
self.layer3 = self._make_layer(block, 64, layers[2], stride=2)
# self.avgpool = nn.AvgPool2d(8, stride=1)
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(64 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=True),
nn.BatchNorm2d(planes * block.expansion))
layers = list([])
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
x = x - torch.max(x, dim=1, keepdim=True)[0]
return x
def resnet20_cifar(thermometer=False, level=1):
model = ResNet_Cifar(BasicBlock, [3, 3, 3], thermometer=thermometer, level=level)
return model
``` |
{
"source": "Jianningli/autoimplant",
"score": 2
} |
#### File: skull-processing/src/defectinject.py
```python
from glob import glob
import numpy as np
import nrrd
from scipy.ndimage import zoom
import random
import pymrt.geometry
'''
_note:_ the current code provides functionality to generate cubic defects
`generate_cude(defect_size)` and spherical defects `generate_sphere(defect_size)`
'''
#**************************Square Hole Generation**************************************
def generate_hole_implants(data,cube_dim):
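    # Builds a binary mask of ones with a (cube_dim x cube_dim) column of zeros
    # placed at a random (x, y) position, covering the top quarter of the skull
    # in z (from 3/4 of the height to the top). Multiplying the skull by the
    # mask removes that region; multiplying by its complement yields the implant.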
x_=data.shape[0]
y_=data.shape[1]
z_=data.shape[2]
full_masking=np.ones(shape=(x_,y_,z_))
x=random.randint(int(cube_dim/2),x_-int(cube_dim/2))
y=random.randint(int(cube_dim/2),y_-int(cube_dim/2))
z=int(z_*(3/4))
cube_masking=np.zeros(shape=(cube_dim,cube_dim,z_-z))
print(cube_masking.shape)
full_masking[x-int(cube_dim/2):x+int(cube_dim/2),y-int(cube_dim/2):y+int(cube_dim/2),z:z_]=cube_masking
return full_masking
def generate_cude(size):
for i in range(len(pair_list)):
print('generating data:',pair_list[i])
temp,header=nrrd.read(pair_list[i])
full_masking=generate_hole_implants(temp,size)
c_masking_1=(full_masking==1)
c_masking_1=c_masking_1+1-1
defected_image=c_masking_1*temp
c_masking=(full_masking==0)
c_masking=c_masking+1-1
implants=c_masking*temp
f1=defected_dir+pair_list[i][-10:-5]+'.nrrd'
f2=implant_dir+pair_list[i][-10:-5]+'.nrrd'
nrrd.write(f1,defected_image,header)
nrrd.write(f2,implants,header)
#****************************Sphere Hole Generation********************************
def sphere(shape, radius, position):
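    # Returns a boolean array of the given shape that is True inside a ball of
    # the given radius centered at `position`: points where
    # sum_i ((x_i - c_i) / radius)^2 <= 1.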
semisizes = (radius,) * 3
grid = [slice(-x0, dim - x0) for x0, dim in zip(position, shape)]
position = np.ogrid[grid]
arr = np.zeros(shape, dtype=float)
for x_i, semisize in zip(position, semisizes):
arr += (np.abs(x_i / semisize) ** 2)
return arr <= 1.0
def generate_sphere_hole_implants(data,size):
x_=data.shape[0]
y_=data.shape[1]
z_=data.shape[2]
z=int(z_*(3/4))
x=random.randint(z_+size-z,x_-(z_+size-z))
y=random.randint(z_+size-z,y_-(z_+size-z))
arr = sphere((x_, y_, z_+size),z_+size-z, (x, y, z))
return arr
def generate_sphere(size1):
for i in range(len(pair_list)):
size=size1
print('generating data:',pair_list[i])
temp=nrrd.read(pair_list[i])[0]
print(temp.shape)
temp_=np.zeros(shape=(temp.shape[0],temp.shape[1],temp.shape[2]+size))
temp_[:,:,0:temp.shape[2]]=temp
arr=generate_sphere_hole_implants(temp,size)
arr=(arr==1)
arr=arr+1-1
implants=arr*temp_
arr=(arr==0)
arr=arr+1-1
defected_image=arr*temp_
f1=defected_dir+pair_list[i][-10:-5]+'.nrrd'
f2=implant_dir+pair_list[i][-10:-5]+'.nrrd'
nrrd.write(f1,defected_image[:,:,0:temp.shape[2]].astype('float64'))
nrrd.write(f2,implants[:,:,0:temp.shape[2]].astype('float64'))
print(defected_image[:,:,0:temp.shape[2]].shape)
if __name__ == "__main__":
# Directory of the healthy skull
pair_list=glob('{}/*.nrrd'.format('C:/Users/Jianning/Desktop'))
defected_dir='C:/Users/Jianning/Desktop/1/'
implant_dir='C:/Users/Jianning/Desktop/2/'
generate_cude(128)
#generate_sphere(20)
```
#### File: autoimplant/src/n2_model.py
```python
from glob import glob
from conv3 import *
import numpy as np
import nrrd
from data_loader import *
#**************************************************************
# the code is adapted from
# https://link.springer.com/chapter/10.1007/978-3-319-75541-0_23
# the network architecture/data loader/loss function is adapted
#**************************************************************
class auto_encoder(object):
def __init__(self, sess):
self.sess = sess
self.phase = 'train'
self.batch_size = 1
self.inputI_chn = 1
self.output_chn = 2
self.lr = 0.0001
self.beta1 = 0.3
self.epoch = 10000
self.model_name = 'n2.model'
self.save_intval = 100
self.build_model()
# directory where the checkpoint can be saved/loaded
self.chkpoint_dir = "../ckpt"
# directory containing the 100 training defective skulls
self.train_data_dir = "../training_defective_skull"
# ground truth (implants) for the training data
self.train_label_dir = "/implants"
# test data directory
self.test_data_dir = "../testing_defective_skulls"
# directory where the predicted implants from model n1 is stored
self.bbox_dir = "../predictions_n1"
# where to save the predicted implants
self.save_dir = "../predictions_n2/"
# 3D dice loss function
# credits to (https://link.springer.com/chapter/10.1007/978-3-319-75541-0_23)
def dice_loss_fun(self, pred, input_gt):
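        # Soft Dice over the two one-hot channels: for each class, the overlap
        # between predicted probabilities and ground truth is divided by the
        # sum of their self-overlaps, and the accumulated score is negated so
        # that minimizing the loss maximizes Dice.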
input_gt = tf.one_hot(input_gt, 2)
dice = 0
for i in range(2):
inse = tf.reduce_mean(pred[:, :, :, :, i]*input_gt[:, :, :, :, i])
l = tf.reduce_sum(pred[:, :, :, :, i]*pred[:, :, :, :, i])
r = tf.reduce_sum(input_gt[:, :, :, :, i] * input_gt[:, :, :, :, i])
dice = dice + 2*inse/(l+r)
return -dice
def build_model(self):
print('building patch based model...')
self.input_I = tf.placeholder(dtype=tf.float32, shape=[self.batch_size,256,256,128, self.inputI_chn], name='inputI')
self.input_gt = tf.placeholder(dtype=tf.int64, shape=[self.batch_size,256,256,128,1], name='target')
self.soft_prob , self.task0_label = self.encoder_decoder(self.input_I)
#3D voxel-wise dice loss
self.main_dice_loss = self.dice_loss_fun(self.soft_prob, self.input_gt[:,:,:,:,0])
#self.main_softmax_loss=self.softmax_crossentropy_loss(self.soft_prob, self.input_gt[:,:,:,:,0])
# final total loss
self.dice_loss=200000000*self.main_dice_loss
self.Loss = self.dice_loss
# create model saver
self.saver = tf.train.Saver()
def encoder_decoder(self, inputI):
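        # Rough structure: a stack of stride-2 3x3x3 convolutions (each followed
        # by batch norm and ReLU) compresses the 256x256x128 input, and a stack
        # of transposed convolutions interleaved with conv blocks upsamples back
        # to full resolution; the final layers produce 2-channel logits followed
        # by a softmax and per-voxel argmax (implant vs. background).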
phase_flag = (self.phase=='train')
conv1_1 = conv3d(input=inputI, output_chn=16, kernel_size=3, stride=2, use_bias=True, name='conv1')
conv1_bn = tf.contrib.layers.batch_norm(conv1_1, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True, is_training=phase_flag, scope="conv1_batch_norm")
conv1_relu = tf.nn.relu(conv1_bn, name='conv1_relu')
conv2_1 = conv3d(input=conv1_relu, output_chn=16, kernel_size=3, stride=2, use_bias=True, name='conv2')
conv2_bn = tf.contrib.layers.batch_norm(conv2_1, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True, is_training=phase_flag, scope="conv2_batch_norm")
conv2_relu = tf.nn.relu(conv2_bn, name='conv2_relu')
conv3_1 = conv3d(input=conv2_relu, output_chn=16, kernel_size=3, stride=2, use_bias=True, name='conv3a')
conv3_bn = tf.contrib.layers.batch_norm(conv3_1, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True, is_training=phase_flag, scope="conv3_1_batch_norm")
conv3_relu = tf.nn.relu(conv3_bn, name='conv3_1_relu')
conv4_1 = conv3d(input=conv3_relu, output_chn=16, kernel_size=3, stride=2, use_bias=True, name='conv4a')
conv4_bn = tf.contrib.layers.batch_norm(conv4_1, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True, is_training=phase_flag, scope="conv4_1_batch_norm")
conv4_relu = tf.nn.relu(conv4_bn, name='conv4_1_relu')
conv5_1 = conv3d(input=conv4_relu, output_chn=16, kernel_size=3, stride=2, use_bias=True, name='conv5a')
conv5_bn = tf.contrib.layers.batch_norm(conv5_1, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True, is_training=phase_flag, scope="conv5_1_batch_norm")
conv5_relu = tf.nn.relu(conv5_bn, name='conv5_1_relu')
conv6_1 = conv3d(input=conv4_relu, output_chn=16, kernel_size=3, stride=2, use_bias=True, name='conv6a')
conv6_bn = tf.contrib.layers.batch_norm(conv6_1, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True, is_training=phase_flag, scope="conv6_1_batch_norm")
conv6_relu = tf.nn.relu(conv6_bn, name='conv6_1_relu')
conv5_1 = conv3d(input=conv6_relu, output_chn=64, kernel_size=3, stride=1, use_bias=True, name='conv55a')
conv5_bn = tf.contrib.layers.batch_norm(conv5_1, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True, is_training=phase_flag, scope="conv55_1_batch_norm")
conv5_relu = tf.nn.relu(conv5_bn, name='conv55_1_relu')
feature= conv_bn_relu(input=conv5_relu, output_chn=64, kernel_size=3, stride=1, use_bias=True, is_training=phase_flag, name='conv6_1')
deconv1_1 = deconv_bn_relu(input=feature, output_chn=32, is_training=phase_flag, name='deconv1_1')
deconv1_2 = conv_bn_relu(input=deconv1_1, output_chn=32, kernel_size=3, stride=1, use_bias=True, is_training=phase_flag, name='deconv1_2')
deconv2_1 = deconv_bn_relu(input=deconv1_2, output_chn=32, is_training=phase_flag, name='deconv2_1')
deconv2_2 = conv_bn_relu(input=deconv2_1, output_chn=32, kernel_size=3,stride=1, use_bias=True, is_training=phase_flag, name='deconv2_2')
deconv3_1 = deconv_bn_relu(input=deconv2_2, output_chn=32, is_training=phase_flag, name='deconv3_1')
deconv3_2 = conv_bn_relu(input=deconv3_1, output_chn=32, kernel_size=3, stride=1, use_bias=True, is_training=phase_flag, name='deconv3_2')
deconv4_1 = deconv_bn_relu(input=deconv3_2, output_chn=32, is_training=phase_flag, name='deconv4_1')
deconv4_2 = conv_bn_relu(input=deconv4_1, output_chn=32, kernel_size=3, stride=1, use_bias=True, is_training=phase_flag, name='deconv4_2')
deconv5_1 = deconv_bn_relu(input=deconv4_2, output_chn=16, is_training=phase_flag, name='deconv5_1')
deconv5_2 = conv_bn_relu(input=deconv5_1, output_chn=16, kernel_size=3, stride=1, use_bias=True, is_training=phase_flag, name='deconv5_2')
pred_prob1 = conv_bn_relu(input=deconv5_2, output_chn=self.output_chn, kernel_size=3, stride=1, use_bias=True, is_training=phase_flag, name='pred_prob1')
pred_prob = conv3d(input=pred_prob1, output_chn=self.output_chn, kernel_size=3, stride=1, use_bias=True, name='pred_prob')
pred_prob2 = conv3d(input=pred_prob, output_chn=self.output_chn, kernel_size=3, stride=1, use_bias=True, name='pred_prob2')
pred_prob3 = conv3d(input=pred_prob2, output_chn=self.output_chn, kernel_size=3, stride=1, use_bias=True, name='pred_prob3')
soft_prob=tf.nn.softmax(pred_prob3,name='task_0')
task0_label=tf.argmax(soft_prob,axis=4,name='argmax0')
return soft_prob,task0_label
def train(self):
print('training the n2 model')
u_optimizer = tf.train.AdamOptimizer(learning_rate=self.lr, beta1=self.beta1).minimize(self.Loss)
init_op = tf.global_variables_initializer()
loss_summary_0 =tf.summary.scalar('dice loss',self.Loss)
self.sess.run(init_op)
self.log_writer = tf.summary.FileWriter("./logs", self.sess.graph)
counter=1
data_list =glob('{}/*.nrrd'.format(self.train_data_dir))
label_list=glob('{}/*.nrrd'.format(self.train_label_dir))
bbox_list=glob('{}/*.nrrd'.format(self.bbox_dir))
i=0
for epoch in np.arange(self.epoch):
i=i+1
print('creating batches for training epoch :',i)
batch_img1, batch_label1,hd,hl= load_bbox_pair(bbox_list,data_list,label_list)
print('epoch:',i )
_, cur_train_loss = self.sess.run([u_optimizer, self.Loss], feed_dict={self.input_I: batch_img1, self.input_gt: batch_label1})
train_output0 = self.sess.run(self.task0_label, feed_dict={self.input_I: batch_img1})
print('sum for current training whole: %.8f, pred whole: %.8f'%(np.sum(batch_label1),np.sum(train_output0)))
summary_0=self.sess.run(loss_summary_0,feed_dict={self.input_I: batch_img1,self.input_gt: batch_label1})
self.log_writer.add_summary(summary_0, counter)
print('current training loss:',cur_train_loss)
counter+=1
if np.mod(counter, self.save_intval) == 0:
self.save_chkpoint(self.chkpoint_dir, self.model_name, counter)
def test(self):
print('testing patch based model...')
init_op = tf.global_variables_initializer()
self.sess.run(init_op)
if self.load_chkpoint(self.chkpoint_dir):
print(" *****Successfully load the checkpoint**********")
else:
print("*******Fail to load the checkpoint***************")
data_list =glob('{}/*.nrrd'.format(self.test_data_dir))
bbox_list=glob('{}/*.nrrd'.format(self.bbox_dir))
k=1
for i in range(len(data_list)):
print('generating result for test sample',k)
test_input,header=load_bbox_pair_test(bbox_list,data_list,i)
test_output = self.sess.run(self.task0_label, feed_dict={self.input_I: test_input})
#implants_post_processed=post_processing(test_output[0,:,:,:])
#filename=self.save_dir+"implants%d.nrrd"%i
filename=self.save_dir+bbox_list[i][-15:-5]+'.nrrd'
nrrd.write(filename,test_output[0,:,:,:].astype('float32'),header)
k+=1
def save_chkpoint(self, checkpoint_dir, model_name, step):
model_dir = "%s" % ('n2_ckpt')
checkpoint_dir = os.path.join(checkpoint_dir, model_dir)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(self.sess, os.path.join(checkpoint_dir, model_name), global_step=step)
def load_chkpoint(self, checkpoint_dir):
print(" [*] Reading checkpoint...")
model_dir = "%s" % ('n2_ckpt')
checkpoint_dir = os.path.join(checkpoint_dir, model_dir)
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
return True
else:
return False
```
#### File: autoimplant/src/pred_2_org.py
```python
import numpy as np
import nrrd
from glob import glob
import scipy
import scipy.ndimage
import random
from scipy.ndimage import zoom
def resizingbbox(data,z_dim):
a,b,c=data.shape
resized_data = zoom(data,(512/a,512/b,z_dim/c),order=2, mode='constant')
return resized_data
def bbox_cal(data,dim):
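    # Resizes the predicted mask to 512 x 512 x dim and scans the summed
    # projections along each axis for the first and last non-zero indices,
    # giving the tight bounding box (resx..512-resxx, resy..512-resyy,
    # resz..dim-reszz) of the implant region.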
a=resizingbbox(data,dim)
a=np.round(a)
x0=np.sum(a,axis=2)
xx=np.sum(x0,axis=1)
yy=np.sum(x0,axis=0)
resx = next(x for x, val in enumerate(list(xx))
if val > 0)
resxx = next(x for x, val in enumerate(list(xx)[::-1])
if val > 0)
resy = next(x for x, val in enumerate(list(yy))
if val > 0)
resyy = next(x for x, val in enumerate(list(yy)[::-1])
if val > 0)
z0=np.sum(a,axis=1)
zz=np.sum(z0,axis=0)
resz = next(x for x, val in enumerate(list(zz))
if val > 0)
reszz = next(x for x, val in enumerate(list(zz)[::-1])
if val > 0)
return resx,resxx,resy,resyy,resz,reszz
bbox_list=glob('{}/*.nrrd'.format('../predictions_n1'))
pred_list=glob('{}/*.nrrd'.format('../predictions_n2'))
original_list=glob('{}/*.nrrd'.format('../testing_defective_skulls'))
save_dir='../final_implants/'
for i in range(len(pred_list)):
data,hd=nrrd.read(original_list[i])
print('original data',original_list[i])
bbox,hb=nrrd.read(bbox_list[i])
print('bbox',bbox_list[i])
pred,hd=nrrd.read(pred_list[i])
print('initial pred',pred_list[i])
resx,resxx,resy,resyy,resz,reszz=bbox_cal(bbox,data.shape[2])
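    # The (nominally 256 x 256 x 128) prediction from n2 is cropped to the
    # bounding-box extent (plus a 20-voxel margin on each side in x/y) and
    # pasted into an empty 512 x 512 x Z volume at the location given by the
    # coarse n1 bounding box, restricted to the topmost 128 axial slices.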
x_len=512+40-(resxx+resx)
y_len=512+40-(resyy+resy)
xl=int(x_len/2)
xl_r=x_len-xl
yl=int(y_len/2)
y1_r=y_len-yl
boundingboximp=pred[128-xl:128+xl_r,128-yl:128+y1_r,]
orig=np.zeros(shape=(512,512,data.shape[2]))
margin=20
orig[resx-margin:512-resxx+margin,resy-margin:512-resyy+margin,data.shape[2]-128:data.shape[2]]=boundingboximp
outfile=save_dir+bbox_list[i][-16:-5]+'.nrrd'
nrrd.write(outfile,orig,hd)
```
#### File: autoimplant/src/pre_post_processing.py
```python
import cc3d
import numpy as np
import nrrd
def skull_id(labels_out):
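    # Histograms the connected-component labels and returns the label whose
    # voxel count is the second largest (hist[1]); the largest component is the
    # background, so the runner-up is taken to be the skull.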
labels_out=labels_out.reshape((1,-1))
labels_out=labels_out[0,:]
label=np.unique(labels_out)
hist, bin_edges=np.histogram(labels_out,bins=label)
hist=np.ndarray.tolist(hist)
hist_=hist
hist_=np.array(hist_)
hist.sort(reverse = True)
#print('hist',hist)
idx=(hist_==hist[1])
idx=idx+1-1
idx_=np.sum(idx*label[0:len(idx)])
print('idx',idx_)
return idx_
def pre_processing(data):
# original data (512,512,z)
labels_out = cc3d.connected_components(data.astype('int32'))
skull_label=skull_id(labels_out)
skull=(labels_out==skull_label)
skull=skull+1-1
return skull
def skull_id1(labels_out):
labels_out=labels_out.reshape((1,-1))
labels_out=labels_out[0,:]
label=np.unique(labels_out)
hist, bin_edges=np.histogram(labels_out,bins=label)
hist=np.ndarray.tolist(hist)
hist_=hist
hist_=np.array(hist_)
hist.sort(reverse = True)
#print('hist',hist)
idx=(hist_==hist[2])
idx=idx+1-1
idx_=np.sum(idx*label[0:len(idx)])
print('idx',idx_)
return idx_
def pre_processing1(data):
# original data (512,512,z)
labels_out = cc3d.connected_components(data.astype('int32'))
skull_label=skull_id1(labels_out)
skull=(labels_out==skull_label)
skull=skull+1-1
return skull
def post_processing(data):
# original data (512,512,z)
# or implants
labels_out = cc3d.connected_components(data.astype('int32'))
skull_label=skull_id(labels_out)
skull=(labels_out==skull_label)
skull=skull+1-1
return skull
``` |
{
"source": "Jianningli/MIA",
"score": 2
} |
#### File: MIA/source/EncoderDecoder_patch.py
```python
from __future__ import print_function, division
import os
import numpy as np
from keras.layers import BatchNormalization, Activation
from keras.layers import Input, Dense, Flatten, Dropout
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling3D, Conv3D, Deconv3D
from keras.models import Sequential, Model
from keras.models import load_model
from keras.optimizers import Adam
from sklearn.metrics import hamming_loss
from utils import mkdirs
from glob import glob
import random
import nrrd
from scipy.ndimage import zoom
IMAGE_DIR = './32_cube/images'
MODEL_DIR = './32_cube/saved_model/AutoEncoder_patch'
''' Code adapted from https://github.com/Fdevmsy/3D_shape_inpainting.
Credit goes to the original authors
'''
class EncoderDecoder():
def __init__(self):
self.vol_rows = 128
self.vol_cols = 128
self.vol_height = 128
self.mask_height = 128
self.mask_width = 128
self.mask_length = 128
self.channels = 1
self.num_classes = 2
self.vol_shape = (self.vol_rows, self.vol_cols, self.vol_height, self.channels)
self.missing_shape = (self.mask_height, self.mask_width, self.mask_length, self.channels)
self.input_dir = "../defective_skull_train"
self.gt_imp_dir = "../gt_implants_train"
optimizer = Adam(0.0002, 0.5)
try:
#self.discriminator = load_model(os.path.join(MODEL_DIR, 'discriminator.h5'))
self.generator = load_model(os.path.join(MODEL_DIR, 'encoderdecoder_patch.h5'))
print("Loaded checkpoints")
except:
self.generator = self.build_generator()
#self.discriminator = self.build_discriminator()
print("No checkpoints found")
# discriminator
#self.discriminator.compile(loss='binary_crossentropy',
# optimizer=optimizer,
# metrics=['accuracy'])
# generator
# The generator takes noise as input and generates the missing part
masked_vol = Input(shape=self.vol_shape)
gen_missing = self.generator(masked_vol)
# For the combined model we will only train the generator
#self.discriminator.trainable = False
# The discriminator takes generated voxels as input and determines
# if it is generated or if it is a real voxels
#valid = self.discriminator(gen_missing)
# The combined model (stacked generator and discriminator)
# Trains generator to fool discriminator
self.combined = Model(masked_vol, gen_missing)
self.combined.compile(loss='mse',
#loss=['mse', 'binary_crossentropy'],
#loss_weights=[0.9, 0.1],
optimizer=optimizer)
def build_generator(self):
model = Sequential()
# Encoder
model.add(Conv3D(32, kernel_size=5, strides=2, input_shape=self.vol_shape, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv3D(64, kernel_size=5, strides=2, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv3D(128, kernel_size=5, strides=2, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv3D(512, kernel_size=1, strides=2, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.5))
# Decoder
model.add(UpSampling3D())
model.add(Deconv3D(256, kernel_size=5, padding="same"))
model.add(Activation('relu'))
model.add(BatchNormalization(momentum=0.8))
model.add(Deconv3D(128, kernel_size=5, padding="same"))
model.add(Activation('relu'))
model.add(BatchNormalization(momentum=0.8))
model.add(UpSampling3D())
model.add(Deconv3D(64, kernel_size=5, padding="same"))
model.add(Activation('relu'))
model.add(BatchNormalization(momentum=0.8))
model.add(UpSampling3D())
model.add(Deconv3D(self.channels, kernel_size=5, padding="same"))
model.add(Activation('tanh'))
model.add(BatchNormalization(momentum=0.8))
model.add(UpSampling3D())
model.add(Deconv3D(self.channels, kernel_size=5, padding="same"))
model.add(Activation('tanh'))
model.summary()
masked_vol = Input(shape=self.vol_shape)
gen_missing = model(masked_vol)
return Model(masked_vol, gen_missing)
def train(self, epochs, batch_size=16, sample_interval=50):
#X_train = self.generateWall()
# Adversarial ground truths
valid = np.ones((batch_size, 1))
fake = np.zeros((batch_size, 1))
print('loading data...')
#(shape=(85,16,1,128,128,128,1))
ipt=np.load('ipt_patch.npy')
gt=np.load('gt_imp_patch.npy')
print('loading data complete...')
for epoch in range(epochs):
idx=random.randrange(0,85,1)
#masked_vols, missing_parts, _ = self.mask_randomly(vols)
masked_vols=ipt[idx]
#nrrd.write('masked_vols.nrrd',masked_vols[0,:,:,:,0],h)
missing_parts=gt[idx]
#nrrd.write('missing_parts.nrrd',missing_parts[0,:,:,:,0],h)
#masked_vols: (5, 32, 32, 32, 1)
print('masked_vols:',masked_vols.shape)
#missing_parts: (5, 16, 16, 16, 1)
print('missing_parts:',missing_parts.shape)
for i in range(16):
# Train Generator
g_loss = self.combined.train_on_batch(masked_vols[i], missing_parts[i])
print(g_loss)
print('epochs:',epoch)
# save generated samples
if epoch % sample_interval == 0:
#idx = np.random.randint(0, X_train.shape[0], 2)
#vols = X_train[idx]
#self.sample_images(epoch, vols)
self.save_model()
def make_patch(self,label):
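        # Tiles the first two spatial dimensions of a 512 x 512 x 128 volume
        # into a 4 x 4 grid of 128^3 patches and adds batch and channel axes,
        # returning an array of shape (16, 1, 128, 128, 128, 1).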
label_list=[]
for x in range(4):
for y in range(4):
temp_label=np.expand_dims(np.expand_dims(label[x*128:(x+1)*128,y*128:(y+1)*128,:],axis=0),axis=4)
label_list.append(temp_label)
return np.array(label_list)
def evaluate(self, testdir,test_results_dir):
print('evaluating the model...')
test_list=glob('{}/*.nrrd'.format(testdir))
for i in range(len(test_list)):
data,h=nrrd.read(test_list[i])
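            # Per-skull inference (summary): keep only the topmost 128 axial
            # slices, split them into 16 patches of 128^3, run the generator on
            # each patch, binarize the output at 0.5, and stitch the patches
            # back into a 512 x 512 x 128 volume before saving.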
data=data[:,:,data.shape[2]-128:data.shape[2]]
datap=self.make_patch(data)
reconstructed=np.zeros(shape=(512,512,128))
patch_idx=0
for x in range(4):
for y in range(4):
gen_missing = self.generator.predict(datap[patch_idx])
gen_missing=(gen_missing>0.5)
gen_missing=gen_missing+1-1
reconstructed[x*128:(x+1)*128,y*128:(y+1)*128,:]=gen_missing[0,:,:,:,0]
patch_idx=patch_idx+1
filename=test_results_dir+test_list[i][-10:-5]+'.nrrd'
nrrd.write(filename,reconstructed,h)
def save_model(self):
def save(model, model_name):
model_path = os.path.join(MODEL_DIR, "%s.h5" % model_name)
model.save(model_path)
save(self.generator, "encoderdecoder_patch")
#save(self.discriminator, "discriminator")
if __name__ == '__main__':
test_dir="../defective_skull_test"
test_results_dir="../results/"
context_encoder = EncoderDecoder()
context_encoder.train(epochs=3000, batch_size=4, sample_interval=200)
#context_encoder.evaluate(test_dir,test_results_dir)
``` |
{
"source": "jianoaix/ray",
"score": 2
} |
#### File: modules/reporter/reporter_agent.py
```python
import asyncio
import datetime
import json
import logging
import os
import socket
import subprocess
import sys
import traceback
import warnings
import ray
import ray.dashboard.modules.reporter.reporter_consts as reporter_consts
from ray.dashboard import k8s_utils
import ray.dashboard.utils as dashboard_utils
import ray.experimental.internal_kv as internal_kv
import ray._private.services
import ray._private.utils
from ray.core.generated import reporter_pb2
from ray.core.generated import reporter_pb2_grpc
from ray.ray_constants import DEBUG_AUTOSCALING_STATUS
from ray._private.metrics_agent import MetricsAgent, Gauge, Record
from ray.util.debug import log_once
import psutil
logger = logging.getLogger(__name__)
enable_gpu_usage_check = True
# Are we in a K8s pod?
IN_KUBERNETES_POD = "KUBERNETES_SERVICE_HOST" in os.environ
# Flag to enable showing disk usage when running in a K8s pod,
# disk usage defined as the result of running psutil.disk_usage("/")
# in the Ray container.
ENABLE_K8S_DISK_USAGE = os.environ.get("RAY_DASHBOARD_ENABLE_K8S_DISK_USAGE") == "1"
try:
import gpustat.core as gpustat
except (ModuleNotFoundError, ImportError):
gpustat = None
if log_once("gpustat_import_warning"):
warnings.warn(
"`gpustat` package is not installed. GPU monitoring is "
"not available. To have full functionality of the "
"dashboard please install `pip install ray["
"default]`.)"
)
def recursive_asdict(o):
if isinstance(o, tuple) and hasattr(o, "_asdict"):
return recursive_asdict(o._asdict())
if isinstance(o, (tuple, list)):
L = []
for k in o:
L.append(recursive_asdict(k))
return L
if isinstance(o, dict):
D = {k: recursive_asdict(v) for k, v in o.items()}
return D
return o
def jsonify_asdict(o) -> str:
return json.dumps(dashboard_utils.to_google_style(recursive_asdict(o)))
# A list of gauges to record and export metrics.
METRICS_GAUGES = {
"node_cpu_utilization": Gauge(
"node_cpu_utilization", "Total CPU usage on a ray node", "percentage", ["ip"]
),
"node_cpu_count": Gauge(
"node_cpu_count", "Total CPUs available on a ray node", "cores", ["ip"]
),
"node_mem_used": Gauge(
"node_mem_used", "Memory usage on a ray node", "bytes", ["ip"]
),
"node_mem_available": Gauge(
"node_mem_available", "Memory available on a ray node", "bytes", ["ip"]
),
"node_mem_total": Gauge(
"node_mem_total", "Total memory on a ray node", "bytes", ["ip"]
),
"node_gpus_available": Gauge(
"node_gpus_available",
"Total GPUs available on a ray node",
"percentage",
["ip"],
),
"node_gpus_utilization": Gauge(
"node_gpus_utilization", "Total GPUs usage on a ray node", "percentage", ["ip"]
),
"node_gram_used": Gauge(
"node_gram_used", "Total GPU RAM usage on a ray node", "bytes", ["ip"]
),
"node_gram_available": Gauge(
"node_gram_available", "Total GPU RAM available on a ray node", "bytes", ["ip"]
),
"node_disk_io_read": Gauge(
"node_disk_io_read", "Total read from disk", "bytes", ["ip"]
),
"node_disk_io_write": Gauge(
"node_disk_io_write", "Total written to disk", "bytes", ["ip"]
),
"node_disk_io_read_count": Gauge(
"node_disk_io_read_count", "Total read ops from disk", "io", ["ip"]
),
"node_disk_io_write_count": Gauge(
"node_disk_io_write_count", "Total write ops to disk", "io", ["ip"]
),
"node_disk_io_read_speed": Gauge(
"node_disk_io_read_speed", "Disk read speed", "bytes/sec", ["ip"]
),
"node_disk_io_write_speed": Gauge(
"node_disk_io_write_speed", "Disk write speed", "bytes/sec", ["ip"]
),
"node_disk_read_iops": Gauge(
"node_disk_read_iops", "Disk read iops", "iops", ["ip"]
),
"node_disk_write_iops": Gauge(
"node_disk_write_iops", "Disk write iops", "iops", ["ip"]
),
"node_disk_usage": Gauge(
"node_disk_usage", "Total disk usage (bytes) on a ray node", "bytes", ["ip"]
),
"node_disk_free": Gauge(
"node_disk_free", "Total disk free (bytes) on a ray node", "bytes", ["ip"]
),
"node_disk_utilization_percentage": Gauge(
"node_disk_utilization_percentage",
"Total disk utilization (percentage) on a ray node",
"percentage",
["ip"],
),
"node_network_sent": Gauge(
"node_network_sent", "Total network sent", "bytes", ["ip"]
),
"node_network_received": Gauge(
"node_network_received", "Total network received", "bytes", ["ip"]
),
"node_network_send_speed": Gauge(
"node_network_send_speed", "Network send speed", "bytes/sec", ["ip"]
),
"node_network_receive_speed": Gauge(
"node_network_receive_speed", "Network receive speed", "bytes/sec", ["ip"]
),
"raylet_cpu": Gauge(
"raylet_cpu", "CPU usage of the raylet on a node.", "percentage", ["ip", "pid"]
),
"raylet_mem": Gauge(
"raylet_mem",
"RSS usage of the Raylet on the node.",
"MB",
["ip", "pid"],
),
"raylet_mem_uss": Gauge(
"raylet_mem_uss",
"USS usage of the Raylet on the node. Only available on Linux",
"MB",
["ip", "pid"],
),
"workers_cpu": Gauge(
"workers_cpu",
"Total CPU usage of all workers on a node.",
"percentage",
["ip"],
),
"workers_mem": Gauge(
"workers_mem",
"RSS usage of all workers on the node.",
"MB",
["ip"],
),
"workers_mem_uss": Gauge(
"workers_mem_uss",
"USS usage of all workers on the node. Only available on Linux",
"MB",
["ip"],
),
"cluster_active_nodes": Gauge(
"cluster_active_nodes", "Active nodes on the cluster", "count", ["node_type"]
),
"cluster_failed_nodes": Gauge(
"cluster_failed_nodes", "Failed nodes on the cluster", "count", ["node_type"]
),
"cluster_pending_nodes": Gauge(
"cluster_pending_nodes", "Pending nodes on the cluster", "count", ["node_type"]
),
}
class ReporterAgent(
dashboard_utils.DashboardAgentModule, reporter_pb2_grpc.ReporterServiceServicer
):
"""A monitor process for monitoring Ray nodes.
Attributes:
        dashboard_agent: The DashboardAgent object, which contains the global config.
"""
def __init__(self, dashboard_agent):
"""Initialize the reporter object."""
super().__init__(dashboard_agent)
if IN_KUBERNETES_POD:
# psutil does not compute this correctly when in a K8s pod.
# Use ray._private.utils instead.
cpu_count = ray._private.utils.get_num_cpus()
self._cpu_counts = (cpu_count, cpu_count)
else:
self._cpu_counts = (psutil.cpu_count(), psutil.cpu_count(logical=False))
self._ip = dashboard_agent.ip
self._is_head_node = self._ip == dashboard_agent.gcs_address.split(":")[0]
self._hostname = socket.gethostname()
self._workers = set()
self._network_stats_hist = [(0, (0.0, 0.0))] # time, (sent, recv)
self._disk_io_stats_hist = [
(0, (0.0, 0.0, 0, 0))
] # time, (bytes read, bytes written, read ops, write ops)
self._metrics_collection_disabled = dashboard_agent.metrics_collection_disabled
self._metrics_agent = None
if not self._metrics_collection_disabled:
self._metrics_agent = MetricsAgent(
"127.0.0.1" if self._ip == "127.0.0.1" else "",
dashboard_agent.metrics_export_port,
)
self._key = (
f"{reporter_consts.REPORTER_PREFIX}" f"{self._dashboard_agent.node_id}"
)
async def GetProfilingStats(self, request, context):
pid = request.pid
duration = request.duration
profiling_file_path = os.path.join(
ray._private.utils.get_ray_temp_dir(), f"{pid}_profiling.txt"
)
sudo = "sudo" if ray._private.utils.get_user() != "root" else ""
process = await asyncio.create_subprocess_shell(
f"{sudo} $(which py-spy) record "
f"-o {profiling_file_path} -p {pid} -d {duration} -f speedscope",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
)
stdout, stderr = await process.communicate()
if process.returncode != 0:
profiling_stats = ""
else:
with open(profiling_file_path, "r") as f:
profiling_stats = f.read()
return reporter_pb2.GetProfilingStatsReply(
profiling_stats=profiling_stats, std_out=stdout, std_err=stderr
)
async def ReportOCMetrics(self, request, context):
# Do nothing if metrics collection is disabled.
if self._metrics_collection_disabled:
return reporter_pb2.ReportOCMetricsReply()
# This function receives a GRPC containing OpenCensus (OC) metrics
# from a Ray process, then exposes those metrics to Prometheus.
try:
self._metrics_agent.record_metric_points_from_protobuf(request.metrics)
except Exception:
logger.error(traceback.format_exc())
return reporter_pb2.ReportOCMetricsReply()
@staticmethod
def _get_cpu_percent():
if IN_KUBERNETES_POD:
return k8s_utils.cpu_percent()
else:
return psutil.cpu_percent()
@staticmethod
def _get_gpu_usage():
global enable_gpu_usage_check
if gpustat is None or not enable_gpu_usage_check:
return []
gpu_utilizations = []
gpus = []
try:
gpus = gpustat.new_query().gpus
except Exception as e:
logger.debug(f"gpustat failed to retrieve GPU information: {e}")
# gpustat calls pynvml.nvmlInit()
# On machines without GPUs, this can run subprocesses that spew to
# stderr. Then with log_to_driver=True, we get log spew from every
# single raylet. To avoid this, disable the GPU usage check on
# certain errors.
# https://github.com/ray-project/ray/issues/14305
# https://github.com/ray-project/ray/pull/21686
if type(e).__name__ == "NVMLError_DriverNotLoaded":
enable_gpu_usage_check = False
for gpu in gpus:
            # Note: the keys in this dict contain periods, which break
            # javascript property access, so we replace "." with "_".
gpu_data = {"_".join(key.split(".")): val for key, val in gpu.entry.items()}
gpu_utilizations.append(gpu_data)
return gpu_utilizations
@staticmethod
def _get_boot_time():
if IN_KUBERNETES_POD:
# Return start time of container entrypoint
return psutil.Process(pid=1).create_time()
else:
return psutil.boot_time()
@staticmethod
def _get_network_stats():
ifaces = [
v for k, v in psutil.net_io_counters(pernic=True).items() if k[0] == "e"
]
sent = sum((iface.bytes_sent for iface in ifaces))
recv = sum((iface.bytes_recv for iface in ifaces))
return sent, recv
@staticmethod
def _get_mem_usage():
total = ray._private.utils.get_system_memory()
used = ray._private.utils.get_used_memory()
available = total - used
percent = round(used / total, 3) * 100
return total, available, percent, used
@staticmethod
def _get_disk_usage():
if IN_KUBERNETES_POD and not ENABLE_K8S_DISK_USAGE:
# If in a K8s pod, disable disk display by passing in dummy values.
return {
"/": psutil._common.sdiskusage(total=1, used=0, free=1, percent=0.0)
}
if sys.platform == "win32":
root = psutil.disk_partitions()[0].mountpoint
else:
root = os.sep
tmp = ray._private.utils.get_user_temp_dir()
return {
"/": psutil.disk_usage(root),
tmp: psutil.disk_usage(tmp),
}
@staticmethod
def _get_disk_io_stats():
stats = psutil.disk_io_counters()
return (
stats.read_bytes,
stats.write_bytes,
stats.read_count,
stats.write_count,
)
def _get_workers(self):
raylet_proc = self._get_raylet_proc()
if raylet_proc is None:
return []
else:
workers = set(raylet_proc.children())
# Remove the current process (reporter agent), which is also a child of
# the Raylet.
workers.discard(psutil.Process())
self._workers = workers
return [
w.as_dict(
attrs=[
"pid",
"create_time",
"cpu_percent",
"cpu_times",
"cmdline",
"memory_info",
"memory_full_info",
]
)
for w in self._workers
if w.status() != psutil.STATUS_ZOMBIE
]
@staticmethod
def _get_raylet_proc():
try:
curr_proc = psutil.Process()
# Here, parent is always raylet because the
# dashboard agent is a child of the raylet process.
parent = curr_proc.parent()
if parent is not None:
if parent.pid == 1:
return None
if parent.status() == psutil.STATUS_ZOMBIE:
return None
return parent
except (psutil.AccessDenied, ProcessLookupError):
pass
return None
def _get_raylet(self):
raylet_proc = self._get_raylet_proc()
if raylet_proc is None:
return {}
else:
return raylet_proc.as_dict(
attrs=[
"pid",
"create_time",
"cpu_percent",
"cpu_times",
"cmdline",
"memory_info",
"memory_full_info",
]
)
def _get_load_avg(self):
if sys.platform == "win32":
cpu_percent = psutil.cpu_percent()
load = (cpu_percent, cpu_percent, cpu_percent)
else:
load = os.getloadavg()
per_cpu_load = tuple((round(x / self._cpu_counts[0], 2) for x in load))
return load, per_cpu_load
@staticmethod
def _compute_speed_from_hist(hist):
while len(hist) > 7:
hist.pop(0)
then, prev_stats = hist[0]
now, now_stats = hist[-1]
time_delta = now - then
return tuple((y - x) / time_delta for x, y in zip(prev_stats, now_stats))
def _get_all_stats(self):
now = dashboard_utils.to_posix_time(datetime.datetime.utcnow())
network_stats = self._get_network_stats()
self._network_stats_hist.append((now, network_stats))
network_speed_stats = self._compute_speed_from_hist(self._network_stats_hist)
disk_stats = self._get_disk_io_stats()
self._disk_io_stats_hist.append((now, disk_stats))
disk_speed_stats = self._compute_speed_from_hist(self._disk_io_stats_hist)
return {
"now": now,
"hostname": self._hostname,
"ip": self._ip,
"cpu": self._get_cpu_percent(),
"cpus": self._cpu_counts,
"mem": self._get_mem_usage(),
"workers": self._get_workers(),
"raylet": self._get_raylet(),
"bootTime": self._get_boot_time(),
"loadAvg": self._get_load_avg(),
"disk": self._get_disk_usage(),
"disk_io": disk_stats,
"disk_io_speed": disk_speed_stats,
"gpus": self._get_gpu_usage(),
"network": network_stats,
"network_speed": network_speed_stats,
# Deprecated field, should be removed with frontend.
"cmdline": self._get_raylet().get("cmdline", []),
}
def _record_stats(self, stats, cluster_stats):
records_reported = []
ip = stats["ip"]
# -- Instance count of cluster --
# Only report cluster stats on head node
if "autoscaler_report" in cluster_stats and self._is_head_node:
active_nodes = cluster_stats["autoscaler_report"]["active_nodes"]
for node_type, active_node_count in active_nodes.items():
records_reported.append(
Record(
gauge=METRICS_GAUGES["cluster_active_nodes"],
value=active_node_count,
tags={"node_type": node_type},
)
)
failed_nodes = cluster_stats["autoscaler_report"]["failed_nodes"]
failed_nodes_dict = {}
for node_ip, node_type in failed_nodes:
if node_type in failed_nodes_dict:
failed_nodes_dict[node_type] += 1
else:
failed_nodes_dict[node_type] = 1
for node_type, failed_node_count in failed_nodes_dict.items():
records_reported.append(
Record(
gauge=METRICS_GAUGES["cluster_failed_nodes"],
value=failed_node_count,
tags={"node_type": node_type},
)
)
pending_nodes = cluster_stats["autoscaler_report"]["pending_nodes"]
pending_nodes_dict = {}
for node_ip, node_type, status_message in pending_nodes:
if node_type in pending_nodes_dict:
pending_nodes_dict[node_type] += 1
else:
pending_nodes_dict[node_type] = 1
for node_type, pending_node_count in pending_nodes_dict.items():
records_reported.append(
Record(
gauge=METRICS_GAUGES["cluster_pending_nodes"],
value=pending_node_count,
tags={"node_type": node_type},
)
)
# -- CPU per node --
cpu_usage = float(stats["cpu"])
cpu_record = Record(
gauge=METRICS_GAUGES["node_cpu_utilization"],
value=cpu_usage,
tags={"ip": ip},
)
cpu_count, _ = stats["cpus"]
cpu_count_record = Record(
gauge=METRICS_GAUGES["node_cpu_count"], value=cpu_count, tags={"ip": ip}
)
# -- Mem per node --
mem_total, mem_available, _, mem_used = stats["mem"]
mem_used_record = Record(
gauge=METRICS_GAUGES["node_mem_used"], value=mem_used, tags={"ip": ip}
)
mem_available_record = Record(
gauge=METRICS_GAUGES["node_mem_available"],
value=mem_available,
tags={"ip": ip},
)
mem_total_record = Record(
gauge=METRICS_GAUGES["node_mem_total"], value=mem_total, tags={"ip": ip}
)
# -- GPU per node --
gpus = stats["gpus"]
gpus_available = len(gpus)
if gpus_available:
gpus_utilization, gram_used, gram_total = 0, 0, 0
for gpu in gpus:
                # Consumer GPUs may not report utilization.
if gpu["utilization_gpu"] is not None:
gpus_utilization += gpu["utilization_gpu"]
gram_used += gpu["memory_used"]
gram_total += gpu["memory_total"]
gram_available = gram_total - gram_used
gpus_available_record = Record(
gauge=METRICS_GAUGES["node_gpus_available"],
value=gpus_available,
tags={"ip": ip},
)
gpus_utilization_record = Record(
gauge=METRICS_GAUGES["node_gpus_utilization"],
value=gpus_utilization,
tags={"ip": ip},
)
gram_used_record = Record(
gauge=METRICS_GAUGES["node_gram_used"], value=gram_used, tags={"ip": ip}
)
gram_available_record = Record(
gauge=METRICS_GAUGES["node_gram_available"],
value=gram_available,
tags={"ip": ip},
)
records_reported.extend(
[
gpus_available_record,
gpus_utilization_record,
gram_used_record,
gram_available_record,
]
)
# -- Disk per node --
disk_io_stats = stats["disk_io"]
disk_read_record = Record(
gauge=METRICS_GAUGES["node_disk_io_read"],
value=disk_io_stats[0],
tags={"ip": ip},
)
disk_write_record = Record(
gauge=METRICS_GAUGES["node_disk_io_write"],
value=disk_io_stats[1],
tags={"ip": ip},
)
disk_read_count_record = Record(
gauge=METRICS_GAUGES["node_disk_io_read_count"],
value=disk_io_stats[2],
tags={"ip": ip},
)
disk_write_count_record = Record(
gauge=METRICS_GAUGES["node_disk_io_write_count"],
value=disk_io_stats[3],
tags={"ip": ip},
)
disk_io_speed_stats = stats["disk_io_speed"]
disk_read_speed_record = Record(
gauge=METRICS_GAUGES["node_disk_io_read_speed"],
value=disk_io_speed_stats[0],
tags={"ip": ip},
)
disk_write_speed_record = Record(
gauge=METRICS_GAUGES["node_disk_io_write_speed"],
value=disk_io_speed_stats[1],
tags={"ip": ip},
)
disk_read_iops_record = Record(
gauge=METRICS_GAUGES["node_disk_read_iops"],
value=disk_io_speed_stats[2],
tags={"ip": ip},
)
disk_write_iops_record = Record(
gauge=METRICS_GAUGES["node_disk_write_iops"],
value=disk_io_speed_stats[3],
tags={"ip": ip},
)
used, free = 0, 0
for entry in stats["disk"].values():
used += entry.used
free += entry.free
disk_utilization = float(used / (used + free)) * 100
disk_usage_record = Record(
gauge=METRICS_GAUGES["node_disk_usage"], value=used, tags={"ip": ip}
)
disk_free_record = Record(
gauge=METRICS_GAUGES["node_disk_free"], value=free, tags={"ip": ip}
)
disk_utilization_percentage_record = Record(
gauge=METRICS_GAUGES["node_disk_utilization_percentage"],
value=disk_utilization,
tags={"ip": ip},
)
# -- Network speed (send/receive) stats per node --
network_stats = stats["network"]
network_sent_record = Record(
gauge=METRICS_GAUGES["node_network_sent"],
value=network_stats[0],
tags={"ip": ip},
)
network_received_record = Record(
gauge=METRICS_GAUGES["node_network_received"],
value=network_stats[1],
tags={"ip": ip},
)
# -- Network speed (send/receive) per node --
network_speed_stats = stats["network_speed"]
network_send_speed_record = Record(
gauge=METRICS_GAUGES["node_network_send_speed"],
value=network_speed_stats[0],
tags={"ip": ip},
)
network_receive_speed_record = Record(
gauge=METRICS_GAUGES["node_network_receive_speed"],
value=network_speed_stats[1],
tags={"ip": ip},
)
raylet_stats = stats["raylet"]
if raylet_stats:
raylet_pid = str(raylet_stats["pid"])
# -- raylet CPU --
raylet_cpu_usage = float(raylet_stats["cpu_percent"]) * 100
records_reported.append(
Record(
gauge=METRICS_GAUGES["raylet_cpu"],
value=raylet_cpu_usage,
tags={"ip": ip, "pid": raylet_pid},
)
)
# -- raylet mem --
raylet_rss = float(raylet_stats["memory_info"].rss) / 1.0e6
records_reported.append(
Record(
gauge=METRICS_GAUGES["raylet_mem"],
value=raylet_rss,
tags={"ip": ip, "pid": raylet_pid},
)
)
raylet_mem_full_info = raylet_stats.get("memory_full_info")
if raylet_mem_full_info is not None:
raylet_uss = float(raylet_mem_full_info.uss) / 1.0e6
records_reported.append(
Record(
gauge=METRICS_GAUGES["raylet_mem_uss"],
value=raylet_uss,
tags={"ip": ip, "pid": raylet_pid},
)
)
workers_stats = stats["workers"]
if workers_stats:
total_workers_cpu_percentage = 0.0
total_workers_rss = 0.0
total_workers_uss = 0.0
for worker in workers_stats:
total_workers_cpu_percentage += float(worker["cpu_percent"]) * 100.0
total_workers_rss += float(worker["memory_info"].rss) / 1.0e6
worker_mem_full_info = worker.get("memory_full_info")
if worker_mem_full_info is not None:
total_workers_uss += float(worker_mem_full_info.uss) / 1.0e6
records_reported.append(
Record(
gauge=METRICS_GAUGES["workers_cpu"],
value=total_workers_cpu_percentage,
tags={"ip": ip},
)
)
records_reported.append(
Record(
gauge=METRICS_GAUGES["workers_mem"],
value=total_workers_rss,
tags={"ip": ip},
)
)
if total_workers_uss > 0.0:
records_reported.append(
Record(
gauge=METRICS_GAUGES["workers_mem_uss"],
value=total_workers_uss,
tags={"ip": ip},
)
)
records_reported.extend(
[
cpu_record,
cpu_count_record,
mem_used_record,
mem_available_record,
mem_total_record,
disk_read_record,
disk_write_record,
disk_read_count_record,
disk_write_count_record,
disk_read_speed_record,
disk_write_speed_record,
disk_read_iops_record,
disk_write_iops_record,
disk_usage_record,
disk_free_record,
disk_utilization_percentage_record,
network_sent_record,
network_received_record,
network_send_speed_record,
network_receive_speed_record,
]
)
return records_reported
async def _perform_iteration(self, publisher):
"""Get any changes to the log files and push updates to kv."""
while True:
try:
formatted_status_string = internal_kv._internal_kv_get(
DEBUG_AUTOSCALING_STATUS
)
stats = self._get_all_stats()
# Report stats only when metrics collection is enabled.
if not self._metrics_collection_disabled:
cluster_stats = (
json.loads(formatted_status_string.decode())
if formatted_status_string
else {}
)
records_reported = self._record_stats(stats, cluster_stats)
self._metrics_agent.record_reporter_stats(records_reported)
await publisher.publish_resource_usage(self._key, jsonify_asdict(stats))
except Exception:
logger.exception("Error publishing node physical stats.")
await asyncio.sleep(reporter_consts.REPORTER_UPDATE_INTERVAL_MS / 1000)
async def run(self, server):
if server:
reporter_pb2_grpc.add_ReporterServiceServicer_to_server(self, server)
await self._perform_iteration(self._dashboard_agent.publisher)
@staticmethod
def is_minimal_module():
return False
```
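The disk and network `*_speed` gauges above are not sampled directly; the agent appends cumulative counters to a short history and divides the deltas by the elapsed time. A standalone sketch of that pattern (the sample values and names are illustrative, not from a real node):
```python
# Minimal sketch of the bounded-history rate computation used by
# `_compute_speed_from_hist` above.
def compute_speed_from_hist(hist):
    # Keep only the most recent samples (the agent keeps at most 7).
    while len(hist) > 7:
        hist.pop(0)
    then, prev_stats = hist[0]
    now, now_stats = hist[-1]
    time_delta = now - then
    return tuple((y - x) / time_delta for x, y in zip(prev_stats, now_stats))
# Two cumulative (bytes_sent, bytes_recv) samples taken one second apart.
hist = [(100.0, (1_000.0, 2_000.0)), (101.0, (51_000.0, 22_000.0))]
send_speed, recv_speed = compute_speed_from_hist(hist)
print(send_speed, recv_speed)  # 50000.0 20000.0 (bytes/sec)
```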
#### File: modules/state/state_head.py
```python
import logging
import aiohttp.web
import dataclasses
from typing import Callable
from ray.dashboard.datacenter import DataSource
from ray.dashboard.utils import Change
import ray.dashboard.utils as dashboard_utils
import ray.dashboard.optional_utils as dashboard_optional_utils
from ray.dashboard.optional_utils import rest_response
from ray.dashboard.modules.log.log_manager import (
LogsManager,
)
from ray.dashboard.state_aggregator import StateAPIManager
from ray.experimental.state.common import (
ListApiOptions,
GetLogOptions,
DEFAULT_RPC_TIMEOUT,
DEFAULT_LIMIT,
)
from ray.experimental.state.exception import DataSourceUnavailable
from ray.experimental.state.state_manager import StateDataSourceClient
logger = logging.getLogger(__name__)
routes = dashboard_optional_utils.ClassMethodRouteTable
class StateHead(dashboard_utils.DashboardHeadModule):
"""Module to obtain state information from the Ray cluster.
It is responsible for state observability APIs such as
ray.list_actors(), ray.get_actor(), ray.summary_actors().
"""
def __init__(self, dashboard_head):
super().__init__(dashboard_head)
self._state_api_data_source_client = None
self._state_api = None
self._log_api = None
DataSource.nodes.signal.append(self._update_raylet_stubs)
DataSource.agents.signal.append(self._update_agent_stubs)
def _options_from_req(self, req) -> ListApiOptions:
"""Obtain `ListApiOptions` from the aiohttp request."""
limit = int(req.query.get("limit"))
timeout = int(req.query.get("timeout"))
filter_keys = req.query.getall("filter_keys", [])
filter_values = req.query.getall("filter_values", [])
assert len(filter_keys) == len(filter_values)
filters = []
for key, val in zip(filter_keys, filter_values):
filters.append((key, val))
return ListApiOptions(limit=limit, timeout=timeout, filters=filters)
def _reply(self, success: bool, error_message: str, result: dict, **kwargs):
"""Reply to the client."""
return rest_response(
success=success,
message=error_message,
result=result,
convert_google_style=False,
**kwargs,
)
async def _update_raylet_stubs(self, change: Change):
"""Callback that's called when a new raylet is added to Datasource.
        Datasource is an api-server-specific module that's updated whenever
api server adds/removes a new node.
Args:
change: The change object. Whenever a new node is added
or removed, this callback is invoked.
When new node is added: information is in `change.new`.
When a node is removed: information is in `change.old`.
When a node id is overwritten by a new node with the same node id:
`change.old` contains the old node info, and
`change.new` contains the new node info.
"""
if change.old:
# When a node is deleted from the DataSource or it is overwritten.
node_id, node_info = change.old
self._state_api_data_source_client.unregister_raylet_client(node_id)
if change.new:
# When a new node information is written to DataSource.
node_id, node_info = change.new
self._state_api_data_source_client.register_raylet_client(
node_id,
node_info["nodeManagerAddress"],
int(node_info["nodeManagerPort"]),
)
async def _update_agent_stubs(self, change: Change):
"""Callback that's called when a new agent is added to Datasource."""
if change.old:
node_id, _ = change.old
self._state_api_data_source_client.unregister_agent_client(node_id)
if change.new:
# When a new node information is written to DataSource.
node_id, ports = change.new
ip = DataSource.node_id_to_ip[node_id]
self._state_api_data_source_client.register_agent_client(
node_id,
ip,
int(ports[1]),
)
async def _handle_list_api(
self, list_api_fn: Callable[[ListApiOptions], dict], req: aiohttp.web.Request
):
try:
result = await list_api_fn(option=self._options_from_req(req))
return self._reply(
success=True,
error_message="",
result=result.result,
partial_failure_warning=result.partial_failure_warning,
)
except DataSourceUnavailable as e:
return self._reply(success=False, error_message=str(e), result=None)
@routes.get("/api/v0/actors")
async def list_actors(self, req: aiohttp.web.Request) -> aiohttp.web.Response:
return await self._handle_list_api(self._state_api.list_actors, req)
@routes.get("/api/v0/jobs")
async def list_jobs(self, req: aiohttp.web.Request) -> aiohttp.web.Response:
try:
result = self._state_api.list_jobs(option=self._options_from_req(req))
return self._reply(
success=True,
error_message="",
result={
job_id: dataclasses.asdict(job_info)
for job_id, job_info in result.result.items()
},
partial_failure_warning=result.partial_failure_warning,
)
except DataSourceUnavailable as e:
return self._reply(success=False, error_message=str(e), result=None)
@routes.get("/api/v0/nodes")
async def list_nodes(self, req: aiohttp.web.Request) -> aiohttp.web.Response:
return await self._handle_list_api(self._state_api.list_nodes, req)
@routes.get("/api/v0/placement_groups")
async def list_placement_groups(
self, req: aiohttp.web.Request
) -> aiohttp.web.Response:
return await self._handle_list_api(self._state_api.list_placement_groups, req)
@routes.get("/api/v0/workers")
async def list_workers(self, req: aiohttp.web.Request) -> aiohttp.web.Response:
return await self._handle_list_api(self._state_api.list_workers, req)
@routes.get("/api/v0/tasks")
async def list_tasks(self, req: aiohttp.web.Request) -> aiohttp.web.Response:
return await self._handle_list_api(self._state_api.list_tasks, req)
@routes.get("/api/v0/objects")
async def list_objects(self, req: aiohttp.web.Request) -> aiohttp.web.Response:
return await self._handle_list_api(self._state_api.list_objects, req)
@routes.get("/api/v0/runtime_envs")
async def list_runtime_envs(self, req: aiohttp.web.Request) -> aiohttp.web.Response:
return await self._handle_list_api(self._state_api.list_runtime_envs, req)
@routes.get("/api/v0/logs")
async def list_logs(self, req: aiohttp.web.Request) -> aiohttp.web.Response:
"""Return a list of log files on a given node id.
Unlike other list APIs that display all existing resources in the cluster,
        this API always requires a node id or node ip to be specified.
"""
glob_filter = req.query.get("glob", "*")
node_id = req.query.get("node_id", None)
node_ip = req.query.get("node_ip", None)
timeout = req.query.get("timeout", DEFAULT_RPC_TIMEOUT)
# TODO(sang): Do input validation from the middleware instead.
if not node_id and not node_ip:
return self._reply(
success=False,
error_message=(
"Both node id and node ip are not provided. "
"Please provide at least one of them."
),
result=None,
)
node_id = node_id or self._log_api.ip_to_node_id(node_ip)
if not node_id:
return self._reply(
success=False,
error_message=(
f"Cannot find matching node_id for a given node ip {node_ip}"
),
result=None,
)
try:
result = await self._log_api.list_logs(
node_id, timeout, glob_filter=glob_filter
)
except DataSourceUnavailable as e:
return self._reply(success=False, error_message=str(e), result=None)
return self._reply(success=True, error_message="", result=result)
@routes.get("/api/v0/logs/{media_type}")
async def get_logs(self, req: aiohttp.web.Request):
# TODO(sang): We need a better error handling for streaming
# when we refactor the server framework.
options = GetLogOptions(
timeout=int(req.query.get("timeout", DEFAULT_RPC_TIMEOUT)),
node_id=req.query.get("node_id", None),
node_ip=req.query.get("node_ip", None),
media_type=req.match_info.get("media_type", "file"),
filename=req.query.get("filename", None),
actor_id=req.query.get("actor_id", None),
task_id=req.query.get("task_id", None),
pid=req.query.get("pid", None),
lines=req.query.get("lines", DEFAULT_LIMIT),
interval=req.query.get("interval", None),
)
response = aiohttp.web.StreamResponse()
response.content_type = "text/plain"
await response.prepare(req)
# NOTE: The first byte indicates the success / failure of individual
# stream. If the first byte is b"1", it means the stream was successful.
# If it is b"0", it means it is failed.
try:
async for logs_in_bytes in self._log_api.stream_logs(options):
logs_to_stream = bytearray(b"1")
logs_to_stream.extend(logs_in_bytes)
await response.write(bytes(logs_to_stream))
await response.write_eof()
return response
except Exception as e:
logger.exception(e)
error_msg = bytearray(b"0")
error_msg.extend(
f"Closing HTTP stream due to internal server error.\n{e}".encode()
)
await response.write(bytes(error_msg))
await response.write_eof()
return response
async def run(self, server):
gcs_channel = self._dashboard_head.aiogrpc_gcs_channel
self._state_api_data_source_client = StateDataSourceClient(gcs_channel)
self._state_api = StateAPIManager(self._state_api_data_source_client)
self._log_api = LogsManager(self._state_api_data_source_client)
@staticmethod
def is_minimal_module():
return False
```
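Since the routes above are plain aiohttp GET handlers, they can be exercised with any HTTP client. A hedged sketch using `requests`; the dashboard address and port (127.0.0.1:8265) are assumptions for a local cluster, and note that `_options_from_req` expects both `limit` and `timeout` to be present:
```python
# Hedged sketch: query the state API exposed by StateHead above.
# The host/port are assumptions for a default local Ray dashboard.
import requests
resp = requests.get(
    "http://127.0.0.1:8265/api/v0/actors",
    params={
        "limit": 100,            # required by _options_from_req
        "timeout": 30,           # forwarded to the data source client
        "filter_keys": "state",  # zipped with filter_values into (key, value) filters
        "filter_values": "ALIVE",
    },
)
print(resp.json())
```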
#### File: ray-air/doc_code/pytorch_starter.py
```python
from torchvision import datasets
from torchvision.transforms import ToTensor
# Download training data from open datasets.
training_data = datasets.FashionMNIST(
root="~/data",
train=True,
download=True,
transform=ToTensor(),
)
# Download test data from open datasets.
test_data = datasets.FashionMNIST(
root="~/data",
train=False,
download=True,
transform=ToTensor(),
)
# __air_pytorch_preprocess_end__
# __air_pytorch_train_start__
import torch
from torch import nn
from torch.utils.data import DataLoader
import ray.train as train
from ray.train.torch import TorchTrainer
# Define model
class NeuralNetwork(nn.Module):
def __init__(self):
super(NeuralNetwork, self).__init__()
self.flatten = nn.Flatten()
self.linear_relu_stack = nn.Sequential(
nn.Linear(28 * 28, 512),
nn.ReLU(),
nn.Linear(512, 512),
nn.ReLU(),
nn.Linear(512, 10),
nn.ReLU(),
)
def forward(self, x):
x = self.flatten(x)
logits = self.linear_relu_stack(x)
return logits
def train_epoch(dataloader, model, loss_fn, optimizer):
size = len(dataloader.dataset) // train.world_size()
model.train()
for batch, (X, y) in enumerate(dataloader):
# Compute prediction error
pred = model(X)
loss = loss_fn(pred, y)
# Backpropagation
optimizer.zero_grad()
loss.backward()
optimizer.step()
if batch % 100 == 0:
loss, current = loss.item(), batch * len(X)
print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")
def validate_epoch(dataloader, model, loss_fn):
size = len(dataloader.dataset) // train.world_size()
num_batches = len(dataloader)
model.eval()
test_loss, correct = 0, 0
with torch.no_grad():
for X, y in dataloader:
pred = model(X)
test_loss += loss_fn(pred, y).item()
correct += (pred.argmax(1) == y).type(torch.float).sum().item()
test_loss /= num_batches
correct /= size
print(
f"Test Error: \n "
f"Accuracy: {(100 * correct):>0.1f}%, "
f"Avg loss: {test_loss:>8f} \n"
)
return test_loss
def train_func(config):
batch_size = config["batch_size"]
lr = config["lr"]
epochs = config["epochs"]
worker_batch_size = batch_size // train.world_size()
# Create data loaders.
train_dataloader = DataLoader(training_data, batch_size=worker_batch_size)
test_dataloader = DataLoader(test_data, batch_size=worker_batch_size)
train_dataloader = train.torch.prepare_data_loader(train_dataloader)
test_dataloader = train.torch.prepare_data_loader(test_dataloader)
# Create model.
model = NeuralNetwork()
model = train.torch.prepare_model(model)
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=lr)
for _ in range(epochs):
train_epoch(train_dataloader, model, loss_fn, optimizer)
loss = validate_epoch(test_dataloader, model, loss_fn)
train.report(loss=loss)
num_workers = 2
use_gpu = False
trainer = TorchTrainer(
train_loop_per_worker=train_func,
train_loop_config={"lr": 1e-3, "batch_size": 64, "epochs": 4},
scaling_config={"num_workers": num_workers, "use_gpu": use_gpu},
)
result = trainer.fit()
print(f"Last result: {result.metrics}")
# __air_pytorch_train_end__
# # __air_pytorch_batchpred_start__
# import random
# from ray.train.batch_predictor import BatchPredictor
# from ray.train.torch import TorchPredictor
# batch_predictor = BatchPredictor.from_checkpoint(result.checkpoint, TorchPredictor)
# items = [{"x": random.uniform(0, 1) for _ in range(10)}]
# prediction_dataset = ray.data.from_items(items)
# predictions = batch_predictor.predict(prediction_dataset, dtype=torch.float)
# # __air_pytorch_batchpred_end__
```
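For reference, `train_func` above splits the global batch across workers and scales the reported dataset size the same way; with the config passed to `TorchTrainer` (an assumption matching the values shown), the arithmetic is:
```python
# Per-worker splits implied by the config above (batch_size=64, num_workers=2).
global_batch_size = 64
num_workers = 2                                    # train.world_size() inside train_func
worker_batch_size = global_batch_size // num_workers
print(worker_batch_size)                           # 32 samples per worker per step
# train_epoch/validate_epoch divide the dataset length the same way,
# so the printed progress counters refer to each worker's shard.
```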
#### File: serve/doc_code/deployment_graph_same_class_different_args.py
```python
import ray
from ray import serve
from ray.serve.deployment_graph import InputNode
ray.init()
@serve.deployment
class Model:
def __init__(self, weight):
self.weight = weight
def forward(self, input):
return input + self.weight
# 3 nodes chain in a line
num_nodes = 3
nodes = [Model.bind(w) for w in range(num_nodes)]
outputs = [None] * num_nodes
with InputNode() as dag_input:
for i in range(num_nodes):
if i == 0:
# first node
outputs[i] = nodes[i].forward.bind(dag_input)
else:
outputs[i] = nodes[i].forward.bind(outputs[i - 1])
print(ray.get(outputs[-1].execute(0)))
```
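The chained graph above simply adds the three weights to the input in order, so `execute(0)` returns 3. A plain-Python sketch of the same dataflow (no Serve involved), just to make the expected output explicit:
```python
# Plain-Python equivalent of the 3-node chain built above.
weights = [0, 1, 2]          # the weights bound to the three Model deployments
value = 0                    # the dag_input passed to execute(0)
for w in weights:
    value += w               # each Model.forward adds its weight
print(value)                 # 3 -- matches the deployment graph's output
```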
#### File: ray/air/checkpoint.py
```python
import contextlib
import io
import logging
import os
import platform
import shutil
import tarfile
import tempfile
import traceback
from pathlib import Path
from typing import Any, Dict, Iterator, Optional, Tuple, Union
import ray
from ray import cloudpickle as pickle
from ray.air._internal.remote_storage import (
download_from_uri,
fs_hint,
is_non_local_path_uri,
upload_to_uri,
)
from ray.util.annotations import DeveloperAPI, PublicAPI
from ray.util.ml_utils.filelock import TempFileLock
_DICT_CHECKPOINT_FILE_NAME = "dict_checkpoint.pkl"
_METADATA_CHECKPOINT_SUFFIX = ".meta.pkl"
_FS_CHECKPOINT_KEY = "fs_checkpoint"
_BYTES_DATA_KEY = "bytes_data"
_CHECKPOINT_DIR_PREFIX = "checkpoint_tmp_"
logger = logging.getLogger(__name__)
@PublicAPI(stability="alpha")
class Checkpoint:
"""Ray ML Checkpoint.
This implementation provides methods to translate between
different checkpoint storage locations: Local storage, external storage
(e.g. cloud storage), and data dict representations.
    The constructor is a private API; use the ``from_`` methods instead to
    create checkpoint objects (e.g. ``Checkpoint.from_directory()``).
When converting between different checkpoint formats, it is guaranteed
that a full round trip of conversions (e.g. directory --> dict -->
obj ref --> directory) will recover the original checkpoint data.
There are no guarantees made about compatibility of intermediate
representations.
New data can be added to Checkpoint during conversion. Consider the
following conversion: directory --> dict (adding dict["foo"] = "bar")
--> directory --> dict (expect to see dict["foo"] = "bar").
Examples:
Example for an arbitrary data checkpoint:
.. code-block:: python
from ray.air.checkpoint import Checkpoint
# Create checkpoint data dict
checkpoint_data = {"data": 123}
# Create checkpoint object from data
checkpoint = Checkpoint.from_dict(checkpoint_data)
# Save checkpoint to temporary location
path = checkpoint.to_directory()
# This path can then be passed around, e.g. to a different function
# At some other location, recover Checkpoint object from path
checkpoint = Checkpoint.from_directory(path)
# Convert into dictionary again
recovered_data = checkpoint.to_dict()
# It is guaranteed that the original data has been recovered
assert recovered_data == checkpoint_data
Example using MLflow for saving and loading a classifier:
.. code-block:: python
from ray.air.checkpoint import Checkpoint
from sklearn.ensemble import RandomForestClassifier
import mlflow.sklearn
# Create an sklearn classifier
clf = RandomForestClassifier(max_depth=7, random_state=0)
# ... e.g. train model with clf.fit()
# Save model using MLflow
mlflow.sklearn.save_model(clf, "model_directory")
# Create checkpoint object from path
checkpoint = Checkpoint.from_directory("model_directory")
# Convert into dictionary
checkpoint_dict = checkpoint.to_dict()
# This dict can then be passed around, e.g. to a different function
# At some other location, recover checkpoint object from dict
checkpoint = Checkpoint.from_dict(checkpoint_dict)
# Convert into a directory again
checkpoint.to_directory("other_directory")
# We can now use MLflow to re-load the model
clf = mlflow.sklearn.load_model("other_directory")
# It is guaranteed that the original data was recovered
assert isinstance(clf, RandomForestClassifier)
Checkpoints can be pickled and sent to remote processes.
Please note that checkpoints pointing to local directories will be
pickled as data representations, so the full checkpoint data will be
contained in the checkpoint object. If you want to avoid this,
consider passing only the checkpoint directory to the remote task
and re-construct your checkpoint object in that function. Note that
this will only work if the "remote" task is scheduled on the
same node or a node that also has access to the local data path (e.g.
on a shared file system like NFS).
Checkpoints pointing to object store references will keep the
    object reference intact - this means that these checkpoints cannot
be properly deserialized on other Ray clusters or outside a Ray
cluster. If you need persistence across clusters, use the ``to_uri()``
or ``to_directory()`` methods to persist your checkpoints to disk.
"""
@DeveloperAPI
def __init__(
self,
local_path: Optional[str] = None,
data_dict: Optional[dict] = None,
uri: Optional[str] = None,
obj_ref: Optional[ray.ObjectRef] = None,
):
# First, resolve file:// URIs to local paths
if uri:
local_path = _get_local_path(uri)
if local_path:
uri = None
# Only one data type can be set at any time
if local_path:
assert not data_dict and not uri and not obj_ref
if not isinstance(local_path, (str, os.PathLike)) or not os.path.exists(
local_path
):
raise RuntimeError(
f"Cannot create checkpoint from path as it does "
f"not exist on local node: {local_path}"
)
elif not os.path.isdir(local_path):
raise RuntimeError(
f"Cannot create checkpoint from path as it does "
f"not point to a directory: {local_path}. If your checkpoint "
f"is a single file, consider passing the enclosing directory "
f"instead."
)
elif data_dict:
assert not local_path and not uri and not obj_ref
if not isinstance(data_dict, dict):
raise RuntimeError(
f"Cannot create checkpoint from dict as no "
f"dict was passed: {data_dict}"
)
elif obj_ref:
assert not local_path and not data_dict and not uri
if not isinstance(obj_ref, ray.ObjectRef):
raise RuntimeError(
f"Cannot create checkpoint from object ref as no "
f"object ref was passed: {obj_ref}"
)
elif uri:
assert not local_path and not data_dict and not obj_ref
resolved = _get_external_path(uri)
if not resolved:
raise RuntimeError(
f"Cannot create checkpoint from URI as it is not "
f"supported: {resolved}"
)
uri = resolved
else:
raise ValueError("Cannot create checkpoint without data.")
self._local_path: Optional[str] = local_path
self._data_dict: Optional[Dict[str, Any]] = data_dict
self._uri: Optional[str] = uri
self._obj_ref: Optional[ray.ObjectRef] = obj_ref
@classmethod
def from_bytes(cls, data: bytes) -> "Checkpoint":
"""Create a checkpoint from the given byte string.
Args:
data: Data object containing pickled checkpoint data.
Returns:
Checkpoint: checkpoint object.
"""
bytes_data = pickle.loads(data)
if isinstance(bytes_data, dict):
data_dict = bytes_data
else:
data_dict = {_BYTES_DATA_KEY: bytes_data}
return cls.from_dict(data_dict)
def to_bytes(self) -> bytes:
"""Return Checkpoint serialized as bytes object.
Returns:
bytes: Bytes object containing checkpoint data.
"""
# Todo: Add support for stream in the future (to_bytes(file_like))
data_dict = self.to_dict()
if "bytes_data" in data_dict:
return data_dict["bytes_data"]
return pickle.dumps(self.to_dict())
@classmethod
def from_dict(cls, data: dict) -> "Checkpoint":
"""Create checkpoint object from dictionary.
Args:
data: Dictionary containing checkpoint data.
Returns:
Checkpoint: checkpoint object.
"""
return cls(data_dict=data)
def to_dict(self) -> dict:
"""Return checkpoint data as dictionary.
Returns:
dict: Dictionary containing checkpoint data.
"""
if self._data_dict:
# If the checkpoint data is already a dict, return
return self._data_dict
elif self._obj_ref:
# If the checkpoint data is an object reference, resolve
return ray.get(self._obj_ref)
elif self._local_path or self._uri:
# Else, checkpoint is either on FS or external storage
with self.as_directory() as local_path:
checkpoint_data_path = os.path.join(
local_path, _DICT_CHECKPOINT_FILE_NAME
)
if os.path.exists(checkpoint_data_path):
# If we are restoring a dict checkpoint, load the dict
# from the checkpoint file.
with open(checkpoint_data_path, "rb") as f:
checkpoint_data = pickle.load(f)
else:
files = [
f
for f in os.listdir(local_path)
if os.path.isfile(os.path.join(local_path, f))
and f.endswith(_METADATA_CHECKPOINT_SUFFIX)
]
metadata = {}
for file in files:
with open(os.path.join(local_path, file), "rb") as f:
key = file[: -len(_METADATA_CHECKPOINT_SUFFIX)]
value = pickle.load(f)
metadata[key] = value
data = _pack(local_path)
checkpoint_data = {
_FS_CHECKPOINT_KEY: data,
}
checkpoint_data.update(metadata)
return checkpoint_data
else:
raise RuntimeError(f"Empty data for checkpoint {self}")
@classmethod
def from_object_ref(cls, obj_ref: ray.ObjectRef) -> "Checkpoint":
"""Create checkpoint object from object reference.
Args:
obj_ref: ObjectRef pointing to checkpoint data.
Returns:
Checkpoint: checkpoint object.
"""
return cls(obj_ref=obj_ref)
def to_object_ref(self) -> ray.ObjectRef:
"""Return checkpoint data as object reference.
Returns:
ray.ObjectRef: ObjectRef pointing to checkpoint data.
"""
if self._obj_ref:
return self._obj_ref
else:
return ray.put(self.to_dict())
@classmethod
def from_directory(cls, path: str) -> "Checkpoint":
"""Create checkpoint object from directory.
Args:
path: Directory containing checkpoint data. The caller promises to
not delete the directory (gifts ownership of the directory to this
Checkpoint).
Returns:
Checkpoint: checkpoint object.
"""
return cls(local_path=path)
def _get_temporary_checkpoint_dir(self) -> str:
"""Return the name for the temporary checkpoint dir."""
if self._obj_ref:
tmp_dir_path = tempfile.gettempdir()
checkpoint_dir_name = _CHECKPOINT_DIR_PREFIX + self._obj_ref.hex()
if platform.system() == "Windows":
# Max path on Windows is 260 chars, -1 for joining \
# Also leave a little for the del lock
del_lock_name = _get_del_lock_path("")
checkpoint_dir_name = (
_CHECKPOINT_DIR_PREFIX
+ self._obj_ref.hex()[
-259
+ len(_CHECKPOINT_DIR_PREFIX)
+ len(tmp_dir_path)
+ len(del_lock_name) :
]
)
if not checkpoint_dir_name.startswith(_CHECKPOINT_DIR_PREFIX):
raise RuntimeError(
"Couldn't create checkpoint directory due to length "
"constraints. Try specifing a shorter checkpoint path."
)
return os.path.join(tmp_dir_path, checkpoint_dir_name)
return _temporary_checkpoint_dir()
def _to_directory(self, path: str) -> None:
if self._data_dict or self._obj_ref:
# This is a object ref or dict
data_dict = self.to_dict()
if _FS_CHECKPOINT_KEY in data_dict:
for key in data_dict.keys():
if key == _FS_CHECKPOINT_KEY:
continue
metadata_path = os.path.join(
path, f"{key}{_METADATA_CHECKPOINT_SUFFIX}"
)
with open(metadata_path, "wb") as f:
pickle.dump(data_dict[key], f)
# This used to be a true fs checkpoint, so restore
_unpack(data_dict[_FS_CHECKPOINT_KEY], path)
else:
# This is a dict checkpoint. Dump data into checkpoint.pkl
checkpoint_data_path = os.path.join(path, _DICT_CHECKPOINT_FILE_NAME)
with open(checkpoint_data_path, "wb") as f:
pickle.dump(data_dict, f)
else:
# This is either a local fs, remote node fs, or external fs
local_path = self._local_path
external_path = _get_external_path(self._uri)
if local_path:
if local_path != path:
# If this exists on the local path, just copy over
if path and os.path.exists(path):
shutil.rmtree(path)
shutil.copytree(local_path, path)
elif external_path:
# If this exists on external storage (e.g. cloud), download
download_from_uri(uri=external_path, local_path=path, filelock=False)
else:
raise RuntimeError(
f"No valid location found for checkpoint {self}: {self._uri}"
)
def to_directory(self, path: Optional[str] = None) -> str:
"""Write checkpoint data to directory.
Args:
path: Target directory to restore data in. If not specified,
will create a temporary directory.
Returns:
str: Directory containing checkpoint data.
"""
user_provided_path = path is not None
path = path if user_provided_path else self._get_temporary_checkpoint_dir()
path = os.path.normpath(path)
_make_dir(path, acquire_del_lock=not user_provided_path)
try:
# Timeout 0 means there will be only one attempt to acquire
            # the file lock. If it cannot be acquired, a TimeoutError
# will be thrown.
with TempFileLock(f"{path}.lock", timeout=0):
self._to_directory(path)
except TimeoutError:
# if the directory is already locked, then wait but do not do anything.
with TempFileLock(f"{path}.lock", timeout=-1):
pass
if not os.path.exists(path):
raise RuntimeError(
f"Checkpoint directory {path} does not exist, "
"even though it should have been created by "
"another process. Please raise an issue on GitHub: "
"https://github.com/ray-project/ray/issues"
)
return path
@contextlib.contextmanager
def as_directory(self) -> Iterator[str]:
"""Return checkpoint directory path in a context.
This function makes checkpoint data available as a directory while avoiding
unnecessary copies and left-over temporary data.
If the checkpoint is already a directory checkpoint, it will return
the existing path. If it is not, it will create a temporary directory,
which will be deleted after the context is exited.
If the checkpoint has been created from an object reference, the directory name
will be constant and equal to the object reference ID. This allows for multiple
processes to use the same files for improved performance. The directory
will be deleted after exiting the context only if no other processes are using
it.
In any other case, a new temporary directory will be created with each call
to ``as_directory``.
Users should treat the returned checkpoint directory as read-only and avoid
changing any data within it, as it might get deleted when exiting the context.
Example:
with checkpoint.as_directory() as checkpoint_dir:
# Do some read-only processing of files within checkpoint_dir
pass
# At this point, if a temporary directory was created, it will have
# been deleted.
"""
if self._local_path:
yield self._local_path
else:
temp_dir = self.to_directory()
del_lock_path = _get_del_lock_path(temp_dir)
yield temp_dir
# Cleanup
try:
os.remove(del_lock_path)
except Exception:
logger.warning(
f"Could not remove {del_lock_path} deletion file lock. "
f"Traceback:\n{traceback.format_exc()}"
)
# In the edge case (process crash before del lock file is removed),
# we do not remove the directory at all.
# Since it's in /tmp, this is not that big of a deal.
# check if any lock files are remaining
temp_dir_base_name = Path(temp_dir).name
if not list(
Path(temp_dir).parent.glob(_get_del_lock_path(temp_dir_base_name, "*"))
):
try:
# Timeout 0 means there will be only one attempt to acquire
                    # the file lock. If it cannot be acquired, a TimeoutError
# will be thrown.
with TempFileLock(f"{temp_dir}.lock", timeout=0):
shutil.rmtree(temp_dir, ignore_errors=True)
except TimeoutError:
pass
@classmethod
def from_uri(cls, uri: str) -> "Checkpoint":
"""Create checkpoint object from location URI (e.g. cloud storage).
Valid locations currently include AWS S3 (``s3://``),
Google cloud storage (``gs://``), HDFS (``hdfs://``), and
local files (``file://``).
Args:
uri: Source location URI to read data from.
Returns:
Checkpoint: checkpoint object.
"""
return cls(uri=uri)
def to_uri(self, uri: str) -> str:
"""Write checkpoint data to location URI (e.g. cloud storage).
Args:
uri: Target location URI to write data to.
Returns:
str: Cloud location containing checkpoint data.
"""
if uri.startswith("file://"):
local_path = uri[7:]
return self.to_directory(local_path)
if not is_non_local_path_uri(uri):
raise RuntimeError(
f"Cannot upload checkpoint to URI: Provided URI "
f"does not belong to a registered storage provider: `{uri}`. "
f"Hint: {fs_hint(uri)}"
)
with self.as_directory() as local_path:
upload_to_uri(local_path=local_path, uri=uri)
return uri
@DeveloperAPI
def get_internal_representation(
self,
) -> Tuple[str, Union[dict, str, ray.ObjectRef]]:
"""Return tuple of (type, data) for the internal representation.
The internal representation can be used e.g. to compare checkpoint
objects for equality or to access the underlying data storage.
The returned type is a string and one of
``["local_path", "data_dict", "uri", "object_ref"]``.
The data is the respective data value.
Note that paths converted from ``file://...`` will be returned
as ``local_path`` (without the ``file://`` prefix) and not as ``uri``.
Returns:
Tuple of type and data.
"""
if self._local_path:
return "local_path", self._local_path
elif self._data_dict:
return "data_dict", self._data_dict
elif self._uri:
return "uri", self._uri
elif self._obj_ref:
return "object_ref", self._obj_ref
else:
raise RuntimeError(
"Cannot get internal representation of empty checkpoint."
)
def __getstate__(self):
if self._local_path:
blob = self.to_bytes()
return self.__class__.from_bytes(blob).__getstate__()
return self.__dict__
def __setstate__(self, state):
self.__dict__.update(state)
def _get_local_path(path: Optional[str]) -> Optional[str]:
"""Check if path is a local path. Otherwise return None."""
if path is None or is_non_local_path_uri(path):
return None
if path.startswith("file://"):
path = path[7:]
if os.path.exists(path):
return path
return None
def _get_external_path(path: Optional[str]) -> Optional[str]:
"""Check if path is an external path. Otherwise return None."""
if not isinstance(path, str) or not is_non_local_path_uri(path):
return None
return path
def _temporary_checkpoint_dir() -> str:
"""Create temporary checkpoint dir."""
return tempfile.mkdtemp(prefix=_CHECKPOINT_DIR_PREFIX)
def _pack(path: str) -> bytes:
"""Pack directory in ``path`` into an archive, return as bytes string."""
stream = io.BytesIO()
def filter_function(tarinfo):
if tarinfo.name.endswith(_METADATA_CHECKPOINT_SUFFIX):
return None
else:
return tarinfo
with tarfile.open(fileobj=stream, mode="w", format=tarfile.PAX_FORMAT) as tar:
tar.add(path, arcname="", filter=filter_function)
return stream.getvalue()
def _unpack(stream: bytes, path: str) -> str:
"""Unpack archive in bytes string into directory in ``path``."""
with tarfile.open(fileobj=io.BytesIO(stream)) as tar:
tar.extractall(path)
return path
def _get_del_lock_path(path: str, pid: str = None) -> str:
"""Get the path to the deletion lock file."""
pid = pid if pid is not None else os.getpid()
return f"{path}.del_lock_{pid}"
def _make_dir(path: str, acquire_del_lock: bool = True) -> None:
"""Create the temporary checkpoint dir in ``path``."""
if acquire_del_lock:
# Each process drops a deletion lock file it then cleans up.
# If there are no lock files left, the last process
# will remove the entire directory.
del_lock_path = _get_del_lock_path(path)
open(del_lock_path, "a").close()
os.makedirs(path, exist_ok=True)
# Drop marker
open(os.path.join(path, ".is_checkpoint"), "a").close()
```
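The directory-to-dict translation above bottoms out in the `_pack`/`_unpack` tar helpers. A small round-trip sketch; the import path is taken from the file header and should be treated as an assumption for your Ray version, since these are private helpers:
```python
# Hedged sketch of the tar-based round trip backing directory <-> dict checkpoints.
import os
import tempfile
from ray.air.checkpoint import _pack, _unpack  # private helpers, path assumed from above
src = tempfile.mkdtemp()
with open(os.path.join(src, "weights.txt"), "w") as f:
    f.write("some checkpoint payload")
blob = _pack(src)        # directory -> bytes (PAX tar archive, metadata files filtered out)
dst = tempfile.mkdtemp()
_unpack(blob, dst)       # bytes -> directory
assert os.path.exists(os.path.join(dst, "weights.txt"))
```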
#### File: air/tests/test_data_batch_conversion.py
```python
import pytest
import pandas as pd
import numpy as np
import pyarrow as pa
from ray.air.constants import TENSOR_COLUMN_NAME
from ray.air.util.data_batch_conversion import convert_batch_type_to_pandas
from ray.air.util.data_batch_conversion import convert_pandas_to_batch_type
from ray.air.util.data_batch_conversion import DataType
from ray.air.util.tensor_extensions.pandas import TensorArray
from ray.air.util.tensor_extensions.arrow import ArrowTensorArray
def test_pandas_pandas():
input_data = pd.DataFrame({"x": [1, 2, 3]})
expected_output = input_data
actual_output = convert_batch_type_to_pandas(input_data)
assert expected_output.equals(actual_output)
assert convert_pandas_to_batch_type(actual_output, type=DataType.PANDAS).equals(
input_data
)
def test_numpy_pandas():
input_data = np.array([1, 2, 3])
expected_output = pd.DataFrame({TENSOR_COLUMN_NAME: TensorArray([1, 2, 3])})
actual_output = convert_batch_type_to_pandas(input_data)
assert expected_output.equals(actual_output)
assert np.array_equal(
convert_pandas_to_batch_type(actual_output, type=DataType.NUMPY), input_data
)
def test_numpy_multi_dim_pandas():
input_data = np.arange(12).reshape((3, 2, 2))
expected_output = pd.DataFrame({TENSOR_COLUMN_NAME: TensorArray(input_data)})
actual_output = convert_batch_type_to_pandas(input_data)
assert expected_output.equals(actual_output)
assert np.array_equal(
convert_pandas_to_batch_type(actual_output, type=DataType.NUMPY), input_data
)
def test_numpy_object_pandas():
input_data = np.array([[1, 2, 3], [1]], dtype=object)
expected_output = pd.DataFrame({TENSOR_COLUMN_NAME: TensorArray(input_data)})
actual_output = convert_batch_type_to_pandas(input_data)
assert expected_output.equals(actual_output)
assert np.array_equal(
convert_pandas_to_batch_type(actual_output, type=DataType.NUMPY), input_data
)
def test_dict_fail():
input_data = {"x": "y"}
with pytest.raises(ValueError):
convert_batch_type_to_pandas(input_data)
def test_dict_pandas():
input_data = {"x": np.array([1, 2, 3])}
expected_output = pd.DataFrame({"x": TensorArray(input_data["x"])})
actual_output = convert_batch_type_to_pandas(input_data)
assert expected_output.equals(actual_output)
output_array = convert_pandas_to_batch_type(actual_output, type=DataType.NUMPY)
assert np.array_equal(output_array, input_data["x"])
def test_dict_multi_dim_to_pandas():
tensor = np.arange(12).reshape((3, 2, 2))
input_data = {"x": tensor}
expected_output = pd.DataFrame({"x": TensorArray(tensor)})
actual_output = convert_batch_type_to_pandas(input_data)
assert expected_output.equals(actual_output)
output_array = convert_pandas_to_batch_type(actual_output, type=DataType.NUMPY)
assert np.array_equal(output_array, input_data["x"])
def test_dict_pandas_multi_column():
array_dict = {"x": np.array([1, 2, 3]), "y": np.array([4, 5, 6])}
expected_output = pd.DataFrame({k: TensorArray(v) for k, v in array_dict.items()})
actual_output = convert_batch_type_to_pandas(array_dict)
assert expected_output.equals(actual_output)
output_dict = convert_pandas_to_batch_type(actual_output, type=DataType.NUMPY)
for k, v in output_dict.items():
assert np.array_equal(v, array_dict[k])
def test_arrow_pandas():
df = pd.DataFrame({"x": [1, 2, 3]})
input_data = pa.Table.from_pandas(df)
expected_output = df
actual_output = convert_batch_type_to_pandas(input_data)
assert expected_output.equals(actual_output)
assert convert_pandas_to_batch_type(actual_output, type=DataType.ARROW).equals(
input_data
)
def test_arrow_tensor_pandas():
np_array = np.array([1, 2, 3])
df = pd.DataFrame({"x": TensorArray(np_array)})
input_data = pa.Table.from_arrays(
[ArrowTensorArray.from_numpy(np_array)], names=["x"]
)
expected_output = df
actual_output = convert_batch_type_to_pandas(input_data)
assert expected_output.equals(actual_output)
assert convert_pandas_to_batch_type(actual_output, type=DataType.ARROW).equals(
input_data
)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-sv", __file__]))
```
#### File: air/tests/test_keras_callback.py
```python
import os
import tensorflow as tf
from ray.air import session
from ray.air.callbacks.keras import Callback
from ray.air.examples.tf.tensorflow_linear_dataset_example import (
build_model,
get_dataset,
)
from ray.train.constants import TRAIN_DATASET_KEY
from ray.train.tensorflow import TensorflowTrainer, prepare_dataset_shard
def train_func(config: dict):
strategy = tf.distribute.MultiWorkerMirroredStrategy()
with strategy.scope():
# Model building/compiling need to be within `strategy.scope()`.
multi_worker_model = build_model()
multi_worker_model.compile(
optimizer=tf.keras.optimizers.SGD(learning_rate=config.get("lr", 1e-3)),
loss=tf.keras.losses.mean_squared_error,
metrics=[tf.keras.metrics.mean_squared_error],
)
dataset = session.get_dataset_shard("train")
for _ in range(config.get("epoch", 3)):
tf_dataset = prepare_dataset_shard(
dataset.to_tf(
label_column="y",
output_signature=(
tf.TensorSpec(shape=(None, 1), dtype=tf.float32),
tf.TensorSpec(shape=(None), dtype=tf.float32),
),
batch_size=32,
)
)
multi_worker_model.fit(tf_dataset, callbacks=[Callback()])
def test_keras_callback():
epochs = 3
scaling_config = {"num_workers": 2}
config = {
"epochs": epochs,
}
trainer = TensorflowTrainer(
train_loop_per_worker=train_func,
train_loop_config=config,
scaling_config=scaling_config,
datasets={TRAIN_DATASET_KEY: get_dataset()},
)
checkpoint = trainer.fit().checkpoint
with checkpoint.as_directory() as ckpt_dir:
assert os.path.exists(os.path.join(ckpt_dir, "saved_model.pb"))
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", "-x", __file__]))
```
#### File: air/util/data_batch_conversion.py
```python
from enum import Enum, auto
import numpy as np
import pandas as pd
from ray.air.data_batch_type import DataBatchType
from ray.air.constants import TENSOR_COLUMN_NAME
from ray.util.annotations import DeveloperAPI
try:
import pyarrow
except ImportError:
pyarrow = None
@DeveloperAPI
class DataType(Enum):
PANDAS = auto()
ARROW = auto()
NUMPY = auto() # Either a single numpy array or a Dict of numpy arrays.
@DeveloperAPI
def convert_batch_type_to_pandas(data: DataBatchType) -> pd.DataFrame:
"""Convert the provided data to a Pandas DataFrame.
Args:
data: Data of type DataBatchType
Returns:
A pandas Dataframe representation of the input data.
"""
from ray.air.util.tensor_extensions.pandas import TensorArray
if isinstance(data, pd.DataFrame):
return data
elif isinstance(data, np.ndarray):
return pd.DataFrame({TENSOR_COLUMN_NAME: TensorArray(data)})
elif isinstance(data, dict):
tensor_dict = {}
for k, v in data.items():
if not isinstance(v, np.ndarray):
raise ValueError(
"All values in the provided dict must be of type "
f"np.ndarray. Found type {type(v)} for key {k} "
f"instead."
)
# Convert numpy arrays to TensorArray.
tensor_dict[k] = TensorArray(v)
return pd.DataFrame(tensor_dict)
elif pyarrow is not None and isinstance(data, pyarrow.Table):
return data.to_pandas()
else:
raise ValueError(
f"Received data of type: {type(data)}, but expected it to be one "
f"of {DataBatchType}"
)
@DeveloperAPI
def convert_pandas_to_batch_type(data: pd.DataFrame, type: DataType) -> DataBatchType:
"""Convert the provided Pandas dataframe to the provided ``type``.
Args:
data: A Pandas DataFrame
type: The specific ``DataBatchType`` to convert to.
Returns:
The input data represented with the provided type.
"""
if type == DataType.PANDAS:
return data
elif type == DataType.NUMPY:
if len(data.columns) == 1:
# If just a single column, return as a single numpy array.
return data.iloc[:, 0].to_numpy()
else:
# Else return as a dict of numpy arrays.
output_dict = {}
for column in data:
output_dict[column] = data[column].to_numpy()
return output_dict
elif type == DataType.ARROW:
if not pyarrow:
raise ValueError(
"Attempted to convert data to Pyarrow Table but Pyarrow "
"is not installed. Please do `pip install pyarrow` to "
"install Pyarrow."
)
return pyarrow.Table.from_pandas(data)
else:
raise ValueError(
f"Received type {type}, but expected it to be one of {DataType}"
)
```
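The two converters above are symmetric: `convert_batch_type_to_pandas` normalizes any supported batch type into a pandas DataFrame (wrapping a bare ndarray in the single internal tensor column), and `convert_pandas_to_batch_type` maps the DataFrame back. Below is a minimal round-trip sketch; the import path is assumed from the file location above and may differ between Ray versions.
```python
# Hedged round-trip sketch for the converters defined above.
import numpy as np

from ray.air.util.data_batch_conversion import (  # assumed import path
    DataType,
    convert_batch_type_to_pandas,
    convert_pandas_to_batch_type,
)

# A bare ndarray is wrapped into the single internal tensor column.
batch = np.arange(6).reshape(3, 2)
df = convert_batch_type_to_pandas(batch)
print(df.columns)  # expected to contain the internal tensor column name

# A dict of ndarrays becomes one TensorArray-backed column per key.
df2 = convert_batch_type_to_pandas({"a": np.ones(3), "b": np.zeros(3)})

# Converting back: a single-column frame returns a plain ndarray,
# a multi-column frame returns a dict of ndarrays.
arr = convert_pandas_to_batch_type(df, DataType.NUMPY)
d = convert_pandas_to_batch_type(df2, DataType.NUMPY)
assert isinstance(d, dict) and set(d) == {"a", "b"}
```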
#### File: data/_internal/table_block.py
```python
import collections
from typing import Dict, Iterator, List, Union, Any, TypeVar, TYPE_CHECKING
import numpy as np
from ray.data.block import Block, BlockAccessor
from ray.data.row import TableRow
from ray.data._internal.block_builder import BlockBuilder
from ray.data._internal.size_estimator import SizeEstimator
if TYPE_CHECKING:
from ray.data._internal.sort import SortKeyT
# The internal column name used for pure-tensor datasets, represented as
# single-tensor-column tables.
VALUE_COL_NAME = "__value__"
T = TypeVar("T")
# The max size of Python tuples to buffer before compacting them into a
# table in the BlockBuilder.
MAX_UNCOMPACTED_SIZE_BYTES = 50 * 1024 * 1024
class TableBlockBuilder(BlockBuilder[T]):
def __init__(self, block_type):
# The set of uncompacted Python values buffered.
self._columns = collections.defaultdict(list)
# The set of compacted tables we have built so far.
self._tables: List[Any] = []
self._tables_size_bytes = 0
# Size estimator for un-compacted table values.
self._uncompacted_size = SizeEstimator()
self._num_rows = 0
self._num_compactions = 0
self._block_type = block_type
def add(self, item: Union[dict, TableRow, np.ndarray]) -> None:
if isinstance(item, TableRow):
item = item.as_pydict()
elif isinstance(item, np.ndarray):
item = {VALUE_COL_NAME: item}
if not isinstance(item, dict):
            raise ValueError(
                "Returned elements of a TableBlock must be of type `dict`, "
                "got {} (type {}).".format(item, type(item))
            )
for key, value in item.items():
self._columns[key].append(value)
self._num_rows += 1
self._compact_if_needed()
self._uncompacted_size.add(item)
def add_block(self, block: Any) -> None:
if not isinstance(block, self._block_type):
raise TypeError(
f"Got a block of type {type(block)}, expected {self._block_type}."
"If you are mapping a function, ensure it returns an "
"object with the expected type. Block:\n"
f"{block}"
)
accessor = BlockAccessor.for_block(block)
self._tables.append(block)
self._tables_size_bytes += accessor.size_bytes()
self._num_rows += accessor.num_rows()
def _table_from_pydict(self, columns: Dict[str, List[Any]]) -> Block:
raise NotImplementedError
def _concat_tables(self, tables: List[Block]) -> Block:
raise NotImplementedError
@staticmethod
def _empty_table() -> Any:
raise NotImplementedError
def build(self) -> Block:
if self._columns:
tables = [self._table_from_pydict(self._columns)]
else:
tables = []
tables.extend(self._tables)
if len(tables) > 1:
return self._concat_tables(tables)
elif len(tables) > 0:
return tables[0]
else:
return self._empty_table()
def num_rows(self) -> int:
return self._num_rows
def get_estimated_memory_usage(self) -> int:
if self._num_rows == 0:
return 0
return self._tables_size_bytes + self._uncompacted_size.size_bytes()
def _compact_if_needed(self) -> None:
assert self._columns
if self._uncompacted_size.size_bytes() < MAX_UNCOMPACTED_SIZE_BYTES:
return
block = self._table_from_pydict(self._columns)
self.add_block(block)
self._uncompacted_size = SizeEstimator()
self._columns.clear()
self._num_compactions += 1
class TableBlockAccessor(BlockAccessor):
ROW_TYPE: TableRow = TableRow
def __init__(self, table: Any):
self._table = table
def _get_row(self, index: int, copy: bool = False) -> Union[TableRow, np.ndarray]:
row = self.slice(index, index + 1, copy=copy)
if self.is_tensor_wrapper():
row = self._build_tensor_row(row)
else:
row = self.ROW_TYPE(row)
return row
@staticmethod
def _build_tensor_row(row: TableRow) -> np.ndarray:
raise NotImplementedError
def to_native(self) -> Block:
if self.is_tensor_wrapper():
native = self.to_numpy()
else:
# Always promote Arrow blocks to pandas for consistency, since
# we lazily convert pandas->Arrow internally for efficiency.
native = self.to_pandas()
return native
def column_names(self) -> List[str]:
raise NotImplementedError
def to_block(self) -> Block:
return self._table
def is_tensor_wrapper(self) -> bool:
return self.column_names() == [VALUE_COL_NAME]
def iter_rows(self) -> Iterator[Union[TableRow, np.ndarray]]:
outer = self
class Iter:
def __init__(self):
self._cur = -1
def __iter__(self):
return self
def __next__(self):
self._cur += 1
if self._cur < outer.num_rows():
return outer._get_row(self._cur)
raise StopIteration
return Iter()
def _zip(self, acc: BlockAccessor) -> "Block[T]":
raise NotImplementedError
def zip(self, other: "Block[T]") -> "Block[T]":
acc = BlockAccessor.for_block(other)
if not isinstance(acc, type(self)):
raise ValueError(
"Cannot zip {} with block of type {}".format(type(self), type(other))
)
if acc.num_rows() != self.num_rows():
raise ValueError(
"Cannot zip self (length {}) with block of length {}".format(
self.num_rows(), acc.num_rows()
)
)
return self._zip(acc)
@staticmethod
def _empty_table() -> Any:
raise NotImplementedError
def _sample(self, n_samples: int, key: "SortKeyT") -> Any:
raise NotImplementedError
def sample(self, n_samples: int, key: "SortKeyT") -> Any:
if key is None or callable(key):
raise NotImplementedError(
f"Table sort key must be a column name, was: {key}"
)
if self.num_rows() == 0:
            # If the pyarrow table is empty we may not have a schema,
            # so calling table.select() would raise an error.
return self._empty_table()
k = min(n_samples, self.num_rows())
return self._sample(k, key)
```
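The builder above follows a buffer-then-compact pattern: rows are appended to per-column Python lists and compacted into an immutable table once the estimated uncompacted size crosses `MAX_UNCOMPACTED_SIZE_BYTES`; `build()` then concatenates the compacted tables. The following standalone sketch (no Ray dependencies; every name in it is illustrative, not part of the API above) shows the same pattern with plain dictionaries.
```python
import collections
import sys
from typing import Any, Dict, List

COMPACT_THRESHOLD_BYTES = 1024  # illustrative, far smaller than the real 50 MiB


class BufferedRowBuilder:
    """Toy builder mirroring the buffer/compact/concatenate structure above."""

    def __init__(self) -> None:
        self._columns: Dict[str, List[Any]] = collections.defaultdict(list)
        self._buffered_bytes = 0
        self._compacted: List[Dict[str, List[Any]]] = []

    def add(self, row: Dict[str, Any]) -> None:
        for key, value in row.items():
            self._columns[key].append(value)
        self._buffered_bytes += sum(sys.getsizeof(v) for v in row.values())
        if self._buffered_bytes >= COMPACT_THRESHOLD_BYTES:
            self._compact()

    def _compact(self) -> None:
        # "Freeze" the buffered columns into a compacted table.
        self._compacted.append({k: list(v) for k, v in self._columns.items()})
        self._columns.clear()
        self._buffered_bytes = 0

    def build(self) -> Dict[str, List[Any]]:
        if self._columns:
            self._compact()
        # Concatenate all compacted tables column-wise.
        out: Dict[str, List[Any]] = collections.defaultdict(list)
        for table in self._compacted:
            for k, v in table.items():
                out[k].extend(v)
        return dict(out)


builder = BufferedRowBuilder()
for i in range(100):
    builder.add({"id": i, "value": i * 0.5})
block = builder.build()
assert len(block["id"]) == 100
```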
#### File: data/preprocessors/encoder.py
```python
from functools import partial
from typing import List, Dict, Optional, Union
from collections import Counter, OrderedDict
import pandas as pd
import pandas.api.types
from ray.data import Dataset
from ray.data.preprocessor import Preprocessor
class OrdinalEncoder(Preprocessor):
"""Encode values within columns as ordered integer values.
Currently, order within a column is based on the values from the fitted
dataset in sorted order.
Transforming values not included in the fitted dataset will be encoded as ``None``.
All column values must be hashable scalars or lists of hashable values. Those
two types cannot be mixed.
Example:
.. code-block:: python
import ray.data
from ray.data.preprocessors import OrdinalEncoder
import pandas as pd
batch = pd.DataFrame(
{
"A": [["warm"], [], ["hot", "warm", "cold"], ["cold", "cold"]],
"B": ["warm", "cold", "hot", "cold"],
},
)
oe = OrdinalEncoder(columns=["A", "B"], encode_lists=True)
oe.fit(ray.data.from_pandas(batch))
transformed_batch = oe.transform_batch(batch)
expected_batch = pd.DataFrame(
{
"A": [[2], [], [1, 2, 0], [0, 0]],
"B": [2, 0, 1, 0],
}
)
assert transformed_batch.equals(expected_batch)
oe = OrdinalEncoder(columns=["A", "B"], encode_lists=False)
oe.fit(ray.data.from_pandas(batch))
transformed_batch = oe.transform_batch(batch)
expected_batch = pd.DataFrame(
{
"A": [3, 0, 2, 1],
"B": [2, 0, 1, 0],
}
)
assert transformed_batch.equals(expected_batch)
Args:
columns: The columns that will individually be encoded.
encode_lists: If True, each element of lists inside list
columns will be encoded. If False, each list will
be treated as a whole separate category. True
by default.
"""
def __init__(self, columns: List[str], *, encode_lists: bool = True):
# TODO: allow user to specify order of values within each column.
self.columns = columns
self.encode_lists = encode_lists
def _fit(self, dataset: Dataset) -> Preprocessor:
self.stats_ = _get_unique_value_indices(
dataset, self.columns, encode_lists=self.encode_lists
)
return self
def _transform_pandas(self, df: pd.DataFrame):
_validate_df(df, *self.columns)
def encode_list(element: list, *, name: str):
return [self.stats_[f"unique_values({name})"].get(x) for x in element]
def column_ordinal_encoder(s: pd.Series):
if _is_series_composed_of_lists(s):
if self.encode_lists:
return s.map(partial(encode_list, name=s.name))
                # We cannot simply use `map` here because pandas treats
                # tuples as index labels.
def list_as_category(element):
element = tuple(element)
return self.stats_[f"unique_values({s.name})"].get(element)
return s.apply(list_as_category)
s_values = self.stats_[f"unique_values({s.name})"]
return s.map(s_values)
df[self.columns] = df[self.columns].apply(column_ordinal_encoder)
return df
def __repr__(self):
stats = getattr(self, "stats_", None)
return (
f"OrdinalEncoder(columns={self.columns}, stats={stats}, "
f"encode_lists={self.encode_lists})"
)
class OneHotEncoder(Preprocessor):
"""Encode columns as new columns using one-hot encoding.
The transformed dataset will have a new column in the form ``{column}_{value}``
for each of the values from the fitted dataset. The value of a column will
be set to 1 if the value matches, otherwise 0.
Transforming values not included in the fitted dataset or not among
the top popular values (see ``limit``) will result in all of the encoded column
values being 0.
All column values must be hashable or lists. Lists will be treated as separate
categories. If you would like to encode list elements,
use :class:`MultiHotEncoder`.
Example:
.. code-block:: python
ohe = OneHotEncoder(
columns=[
"trip_start_hour",
"trip_start_day",
"trip_start_month",
"dropoff_census_tract",
"pickup_community_area",
"dropoff_community_area",
"payment_type",
"company",
],
limit={
"dropoff_census_tract": 25,
"pickup_community_area": 20,
"dropoff_community_area": 20,
"payment_type": 2,
"company": 7,
},
)
Args:
columns: The columns that will individually be encoded.
        limit: If set, only the top ``limit`` most frequent values per column
            become categorical variables; the less frequent ones will result
            in all the encoded column values being 0. This is a dict mapping
            each column to its corresponding limit, and every column in this
            dictionary must also appear in ``columns``.
"""
def __init__(self, columns: List[str], *, limit: Optional[Dict[str, int]] = None):
# TODO: add `drop` parameter.
self.columns = columns
self.limit = limit
def _fit(self, dataset: Dataset) -> Preprocessor:
self.stats_ = _get_unique_value_indices(
dataset, self.columns, limit=self.limit, encode_lists=False
)
return self
def _transform_pandas(self, df: pd.DataFrame):
_validate_df(df, *self.columns)
columns_to_drop = set(self.columns)
# Compute new one-hot encoded columns
for column in self.columns:
column_values = self.stats_[f"unique_values({column})"]
if _is_series_composed_of_lists(df[column]):
df[column] = df[column].map(lambda x: tuple(x))
for column_value in column_values:
df[f"{column}_{column_value}"] = (df[column] == column_value).astype(
int
)
# Drop original unencoded columns.
df = df.drop(columns=list(columns_to_drop))
return df
def __repr__(self):
stats = getattr(self, "stats_", None)
return f"OneHotEncoder(columns={self.columns}, stats={stats})"
class MultiHotEncoder(Preprocessor):
"""Encode columns using multi-hot encoding.
    A column of lists or scalars (treated as one-element lists) will be
    encoded as a column of multi-hot encoded lists. This is useful, for
    example, for generating embeddings for recommender systems.
Example:
.. code-block:: python
import ray.data
from ray.data.preprocessors import MultiHotEncoder
import pandas as pd
mhe = MultiHotEncoder(columns=["A", "B"])
batch = pd.DataFrame(
{
"A": [["warm"], [], ["hot", "warm", "cold"], ["cold", "cold"]],
"B": ["warm", "cold", "hot", "cold"],
},
)
mhe.fit(ray.data.from_pandas(batch))
transformed_batch = mhe.transform_batch(batch)
expected_batch = pd.DataFrame(
{
"A": [[0, 0, 1], [0, 0, 0], [1, 1, 1], [2, 0, 0]],
"B": [[0, 0, 1], [1, 0, 0], [0, 1, 0], [1, 0, 0]],
}
)
assert transformed_batch.equals(expected_batch)
Transforming values not included in the fitted dataset or not among
the top popular values (see ``limit``) will result in all of the encoded column
values being 0.
The logic is similar to scikit-learn's `MultiLabelBinarizer \
<https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing\
.MultiLabelBinarizer.html>`_.
All column values must be hashable scalars or lists of hashable values. Those
two types cannot be mixed.
See also: :class:`OneHotEncoder`.
Args:
columns: The columns that will individually be encoded.
        limit: If set, only the top ``limit`` most frequent values per column
            become categorical variables; the less frequent ones will result
            in all the encoded values being 0. This is a dict mapping each
            column to its corresponding limit, and every column in this
            dictionary must also appear in ``columns``.
"""
def __init__(self, columns: List[str], *, limit: Optional[Dict[str, int]] = None):
# TODO: add `drop` parameter.
self.columns = columns
self.limit = limit
def _fit(self, dataset: Dataset) -> Preprocessor:
self.stats_ = _get_unique_value_indices(
dataset, self.columns, limit=self.limit, encode_lists=True
)
return self
def _transform_pandas(self, df: pd.DataFrame):
_validate_df(df, *self.columns)
def encode_list(element: list, *, name: str):
if not isinstance(element, list):
element = [element]
stats = self.stats_[f"unique_values({name})"]
counter = Counter(element)
return [counter.get(x, 0) for x in stats]
for column in self.columns:
df[column] = df[column].map(partial(encode_list, name=column))
return df
def __repr__(self):
stats = getattr(self, "stats_", None)
return f"MultiHotEncoder(columns={self.columns}, stats={stats})"
class LabelEncoder(Preprocessor):
"""Encode values within a label column as ordered integer values.
Currently, order within a column is based on the values from the fitted
dataset in sorted order.
Transforming values not included in the fitted dataset will be encoded as ``None``.
All column values must be hashable.
Args:
label_column: The label column that will be encoded.
"""
def __init__(self, label_column: str):
self.label_column = label_column
def _fit(self, dataset: Dataset) -> Preprocessor:
self.stats_ = _get_unique_value_indices(dataset, [self.label_column])
return self
def _transform_pandas(self, df: pd.DataFrame):
_validate_df(df, self.label_column)
def column_label_encoder(s: pd.Series):
s_values = self.stats_[f"unique_values({s.name})"]
return s.map(s_values)
df[self.label_column] = df[self.label_column].transform(column_label_encoder)
return df
def __repr__(self):
stats = getattr(self, "stats_", None)
return f"LabelEncoder(label_column={self.label_column}, stats={stats})"
class Categorizer(Preprocessor):
"""Transform Dataset columns to Categorical data type.
    Note that when categories are inferred automatically, you will most
    likely want to run this preprocessor on the entire dataset before
    splitting it (e.g. into train and test sets), so that all of the
    categories are inferred. There is no risk of data leakage when using
    this preprocessor.
Args:
columns: The columns whose data type to change. Can be
either a list of columns, in which case the categories
will be inferred automatically from the data, or
a dict of `column:pd.CategoricalDtype or None` -
if specified, the dtype will be applied, and if not,
it will be automatically inferred.
"""
def __init__(
self, columns: Union[List[str], Dict[str, Optional[pd.CategoricalDtype]]]
):
self.columns = columns
def _fit(self, dataset: Dataset) -> Preprocessor:
columns_to_get = (
self.columns
if isinstance(self.columns, list)
else [
column for column, cat_type in self.columns.items() if cat_type is None
]
)
if columns_to_get:
unique_indices = _get_unique_value_indices(
dataset, columns_to_get, drop_na_values=True, key_format="{0}"
)
unique_indices = {
column: pd.CategoricalDtype(values_indices.keys())
for column, values_indices in unique_indices.items()
}
else:
unique_indices = {}
if isinstance(self.columns, dict):
unique_indices = {**self.columns, **unique_indices}
self.stats_: Dict[str, pd.CategoricalDtype] = unique_indices
return self
def _transform_pandas(self, df: pd.DataFrame):
df = df.astype(self.stats_)
return df
def __repr__(self):
stats = getattr(self, "stats_", None)
return f"<Categorizer columns={self.columns} stats={stats}>"
def _get_unique_value_indices(
dataset: Dataset,
columns: List[str],
drop_na_values: bool = False,
key_format: str = "unique_values({0})",
limit: Optional[Dict[str, int]] = None,
encode_lists: bool = True,
) -> Dict[str, Dict[str, int]]:
"""If drop_na_values is True, will silently drop NA values."""
limit = limit or {}
for column in limit:
if column not in columns:
raise ValueError(
f"You set limit for {column}, which is not present in {columns}."
)
def get_pd_value_counts_per_column(col: pd.Series):
# special handling for lists
if _is_series_composed_of_lists(col):
if encode_lists:
counter = Counter()
def update_counter(element):
counter.update(element)
return element
col.map(update_counter)
return counter
else:
# convert to tuples to make lists hashable
col = col.map(lambda x: tuple(x))
return Counter(col.value_counts(dropna=False).to_dict())
def get_pd_value_counts(df: pd.DataFrame) -> List[Dict[str, Counter]]:
result = [{col: get_pd_value_counts_per_column(df[col]) for col in columns}]
return result
value_counts = dataset.map_batches(get_pd_value_counts, batch_format="pandas")
final_counters = {col: Counter() for col in columns}
for batch in value_counts.iter_batches():
for col_value_counts in batch:
for col, value_counts in col_value_counts.items():
final_counters[col] += value_counts
    # Check whether there are any NA values.
for col in columns:
if drop_na_values:
counter = final_counters[col]
counter_dict = dict(counter)
sanitized_dict = {k: v for k, v in counter_dict.items() if not pd.isnull(k)}
final_counters[col] = Counter(sanitized_dict)
else:
if any(pd.isnull(k) for k in final_counters[col]):
raise ValueError(
f"Unable to fit column '{col}' because it contains null"
f" values. Consider imputing missing values first."
)
unique_values_with_indices = OrderedDict()
for column in columns:
if column in limit:
# Output sorted by freq.
unique_values_with_indices[key_format.format(column)] = {
k[0]: j
for j, k in enumerate(final_counters[column].most_common(limit[column]))
}
else:
# Output sorted by column name.
unique_values_with_indices[key_format.format(column)] = {
k: j for j, k in enumerate(sorted(dict(final_counters[column]).keys()))
}
return unique_values_with_indices
def _validate_df(df: pd.DataFrame, *columns: str) -> None:
null_columns = [column for column in columns if df[column].isnull().values.any()]
if null_columns:
raise ValueError(
f"Unable to transform columns {null_columns} because they contain "
f"null values. Consider imputing missing values first."
)
def _is_series_composed_of_lists(series: pd.Series) -> bool:
    # We assume that all elements are lists here.
first_not_none_element = next(
(element for element in series if element is not None), None
)
return pandas.api.types.is_object_dtype(series.dtype) and isinstance(
first_not_none_element, list
)
```
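`LabelEncoder` has no docstring example, but it follows the same fit/transform pattern as the other encoders in this file. Below is a minimal sketch patterned on the `OrdinalEncoder` docstring example; the `transform_batch` call and dataset construction are assumed to behave as they do in those examples and may vary between Ray versions.
```python
import pandas as pd
import ray.data
from ray.data.preprocessors import LabelEncoder

batch = pd.DataFrame({"label": ["cat", "dog", "cat", "bird"]})

le = LabelEncoder(label_column="label")
le.fit(ray.data.from_pandas(batch))

# Values are mapped to integers in sorted order: bird=0, cat=1, dog=2.
transformed = le.transform_batch(batch)
print(transformed["label"].tolist())  # expected: [1, 2, 1, 0]
```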
#### File: data/preprocessors/hasher.py
```python
import collections
from typing import List
import pandas as pd
from ray.data.preprocessor import Preprocessor
from ray.data.preprocessors.utils import simple_hash
class FeatureHasher(Preprocessor):
"""Hash the features of the specified columns.
The created columns will have names in the format ``hash_{column_names}_{hash}``,
e.g. ``hash_column1_column2_0``, ``hash_column1_column2_1``, ...
Note: Currently sparse matrices are not supported.
Therefore, it is recommended to **not** use a large ``num_features``.
Args:
columns: The columns of features that should be projected
onto a single hashed feature vector.
num_features: The size of the hashed feature vector.
"""
_is_fittable = False
def __init__(self, columns: List[str], num_features: int):
self.columns = columns
# TODO(matt): Set default number of features.
# This likely requires sparse matrix support to avoid explosion of columns.
self.num_features = num_features
def _transform_pandas(self, df: pd.DataFrame):
# TODO(matt): Use sparse matrix for efficiency.
joined_columns = "_".join(self.columns)
def row_feature_hasher(row):
hash_counts = collections.defaultdict(int)
for column in self.columns:
hashed_value = simple_hash(row[column], self.num_features)
hash_counts[hashed_value] = hash_counts[hashed_value] + 1
return {
f"hash_{joined_columns}_{i}": hash_counts[i]
for i in range(self.num_features)
}
feature_columns = df.loc[:, self.columns].apply(
row_feature_hasher, axis=1, result_type="expand"
)
df = df.join(feature_columns)
# Drop original unhashed columns.
df.drop(columns=self.columns, inplace=True)
return df
def __repr__(self):
return (
f"FeatureHasher(columns={self.columns}, num_features={self.num_features})"
)
```
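Since `FeatureHasher` is stateless (`_is_fittable = False`), it can be applied without a prior `fit`. The sketch below shows the expected column layout, assuming `transform_batch` accepts a pandas DataFrame as in the encoder examples above.
```python
# Hedged sketch: hashed columns are named hash_{joined_columns}_{i}.
import pandas as pd
from ray.data.preprocessors import FeatureHasher

df = pd.DataFrame({"token_a": [1, 0, 1], "token_b": [0, 1, 1]})

hasher = FeatureHasher(columns=["token_a", "token_b"], num_features=4)
hashed = hasher.transform_batch(df)

# The original columns are dropped and replaced by
# hash_token_a_token_b_0 ... hash_token_a_token_b_3.
print([c for c in hashed.columns if c.startswith("hash_")])
```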
#### File: data/preprocessors/tokenizer.py
```python
from typing import List, Callable, Optional
import pandas as pd
from ray.data.preprocessor import Preprocessor
from ray.data.preprocessors.utils import simple_split_tokenizer
class Tokenizer(Preprocessor):
"""Tokenize string columns.
Each string entry will be replaced with a list of tokens.
Args:
columns: The columns that will individually be tokenized.
tokenization_fn: The tokenization function to use.
If not specified, a simple ``string.split(" ")`` will be used.
"""
_is_fittable = False
def __init__(
self,
columns: List[str],
tokenization_fn: Optional[Callable[[str], List[str]]] = None,
):
self.columns = columns
# TODO(matt): Add a more robust default tokenizer.
self.tokenization_fn = tokenization_fn or simple_split_tokenizer
def _transform_pandas(self, df: pd.DataFrame):
def column_tokenizer(s: pd.Series):
return s.map(self.tokenization_fn)
df.loc[:, self.columns] = df.loc[:, self.columns].transform(column_tokenizer)
return df
def __repr__(self):
name = getattr(self.tokenization_fn, "__name__", self.tokenization_fn)
return f"Tokenizer(columns={self.columns}, tokenization_fn={name})"
```
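The default tokenizer simply splits on spaces; a custom callable can be supplied instead. A brief sketch, again assuming `transform_batch` works as in the examples above:
```python
import pandas as pd
from ray.data.preprocessors import Tokenizer

df = pd.DataFrame({"text": ["ray data preprocessors", "hello world"]})

# Default: string.split(" ")
tok = Tokenizer(columns=["text"])
print(tok.transform_batch(df)["text"].tolist())
# expected: [['ray', 'data', 'preprocessors'], ['hello', 'world']]

# Custom tokenization_fn, e.g. lowercase + split on commas (illustrative only).
csv_tok = Tokenizer(columns=["text"], tokenization_fn=lambda s: s.lower().split(","))
```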
#### File: ray/serve/client.py
```python
import asyncio
import atexit
import logging
import random
import time
from functools import wraps
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type, Union
import ray
from ray.actor import ActorHandle
from ray.serve.common import DeploymentInfo, DeploymentStatus, StatusOverview
from ray.serve.config import DeploymentConfig, HTTPOptions, ReplicaConfig
from ray.serve.constants import (
CLIENT_POLLING_INTERVAL_S,
MAX_CACHED_HANDLES,
SERVE_NAMESPACE,
)
from ray.serve.controller import ServeController
from ray.serve.exceptions import RayServeException
from ray.serve.generated.serve_pb2 import DeploymentRoute, DeploymentRouteList
from ray.serve.generated.serve_pb2 import StatusOverview as StatusOverviewProto
from ray.serve.handle import RayServeHandle, RayServeSyncHandle
from ray.serve.schema import ServeApplicationSchema
logger = logging.getLogger(__file__)
# Whether to issue warnings about using a sync handle in an async context
# or an async handle in a sync context.
_WARN_SYNC_ASYNC_HANDLE_CONTEXT: bool = True
def _ensure_connected(f: Callable) -> Callable:
@wraps(f)
def check(self, *args, **kwargs):
if self._shutdown:
raise RayServeException("Client has already been shut down.")
return f(self, *args, **kwargs)
return check
class ServeControllerClient:
def __init__(
self,
controller: ActorHandle,
controller_name: str,
detached: bool = False,
):
self._controller: ServeController = controller
self._controller_name = controller_name
self._detached = detached
self._shutdown = False
self._http_config: HTTPOptions = ray.get(controller.get_http_config.remote())
self._root_url = ray.get(controller.get_root_url.remote())
self._checkpoint_path = ray.get(controller.get_checkpoint_path.remote())
        # Each handle has the overhead of a long poll client, so handles are cached.
self.handle_cache = dict()
self._evicted_handle_keys = set()
# NOTE(edoakes): Need this because the shutdown order isn't guaranteed
# when the interpreter is exiting so we can't rely on __del__ (it
# throws a nasty stacktrace).
if not self._detached:
def shutdown_serve_client():
self.shutdown()
atexit.register(shutdown_serve_client)
@property
def root_url(self):
return self._root_url
@property
def http_config(self):
return self._http_config
@property
def checkpoint_path(self):
return self._checkpoint_path
def __del__(self):
if not self._detached:
logger.debug(
"Shutting down Ray Serve because client went out of "
"scope. To prevent this, either keep a reference to "
"the client or use serve.start(detached=True)."
)
self.shutdown()
def __reduce__(self):
raise RayServeException(("Ray Serve client cannot be serialized."))
def shutdown(self) -> None:
"""Completely shut down the connected Serve instance.
Shuts down all processes and deletes all state associated with the
instance.
"""
# Shut down handles
for k in list(self.handle_cache):
self.handle_cache[k].stop_metrics_pusher()
del self.handle_cache[k]
if ray.is_initialized() and not self._shutdown:
ray.get(self._controller.shutdown.remote())
self._wait_for_deployments_shutdown()
ray.kill(self._controller, no_restart=True)
            # Wait for the named actor entry to be removed as well.
started = time.time()
while True:
try:
ray.get_actor(self._controller_name, namespace=SERVE_NAMESPACE)
if time.time() - started > 5:
logger.warning(
"Waited 5s for Serve to shutdown gracefully but "
"the controller is still not cleaned up. "
"You can ignore this warning if you are shutting "
"down the Ray cluster."
)
break
except ValueError: # actor name is removed
break
self._shutdown = True
def _wait_for_deployments_shutdown(self, timeout_s: int = 60):
"""Waits for all deployments to be shut down and deleted.
Raises TimeoutError if this doesn't happen before timeout_s.
"""
start = time.time()
while time.time() - start < timeout_s:
deployment_statuses = self.get_serve_status().deployment_statuses
if len(deployment_statuses) == 0:
break
else:
logger.debug(
f"Waiting for shutdown, {len(deployment_statuses)} "
"deployments still alive."
)
time.sleep(CLIENT_POLLING_INTERVAL_S)
else:
live_names = [
deployment_status.name for deployment_status in deployment_statuses
]
raise TimeoutError(
f"Shutdown didn't complete after {timeout_s}s. "
f"Deployments still alive: {live_names}."
)
def _wait_for_deployment_healthy(self, name: str, timeout_s: int = -1):
"""Waits for the named deployment to enter "HEALTHY" status.
Raises RuntimeError if the deployment enters the "UNHEALTHY" status
instead.
Raises TimeoutError if this doesn't happen before timeout_s.
"""
start = time.time()
while time.time() - start < timeout_s or timeout_s < 0:
status = self.get_serve_status().get_deployment_status(name)
if status is None:
raise RuntimeError(
f"Waiting for deployment {name} to be HEALTHY, "
"but deployment doesn't exist."
)
if status.status == DeploymentStatus.HEALTHY:
break
elif status.status == DeploymentStatus.UNHEALTHY:
raise RuntimeError(
f"Deployment {name} is UNHEALTHY: " f"{status.message}"
)
else:
# Guard against new unhandled statuses being added.
assert status.status == DeploymentStatus.UPDATING
logger.debug(
f"Waiting for {name} to be healthy, current status: "
f"{status.status}."
)
time.sleep(CLIENT_POLLING_INTERVAL_S)
else:
raise TimeoutError(
f"Deployment {name} did not become HEALTHY after {timeout_s}s."
)
def _wait_for_deployment_deleted(self, name: str, timeout_s: int = 60):
"""Waits for the named deployment to be shut down and deleted.
Raises TimeoutError if this doesn't happen before timeout_s.
"""
start = time.time()
while time.time() - start < timeout_s:
curr_status = self.get_serve_status().get_deployment_status(name)
if curr_status is None:
break
logger.debug(
f"Waiting for {name} to be deleted, current status: {curr_status}."
)
time.sleep(CLIENT_POLLING_INTERVAL_S)
else:
raise TimeoutError(f"Deployment {name} wasn't deleted after {timeout_s}s.")
@_ensure_connected
def deploy(
self,
name: str,
deployment_def: Union[Callable, Type[Callable], str],
init_args: Tuple[Any],
init_kwargs: Dict[Any, Any],
ray_actor_options: Optional[Dict] = None,
config: Optional[Union[DeploymentConfig, Dict[str, Any]]] = None,
version: Optional[str] = None,
prev_version: Optional[str] = None,
route_prefix: Optional[str] = None,
url: Optional[str] = None,
_blocking: Optional[bool] = True,
):
controller_deploy_args = self.get_deploy_args(
name=name,
deployment_def=deployment_def,
init_args=init_args,
init_kwargs=init_kwargs,
ray_actor_options=ray_actor_options,
config=config,
version=version,
prev_version=prev_version,
route_prefix=route_prefix,
)
updating = ray.get(self._controller.deploy.remote(**controller_deploy_args))
tag = self.log_deployment_update_status(name, version, updating)
if _blocking:
self._wait_for_deployment_healthy(name)
self.log_deployment_ready(name, version, url, tag)
@_ensure_connected
def deploy_group(
self,
deployments: List[Dict],
_blocking: bool = True,
remove_past_deployments: bool = True,
):
deployment_args_list = []
for deployment in deployments:
deployment_args_list.append(
self.get_deploy_args(
deployment["name"],
deployment["func_or_class"],
deployment["init_args"],
deployment["init_kwargs"],
ray_actor_options=deployment["ray_actor_options"],
config=deployment["config"],
version=deployment["version"],
prev_version=deployment["prev_version"],
route_prefix=deployment["route_prefix"],
)
)
updating_list = ray.get(
self._controller.deploy_group.remote(deployment_args_list)
)
tags = []
for i, updating in enumerate(updating_list):
deployment = deployments[i]
name, version = deployment["name"], deployment["version"]
tags.append(self.log_deployment_update_status(name, version, updating))
for i, deployment in enumerate(deployments):
name = deployment["name"]
url = deployment["url"]
if _blocking:
self._wait_for_deployment_healthy(name)
self.log_deployment_ready(name, version, url, tags[i])
if remove_past_deployments:
# clean up the old deployments
new_deployments_names = set()
for deployment in deployments:
new_deployments_names.add(deployment["name"])
all_deployments_names = set(self.list_deployments().keys())
deployment_names_to_delete = all_deployments_names.difference(
new_deployments_names
)
self.delete_deployments(deployment_names_to_delete, blocking=_blocking)
@_ensure_connected
def deploy_app(self, config: ServeApplicationSchema) -> None:
ray.get(self._controller.deploy_app.remote(config))
@_ensure_connected
def delete_deployments(self, names: Iterable[str], blocking: bool = True) -> None:
ray.get(self._controller.delete_deployments.remote(names))
if blocking:
for name in names:
self._wait_for_deployment_deleted(name)
@_ensure_connected
def get_deployment_info(self, name: str) -> Tuple[DeploymentInfo, str]:
deployment_route = DeploymentRoute.FromString(
ray.get(self._controller.get_deployment_info.remote(name))
)
return (
DeploymentInfo.from_proto(deployment_route.deployment_info),
deployment_route.route if deployment_route.route != "" else None,
)
@_ensure_connected
def list_deployments(self) -> Dict[str, Tuple[DeploymentInfo, str]]:
deployment_route_list = DeploymentRouteList.FromString(
ray.get(self._controller.list_deployments.remote())
)
return {
deployment_route.deployment_info.name: (
DeploymentInfo.from_proto(deployment_route.deployment_info),
deployment_route.route if deployment_route.route != "" else None,
)
for deployment_route in deployment_route_list.deployment_routes
}
@_ensure_connected
def get_app_config(self) -> Dict:
"""Returns the most recently requested Serve config."""
return ray.get(self._controller.get_app_config.remote())
@_ensure_connected
def get_serve_status(self) -> StatusOverview:
proto = StatusOverviewProto.FromString(
ray.get(self._controller.get_serve_status.remote())
)
return StatusOverview.from_proto(proto)
@_ensure_connected
def get_handle(
self,
deployment_name: str,
missing_ok: Optional[bool] = False,
sync: bool = True,
_internal_pickled_http_request: bool = False,
) -> Union[RayServeHandle, RayServeSyncHandle]:
"""Retrieve RayServeHandle for service deployment to invoke it from Python.
Args:
deployment_name: A registered service deployment.
missing_ok: If true, then Serve won't check the deployment
is registered. False by default.
            sync: If true, then Serve will return a ServeHandle that
                works everywhere. Otherwise, Serve will return a ServeHandle
                that's only usable in an asyncio loop.
Returns:
RayServeHandle
"""
cache_key = (deployment_name, missing_ok, sync)
if cache_key in self.handle_cache:
cached_handle = self.handle_cache[cache_key]
if cached_handle.is_polling and cached_handle.is_same_loop:
return cached_handle
all_endpoints = ray.get(self._controller.get_all_endpoints.remote())
if not missing_ok and deployment_name not in all_endpoints:
raise KeyError(f"Deployment '{deployment_name}' does not exist.")
try:
asyncio_loop_running = asyncio.get_event_loop().is_running()
except RuntimeError as ex:
if "There is no current event loop in thread" in str(ex):
asyncio_loop_running = False
else:
raise ex
if asyncio_loop_running and sync and _WARN_SYNC_ASYNC_HANDLE_CONTEXT:
logger.warning(
"You are retrieving a sync handle inside an asyncio loop. "
"Try getting Deployment.get_handle(.., sync=False) to get better "
"performance. Learn more at https://docs.ray.io/en/latest/serve/"
"handle-guide.html#sync-and-async-handles"
)
if not asyncio_loop_running and not sync and _WARN_SYNC_ASYNC_HANDLE_CONTEXT:
logger.warning(
"You are retrieving an async handle outside an asyncio loop. "
"You should make sure Deployment.get_handle is called inside a "
"running event loop. Or call Deployment.get_handle(.., sync=True) "
"to create sync handle. Learn more at https://docs.ray.io/en/latest/"
"serve/handle-guide.html#sync-and-async-handles"
)
if sync:
handle = RayServeSyncHandle(
self._controller,
deployment_name,
_internal_pickled_http_request=_internal_pickled_http_request,
)
else:
handle = RayServeHandle(
self._controller,
deployment_name,
_internal_pickled_http_request=_internal_pickled_http_request,
)
self.handle_cache[cache_key] = handle
if cache_key in self._evicted_handle_keys:
            logger.warning(
                "You just got a ServeHandle that was evicted from the internal "
                "cache. This means you are creating too many ServeHandles in "
                "the same process, which will degrade Serve's performance. "
                "Please post a GitHub issue at "
                "https://github.com/ray-project/ray/issues so the Serve "
                "team can find a workaround for your use case."
            )
if len(self.handle_cache) > MAX_CACHED_HANDLES:
# Perform random eviction to keep the handle cache from growing
            # infinitely. We used to use WeakValueDictionary but hit
# https://github.com/ray-project/ray/issues/18980.
evict_key = random.choice(list(self.handle_cache.keys()))
self._evicted_handle_keys.add(evict_key)
self.handle_cache.pop(evict_key)
return handle
@_ensure_connected
def get_deploy_args(
self,
name: str,
deployment_def: Union[Callable, Type[Callable], str],
init_args: Tuple[Any],
init_kwargs: Dict[Any, Any],
ray_actor_options: Optional[Dict] = None,
config: Optional[Union[DeploymentConfig, Dict[str, Any]]] = None,
version: Optional[str] = None,
prev_version: Optional[str] = None,
route_prefix: Optional[str] = None,
) -> Dict:
"""
Takes a deployment's configuration, and returns the arguments needed
for the controller to deploy it.
"""
if config is None:
config = {}
if ray_actor_options is None:
ray_actor_options = {}
curr_job_env = ray.get_runtime_context().runtime_env
if "runtime_env" in ray_actor_options:
# It is illegal to set field working_dir to None.
if curr_job_env.get("working_dir") is not None:
ray_actor_options["runtime_env"].setdefault(
"working_dir", curr_job_env.get("working_dir")
)
else:
ray_actor_options["runtime_env"] = curr_job_env
replica_config = ReplicaConfig.create(
deployment_def,
init_args=init_args,
init_kwargs=init_kwargs,
ray_actor_options=ray_actor_options,
)
if isinstance(config, dict):
deployment_config = DeploymentConfig.parse_obj(config)
elif isinstance(config, DeploymentConfig):
deployment_config = config
else:
raise TypeError("config must be a DeploymentConfig or a dictionary.")
deployment_config.version = version
deployment_config.prev_version = prev_version
if (
deployment_config.autoscaling_config is not None
and deployment_config.max_concurrent_queries
< deployment_config.autoscaling_config.target_num_ongoing_requests_per_replica # noqa: E501
):
            logger.warning(
                "Autoscaling will never happen "
                "because 'max_concurrent_queries' is less than "
                "'target_num_ongoing_requests_per_replica'."
            )
controller_deploy_args = {
"name": name,
"deployment_config_proto_bytes": deployment_config.to_proto_bytes(),
"replica_config_proto_bytes": replica_config.to_proto_bytes(),
"route_prefix": route_prefix,
"deployer_job_id": ray.get_runtime_context().job_id,
}
return controller_deploy_args
@_ensure_connected
def log_deployment_update_status(
self, name: str, version: str, updating: bool
) -> str:
tag = f"component=serve deployment={name}"
if updating:
msg = f"Updating deployment '{name}'"
if version is not None:
msg += f" to version '{version}'"
logger.info(f"{msg}. {tag}")
else:
logger.info(
f"Deployment '{name}' is already at version "
f"'{version}', not updating. {tag}"
)
return tag
@_ensure_connected
def log_deployment_ready(self, name: str, version: str, url: str, tag: str) -> None:
if url is not None:
url_part = f" at `{url}`"
else:
url_part = ""
logger.info(
f"Deployment '{name}{':'+version if version else ''}' is ready"
f"{url_part}. {tag}"
)
```
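The `_ensure_connected` decorator above is a small guard pattern: it wraps every public client method and rejects calls once the client has been shut down. The same pattern can be reproduced in isolation; the class and names below are purely illustrative and not part of Serve.
```python
# Standalone sketch of the guard-decorator pattern used by
# ServeControllerClient._ensure_connected; all names are illustrative.
from functools import wraps
from typing import Callable


def ensure_open(f: Callable) -> Callable:
    @wraps(f)
    def check(self, *args, **kwargs):
        if self._closed:
            raise RuntimeError("Client has already been shut down.")
        return f(self, *args, **kwargs)

    return check


class ToyClient:
    def __init__(self) -> None:
        self._closed = False

    @ensure_open
    def ping(self) -> str:
        return "pong"

    def shutdown(self) -> None:
        self._closed = True


client = ToyClient()
assert client.ping() == "pong"
client.shutdown()
try:
    client.ping()
except RuntimeError as e:
    print(e)  # Client has already been shut down.
```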
#### File: ray/serve/config.py
```python
import inspect
import json
from enum import Enum
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import pydantic
from google.protobuf.json_format import MessageToDict
from pydantic import (
BaseModel,
NonNegativeFloat,
PositiveFloat,
NonNegativeInt,
PositiveInt,
validator,
)
from ray import cloudpickle
from ray.serve.constants import (
DEFAULT_GRACEFUL_SHUTDOWN_TIMEOUT_S,
DEFAULT_GRACEFUL_SHUTDOWN_WAIT_LOOP_S,
DEFAULT_HEALTH_CHECK_PERIOD_S,
DEFAULT_HEALTH_CHECK_TIMEOUT_S,
DEFAULT_HTTP_HOST,
DEFAULT_HTTP_PORT,
)
from ray.serve.generated.serve_pb2 import (
DeploymentConfig as DeploymentConfigProto,
DeploymentLanguage,
AutoscalingConfig as AutoscalingConfigProto,
ReplicaConfig as ReplicaConfigProto,
)
from ray._private import ray_option_utils
from ray._private.utils import resources_from_ray_options
class AutoscalingConfig(BaseModel):
# Please keep these options in sync with those in
# `src/ray/protobuf/serve.proto`.
# Publicly exposed options
min_replicas: NonNegativeInt = 1
max_replicas: PositiveInt = 1
target_num_ongoing_requests_per_replica: NonNegativeInt = 1
# Private options below.
# Metrics scraping options
# How often to scrape for metrics
metrics_interval_s: PositiveFloat = 10.0
# Time window to average over for metrics.
look_back_period_s: PositiveFloat = 30.0
# Internal autoscaling configuration options
# Multiplicative "gain" factor to limit scaling decisions
smoothing_factor: PositiveFloat = 1.0
# How frequently to make autoscaling decisions
# loop_period_s: float = CONTROL_LOOP_PERIOD_S
# How long to wait before scaling down replicas
downscale_delay_s: NonNegativeFloat = 600.0
# How long to wait before scaling up replicas
upscale_delay_s: NonNegativeFloat = 30.0
@validator("max_replicas")
def max_replicas_greater_than_or_equal_to_min_replicas(cls, v, values):
if "min_replicas" in values and v < values["min_replicas"]:
raise ValueError(
f"""max_replicas ({v}) must be greater than """
f"""or equal to min_replicas """
f"""({values["min_replicas"]})!"""
)
return v
# TODO(architkulkarni): implement below
# The number of replicas to start with when creating the deployment
# initial_replicas: int = 1
# The num_ongoing_requests_per_replica error ratio (desired / current)
# threshold for overriding `upscale_delay_s`
# panic_mode_threshold: float = 2.0
# TODO(architkulkarni): Add reasonable defaults
class DeploymentConfig(BaseModel):
"""Configuration options for a deployment, to be set by the user.
Args:
num_replicas (Optional[int]): The number of processes to start up that
will handle requests to this deployment. Defaults to 1.
max_concurrent_queries (Optional[int]): The maximum number of queries
that will be sent to a replica of this deployment without receiving
a response. Defaults to 100.
user_config (Optional[Any]): Arguments to pass to the reconfigure
method of the deployment. The reconfigure method is called if
user_config is not None.
graceful_shutdown_wait_loop_s (Optional[float]): Duration
that deployment replicas will wait until there is no more work to
be done before shutting down.
graceful_shutdown_timeout_s (Optional[float]):
Controller waits for this duration to forcefully kill the replica
for shutdown.
health_check_period_s (Optional[float]):
Frequency at which the controller will health check replicas.
health_check_timeout_s (Optional[float]):
Timeout that the controller will wait for a response from the
replica's health check before marking it unhealthy.
"""
num_replicas: NonNegativeInt = 1
max_concurrent_queries: Optional[int] = None
user_config: Any = None
graceful_shutdown_timeout_s: NonNegativeFloat = (
DEFAULT_GRACEFUL_SHUTDOWN_TIMEOUT_S # noqa: E501
)
graceful_shutdown_wait_loop_s: NonNegativeFloat = (
DEFAULT_GRACEFUL_SHUTDOWN_WAIT_LOOP_S # noqa: E501
)
health_check_period_s: PositiveFloat = DEFAULT_HEALTH_CHECK_PERIOD_S
health_check_timeout_s: PositiveFloat = DEFAULT_HEALTH_CHECK_TIMEOUT_S
autoscaling_config: Optional[AutoscalingConfig] = None
    # This flag is used to let the replica know it is deployed from
    # a different language.
is_cross_language: bool = False
    # This flag is used to let the controller know which language
    # the deployment uses.
deployment_language: Any = DeploymentLanguage.PYTHON
version: Optional[str] = None
prev_version: Optional[str] = None
class Config:
validate_assignment = True
extra = "forbid"
arbitrary_types_allowed = True
# Dynamic default for max_concurrent_queries
@validator("max_concurrent_queries", always=True)
def set_max_queries_by_mode(cls, v, values): # noqa 805
if v is None:
v = 100
else:
if v <= 0:
                raise ValueError("max_concurrent_queries must be > 0")
return v
def to_proto(self):
data = self.dict()
if data.get("user_config"):
data["user_config"] = cloudpickle.dumps(data["user_config"])
if data.get("autoscaling_config"):
data["autoscaling_config"] = AutoscalingConfigProto(
**data["autoscaling_config"]
)
return DeploymentConfigProto(**data)
def to_proto_bytes(self):
return self.to_proto().SerializeToString()
@classmethod
def from_proto(cls, proto: DeploymentConfigProto):
data = MessageToDict(
proto,
including_default_value_fields=True,
preserving_proto_field_name=True,
use_integers_for_enums=True,
)
if "user_config" in data:
if data["user_config"] != "":
data["user_config"] = cloudpickle.loads(proto.user_config)
else:
data["user_config"] = None
if "autoscaling_config" in data:
data["autoscaling_config"] = AutoscalingConfig(**data["autoscaling_config"])
if "prev_version" in data:
if data["prev_version"] == "":
data["prev_version"] = None
if "version" in data:
if data["version"] == "":
data["version"] = None
return cls(**data)
@classmethod
def from_proto_bytes(cls, proto_bytes: bytes):
proto = DeploymentConfigProto.FromString(proto_bytes)
return cls.from_proto(proto)
@classmethod
def from_default(cls, ignore_none: bool = False, **kwargs):
"""Creates a default DeploymentConfig and overrides it with kwargs.
Only accepts the same keywords as the class. Passing in any other
keyword raises a ValueError.
Args:
ignore_none: When True, any valid keywords with value None
are ignored, and their values stay default. Invalid keywords
still raise a TypeError.
Raises:
TypeError: when a keyword that's not an argument to the class is
passed in.
"""
config = cls()
valid_config_options = set(config.dict().keys())
# Friendly error if a non-DeploymentConfig kwarg was passed in
for key, val in kwargs.items():
if key not in valid_config_options:
raise TypeError(
f'Got invalid Deployment config option "{key}" '
f"(with value {val}) as keyword argument. All Deployment "
"config options must come from this list: "
f"{list(valid_config_options)}."
)
if ignore_none:
kwargs = {key: val for key, val in kwargs.items() if val is not None}
for key, val in kwargs.items():
config.__setattr__(key, val)
return config
class ReplicaConfig:
"""Configuration for a deployment's replicas.
Provides five main properties (see property docstrings for more info):
deployment_def: the code, or a reference to the code, that this
replica should run.
init_args: the deployment_def's init_args.
init_kwargs: the deployment_def's init_kwargs.
ray_actor_options: the Ray actor options to pass into the replica's
actor.
resource_dict: contains info on this replica's actor's resource needs.
Offers a serialized equivalent (e.g. serialized_deployment_def) for
deployment_def, init_args, and init_kwargs. Deserializes these properties
when they're first accessed, if they were not passed in directly through
create().
Use the classmethod create() to make a ReplicaConfig with the deserialized
properties.
Note: overwriting or setting any property after the ReplicaConfig has been
constructed is currently undefined behavior. The config's fields should not
be modified externally after it is created.
"""
def __init__(
self,
deployment_def_name: str,
serialized_deployment_def: bytes,
serialized_init_args: bytes,
serialized_init_kwargs: bytes,
ray_actor_options: Dict,
):
"""Construct a ReplicaConfig with serialized properties.
All parameters are required. See classmethod create() for defaults.
"""
self.deployment_def_name = deployment_def_name
# Store serialized versions of code properties.
self.serialized_deployment_def = serialized_deployment_def
self.serialized_init_args = serialized_init_args
self.serialized_init_kwargs = serialized_init_kwargs
# Deserialize properties when first accessed. See @property methods.
self._deployment_def = None
self._init_args = None
self._init_kwargs = None
# Configure ray_actor_options. These are the Ray options ultimately
# passed into the replica's actor when it's created.
self.ray_actor_options = ray_actor_options
self._validate_ray_actor_options()
# Create resource_dict. This contains info about the replica's resource
# needs. It does NOT set the replica's resource usage. That's done by
# the ray_actor_options.
self.resource_dict = resources_from_ray_options(self.ray_actor_options)
@classmethod
def create(
cls,
deployment_def: Union[Callable, str],
init_args: Optional[Tuple[Any]] = None,
init_kwargs: Optional[Dict[Any, Any]] = None,
ray_actor_options: Optional[Dict] = None,
deployment_def_name: Optional[str] = None,
):
"""Create a ReplicaConfig from deserialized parameters."""
if inspect.isfunction(deployment_def):
if init_args:
raise ValueError("init_args not supported for function deployments.")
elif init_kwargs:
raise ValueError("init_kwargs not supported for function deployments.")
if not isinstance(deployment_def, (Callable, str)):
raise TypeError(
f'Got invalid type "{type(deployment_def)}" for '
"deployment_def. Expected deployment_def to be a "
"class, function, or string."
)
# Set defaults
if init_args is None:
init_args = ()
if init_kwargs is None:
init_kwargs = {}
if ray_actor_options is None:
ray_actor_options = {}
if deployment_def_name is None:
if isinstance(deployment_def, str):
deployment_def_name = deployment_def
else:
deployment_def_name = deployment_def.__name__
config = cls(
deployment_def_name,
cloudpickle.dumps(deployment_def),
cloudpickle.dumps(init_args),
cloudpickle.dumps(init_kwargs),
ray_actor_options,
)
config._deployment_def = deployment_def
config._init_args = init_args
config._init_kwargs = init_kwargs
return config
def _validate_ray_actor_options(self) -> None:
if not isinstance(self.ray_actor_options, dict):
raise TypeError(
f'Got invalid type "{type(self.ray_actor_options)}" for '
"ray_actor_options. Expected a dictionary."
)
allowed_ray_actor_options = {
# Resource options
"accelerator_type",
"memory",
"num_cpus",
"num_gpus",
"object_store_memory",
"resources",
# Other options
"runtime_env",
}
for option in self.ray_actor_options:
if option not in allowed_ray_actor_options:
raise ValueError(
f"Specifying '{option}' in ray_actor_options is not allowed. "
f"Allowed options: {allowed_ray_actor_options}"
)
ray_option_utils.validate_actor_options(self.ray_actor_options, in_options=True)
# Set Serve replica defaults
if self.ray_actor_options.get("num_cpus") is None:
self.ray_actor_options["num_cpus"] = 1
@property
def deployment_def(self) -> Union[Callable, str]:
"""The code, or a reference to the code, that this replica runs.
For Python replicas, this can be one of the following:
- Function (Callable)
- Class (Callable)
- Import path (str)
For Java replicas, this can be one of the following:
- Class path (str)
"""
if self._deployment_def is None:
self._deployment_def = cloudpickle.loads(self.serialized_deployment_def)
return self._deployment_def
@property
def init_args(self) -> Optional[Tuple[Any]]:
"""The init_args for a Python class.
This property is only meaningful if deployment_def is a Python class.
Otherwise, it is None.
"""
if self._init_args is None:
self._init_args = cloudpickle.loads(self.serialized_init_args)
return self._init_args
@property
    def init_kwargs(self) -> Optional[Dict[Any, Any]]:
"""The init_kwargs for a Python class.
This property is only meaningful if deployment_def is a Python class.
Otherwise, it is None.
"""
if self._init_kwargs is None:
self._init_kwargs = cloudpickle.loads(self.serialized_init_kwargs)
return self._init_kwargs
@classmethod
def from_proto(
cls, proto: ReplicaConfigProto, deployment_language: DeploymentLanguage
):
if deployment_language == DeploymentLanguage.PYTHON:
deployment_def = proto.deployment_def
else:
# TODO use messagepack
deployment_def = proto.deployment_def
return ReplicaConfig(
proto.deployment_def_name,
deployment_def,
proto.init_args,
proto.init_kwargs,
json.loads(proto.ray_actor_options),
)
@classmethod
def from_proto_bytes(
cls, proto_bytes: bytes, deployment_language: DeploymentLanguage
):
proto = ReplicaConfigProto.FromString(proto_bytes)
return cls.from_proto(proto, deployment_language)
def to_proto(self):
return ReplicaConfigProto(
deployment_def_name=self.deployment_def_name,
deployment_def=self.serialized_deployment_def,
init_args=self.serialized_init_args,
init_kwargs=self.serialized_init_kwargs,
ray_actor_options=json.dumps(self.ray_actor_options),
)
def to_proto_bytes(self):
return self.to_proto().SerializeToString()
class DeploymentMode(str, Enum):
NoServer = "NoServer"
HeadOnly = "HeadOnly"
EveryNode = "EveryNode"
FixedNumber = "FixedNumber"
class HTTPOptions(pydantic.BaseModel):
# Documentation inside serve.start for user's convenience.
host: Optional[str] = DEFAULT_HTTP_HOST
port: int = DEFAULT_HTTP_PORT
middlewares: List[Any] = []
location: Optional[DeploymentMode] = DeploymentMode.HeadOnly
num_cpus: int = 0
root_url: str = ""
root_path: str = ""
fixed_number_replicas: Optional[int] = None
fixed_number_selection_seed: int = 0
@validator("location", always=True)
def location_backfill_no_server(cls, v, values):
if values["host"] is None or v is None:
return DeploymentMode.NoServer
return v
@validator("fixed_number_replicas", always=True)
def fixed_number_replicas_should_exist(cls, v, values):
if values["location"] == DeploymentMode.FixedNumber and v is None:
raise ValueError(
"When location='FixedNumber', you must specify "
"the `fixed_number_replicas` parameter."
)
return v
class Config:
validate_assignment = True
extra = "forbid"
arbitrary_types_allowed = True
```
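The pydantic validators above enforce the config invariants at construction and assignment time. A short sketch of how they surface to callers, assuming the classes are importable as `ray.serve.config` (the import path is taken from the file location and may differ between Ray versions):
```python
from ray.serve.config import AutoscalingConfig, DeploymentConfig

# max_replicas must be >= min_replicas, otherwise the validator raises
# (pydantic's ValidationError is a subclass of ValueError).
ok = AutoscalingConfig(min_replicas=1, max_replicas=4)
try:
    AutoscalingConfig(min_replicas=5, max_replicas=2)
except ValueError as e:
    print(e)

# max_concurrent_queries defaults to 100 when left as None.
cfg = DeploymentConfig()
assert cfg.max_concurrent_queries == 100

# from_default() only accepts known config fields; ignore_none skips
# explicit None values so the defaults are kept.
cfg2 = DeploymentConfig.from_default(num_replicas=3, user_config=None, ignore_none=True)
assert cfg2.num_replicas == 3 and cfg2.user_config is None
```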
#### File: ray/serve/deployment_function_node.py
```python
import inspect
from typing import Any, Callable, Dict, List, Union
from ray.dag.dag_node import DAGNode
from ray.dag.format_utils import get_dag_node_str
from ray.serve.deployment import Deployment, schema_to_deployment
from ray.serve.config import DeploymentConfig
from ray.serve.handle import RayServeLazySyncHandle
from ray.serve.schema import DeploymentSchema
class DeploymentFunctionNode(DAGNode):
"""Represents a function node decorated by @serve.deployment in a serve DAG."""
def __init__(
self,
func_body: Union[Callable, str],
deployment_name,
func_args,
func_kwargs,
func_options,
other_args_to_resolve=None,
):
self._body = func_body
self._deployment_name = deployment_name
super().__init__(
func_args,
func_kwargs,
func_options,
other_args_to_resolve=other_args_to_resolve,
)
if "deployment_schema" in self._bound_other_args_to_resolve:
deployment_schema: DeploymentSchema = self._bound_other_args_to_resolve[
"deployment_schema"
]
deployment_shell = schema_to_deployment(deployment_schema)
# Prefer user specified name to override the generated one.
if (
inspect.isfunction(func_body)
and deployment_shell.name != func_body.__name__
):
self._deployment_name = deployment_shell.name
            # Set the route prefix: prefer the one the user supplied,
            # otherwise set it to /{deployment_name}.
if (
deployment_shell.route_prefix is None
or deployment_shell.route_prefix != f"/{deployment_shell.name}"
):
route_prefix = deployment_shell.route_prefix
else:
route_prefix = f"/{deployment_name}"
self._deployment = deployment_shell.options(
func_or_class=func_body,
name=self._deployment_name,
init_args=(),
init_kwargs={},
route_prefix=route_prefix,
)
else:
self._deployment: Deployment = Deployment(
func_body,
deployment_name,
DeploymentConfig(),
init_args=tuple(),
init_kwargs=dict(),
ray_actor_options=func_options,
_internal=True,
)
# TODO (jiaodong): Polish with async handle support later
self._deployment_handle = RayServeLazySyncHandle(self._deployment.name)
def _copy_impl(
self,
new_args: List[Any],
new_kwargs: Dict[str, Any],
new_options: Dict[str, Any],
new_other_args_to_resolve: Dict[str, Any],
):
return DeploymentFunctionNode(
self._body,
self._deployment_name,
new_args,
new_kwargs,
new_options,
other_args_to_resolve=new_other_args_to_resolve,
)
def __str__(self) -> str:
return get_dag_node_str(self, str(self._body))
def get_deployment_name(self):
return self._deployment_name
```
#### File: tests/gcp/test_gcp_node_provider.py
```python
from typing import Dict
from threading import RLock
import pytest
from unittest.mock import MagicMock, patch
from ray.autoscaler._private.gcp.node import (
GCPCompute,
GCPNode,
GCPNodeType,
GCPResource,
)
from ray.autoscaler._private.gcp.node_provider import GCPNodeProvider
_PROJECT_NAME = "project-one"
_AZ = "us-west1-b"
def test_create_node_returns_dict():
mock_node_config = {"machineType": "n2-standard-8"}
mock_results = [({"dict": 1}, "instance_id1"), ({"dict": 2}, "instance_id2")]
mock_resource = MagicMock()
mock_resource.create_instances.return_value = mock_results
expected_return_value = {"instance_id1": {"dict": 1}, "instance_id2": {"dict": 2}}
def __init__(self, provider_config: dict, cluster_name: str):
self.lock = RLock()
self.cached_nodes: Dict[str, GCPNode] = {}
self.resources: Dict[GCPNodeType, GCPResource] = {}
self.resources[GCPNodeType.COMPUTE] = mock_resource
with patch.object(GCPNodeProvider, "__init__", __init__):
node_provider = GCPNodeProvider({}, "")
create_node_return_value = node_provider.create_node(mock_node_config, {}, 1)
assert create_node_return_value == expected_return_value
@pytest.mark.parametrize(
"test_case",
[
("n1-standard-4", f"zones/{_AZ}/machineTypes/n1-standard-4"),
(
f"zones/{_AZ}/machineTypes/n1-standard-4",
f"zones/{_AZ}/machineTypes/n1-standard-4",
),
],
)
def test_convert_resources_to_urls_machine(test_case):
gcp_compute = GCPCompute(None, _PROJECT_NAME, _AZ, "cluster_name")
base_machine, result_machine = test_case
modified_config = gcp_compute._convert_resources_to_urls(
{"machineType": base_machine}
)
assert modified_config["machineType"] == result_machine
@pytest.mark.parametrize(
"test_case",
[
(
"nvidia-tesla-k80",
f"projects/{_PROJECT_NAME}/zones/{_AZ}/acceleratorTypes/nvidia-tesla-k80",
),
(
f"projects/{_PROJECT_NAME}/zones/{_AZ}/acceleratorTypes/nvidia-tesla-k80",
f"projects/{_PROJECT_NAME}/zones/{_AZ}/acceleratorTypes/nvidia-tesla-k80",
),
],
)
def test_convert_resources_to_urls_accelerators(test_case):
gcp_compute = GCPCompute(None, _PROJECT_NAME, _AZ, "cluster_name")
base_accel, result_accel = test_case
base_config = {
"machineType": "n1-standard-4",
"guestAccelerators": [{"acceleratorCount": 1, "acceleratorType": base_accel}],
}
modified_config = gcp_compute._convert_resources_to_urls(base_config)
assert modified_config["guestAccelerators"][0]["acceleratorType"] == result_accel
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
```
#### File: ray/tests/test_actor_out_of_order.py
```python
import sys
import ray
import ray.cluster_utils
from ray._private.test_utils import SignalActor
def test_threaded_actor_execute_out_of_order(shutdown_only):
ray.init()
@ray.remote
class A:
def echo(self, inp):
print(inp)
return inp
actor = SignalActor.remote()
inp_ref_1 = actor.wait.remote()
inp_ref_2 = ray.put(2)
a = A.options(max_concurrency=2).remote()
a.echo.remote(inp_ref_1)
out_ref_2 = a.echo.remote(inp_ref_2)
assert ray.get(out_ref_2, timeout=5) == 2
def test_async_actor_execute_out_of_order(shutdown_only):
ray.init()
@ray.remote
class A:
async def echo(self, inp):
print(inp)
return inp
actor = SignalActor.remote()
inp_ref_1 = actor.wait.remote()
inp_ref_2 = ray.put(2)
a = A.options(max_concurrency=2).remote()
a.echo.remote(inp_ref_1)
out_ref_2 = a.echo.remote(inp_ref_2)
assert ray.get(out_ref_2, timeout=5) == 2
if __name__ == "__main__":
import os
import pytest
# Test suite is timing out. Disable on windows for now.
if os.environ.get("PARALLEL_CI"):
sys.exit(pytest.main(["-n", "auto", "--boxed", "-vs", __file__]))
else:
sys.exit(pytest.main(["-sv", __file__]))
```
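To make the out-of-order behavior above concrete, here is a minimal sketch that is not part of the test file; it assumes the `SignalActor` helper from `ray._private.test_utils` with its `wait()`/`send()` methods, and shows that the second call can finish while the first call's argument is still unresolved.
```python
import ray
from ray._private.test_utils import SignalActor


@ray.remote
class Echo:
    def echo(self, inp):
        return inp


ray.init()
signal = SignalActor.remote()
echo_actor = Echo.options(max_concurrency=2).remote()

# The first call's argument is unresolved until the signal is sent, so it cannot run yet.
blocked_ref = echo_actor.echo.remote(signal.wait.remote())
# The second call does not queue behind the first one (out-of-order execution).
fast_ref = echo_actor.echo.remote(ray.put(2))

assert ray.get(fast_ref, timeout=5) == 2        # completes first
ray.get(signal.send.remote())                   # release the gate
assert ray.get(blocked_ref, timeout=5) is None  # SignalActor.wait() returns None
ray.shutdown()
```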
#### File: ray/tests/test_autoscaler_fake_scaledown.py
```python
import pytest
import platform
import numpy as np
import re
import ray
from ray._private.test_utils import wait_for_condition
from ray.cluster_utils import AutoscalingCluster
# Triggers the addition of a worker node.
@ray.remote(num_cpus=1)
class Actor:
def __init__(self):
self.data = []
def f(self):
pass
def recv(self, obj):
pass
def create(self, size):
return np.zeros(size)
# Tests that we scale down even if secondary copies of objects are present on
# idle nodes: https://github.com/ray-project/ray/issues/21870
@pytest.mark.skipif(platform.system() == "Windows", reason="Failing on Windows.")
def test_scaledown_shared_objects(shutdown_only):
cluster = AutoscalingCluster(
head_resources={"CPU": 0},
worker_node_types={
"cpu_node": {
"resources": {
"CPU": 1,
"object_store_memory": 100 * 1024 * 1024,
},
"node_config": {},
"min_workers": 0,
"max_workers": 5,
},
},
idle_timeout_minutes=0.05,
)
try:
cluster.start(_system_config={"scheduler_report_pinned_bytes_only": True})
ray.init("auto")
actors = [Actor.remote() for _ in range(5)]
ray.get([a.f.remote() for a in actors])
print("All five nodes launched")
# Verify scale-up.
wait_for_condition(lambda: ray.cluster_resources().get("CPU", 0) == 5)
data = actors[0].create.remote(1024 * 1024 * 5)
ray.get([a.recv.remote(data) for a in actors])
print("Data broadcast successfully, deleting actors.")
del actors
# Verify scale-down.
wait_for_condition(
lambda: ray.cluster_resources().get("CPU", 0) == 1, timeout=30
)
finally:
cluster.shutdown()
def check_memory(local_objs, num_spilled_objects=None, num_plasma_objects=None):
def ok():
s = ray.internal.internal_api.memory_summary()
print(f"\n\nMemory Summary:\n{s}\n")
actual_objs = re.findall(r"LOCAL_REFERENCE[\s|\|]+([0-9a-f]+)", s)
if sorted(actual_objs) != sorted(local_objs):
raise RuntimeError(
f"Expect local objects={local_objs}, actual={actual_objs}"
)
if num_spilled_objects is not None:
m = re.search(r"Spilled (\d+) MiB, (\d+) objects", s)
if m is not None:
actual_spilled_objects = int(m.group(2))
if actual_spilled_objects < num_spilled_objects:
raise RuntimeError(
f"Expected spilled objects={num_spilled_objects} "
f"greater than actual={actual_spilled_objects}"
)
if num_plasma_objects is not None:
m = re.search(r"Plasma memory usage (\d+) MiB, (\d+) objects", s)
if m is None:
raise RuntimeError(
"Memory summary does not contain Plasma memory objects count"
)
actual_plasma_objects = int(m.group(2))
if actual_plasma_objects != num_plasma_objects:
raise RuntimeError(
f"Expected plasma objects={num_plasma_objects} not equal "
f"to actual={actual_plasma_objects}"
)
return True
wait_for_condition(ok, timeout=30, retry_interval_ms=5000)
# Tests that node with live spilled object does not get scaled down.
@pytest.mark.skipif(platform.system() == "Windows", reason="Failing on Windows.")
def test_no_scaledown_with_spilled_objects(shutdown_only):
cluster = AutoscalingCluster(
head_resources={"CPU": 0},
worker_node_types={
"cpu_node": {
"resources": {
"CPU": 1,
"object_store_memory": 75 * 1024 * 1024,
},
"node_config": {},
"min_workers": 0,
"max_workers": 2,
},
},
idle_timeout_minutes=0.05,
)
try:
cluster.start(
_system_config={
"scheduler_report_pinned_bytes_only": True,
"min_spilling_size": 0,
}
)
ray.init("auto")
actors = [Actor.remote() for _ in range(2)]
ray.get([a.f.remote() for a in actors])
# Verify scale-up.
wait_for_condition(lambda: ray.cluster_resources().get("CPU", 0) == 2)
print("All nodes launched")
# Put 10 x 80MiB objects into the object store with 75MiB memory limit.
obj_size = 10 * 1024 * 1024
objs = []
for i in range(10):
obj = actors[0].create.remote(obj_size)
ray.get(actors[1].recv.remote(obj))
objs.append(obj)
print(f"obj {i}={obj.hex()}")
del obj
# At least 9 out of the 10 objects should have spilled.
check_memory([obj.hex() for obj in objs], num_spilled_objects=9)
print("Objects spilled, deleting actors and object references.")
# Assume the 1st object always gets spilled.
spilled_obj = objs[0]
del objs
del actors
# Verify scale-down to 1 node.
def scaledown_to_one():
cpu = ray.cluster_resources().get("CPU", 0)
assert cpu > 0, "Scale-down should keep at least 1 node"
return cpu == 1
wait_for_condition(scaledown_to_one, timeout=30)
# Verify the spilled object still exists, and there is no object in the
# plasma store.
check_memory([spilled_obj.hex()], num_plasma_objects=0)
# Delete the spilled object, the remaining worker node should be scaled
# down.
del spilled_obj
wait_for_condition(lambda: ray.cluster_resources().get("CPU", 0) == 0)
check_memory([], num_plasma_objects=0)
finally:
cluster.shutdown()
if __name__ == "__main__":
import os
import sys
if os.environ.get("PARALLEL_CI"):
sys.exit(pytest.main(["-n", "auto", "--boxed", "-vs", __file__]))
else:
sys.exit(pytest.main(["-sv", __file__]))
```
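As a quick illustration of how the regular expressions in `check_memory` pick counts out of the memory summary, here is a small sketch; the summary text is a made-up fragment shaped like the lines those regexes expect, not real `memory_summary()` output.
```python
import re

# Hypothetical fragment in the format expected by check_memory above.
summary = (
    "Plasma memory usage 50 MiB, 1 objects, 12.3% full\n"
    "Spilled 80 MiB, 9 objects, avg write throughput 200 MiB/s\n"
)

spilled = re.search(r"Spilled (\d+) MiB, (\d+) objects", summary)
plasma = re.search(r"Plasma memory usage (\d+) MiB, (\d+) objects", summary)

print(int(spilled.group(2)))  # 9 spilled objects
print(int(plasma.group(2)))   # 1 object still pinned in plasma
```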
#### File: ray/tests/test_list_actors.py
```python
import pytest
import sys
import ray
from ray._private.test_utils import wait_for_condition
def test_list_named_actors_basic(ray_start_regular):
@ray.remote
class A:
pass
a = A.remote()
assert not ray.util.list_named_actors()
a = A.options(name="hi").remote()
assert len(ray.util.list_named_actors()) == 1
assert "hi" in ray.util.list_named_actors()
b = A.options(name="hi2").remote()
assert len(ray.util.list_named_actors()) == 2
assert "hi" in ray.util.list_named_actors()
assert "hi2" in ray.util.list_named_actors()
def one_actor():
actors = ray.util.list_named_actors()
return actors == ["hi2"]
del a
wait_for_condition(one_actor)
del b
wait_for_condition(lambda: not ray.util.list_named_actors())
@pytest.mark.parametrize("ray_start_regular", [{"local_mode": True}], indirect=True)
def test_list_named_actors_basic_local_mode(ray_start_regular):
@ray.remote
class A:
pass
a = A.remote()
assert not ray.util.list_named_actors()
a = A.options(name="hi").remote() # noqa: F841
assert len(ray.util.list_named_actors()) == 1
assert "hi" in ray.util.list_named_actors()
b = A.options(name="hi2").remote() # noqa: F841
assert len(ray.util.list_named_actors()) == 2
assert "hi" in ray.util.list_named_actors()
assert "hi2" in ray.util.list_named_actors()
if __name__ == "__main__":
import os
# Test suite is timing out. Disable on windows for now.
if os.environ.get("PARALLEL_CI"):
sys.exit(pytest.main(["-n", "auto", "--boxed", "-vs", __file__]))
else:
sys.exit(pytest.main(["-sv", __file__]))
```
#### File: ray/tests/test_logging.py
```python
import os
import re
import sys
import time
from unittest.mock import MagicMock
from collections import defaultdict, Counter
from pathlib import Path
import subprocess
import tempfile
import pytest
import ray
from ray.cross_language import java_actor_class
from ray import ray_constants
from ray._private.test_utils import (
get_log_batch,
wait_for_condition,
init_log_pubsub,
get_log_message,
run_string_as_driver,
)
from ray._private.log_monitor import (
LogMonitor,
LOG_NAME_UPDATE_INTERVAL_S,
RAY_LOG_MONITOR_MANY_FILES_THRESHOLD,
)
def set_logging_config(monkeypatch, max_bytes, backup_count):
monkeypatch.setenv("RAY_ROTATION_MAX_BYTES", str(max_bytes))
monkeypatch.setenv("RAY_ROTATION_BACKUP_COUNT", str(backup_count))
def test_log_rotation_config(ray_start_cluster, monkeypatch):
cluster = ray_start_cluster
max_bytes = 100
backup_count = 3
# Create a cluster.
set_logging_config(monkeypatch, max_bytes, backup_count)
head_node = cluster.add_node(num_cpus=0)
# Set a different env var for a worker node.
set_logging_config(monkeypatch, 0, 0)
worker_node = cluster.add_node(num_cpus=0)
cluster.wait_for_nodes()
config = head_node.logging_config
assert config["log_rotation_max_bytes"] == max_bytes
assert config["log_rotation_backup_count"] == backup_count
config = worker_node.logging_config
assert config["log_rotation_max_bytes"] == 0
assert config["log_rotation_backup_count"] == 0
def test_log_rotation(shutdown_only, monkeypatch):
max_bytes = 1
backup_count = 3
set_logging_config(monkeypatch, max_bytes, backup_count)
ray.init(num_cpus=1)
session_dir = ray.worker.global_worker.node.address_info["session_dir"]
session_path = Path(session_dir)
log_dir_path = session_path / "logs"
log_rotating_component = [
ray_constants.PROCESS_TYPE_DASHBOARD,
ray_constants.PROCESS_TYPE_DASHBOARD_AGENT,
ray_constants.PROCESS_TYPE_LOG_MONITOR,
ray_constants.PROCESS_TYPE_MONITOR,
ray_constants.PROCESS_TYPE_PYTHON_CORE_WORKER_DRIVER,
ray_constants.PROCESS_TYPE_PYTHON_CORE_WORKER,
        # The components below do not rotate logs yet.

# ray_constants.PROCESS_TYPE_RAYLET,
# ray_constants.PROCESS_TYPE_GCS_SERVER,
# ray_constants.PROCESS_TYPE_WORKER,
]
# Run the basic workload.
@ray.remote
def f():
for i in range(10):
print(f"test {i}")
# Create a runtime env to make sure dashboard agent is alive.
ray.get(f.options(runtime_env={"env_vars": {"A": "a", "B": "b"}}).remote())
paths = list(log_dir_path.iterdir())
def component_exist(component, paths):
for path in paths:
filename = path.stem
if component in filename:
return True
return False
def component_file_only_one_log_entry(component):
"""Since max_bytes is 1, the log file should
only have at most one log entry.
"""
for path in paths:
if not component_exist(component, [path]):
continue
with open(path) as file:
found = False
for line in file:
if re.match(r"^\[?\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d", line):
if found:
return False
found = True
return True
for component in log_rotating_component:
assert component_exist(component, paths), paths
assert component_file_only_one_log_entry(component)
# Check if the backup count is respected.
file_cnts = defaultdict(int)
for path in paths:
filename = path.name
parts = filename.split(".")
if len(parts) == 3:
filename_without_suffix = parts[0]
file_cnts[filename_without_suffix] += 1
for filename, file_cnt in file_cnts.items():
assert file_cnt <= backup_count, (
f"{filename} has files that are more than "
f"backup count {backup_count}, file count: {file_cnt}"
)
def test_periodic_event_stats(shutdown_only):
ray.init(
num_cpus=1,
_system_config={"event_stats_print_interval_ms": 100, "event_stats": True},
)
session_dir = ray.worker.global_worker.node.address_info["session_dir"]
session_path = Path(session_dir)
log_dir_path = session_path / "logs"
# Run the basic workload.
@ray.remote
def f():
pass
ray.get(f.remote())
paths = list(log_dir_path.iterdir())
def is_event_loop_stats_found(path):
found = False
with open(path) as f:
event_loop_stats_identifier = "Event stats"
for line in f.readlines():
if event_loop_stats_identifier in line:
found = True
return found
for path in paths:
# Need to remove suffix to avoid reading log rotated files.
if "python-core-driver" in str(path):
wait_for_condition(lambda: is_event_loop_stats_found(path))
if "raylet.out" in str(path):
wait_for_condition(lambda: is_event_loop_stats_found(path))
if "gcs_server.out" in str(path):
wait_for_condition(lambda: is_event_loop_stats_found(path))
def test_worker_id_names(shutdown_only):
ray.init(
num_cpus=1,
_system_config={"event_stats_print_interval_ms": 100, "event_stats": True},
)
session_dir = ray.worker.global_worker.node.address_info["session_dir"]
session_path = Path(session_dir)
log_dir_path = session_path / "logs"
# Run the basic workload.
@ray.remote
def f():
print("hello")
ray.get(f.remote())
paths = list(log_dir_path.iterdir())
ids = []
for path in paths:
if "python-core-worker" in str(path):
pattern = ".*-([a-f0-9]*).*"
elif "worker" in str(path):
pattern = ".*worker-([a-f0-9]*)-.*-.*"
else:
continue
worker_id = re.match(pattern, str(path)).group(1)
ids.append(worker_id)
counts = Counter(ids).values()
for count in counts:
# There should be a "python-core-.*.log", "worker-.*.out",
# and "worker-.*.err"
assert count == 3
def test_log_pid_with_hex_job_id(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=4)
def submit_job():
# Connect a driver to the Ray cluster.
ray.init(address=cluster.address, ignore_reinit_error=True)
p = init_log_pubsub()
# It always prints the monitor messages.
logs = get_log_message(p, 1)
@ray.remote
def f():
print("remote func")
ray.get(f.remote())
def matcher(log_batch):
return log_batch["task_name"] == "f"
logs = get_log_batch(p, 1, matcher=matcher)
        # It should log with the pid of the hex job id instead of None.
assert logs[0]["pid"] is not None
ray.shutdown()
    # NOTE(xychu): loop ten times to make the job id go from 01000000 to 0a000000,
    # in order to trigger the hex pattern.
for _ in range(10):
submit_job()
def test_ignore_windows_access_violation(ray_start_regular_shared):
@ray.remote
def print_msg():
print("Windows fatal exception: access violation\n")
@ray.remote
def print_after(_obj):
print("done")
p = init_log_pubsub()
print_after.remote(print_msg.remote())
msgs = get_log_message(
p, num=3, timeout=1, job_id=ray.get_runtime_context().job_id.hex()
)
assert len(msgs) == 1, msgs
assert msgs[0][0] == "done"
def test_log_redirect_to_stderr(shutdown_only, capfd):
log_components = {
ray_constants.PROCESS_TYPE_DASHBOARD: "Dashboard head grpc address",
ray_constants.PROCESS_TYPE_DASHBOARD_AGENT: "Dashboard agent grpc address",
ray_constants.PROCESS_TYPE_GCS_SERVER: "Loading job table data",
# No log monitor output if all components are writing to stderr.
ray_constants.PROCESS_TYPE_LOG_MONITOR: "",
ray_constants.PROCESS_TYPE_MONITOR: "Starting monitor using ray installation",
ray_constants.PROCESS_TYPE_PYTHON_CORE_WORKER: "worker server started",
ray_constants.PROCESS_TYPE_PYTHON_CORE_WORKER_DRIVER: "driver server started",
# TODO(Clark): Add coverage for Ray Client.
# ray_constants.PROCESS_TYPE_RAY_CLIENT_SERVER: "Starting Ray Client server",
ray_constants.PROCESS_TYPE_RAY_CLIENT_SERVER: "",
ray_constants.PROCESS_TYPE_RAYLET: "Starting object store with directory",
# No reaper process run (kernel fate-sharing).
ray_constants.PROCESS_TYPE_REAPER: "",
# No reporter process run.
ray_constants.PROCESS_TYPE_REPORTER: "",
# No web UI process run.
ray_constants.PROCESS_TYPE_WEB_UI: "",
# Unused.
ray_constants.PROCESS_TYPE_WORKER: "",
}
script = """
import os
from pathlib import Path
import ray
os.environ["RAY_LOG_TO_STDERR"] = "1"
ray.init()
session_dir = ray.worker.global_worker.node.address_info["session_dir"]
session_path = Path(session_dir)
log_dir_path = session_path / "logs"
# Run the basic workload.
@ray.remote
def f():
for i in range(10):
print(f"test {{i}}")
ray.get(f.remote())
log_component_names = {}
# Confirm that no log files are created for any of the components.
paths = list(path.stem for path in log_dir_path.iterdir())
assert set(log_component_names).isdisjoint(set(paths)), paths
""".format(
str(list(log_components.keys()))
)
stderr = run_string_as_driver(script)
# Make sure that the expected startup log records for each of the
# components appears in the stderr stream.
# stderr = capfd.readouterr().err
for component, canonical_record in log_components.items():
if not canonical_record:
# Process not run or doesn't generate logs; skip.
continue
assert canonical_record in stderr, stderr
if component == ray_constants.PROCESS_TYPE_REDIS_SERVER:
# Redis doesn't expose hooks for custom log formats, so we aren't able to
# inject the Redis server component name into the log records.
continue
# NOTE: We do a prefix match instead of including the enclosing right
# parentheses since some components, like the core driver and worker, add a
# unique ID suffix.
assert f"({component}" in stderr, stderr
def test_segfault_stack_trace(ray_start_cluster, capsys):
@ray.remote
def f():
import ctypes
ctypes.string_at(0)
with pytest.raises(
ray.exceptions.WorkerCrashedError, match="The worker died unexpectedly"
):
ray.get(f.remote())
stderr = capsys.readouterr().err
assert (
"*** SIGSEGV received at" in stderr
), f"C++ stack trace not found in stderr: {stderr}"
assert (
"Fatal Python error: Segmentation fault" in stderr
), f"Python stack trace not found in stderr: {stderr}"
@pytest.mark.skipif(
sys.platform == "win32" or sys.platform == "darwin",
reason="TODO(simon): Failing on Windows and OSX.",
)
def test_log_java_worker_logs(shutdown_only, capsys):
tmp_dir = tempfile.mkdtemp()
print("using tmp_dir", tmp_dir)
with open(os.path.join(tmp_dir, "MyClass.java"), "w") as f:
f.write(
"""
public class MyClass {
public int printToLog(String line) {
System.err.println(line);
return 0;
}
}
"""
)
subprocess.check_call(["javac", "MyClass.java"], cwd=tmp_dir)
subprocess.check_call(["jar", "-cf", "myJar.jar", "MyClass.class"], cwd=tmp_dir)
ray.init(
job_config=ray.job_config.JobConfig(code_search_path=[tmp_dir]),
)
handle = java_actor_class("MyClass").remote()
ray.get(handle.printToLog.remote("here's my random line!"))
def check():
out, err = capsys.readouterr()
out += err
with capsys.disabled():
print(out)
return "here's my random line!" in out
wait_for_condition(check)
"""
Unit testing log monitor.
"""
def create_file(dir, filename, content):
f = dir / filename
f.write_text(content)
@pytest.mark.skipif(
sys.platform == "win32",
reason="Failing on Windows",
)
def test_log_monitor(tmp_path):
log_dir = tmp_path / "logs"
log_dir.mkdir()
# Create an old dir.
(log_dir / "old").mkdir()
worker_id = "6df6d5dd8ca5215658e4a8f9a569a9d98e27094f9cc35a4ca43d272c"
job_id = "01000000"
dead_pid = "47660"
alive_pid = "12345"
def proc_alive(pid):
return pid != int(dead_pid)
mock_publisher = MagicMock()
log_monitor = LogMonitor(str(log_dir), mock_publisher, proc_alive, max_files_open=5)
# files
worker_out_log_file = f"worker-{worker_id}-{job_id}-{dead_pid}.out"
worker_err_log_file = f"worker-{worker_id}-{job_id}-{dead_pid}.err"
monitor = "monitor.log"
raylet_out = "raylet.out"
raylet_err = "raylet.err"
gcs_server_err = "gcs_server.1.err"
contents = "123"
create_file(log_dir, raylet_err, contents)
create_file(log_dir, raylet_out, contents)
create_file(log_dir, gcs_server_err, contents)
create_file(log_dir, monitor, contents)
create_file(log_dir, worker_out_log_file, contents)
create_file(log_dir, worker_err_log_file, contents)
"""
Test files are updated.
"""
log_monitor.update_log_filenames()
assert len(log_monitor.open_file_infos) == 0
assert len(log_monitor.closed_file_infos) == 5
assert log_monitor.can_open_more_files is True
assert len(log_monitor.log_filenames) == 5
def file_exists(log_filenames, filename):
for f in log_filenames:
if filename in f:
return True
return False
assert file_exists(log_monitor.log_filenames, raylet_err)
assert not file_exists(log_monitor.log_filenames, raylet_out)
assert file_exists(log_monitor.log_filenames, gcs_server_err)
assert file_exists(log_monitor.log_filenames, monitor)
assert file_exists(log_monitor.log_filenames, worker_out_log_file)
assert file_exists(log_monitor.log_filenames, worker_err_log_file)
def get_file_info(file_infos, filename):
for file_info in file_infos:
if filename in file_info.filename:
return file_info
assert False, "Shouldn't reach."
raylet_err_info = get_file_info(log_monitor.closed_file_infos, raylet_err)
gcs_server_err_info = get_file_info(log_monitor.closed_file_infos, gcs_server_err)
monitor_info = get_file_info(log_monitor.closed_file_infos, monitor)
worker_out_log_file_info = get_file_info(
log_monitor.closed_file_infos, worker_out_log_file
)
worker_err_log_file_info = get_file_info(
log_monitor.closed_file_infos, worker_err_log_file
)
assert raylet_err_info.is_err_file
assert gcs_server_err_info.is_err_file
assert not monitor_info.is_err_file
assert not worker_out_log_file_info.is_err_file
assert worker_err_log_file_info.is_err_file
assert worker_out_log_file_info.job_id == job_id
assert worker_err_log_file_info.job_id == job_id
assert worker_out_log_file_info.worker_pid == int(dead_pid)
    assert worker_err_log_file_info.worker_pid == int(dead_pid)
"""
Test files are opened.
"""
log_monitor.open_closed_files()
assert len(log_monitor.open_file_infos) == 5
assert len(log_monitor.closed_file_infos) == 0
assert not log_monitor.can_open_more_files
"""
Test files are published.
"""
assert log_monitor.check_log_files_and_publish_updates()
assert raylet_err_info.worker_pid == "raylet"
assert gcs_server_err_info.worker_pid == "gcs_server"
assert monitor_info.worker_pid == "autoscaler"
assert mock_publisher.publish_logs.call_count
for file_info in log_monitor.open_file_infos:
mock_publisher.publish_logs.assert_any_call(
{
"ip": log_monitor.ip,
"pid": file_info.worker_pid,
"job": file_info.job_id,
"is_err": file_info.is_err_file,
"lines": [contents],
"actor_name": file_info.actor_name,
"task_name": file_info.task_name,
}
)
# If there's no new update, it should return False.
assert not log_monitor.check_log_files_and_publish_updates()
    # Test max lines read == 99 is respected.
lines = "1\n" * 150
with open(raylet_err_info.filename, "a") as f:
# Write 150 more lines.
f.write(lines)
assert log_monitor.check_log_files_and_publish_updates()
mock_publisher.publish_logs.assert_any_call(
{
"ip": log_monitor.ip,
"pid": raylet_err_info.worker_pid,
"job": raylet_err_info.job_id,
"is_err": raylet_err_info.is_err_file,
"lines": ["1" for _ in range(100)],
"actor_name": file_info.actor_name,
"task_name": file_info.task_name,
}
)
"""
Test files are closed.
"""
# log_monitor.open_closed_files() should close all files
# if it cannot open new files.
new_worker_err_file = f"worker-{worker_id}-{job_id}-{alive_pid}.err"
create_file(log_dir, new_worker_err_file, contents)
log_monitor.update_log_filenames()
# System logs are not closed.
# - raylet, gcs, monitor
# Dead workers are not tracked anymore. They will be moved to old folder.
# - dead pid out & err
# alive worker is going to be newly opened.
log_monitor.open_closed_files()
assert len(log_monitor.open_file_infos) == 4
assert log_monitor.can_open_more_files
# Two dead workers are not tracked anymore, and they will be in the old folder.
assert len(log_monitor.closed_file_infos) == 0
assert len(list((log_dir / "old").iterdir())) == 2
@pytest.mark.skipif(
sys.platform == "win32",
reason="Failing on Windows",
)
def test_log_monitor_actor_task_name(tmp_path):
log_dir = tmp_path / "logs"
log_dir.mkdir()
worker_id = "6df6d5dd8ca5215658e4a8f9a569a9d98e27094f9cc35a4ca43d272c"
job_id = "01000000"
pid = "47660"
mock_publisher = MagicMock()
log_monitor = LogMonitor(
str(log_dir), mock_publisher, lambda _: True, max_files_open=5
)
worker_out_log_file = f"worker-{worker_id}-{job_id}-{pid}.out"
first_line = "First line\n"
create_file(log_dir, worker_out_log_file, first_line)
log_monitor.update_log_filenames()
log_monitor.open_closed_files()
assert len(log_monitor.open_file_infos) == 1
file_info = log_monitor.open_file_infos[0]
# Test task name updated.
task_name = "task"
with open(file_info.filename, "a") as f:
        # Append the task name log prefix followed by a regular log line.
f.write(f"{ray_constants.LOG_PREFIX_TASK_NAME}{task_name}\n")
f.write("line")
log_monitor.check_log_files_and_publish_updates()
assert file_info.task_name == task_name
assert file_info.actor_name is None
mock_publisher.publish_logs.assert_any_call(
{
"ip": log_monitor.ip,
"pid": file_info.worker_pid,
"job": file_info.job_id,
"is_err": file_info.is_err_file,
"lines": ["line"],
"actor_name": None,
"task_name": task_name,
}
)
# Test the actor name is updated.
actor_name = "actor"
with open(file_info.filename, "a") as f:
        # Append the actor name log prefix followed by a regular log line.
f.write(f"{ray_constants.LOG_PREFIX_ACTOR_NAME}{actor_name}\n")
f.write("line2")
log_monitor.check_log_files_and_publish_updates()
assert file_info.task_name is None
assert file_info.actor_name == actor_name
mock_publisher.publish_logs.assert_any_call(
{
"ip": log_monitor.ip,
"pid": file_info.worker_pid,
"job": file_info.job_id,
"is_err": file_info.is_err_file,
"lines": ["line2"],
"actor_name": actor_name,
"task_name": None,
}
)
@pytest.fixture
def mock_timer():
f = time.time
time.time = MagicMock()
yield time.time
time.time = f
@pytest.mark.skipif(
sys.platform == "win32",
reason="Failing on Windows",
)
def test_log_monitor_update_backpressure(tmp_path, mock_timer):
log_dir = tmp_path / "logs"
log_dir.mkdir()
mock_publisher = MagicMock()
log_monitor = LogMonitor(
str(log_dir), mock_publisher, lambda _: True, max_files_open=5
)
current = 0
mock_timer.return_value = current
log_monitor.log_filenames = []
# When threshold < RAY_LOG_MONITOR_MANY_FILES_THRESHOLD, update should happen.
assert log_monitor.should_update_filenames(current)
# Add a new file.
log_monitor.log_filenames = [
"raylet.out" for _ in range(RAY_LOG_MONITOR_MANY_FILES_THRESHOLD)
]
# If the threshold is met, we should update the file after
# LOG_NAME_UPDATE_INTERVAL_S.
assert not log_monitor.should_update_filenames(current)
mock_timer.return_value = LOG_NAME_UPDATE_INTERVAL_S - 0.1
assert not log_monitor.should_update_filenames(current)
mock_timer.return_value = LOG_NAME_UPDATE_INTERVAL_S
assert not log_monitor.should_update_filenames(current)
mock_timer.return_value = LOG_NAME_UPDATE_INTERVAL_S + 0.1
assert log_monitor.should_update_filenames(current)
if __name__ == "__main__":
import sys
# Make subprocess happy in bazel.
os.environ["LC_ALL"] = "en_US.UTF-8"
os.environ["LANG"] = "en_US.UTF-8"
if os.environ.get("PARALLEL_CI"):
sys.exit(pytest.main(["-n", "auto", "--boxed", "-vs", __file__]))
else:
sys.exit(pytest.main(["-sv", __file__]))
```
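The worker log files exercised above follow the naming convention `worker-<worker_id>-<job_id>-<pid>.{out,err}`. Below is a minimal, hypothetical helper (`parse_worker_log_filename` is not part of Ray) that parses that convention, assuming only the format used in the tests above.
```python
import re

# Filename format assumed from the tests above:
# worker-{worker_id}-{job_id}-{pid}.{out|err}
WORKER_LOG_RE = re.compile(
    r"^worker-(?P<worker_id>[a-f0-9]+)-(?P<job_id>[0-9a-f]+)-(?P<pid>\d+)\.(?P<stream>out|err)$"
)


def parse_worker_log_filename(name: str):
    """Return the components of a worker log filename, or None if it doesn't match."""
    m = WORKER_LOG_RE.match(name)
    return m.groupdict() if m else None


# Example with the IDs used in test_log_monitor.
print(parse_worker_log_filename(
    "worker-6df6d5dd8ca5215658e4a8f9a569a9d98e27094f9cc35a4ca43d272c-01000000-47660.err"
))
# {'worker_id': '6df6...272c', 'job_id': '01000000', 'pid': '47660', 'stream': 'err'}
```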
#### File: ray/tests/test_runtime_env_working_dir_2.py
```python
import os
from pathlib import Path
import sys
import time
import tempfile
import pytest
from pytest_lazyfixture import lazy_fixture
from ray._private.test_utils import run_string_as_driver
import ray
from ray._private.test_utils import wait_for_condition, chdir, check_local_files_gced
from ray._private.runtime_env import RAY_WORKER_DEV_EXCLUDES
from ray._private.runtime_env.packaging import GCS_STORAGE_MAX_SIZE
from ray.exceptions import GetTimeoutError
# This test requires you have AWS credentials set up (any AWS credentials will
# do, this test only accesses a public bucket).
# This package contains a subdirectory called `test_module`.
# Calling `test_module.one()` should return `2`.
# If you find that confusing, take it up with @jiaodong...
S3_PACKAGE_URI = "s3://runtime-env-test/test_runtime_env.zip"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("option", ["working_dir", "py_modules"])
def test_inheritance(start_cluster, option: str):
"""Tests that child tasks/actors inherit URIs properly."""
cluster, address = start_cluster
with tempfile.TemporaryDirectory() as tmpdir, chdir(tmpdir):
with open("hello", "w") as f:
f.write("world")
if option == "working_dir":
ray.init(address, runtime_env={"working_dir": "."})
elif option == "py_modules":
ray.init(address, runtime_env={"py_modules": ["."]})
@ray.remote
def get_env():
return ray.get_runtime_context().runtime_env
@ray.remote
class EnvGetter:
def get(self):
return ray.get_runtime_context().runtime_env
job_env = ray.get_runtime_context().runtime_env
assert ray.get(get_env.remote()) == job_env
eg = EnvGetter.remote()
assert ray.get(eg.get.remote()) == job_env
# Passing a new URI should work.
if option == "working_dir":
env = {"working_dir": S3_PACKAGE_URI}
elif option == "py_modules":
env = {"py_modules": [S3_PACKAGE_URI]}
new_env = ray.get(get_env.options(runtime_env=env).remote())
assert new_env != job_env
eg = EnvGetter.options(runtime_env=env).remote()
assert ray.get(eg.get.remote()) != job_env
# Passing a local directory should not work.
if option == "working_dir":
env = {"working_dir": "."}
elif option == "py_modules":
env = {"py_modules": ["."]}
with pytest.raises(ValueError):
get_env.options(runtime_env=env).remote()
with pytest.raises(ValueError):
EnvGetter.options(runtime_env=env).remote()
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("option", ["working_dir", "py_modules"])
def test_large_file_boundary(shutdown_only, option: str):
"""Check that packages just under the max size work as expected."""
with tempfile.TemporaryDirectory() as tmp_dir, chdir(tmp_dir):
size = GCS_STORAGE_MAX_SIZE - 1024 * 1024
with open("test_file", "wb") as f:
f.write(os.urandom(size))
if option == "working_dir":
ray.init(runtime_env={"working_dir": "."})
else:
ray.init(runtime_env={"py_modules": ["."]})
@ray.remote
class Test:
def get_size(self):
with open("test_file", "rb") as f:
return len(f.read())
t = Test.remote()
assert ray.get(t.get_size.remote()) == size
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("option", ["working_dir", "py_modules"])
def test_large_file_error(shutdown_only, option: str):
with tempfile.TemporaryDirectory() as tmp_dir, chdir(tmp_dir):
# Write to two separate files, each of which is below the threshold to
# make sure the error is for the full package size.
size = GCS_STORAGE_MAX_SIZE // 2 + 1
with open("test_file_1", "wb") as f:
f.write(os.urandom(size))
with open("test_file_2", "wb") as f:
f.write(os.urandom(size))
with pytest.raises(ValueError):
if option == "working_dir":
ray.init(runtime_env={"working_dir": "."})
else:
ray.init(runtime_env={"py_modules": ["."]})
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("option", ["working_dir", "py_modules"])
def test_large_dir_upload_message(start_cluster, option):
cluster, address = start_cluster
with tempfile.TemporaryDirectory() as tmp_dir:
filepath = os.path.join(tmp_dir, "test_file.txt")
if option == "working_dir":
driver_script = f"""
import ray
ray.init("{address}", runtime_env={{"working_dir": "{tmp_dir}"}})
"""
else:
driver_script = f"""
import ray
ray.init("{address}", runtime_env={{"py_modules": ["{tmp_dir}"]}})
"""
with open(filepath, "w") as f:
f.write("Hi")
output = run_string_as_driver(driver_script)
assert "Pushing file package" in output
assert "Successfully pushed file package" in output
assert "warning" not in output.lower()
@pytest.mark.skipif(sys.platform != "darwin", reason="Package exceeds max size.")
def test_ray_worker_dev_flow(start_cluster):
cluster, address = start_cluster
ray.init(
address, runtime_env={"py_modules": [ray], "excludes": RAY_WORKER_DEV_EXCLUDES}
)
@ray.remote
def get_captured_ray_path():
return [ray.__path__]
@ray.remote
def get_lazy_ray_path():
import ray
return [ray.__path__]
captured_path = ray.get(get_captured_ray_path.remote())
lazy_path = ray.get(get_lazy_ray_path.remote())
assert captured_path == lazy_path
assert captured_path != ray.__path__[0]
@ray.remote
def test_recursive_task():
@ray.remote
def inner():
return [ray.__path__]
return ray.get(inner.remote())
assert ray.get(test_recursive_task.remote()) == captured_path
@ray.remote
def test_recursive_actor():
@ray.remote
class A:
def get(self):
return [ray.__path__]
a = A.remote()
return ray.get(a.get.remote())
assert ray.get(test_recursive_actor.remote()) == captured_path
from ray import serve
@ray.remote
def test_serve():
serve.start()
@serve.deployment
def f():
return "hi"
f.deploy()
h = f.get_handle()
assert ray.get(h.remote()) == "hi"
f.delete()
return [serve.__path__]
assert ray.get(test_serve.remote()) != serve.__path__[0]
from ray import tune
@ray.remote
def test_tune():
def objective(step, alpha, beta):
return (0.1 + alpha * step / 100) ** (-1) + beta * 0.1
def training_function(config):
# Hyperparameters
alpha, beta = config["alpha"], config["beta"]
for step in range(10):
intermediate_score = objective(step, alpha, beta)
tune.report(mean_loss=intermediate_score)
analysis = tune.run(
training_function,
config={
"alpha": tune.grid_search([0.001, 0.01, 0.1]),
"beta": tune.choice([1, 2, 3]),
},
)
print("Best config: ", analysis.get_best_config(metric="mean_loss", mode="min"))
assert ray.get(test_tune.remote()) != serve.__path__[0]
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("option", ["working_dir", "py_modules"])
@pytest.mark.parametrize("source", [S3_PACKAGE_URI, lazy_fixture("tmp_working_dir")])
def test_default_large_cache(start_cluster, option: str, source: str):
"""Check small files aren't GC'ed when using the default large cache."""
NUM_NODES = 3
cluster, address = start_cluster
for i in range(NUM_NODES - 1): # Head node already added.
cluster.add_node(num_cpus=1, runtime_env_dir_name=f"node_{i}_runtime_resources")
if option == "working_dir":
ray.init(address, runtime_env={"working_dir": source})
elif option == "py_modules":
if source != S3_PACKAGE_URI:
source = str(Path(source) / "test_module")
ray.init(address, runtime_env={"py_modules": [source]})
@ray.remote
def f():
pass
# Wait for runtime env to be set up. This can be accomplished by getting
# the result of a task.
ray.get(f.remote())
ray.shutdown()
# If we immediately check that the files weren't GCed, it may spuriously
# pass, so sleep first to give time for any deletions to happen.
time.sleep(5)
assert not check_local_files_gced(cluster)
ray.init(address)
@ray.remote(num_cpus=1)
class A:
def check(self):
import test_module
test_module.one()
if option == "working_dir":
A = A.options(runtime_env={"working_dir": S3_PACKAGE_URI})
else:
A = A.options(runtime_env={"py_modules": [S3_PACKAGE_URI]})
_ = A.remote()
ray.shutdown()
time.sleep(5)
assert not check_local_files_gced(cluster)
@pytest.mark.skipif(
os.environ.get("CI") and sys.platform != "linux",
reason="Requires PR wheels built in CI, so only run on linux CI machines.",
)
@pytest.mark.parametrize(
"ray_start_cluster",
[
{
"num_nodes": 1,
"_system_config": {
"num_workers_soft_limit": 0,
},
},
{
"num_nodes": 1,
"_system_config": {
"num_workers_soft_limit": 5,
},
},
{
"num_nodes": 1,
"_system_config": {
"num_workers_soft_limit": 0,
# this delay will make worker start slow and time out
"testing_asio_delay_us": "InternalKVGcsService.grpc_server"
".InternalKVGet=2000000:2000000",
"worker_register_timeout_seconds": 1,
},
},
{
"num_nodes": 1,
"_system_config": {
"num_workers_soft_limit": 5,
# this delay will make worker start slow and time out
"testing_asio_delay_us": "InternalKVGcsService.grpc_server"
".InternalKVGet=2000000:2000000",
"worker_register_timeout_seconds": 1,
},
},
],
indirect=True,
)
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
@pytest.mark.parametrize("option", ["working_dir", "py_modules"])
def test_task_level_gc(runtime_env_disable_URI_cache, ray_start_cluster, option):
"""Tests that task-level working_dir is GC'd when the worker exits."""
cluster = ray_start_cluster
soft_limit_zero = False
worker_register_timeout = False
system_config = cluster.list_all_nodes()[0]._ray_params._system_config
if (
"num_workers_soft_limit" in system_config
and system_config["num_workers_soft_limit"] == 0
):
soft_limit_zero = True
if (
"worker_register_timeout_seconds" in system_config
and system_config["worker_register_timeout_seconds"] != 0
):
worker_register_timeout = True
@ray.remote
def f():
import test_module
test_module.one()
@ray.remote(num_cpus=1)
class A:
def check(self):
import test_module
test_module.one()
if option == "working_dir":
runtime_env = {"working_dir": S3_PACKAGE_URI}
else:
runtime_env = {"py_modules": [S3_PACKAGE_URI]}
# Note: We should set a bigger timeout if downloads the s3 package slowly.
get_timeout = 10
# Start a task with runtime env
if worker_register_timeout:
with pytest.raises(GetTimeoutError):
ray.get(f.options(runtime_env=runtime_env).remote(), timeout=get_timeout)
else:
ray.get(f.options(runtime_env=runtime_env).remote())
if soft_limit_zero or worker_register_timeout:
        # Wait for the worker to exit and the local files to be GC'd.
wait_for_condition(lambda: check_local_files_gced(cluster))
else:
        # Local files should not be GC'd because the soft limit is high enough.
assert not check_local_files_gced(cluster)
    # Start an actor with the runtime env
actor = A.options(runtime_env=runtime_env).remote()
if worker_register_timeout:
with pytest.raises(GetTimeoutError):
ray.get(actor.check.remote(), timeout=get_timeout)
        # Wait for the worker to exit and the local files to be GC'd.
wait_for_condition(lambda: check_local_files_gced(cluster))
else:
ray.get(actor.check.remote())
assert not check_local_files_gced(cluster)
# Kill actor
ray.kill(actor)
if soft_limit_zero or worker_register_timeout:
        # Wait for the worker to exit and the local files to be GC'd.
wait_for_condition(lambda: check_local_files_gced(cluster))
else:
        # Local files should not be GC'd because the soft limit is high enough.
assert not check_local_files_gced(cluster)
# Start a task with runtime env
if worker_register_timeout:
with pytest.raises(GetTimeoutError):
ray.get(f.options(runtime_env=runtime_env).remote(), timeout=get_timeout)
else:
ray.get(f.options(runtime_env=runtime_env).remote())
if soft_limit_zero or worker_register_timeout:
        # Wait for the worker to exit and the local files to be GC'd.
wait_for_condition(lambda: check_local_files_gced(cluster))
else:
        # Local files should not be GC'd because the soft limit is high enough.
assert not check_local_files_gced(cluster)
if __name__ == "__main__":
if os.environ.get("PARALLEL_CI"):
sys.exit(pytest.main(["-n", "auto", "--boxed", "-vs", __file__]))
else:
sys.exit(pytest.main(["-sv", __file__]))
```
#### File: ray/tests/test_traceback.py
```python
import re
import sys
import threading
import pytest
import ray
from ray.exceptions import RayTaskError, RayActorError
"""This module tests stacktrace of Ray.
There are total 3 different stacktrace types in Ray.
1. Not nested task (including actor creation) or actor task failure.
2. Chained task + actor task failure.
3. Dependency failure (upstreamed dependency raises an exception).
There are several important factors:
- The root cause of the failure should be printed at the bottom.
- Ray-related code shouldn't be printed at all to the user-level stacktrace.
- It should be easy to follow the stacktrace.
Each test verifies that there is no regression by comparing line numbers.
If we include unnecessary stacktrace (e.g., logs from internal files),
these tests will fail.
"""
def scrub_traceback(ex):
assert isinstance(ex, str)
print(ex)
ex = ex.strip("\n")
ex = re.sub("pid=[0-9]+,", "pid=XXX,", ex)
ex = re.sub("ip=[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+", "ip=YYY", ex)
ex = re.sub("repr=.*\)", "repr=ZZZ)", ex)
ex = re.sub("line .*,", "line ZZ,", ex)
ex = re.sub('".*"', '"FILE"', ex)
# These are used to coloring the string.
ex = re.sub("\\x1b\[36m", "", ex)
ex = re.sub("\\x1b\[39m", "", ex)
# When running bazel test with pytest 6.x, the module name becomes
# "python.ray.tests.test_traceback" instead of just "test_traceback"
ex = re.sub(r"python\.ray\.tests\.test_traceback", "test_traceback", ex)
# Clean up object address.
ex = re.sub("object at .*>", "object at ADDRESS>", ex)
return ex
def clean_noqa(ex):
assert isinstance(ex, str)
# noqa is required to ignore lint, so we just remove it.
ex = re.sub(" # noqa", "", ex)
return ex
@pytest.mark.skipif(
sys.platform == "win32", reason="Clean stacktrace not supported on Windows"
)
def test_actor_creation_stacktrace(ray_start_regular):
"""Test the actor creation task stacktrace."""
expected_output = """The actor died because of an error raised in its creation task, ray::A.__init__() (pid=XXX, ip=YYY, repr=ZZZ) # noqa
File "FILE", line ZZ, in __init__
g(3)
File "FILE", line ZZ, in g
raise ValueError(a)
ValueError: 3"""
def g(a):
raise ValueError(a)
@ray.remote
class A:
def __init__(self):
g(3)
def ping(self):
pass
try:
a = A.remote()
ray.get(a.ping.remote())
except RayActorError as ex:
print(ex)
assert clean_noqa(expected_output) == scrub_traceback(str(ex))
@pytest.mark.skipif(
sys.platform == "win32", reason="Clean stacktrace not supported on Windows"
)
def test_task_stacktrace(ray_start_regular):
"""Test the normal task stacktrace."""
expected_output = """ray::f() (pid=XXX, ip=YYY)
File "FILE", line ZZ, in f
return g(c)
File "FILE", line ZZ, in g
raise ValueError(a)
ValueError: 7"""
def g(a):
raise ValueError(a)
# pass
@ray.remote
def f():
a = 3
b = 4
c = a + b
return g(c)
try:
ray.get(f.remote())
except ValueError as ex:
print(ex)
assert clean_noqa(expected_output) == scrub_traceback(str(ex))
@pytest.mark.skipif(
sys.platform == "win32", reason="Clean stacktrace not supported on Windows"
)
def test_actor_task_stacktrace(ray_start_regular):
"""Test the actor task stacktrace."""
expected_output = """ray::A.f() (pid=XXX, ip=YYY, repr=ZZZ) # noqa
File "FILE", line ZZ, in f
return g(c)
File "FILE", line ZZ, in g
raise ValueError(a)
ValueError: 7"""
def g(a):
raise ValueError(a)
@ray.remote
class A:
def f(self):
a = 3
b = 4
c = a + b
return g(c)
a = A.remote()
try:
ray.get(a.f.remote())
except ValueError as ex:
print(ex)
assert clean_noqa(expected_output) == scrub_traceback(str(ex))
@pytest.mark.skipif(
sys.platform == "win32", reason="Clean stacktrace not supported on Windows"
)
def test_exception_chain(ray_start_regular):
"""Test the chained stacktrace."""
expected_output = """ray::foo() (pid=XXX, ip=YYY) # noqa
File "FILE", line ZZ, in foo
return ray.get(bar.remote())
ray.exceptions.RayTaskError(ZeroDivisionError): ray::bar() (pid=XXX, ip=YYY)
File "FILE", line ZZ, in bar
return 1 / 0
ZeroDivisionError: division by zero"""
@ray.remote
def bar():
return 1 / 0
@ray.remote
def foo():
return ray.get(bar.remote())
r = foo.remote()
try:
ray.get(r)
except ZeroDivisionError as ex:
assert isinstance(ex, RayTaskError)
print(ex)
assert clean_noqa(expected_output) == scrub_traceback(str(ex))
@pytest.mark.skipif(
sys.platform == "win32", reason="Clean stacktrace not supported on Windows"
)
def test_dep_failure(ray_start_regular):
"""Test the stacktrace genereated due to dependency failures."""
expected_output = """ray::f() (pid=XXX, ip=YYY) # noqa
At least one of the input arguments for this task could not be computed:
ray.exceptions.RayTaskError: ray::a() (pid=XXX, ip=YYY)
At least one of the input arguments for this task could not be computed:
ray.exceptions.RayTaskError: ray::b() (pid=XXX, ip=YYY)
File "FILE", line ZZ, in b
raise ValueError("FILE")
ValueError: b failed"""
@ray.remote
def f(a, b):
pass
@ray.remote
def a(d):
pass
@ray.remote
def b():
raise ValueError("b failed")
try:
ray.get(f.remote(a.remote(b.remote()), b.remote()))
except Exception as ex:
print(ex)
from pprint import pprint
pprint(clean_noqa(expected_output))
pprint(scrub_traceback(str(ex)))
assert clean_noqa(expected_output) == scrub_traceback(str(ex))
@pytest.mark.skipif(
sys.platform == "win32", reason="Clean stacktrace not supported on Windows"
)
def test_actor_repr_in_traceback(ray_start_regular):
def parse_labels_from_traceback(ex):
error_msg = str(ex)
error_lines = error_msg.split("\n")
traceback_line = error_lines[0]
unformatted_labels = traceback_line.split("(")[2].split(", ")
label_dict = {}
for label in unformatted_labels:
# Remove parenthesis if included.
if label.startswith("("):
label = label[1:]
elif label.endswith(")"):
label = label[:-1]
key, value = label.split("=", 1)
label_dict[key] = value
return label_dict
# Test the default repr is Actor(repr=[class_name])
def g(a):
raise ValueError(a)
@ray.remote
class A:
def f(self):
a = 3
b = 4
c = a + b
return g(c)
def get_repr(self):
return repr(self)
a = A.remote()
try:
ray.get(a.f.remote())
except ValueError as ex:
print(ex)
label_dict = parse_labels_from_traceback(ex)
assert label_dict["repr"] == ray.get(a.get_repr.remote())
# Test if the repr is properly overwritten.
actor_repr = "ABC"
@ray.remote
class A:
def f(self):
a = 3
b = 4
c = a + b
return g(c)
def __repr__(self):
return actor_repr
a = A.remote()
try:
ray.get(a.f.remote())
except ValueError as ex:
print(ex)
label_dict = parse_labels_from_traceback(ex)
assert label_dict["repr"] == actor_repr
def test_unpickleable_stacktrace(shutdown_only):
expected_output = """System error: Failed to unpickle serialized exception
traceback: Traceback (most recent call last):
File "FILE", line ZZ, in from_ray_exception
return pickle.loads(ray_exception.serialized_exception)
TypeError: __init__() missing 1 required positional argument: 'arg'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "FILE", line ZZ, in deserialize_objects
obj = self._deserialize_object(data, metadata, object_ref)
File "FILE", line ZZ, in _deserialize_object
return RayError.from_bytes(obj)
File "FILE", line ZZ, in from_bytes
return RayError.from_ray_exception(ray_exception)
File "FILE", line ZZ, in from_ray_exception
raise RuntimeError(msg) from e
RuntimeError: Failed to unpickle serialized exception"""
class NoPickleError(OSError):
def __init__(self, arg):
pass
def g(a):
raise NoPickleError("asdf")
@ray.remote
def f():
a = 3
b = 4
c = a + b
return g(c)
try:
ray.get(f.remote())
except Exception as ex:
assert clean_noqa(expected_output) == scrub_traceback(str(ex))
def test_serialization_error_message(shutdown_only):
expected_output_task = """Could not serialize the argument <unlocked _thread.lock object at ADDRESS> for a task or actor test_traceback.test_serialization_error_message.<locals>.task_with_unserializable_arg. Check https://docs.ray.io/en/master/ray-core/objects/serialization.html#troubleshooting for more information.""" # noqa
expected_output_actor = """Could not serialize the argument <unlocked _thread.lock object at ADDRESS> for a task or actor test_traceback.test_serialization_error_message.<locals>.A.__init__. Check https://docs.ray.io/en/master/ray-core/objects/serialization.html#troubleshooting for more information.""" # noqa
expected_capture_output_task = """Could not serialize the function test_traceback.test_serialization_error_message.<locals>.capture_lock. Check https://docs.ray.io/en/master/ray-core/objects/serialization.html#troubleshooting for more information.""" # noqa
expected_capture_output_actor = """Could not serialize the actor class test_traceback.test_serialization_error_message.<locals>.B.__init__. Check https://docs.ray.io/en/master/ray-core/objects/serialization.html#troubleshooting for more information.""" # noqa
ray.init(num_cpus=1)
lock = threading.Lock()
@ray.remote
def task_with_unserializable_arg(lock):
print(lock)
@ray.remote
class A:
def __init__(self, lock):
print(lock)
@ray.remote
def capture_lock():
print(lock)
@ray.remote
class B:
def __init__(self):
print(lock)
"""
Test a task with an unserializable object.
"""
with pytest.raises(TypeError) as excinfo:
task_with_unserializable_arg.remote(lock)
def scrub_traceback(ex):
return re.sub("object at .*> for a", "object at ADDRESS> for a", ex)
test_prefix = "com_github_ray_project_ray.python.ray.tests."
assert clean_noqa(expected_output_task) == scrub_traceback(
str(excinfo.value)
).replace(test_prefix, "")
"""
Test an actor with an unserializable object.
"""
with pytest.raises(TypeError) as excinfo:
a = A.remote(lock)
print(a)
assert clean_noqa(expected_output_actor) == scrub_traceback(
str(excinfo.value)
).replace(test_prefix, "")
"""
Test the case where an unserializable object is captured by tasks.
"""
with pytest.raises(TypeError) as excinfo:
capture_lock.remote()
assert clean_noqa(expected_capture_output_task) == str(excinfo.value).replace(
test_prefix, ""
)
"""
Test the case where an unserializable object is captured by actors.
"""
with pytest.raises(TypeError) as excinfo:
b = B.remote()
print(b)
assert clean_noqa(expected_capture_output_actor) == str(excinfo.value).replace(
test_prefix, ""
)
if __name__ == "__main__":
import pytest
import os
import sys
if os.environ.get("PARALLEL_CI"):
sys.exit(pytest.main(["-n", "auto", "--boxed", "-vs", __file__]))
else:
sys.exit(pytest.main(["-sv", __file__]))
```
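As a quick illustration of the normalization that `scrub_traceback` performs above, here is a small sketch (not part of the test suite) applying the same first three substitutions to a made-up error header line.
```python
import re


def scrub_header(line: str) -> str:
    # Same pid/ip/repr substitutions as scrub_traceback above.
    line = re.sub("pid=[0-9]+,", "pid=XXX,", line)
    line = re.sub(r"ip=[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+", "ip=YYY", line)
    line = re.sub(r"repr=.*\)", "repr=ZZZ)", line)
    return line


# Hypothetical raw header as it might appear in a RayTaskError message.
raw = "ray::A.f() (pid=12345, ip=10.0.0.1, repr=<test_traceback.A object at 0x7f...>)"
print(scrub_header(raw))
# ray::A.f() (pid=XXX, ip=YYY, repr=ZZZ)
```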
#### File: _internal/results_preprocessors/keys.py
```python
from typing import List, Dict, Optional, Iterable
import warnings
from ray.train._internal.results_preprocessors.preprocessor import (
ResultsPreprocessor,
_deprecation_msg,
)
from ray.util.annotations import Deprecated
@Deprecated
class ExcludedKeysResultsPreprocessor(ResultsPreprocessor):
"""Preprocesses each result dictionary by excluding specified keys.
Example:
- excluded_keys: ``["a"]``
- input: ``[{"a": 1, "b": 2}, {"a": 3, "b": 4}]``
- output: ``[{"b": 2}, {"b": 4}]``
Args:
excluded_keys (Optional[Iterable[str]]): The keys to remove. If
``None`` then no keys will be removed.
"""
def __init__(self, excluded_keys: Optional[Iterable[str]] = None) -> None:
warnings.warn(
_deprecation_msg,
DeprecationWarning,
stacklevel=2,
)
        self.excluded_keys = set(excluded_keys) if excluded_keys else set()
def preprocess(self, results: List[Dict]) -> List[Dict]:
new_results = [
{
key: value
for key, value in result.items()
if key not in self.excluded_keys
}
for result in results
]
return new_results
```
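A minimal usage sketch for the (deprecated) preprocessor above, mirroring the example in its docstring; the import path is assumed to follow the file layout shown above, and the DeprecationWarning is silenced only for the demo.
```python
import warnings

from ray.train._internal.results_preprocessors.keys import (
    ExcludedKeysResultsPreprocessor,
)

with warnings.catch_warnings():
    warnings.simplefilter("ignore", DeprecationWarning)
    preprocessor = ExcludedKeysResultsPreprocessor(excluded_keys=["a"])

results = [{"a": 1, "b": 2}, {"a": 3, "b": 4}]
print(preprocessor.preprocess(results))  # [{'b': 2}, {'b': 4}]
```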
#### File: ray/train/predictor.py
```python
import abc
from typing import Dict, Type
import numpy as np
import pandas as pd
from ray.air.checkpoint import Checkpoint
from ray.air.data_batch_type import DataBatchType
from ray.air.util.data_batch_conversion import (
DataType,
convert_batch_type_to_pandas,
convert_pandas_to_batch_type,
)
from ray.util.annotations import DeveloperAPI, PublicAPI
try:
import pyarrow
    pa_table = pyarrow.Table
except ImportError:
pa_table = None
TYPE_TO_ENUM: Dict[Type[DataBatchType], DataType] = {
np.ndarray: DataType.NUMPY,
dict: DataType.NUMPY,
pd.DataFrame: DataType.PANDAS,
pa_table: DataType.ARROW,
}
@PublicAPI(stability="alpha")
class PredictorNotSerializableException(RuntimeError):
"""Error raised when trying to serialize a Predictor instance."""
pass
@PublicAPI(stability="alpha")
class Predictor(abc.ABC):
"""Predictors load models from checkpoints to perform inference.
Note: The base ``Predictor`` class cannot be instantiated directly. Only one of
its subclasses can be used.
**How does a Predictor work?**
Predictors expose a ``predict`` method that accepts an input batch of type
``DataBatchType`` and outputs predictions of the same type as the input batch.
When the ``predict`` method is called the following occurs:
- The input batch is converted into a pandas DataFrame. Tensor input (like a
``np.ndarray``) will be converted into a single column Pandas Dataframe.
- If there is a :ref:`Preprocessor <air-preprocessor-ref>` saved in the provided
:ref:`Checkpoint <air-checkpoint-ref>`, the preprocessor will be used to
transform the DataFrame.
- The transformed DataFrame will be passed to the model for inference (via the
``predictor._predict_pandas`` method).
- The predictions will be outputted by ``predict`` in the same type as the
original input.
**How do I create a new Predictor?**
To implement a new Predictor for your particular framework, you should subclass
the base ``Predictor`` and implement the following two methods:
1. ``_predict_pandas``: Given a pandas.DataFrame input, return a
pandas.DataFrame containing predictions.
2. ``from_checkpoint``: Logic for creating a Predictor from an
:ref:`AIR Checkpoint <air-checkpoint-ref>`.
3. Optionally ``_predict_arrow`` for better performance when working with
tensor data to avoid extra copies from Pandas conversions.
"""
@classmethod
@PublicAPI(stability="alpha")
@abc.abstractmethod
def from_checkpoint(cls, checkpoint: Checkpoint, **kwargs) -> "Predictor":
"""Create a specific predictor from a checkpoint.
Args:
checkpoint: Checkpoint to load predictor data from.
kwargs: Arguments specific to predictor implementations.
Returns:
Predictor: Predictor object.
"""
raise NotImplementedError
@PublicAPI(stability="alpha")
def predict(self, data: DataBatchType, **kwargs) -> DataBatchType:
"""Perform inference on a batch of data.
Args:
data: A batch of input data of type ``DataBatchType``.
kwargs: Arguments specific to predictor implementations. These are passed
directly to ``_predict_pandas``.
Returns:
DataBatchType: Prediction result.
"""
data_df = convert_batch_type_to_pandas(data)
if getattr(self, "preprocessor", None):
data_df = self.preprocessor.transform_batch(data_df)
predictions_df = self._predict_pandas(data_df, **kwargs)
return convert_pandas_to_batch_type(
predictions_df, type=TYPE_TO_ENUM[type(data)]
)
@DeveloperAPI
def _predict_pandas(self, data: "pd.DataFrame", **kwargs) -> "pd.DataFrame":
"""Perform inference on a Pandas DataFrame.
All predictors are expected to implement this method.
Args:
data: A pandas DataFrame to perform predictions on.
kwargs: Arguments specific to the predictor implementation.
Returns:
A pandas DataFrame containing the prediction result.
"""
raise NotImplementedError
@DeveloperAPI
def _predict_arrow(self, data: "pyarrow.Table", **kwargs) -> "pyarrow.Table":
"""Perform inference on an Arrow Table.
Predictors can implement this method instead of ``_predict_pandas``
for better performance when the input batch type is a Numpy array, dict of
        numpy arrays, or an Arrow Table, as conversions from these types are zero-copy.
Args:
data: An Arrow Table to perform predictions on.
kwargs: Arguments specific to the predictor implementation.
Returns:
An Arrow Table containing the prediction result.
"""
raise NotImplementedError
def __reduce__(self):
raise PredictorNotSerializableException(
"Predictor instances are not serializable. Instead, you may want "
"to serialize a checkpoint and initialize the Predictor with "
"Predictor.from_checkpoint."
)
```
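To make the subclassing contract in the docstring concrete, here is a minimal sketch of a custom predictor. The checkpoint layout (a dict checkpoint with a `"factor"` key) and the `ScalingPredictor` class are assumptions made up for illustration, not part of Ray.
```python
import pandas as pd

from ray.air.checkpoint import Checkpoint
from ray.train.predictor import Predictor


class ScalingPredictor(Predictor):
    """Toy predictor that multiplies every feature by a constant factor."""

    def __init__(self, factor: float, preprocessor=None):
        self.factor = factor
        self.preprocessor = preprocessor

    @classmethod
    def from_checkpoint(cls, checkpoint: Checkpoint, **kwargs) -> "ScalingPredictor":
        # Assumes the checkpoint was created with Checkpoint.from_dict({"factor": ...}).
        data = checkpoint.to_dict()
        return cls(factor=data["factor"])

    def _predict_pandas(self, data: "pd.DataFrame", **kwargs) -> "pd.DataFrame":
        return data * self.factor


predictor = ScalingPredictor.from_checkpoint(Checkpoint.from_dict({"factor": 2.0}))
print(predictor.predict(pd.DataFrame({"x": [1.0, 2.0, 3.0]})))
```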
#### File: train/rl/rl_predictor.py
```python
from typing import TYPE_CHECKING, Optional
import numpy as np
import pandas as pd
from ray.air.checkpoint import Checkpoint
from ray.air.constants import TENSOR_COLUMN_NAME
from ray.rllib.policy.policy import Policy
from ray.rllib.utils.typing import EnvType
from ray.train.predictor import Predictor
from ray.train.rl.rl_trainer import load_checkpoint
if TYPE_CHECKING:
from ray.data.preprocessor import Preprocessor
class RLPredictor(Predictor):
"""A predictor for RLlib policies.
Args:
        policy: The RLlib policy to run inference with.
preprocessor: A preprocessor used to transform data batches prior
to prediction.
"""
def __init__(
self,
policy: Policy,
preprocessor: Optional["Preprocessor"] = None,
):
self.policy = policy
self.preprocessor = preprocessor
@classmethod
def from_checkpoint(
cls,
checkpoint: Checkpoint,
env: Optional[EnvType] = None,
**kwargs,
) -> "Predictor":
"""Create RLPredictor from checkpoint.
This method requires that the checkpoint was created with the Ray AIR
RLTrainer.
Args:
checkpoint: The checkpoint to load the model and
preprocessor from.
env: Optional environment to instantiate the trainer with. If not given,
it is parsed from the saved trainer configuration instead.
"""
policy, preprocessor = load_checkpoint(checkpoint, env)
return RLPredictor(policy=policy, preprocessor=preprocessor)
def _predict_pandas(self, data: "pd.DataFrame", **kwargs) -> "pd.DataFrame":
if TENSOR_COLUMN_NAME in data:
obs = data[TENSOR_COLUMN_NAME].to_numpy()
else:
obs = data.to_numpy()
actions, _outs, _info = self.policy.compute_actions_from_input_dict(
input_dict={"obs": obs}
)
return pd.DataFrame(np.array(actions))
```
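A hedged usage sketch for the predictor above: it assumes the module path `ray.train.rl.rl_predictor` matches the file layout shown here, and that `checkpoint` was produced by the AIR `RLTrainer` (for example on `CartPole-v0`, whose observations are 4-dimensional).
```python
import numpy as np

from ray.air.checkpoint import Checkpoint
from ray.train.rl.rl_predictor import RLPredictor


def predict_cartpole_actions(checkpoint: Checkpoint):
    """Compute actions for a batch of CartPole-style observations.

    `checkpoint` is assumed to come from an AIR RLTrainer run, so that
    load_checkpoint can restore the trained policy.
    """
    predictor = RLPredictor.from_checkpoint(checkpoint)
    # One batch of four 4-dimensional observations.
    obs = np.random.uniform(-1.0, 1.0, size=(4, 4)).astype(np.float32)
    return predictor.predict(obs)
```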
#### File: train/tensorflow/config.py
```python
import json
import logging
import os
from dataclasses import dataclass
from typing import List
import ray
from ray.train.backend import BackendConfig, Backend
from ray.train._internal.utils import get_address_and_port
from ray.train._internal.worker_group import WorkerGroup
from ray.util import PublicAPI
logger = logging.getLogger(__name__)
@PublicAPI(stability="beta")
@dataclass
class TensorflowConfig(BackendConfig):
@property
def backend_cls(self):
return _TensorflowBackend
def _setup_tensorflow_environment(worker_addresses: List[str], index: int):
"""Set up distributed Tensorflow training information.
This function should be called on each worker.
Args:
worker_addresses: Addresses of all the workers.
index: Index (i.e. world rank) of the current worker.
"""
tf_config = {
"cluster": {"worker": worker_addresses},
"task": {"type": "worker", "index": index},
}
os.environ["TF_CONFIG"] = json.dumps(tf_config)
class _TensorflowBackend(Backend):
def on_start(self, worker_group: WorkerGroup, backend_config: TensorflowConfig):
# Compute URL for initializing distributed setup.
def get_url():
address, port = get_address_and_port()
return f"{address}:{port}"
urls = worker_group.execute(get_url)
# Get setup tasks in order to throw errors on failure.
setup_futures = []
for i in range(len(worker_group)):
setup_futures.append(
worker_group.execute_single_async(
i, _setup_tensorflow_environment, worker_addresses=urls, index=i
)
)
ray.get(setup_futures)
```
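For reference, a small sketch of the `TF_CONFIG` payload that `_setup_tensorflow_environment` writes; the worker addresses are hypothetical (in a real run they come from `get_address_and_port()` on each worker), and the import path is assumed to follow the file layout above.
```python
import json
import os

from ray.train.tensorflow.config import _setup_tensorflow_environment

# Hypothetical worker addresses standing in for the WorkerGroup's real ones.
worker_addresses = ["10.0.0.1:10001", "10.0.0.2:10001"]

_setup_tensorflow_environment(worker_addresses, index=0)
print(json.dumps(json.loads(os.environ["TF_CONFIG"]), indent=2))
# {
#   "cluster": {"worker": ["10.0.0.1:10001", "10.0.0.2:10001"]},
#   "task": {"type": "worker", "index": 0}
# }
```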
#### File: train/tests/test_tensorflow_trainer.py
```python
import os
import numpy as np
import pytest
import ray
from ray import train
from ray.air import session
from ray.air.checkpoint import Checkpoint
from ray.air.examples.tf.tensorflow_linear_dataset_example import get_dataset
from ray.air.examples.tf.tensorflow_linear_dataset_example import (
train_func as tensorflow_linear_train_func,
)
from ray.train.constants import MODEL_KEY, TRAIN_DATASET_KEY
from ray.train.tensorflow import TensorflowPredictor, TensorflowTrainer
@pytest.fixture
def ray_start_4_cpus():
address_info = ray.init(num_cpus=4)
yield address_info
# The code after the yield will run as teardown code.
ray.shutdown()
def build_model():
import tensorflow as tf
model = tf.keras.Sequential(
[
tf.keras.layers.InputLayer(input_shape=(1,)),
tf.keras.layers.Dense(1),
]
)
return model
@pytest.mark.parametrize("num_workers", [1, 2])
def test_tensorflow_linear(ray_start_4_cpus, num_workers):
"""Also tests air Keras callback."""
def train_func(config):
result = tensorflow_linear_train_func(config)
assert len(result) == epochs
assert result[-1]["loss"] < result[0]["loss"]
num_workers = num_workers
epochs = 3
scaling_config = {"num_workers": num_workers}
config = {
"lr": 1e-3,
"batch_size": 32,
"epochs": epochs,
}
trainer = TensorflowTrainer(
train_loop_per_worker=train_func,
train_loop_config=config,
scaling_config=scaling_config,
datasets={TRAIN_DATASET_KEY: get_dataset()},
)
trainer.fit()
def test_tensorflow_e2e(ray_start_4_cpus):
def train_func():
model = build_model().get_weights()
train.save_checkpoint(**{MODEL_KEY: model})
scaling_config = {"num_workers": 2}
trainer = TensorflowTrainer(
train_loop_per_worker=train_func, scaling_config=scaling_config
)
result = trainer.fit()
class TensorflowScorer:
def __init__(self):
self.pred = TensorflowPredictor.from_checkpoint(
result.checkpoint, build_model
)
def __call__(self, x):
            return self.pred.predict(x, dtype=np.float64)
predict_dataset = ray.data.range(3)
predictions = predict_dataset.map_batches(
TensorflowScorer, batch_format="pandas", compute="actors"
)
assert predictions.count() == 3
def test_report_and_load_using_ml_session(ray_start_4_cpus):
def train_func():
if session.get_checkpoint():
with session.get_checkpoint().as_directory() as checkpoint_dir:
import tensorflow as tf
model = tf.keras.models.load_model(checkpoint_dir)
else:
model = build_model()
model.save("my_model", overwrite=True)
session.report(
metrics={"iter": 1}, checkpoint=Checkpoint.from_directory("my_model")
)
scaling_config = {"num_workers": 2}
trainer = TensorflowTrainer(
train_loop_per_worker=train_func, scaling_config=scaling_config
)
result = trainer.fit()
trainer2 = TensorflowTrainer(
train_loop_per_worker=train_func,
scaling_config=scaling_config,
resume_from_checkpoint=result.checkpoint,
)
result = trainer2.fit()
checkpoint = result.checkpoint
with checkpoint.as_directory() as ckpt_dir:
assert os.path.exists(os.path.join(ckpt_dir, "saved_model.pb"))
assert result.metrics["iter"] == 1
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", "-x", __file__]))
```
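The last test relies on the `Checkpoint` directory round-trip; below is a minimal sketch of that behavior in isolation (the temporary file is a stand-in for a real SavedModel).
```python
# Minimal sketch of Checkpoint.from_directory / as_directory, as used in the test above.
import os
import tempfile

from ray.air.checkpoint import Checkpoint

src = tempfile.mkdtemp()
with open(os.path.join(src, "saved_model.pb"), "wb") as f:
    f.write(b"")  # stand-in for a real SavedModel protobuf

checkpoint = Checkpoint.from_directory(src)
with checkpoint.as_directory() as ckpt_dir:
    assert os.path.exists(os.path.join(ckpt_dir, "saved_model.pb"))
```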
#### File: workflow/tests/test_workflow_manager.py
```python
import pytest
import ray
from ray import workflow
from filelock import FileLock
def test_workflow_manager_simple(workflow_start_regular):
assert [] == workflow.list_all()
with pytest.raises(workflow.common.WorkflowNotFoundError):
workflow.get_status("X")
def test_workflow_manager(workflow_start_regular, tmp_path):
# For sync between jobs
tmp_file = str(tmp_path / "lock")
lock = FileLock(tmp_file)
lock.acquire()
# For sync between jobs
flag_file = tmp_path / "flag"
flag_file.touch()
@ray.remote
def long_running(i):
lock = FileLock(tmp_file)
with lock.acquire():
pass
if i % 2 == 0:
if flag_file.exists():
raise ValueError()
return 100
outputs = [
workflow.create(long_running.bind(i)).run_async(workflow_id=str(i))
for i in range(100)
]
# Test list all, it should list all jobs running
all_tasks = workflow.list_all()
assert len(all_tasks) == 100
all_tasks_running = workflow.list_all(workflow.RUNNING)
assert dict(all_tasks) == dict(all_tasks_running)
assert workflow.get_status("0") == "RUNNING"
# Release lock and make sure all tasks finished
lock.release()
for o in outputs:
try:
r = ray.get(o)
except Exception:
continue
assert 100 == r
all_tasks_running = workflow.list_all(workflow.WorkflowStatus.RUNNING)
assert len(all_tasks_running) == 0
# Half of them failed and half succeed
failed_jobs = workflow.list_all("FAILED")
assert len(failed_jobs) == 50
finished_jobs = workflow.list_all("SUCCESSFUL")
assert len(finished_jobs) == 50
all_tasks_status = workflow.list_all(
{
workflow.WorkflowStatus.SUCCESSFUL,
workflow.WorkflowStatus.FAILED,
workflow.WorkflowStatus.RUNNING,
}
)
assert len(all_tasks_status) == 100
assert failed_jobs == [
(k, v) for (k, v) in all_tasks_status if v == workflow.WorkflowStatus.FAILED
]
assert finished_jobs == [
(k, v) for (k, v) in all_tasks_status if v == workflow.WorkflowStatus.SUCCESSFUL
]
# Test get_status
assert workflow.get_status("0") == "FAILED"
assert workflow.get_status("1") == "SUCCESSFUL"
lock.acquire()
r = workflow.resume("0")
assert workflow.get_status("0") == workflow.RUNNING
flag_file.unlink()
lock.release()
assert 100 == ray.get(r)
assert workflow.get_status("0") == workflow.SUCCESSFUL
# Test cancel
lock.acquire()
workflow.resume("2")
assert workflow.get_status("2") == workflow.RUNNING
workflow.cancel("2")
assert workflow.get_status("2") == workflow.CANCELED
# Now resume_all
resumed = workflow.resume_all(include_failed=True)
assert len(resumed) == 48
lock.release()
assert [ray.get(o) for (_, o) in resumed] == [100] * 48
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
```
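A condensed sketch of the workflow lifecycle the test exercises, assuming workflow storage has already been initialized (as the `workflow_start_regular` fixture does):
```python
# Sketch only; assumes Ray and the workflow storage are already initialized.
import ray
from ray import workflow


@ray.remote
def add_one(x):
    return x + 1


output = workflow.create(add_one.bind(41)).run_async(workflow_id="demo")
print(workflow.get_status("demo"))             # e.g. RUNNING
print(ray.get(output))                         # 42
print(workflow.list_all(workflow.SUCCESSFUL))  # e.g. [("demo", WorkflowStatus.SUCCESSFUL)]
```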
#### File: ray/workflow/workflow_access.py
```python
import logging
import time
from typing import Any, Dict, List, Tuple, Optional, TYPE_CHECKING
from dataclasses import dataclass
import ray
from ray.workflow import common
from ray.workflow.common import WorkflowStaticRef
from ray.workflow import recovery
from ray.workflow import workflow_storage
from ray.util.annotations import PublicAPI
if TYPE_CHECKING:
from ray.actor import ActorHandle
from ray.workflow.common import StepID, WorkflowExecutionResult
logger = logging.getLogger(__name__)
@PublicAPI(stability="beta")
class WorkflowExecutionError(Exception):
def __init__(self, workflow_id: str):
self.message = f"Workflow[id={workflow_id}] failed during execution."
super().__init__(self.message)
class _SelfDereferenceObject:
"""A object that dereferences itself during deserialization"""
def __init__(self, workflow_id: Optional[str], nested_ref: ray.ObjectRef):
self.workflow_id = workflow_id
self.nested_ref = nested_ref
def __reduce__(self):
return _resolve_workflow_output, (self.workflow_id, self.nested_ref)
def flatten_workflow_output(
workflow_id: str, workflow_output: ray.ObjectRef
) -> ray.ObjectRef:
"""Converts the nested ref to a direct ref of an object.
Args:
workflow_id: The ID of a workflow.
workflow_output: A (nested) object ref of the workflow output.
Returns:
A direct ref of an object.
"""
return ray.put(_SelfDereferenceObject(workflow_id, workflow_output))
def _resolve_workflow_output(
workflow_id: Optional[str], output: WorkflowStaticRef
) -> Any:
"""Resolve the output of a workflow.
Args:
        workflow_id: The ID of the workflow. If it is None, the result
            is not reported to the workflow manager.
output: The output object ref of a workflow.
Raises:
WorkflowExecutionError: When the workflow fails.
Returns:
The resolved physical object.
"""
if workflow_id is not None:
try:
actor = get_management_actor()
except ValueError as e:
raise ValueError(
"Failed to connect to the workflow management actor."
) from e
from ray.workflow.step_executor import _resolve_static_workflow_ref
try:
output = _resolve_static_workflow_ref(output)
except Exception as e:
if workflow_id is not None:
# re-raise the exception so we know it is a workflow failure.
try:
ray.get(actor.report_failure.remote(workflow_id))
except Exception:
# the actor does not exist
logger.warning(
"Could not inform the workflow management actor "
"about the error of the workflow."
)
raise WorkflowExecutionError(workflow_id) from e
if workflow_id is not None:
try:
ray.get(actor.report_success.remote(workflow_id))
except Exception:
# the actor does not exist
logger.warning(
"Could not inform the workflow management actor "
"about the success of the workflow."
)
return output
def cancel_job(obj: ray.ObjectRef):
return
# TODO (yic) Enable true canceling in ray.
#
# try:
# while isinstance(obj, ray.ObjectRef):
# ray.cancel(obj)
# obj = ray.get(obj)
# except Exception:
# pass
@dataclass
class LatestWorkflowOutput:
output: WorkflowStaticRef
workflow_id: str
step_id: "StepID"
# TODO(suquark): we may use an actor pool in the future if too much
# concurrent workflow access blocks the actor.
@ray.remote(num_cpus=0)
class WorkflowManagementActor:
"""Keep the ownership and manage the workflow output."""
def __init__(self):
self._workflow_outputs: Dict[str, LatestWorkflowOutput] = {}
# Cache step output. It is used for step output lookup of
# "WorkflowRef". The dictionary entry is removed when the status of
# a step is marked as finished (successful or failed).
self._step_output_cache: Dict[Tuple[str, str], LatestWorkflowOutput] = {}
self._step_status: Dict[str, Dict[str, common.WorkflowStatus]] = {}
self._workflow_status: Dict[str, common.WorkflowStatus] = {}
def get_cached_step_output(
self, workflow_id: str, step_id: "StepID"
) -> ray.ObjectRef:
"""Get the cached result of a step.
Args:
workflow_id: The ID of the workflow.
step_id: The ID of the step.
Returns:
An object reference that can be used to retrieve the
            step result, or None if it does not exist.
"""
try:
output = self._step_output_cache[(workflow_id, step_id)].output
return output
except Exception:
return None
def run_or_resume(
self, job_id: str, workflow_id: str, ignore_existing: bool = False
) -> "WorkflowExecutionResult":
"""Run or resume a workflow.
Args:
job_id: The ID of the job that submits the workflow execution.
workflow_id: The ID of the workflow.
            ignore_existing: If True, ignore any existing output. If False,
                raise an exception when a workflow with this ID is already
                running.
Returns:
Workflow execution result that contains the state and output.
"""
if workflow_id in self._workflow_outputs and not ignore_existing:
raise RuntimeError(
f"The output of workflow[id={workflow_id}] already exists."
)
wf_store = workflow_storage.WorkflowStorage(workflow_id)
workflow_prerun_metadata = {"start_time": time.time()}
wf_store.save_workflow_prerun_metadata(workflow_prerun_metadata)
step_id = wf_store.get_entrypoint_step_id()
try:
current_output = self._workflow_outputs[workflow_id].output
except KeyError:
current_output = None
result = recovery.resume_workflow_step(
job_id, workflow_id, step_id, current_output
)
latest_output = LatestWorkflowOutput(result.output, workflow_id, step_id)
self._workflow_outputs[workflow_id] = latest_output
logger.info(f"run_or_resume: {workflow_id}, {step_id}," f"{result.output.ref}")
self._step_output_cache[(workflow_id, step_id)] = latest_output
self._update_workflow_status(workflow_id, common.WorkflowStatus.RUNNING)
if workflow_id not in self._step_status:
self._step_status[workflow_id] = {}
logger.info(f"Workflow job [id={workflow_id}] started.")
return result
def gen_step_id(self, workflow_id: str, step_name: str) -> str:
wf_store = workflow_storage.WorkflowStorage(workflow_id)
idx = wf_store.gen_step_id(step_name)
if idx == 0:
return step_name
else:
return f"{step_name}_{idx}"
def _update_workflow_status(self, workflow_id: str, status: common.WorkflowStatus):
wf_store = workflow_storage.WorkflowStorage(workflow_id)
wf_store.update_workflow_status(status)
self._workflow_status[workflow_id] = status
def update_step_status(
self,
workflow_id: str,
step_id: str,
status: common.WorkflowStatus,
outputs: List[WorkflowStaticRef],
):
# Note: For virtual actor, we could add more steps even if
# the workflow finishes.
self._step_status.setdefault(workflow_id, {})
if status == common.WorkflowStatus.SUCCESSFUL:
self._step_status[workflow_id].pop(step_id, None)
else:
self._step_status.setdefault(workflow_id, {})[step_id] = status
remaining = len(self._step_status[workflow_id])
if status != common.WorkflowStatus.RUNNING:
self._step_output_cache.pop((workflow_id, step_id), None)
if status != common.WorkflowStatus.FAILED and remaining != 0:
return
if status == common.WorkflowStatus.FAILED:
if workflow_id in self._workflow_outputs:
cancel_job(self._workflow_outputs.pop(workflow_id).output)
self._update_workflow_status(workflow_id, common.WorkflowStatus.FAILED)
self._step_status.pop(workflow_id)
else:
self._update_workflow_status(workflow_id, common.WorkflowStatus.SUCCESSFUL)
self._step_status.pop(workflow_id)
wf_store = workflow_storage.WorkflowStorage(workflow_id)
wf_store.save_workflow_postrun_metadata({"end_time": time.time()})
def cancel_workflow(self, workflow_id: str) -> None:
self._step_status.pop(workflow_id)
cancel_job(self._workflow_outputs.pop(workflow_id).output)
self._update_workflow_status(workflow_id, common.WorkflowStatus.CANCELED)
def is_workflow_running(self, workflow_id: str) -> bool:
return (
workflow_id in self._step_status and workflow_id in self._workflow_outputs
)
def list_running_workflow(self) -> List[str]:
return list(self._step_status.keys())
def get_output(self, workflow_id: str, name: Optional[str]) -> WorkflowStaticRef:
"""Get the output of a running workflow.
Args:
workflow_id: The ID of a workflow job.
Returns:
An object reference that can be used to retrieve the
workflow result.
"""
if workflow_id in self._workflow_outputs and name is None:
return self._workflow_outputs[workflow_id].output
wf_store = workflow_storage.WorkflowStorage(workflow_id)
status = wf_store.load_workflow_status()
if status == common.WorkflowStatus.NONE:
raise ValueError(f"No such workflow {workflow_id}")
if status == common.WorkflowStatus.CANCELED:
raise ValueError(f"Workflow {workflow_id} is canceled")
if name is None:
# For resumable workflow, the workflow result is not ready.
# It has to be resumed first.
if status == common.WorkflowStatus.RESUMABLE:
raise ValueError(
f"Workflow {workflow_id} is in resumable status, "
"please resume it"
)
if name is None:
step_id = wf_store.get_entrypoint_step_id()
else:
step_id = name
output = self.get_cached_step_output(workflow_id, step_id)
if output is not None:
return WorkflowStaticRef.from_output(step_id, output)
@ray.remote
def load(wf_store, workflow_id, step_id):
result = wf_store.inspect_step(step_id)
if result.output_object_valid:
# we already have the output
return wf_store.load_step_output(step_id)
if isinstance(result.output_step_id, str):
actor = get_management_actor()
return WorkflowStaticRef.from_output(
result.output_step_id,
actor.get_output.remote(workflow_id, result.output_step_id),
)
raise ValueError(
f"Cannot load output from step id {step_id} "
f"in workflow {workflow_id}"
)
return WorkflowStaticRef.from_output(
step_id,
load.remote(wf_store, workflow_id, step_id),
)
def get_running_workflow(self) -> List[str]:
return list(self._workflow_outputs.keys())
def report_failure(self, workflow_id: str) -> None:
"""Report the failure of a workflow_id.
Args:
workflow_id: The ID of the workflow.
"""
logger.error(f"Workflow job [id={workflow_id}] failed.")
self._workflow_outputs.pop(workflow_id, None)
def report_success(self, workflow_id: str) -> None:
"""Report the success of a workflow_id.
Args:
workflow_id: The ID of the workflow.
"""
# TODO(suquark): maybe we should not report success for every
# step of virtual actor writer?
logger.info(f"Workflow job [id={workflow_id}] succeeded.")
self._workflow_outputs.pop(workflow_id, None)
def ready(self) -> None:
"""A no-op to make sure the actor is ready."""
def init_management_actor() -> None:
"""Initialize WorkflowManagementActor"""
try:
get_management_actor()
except ValueError:
logger.info("Initializing workflow manager...")
# the actor does not exist
actor = WorkflowManagementActor.options(
name=common.MANAGEMENT_ACTOR_NAME,
namespace=common.MANAGEMENT_ACTOR_NAMESPACE,
lifetime="detached",
).remote()
# No-op to ensure the actor is created before the driver exits.
ray.get(actor.ready.remote())
def get_management_actor() -> "ActorHandle":
return ray.get_actor(
common.MANAGEMENT_ACTOR_NAME, namespace=common.MANAGEMENT_ACTOR_NAMESPACE
)
def get_or_create_management_actor() -> "ActorHandle":
"""Get or create WorkflowManagementActor"""
    # TODO(suquark): We should not get the actor every time. We also need to
    # resume the actor if it failed. Caching the actor in a global variable is
    # not enough to resume it, because there is no aliveness detection for an
    # actor.
try:
workflow_manager = get_management_actor()
except ValueError:
# the actor does not exist
logger.warning(
"Cannot access workflow manager. It could be because "
"the workflow manager exited unexpectedly. A new "
"workflow manager is being created."
)
workflow_manager = WorkflowManagementActor.options(
name=common.MANAGEMENT_ACTOR_NAME,
namespace=common.MANAGEMENT_ACTOR_NAMESPACE,
lifetime="detached",
).remote()
return workflow_manager
```
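These helpers are internal, but as a hedged sketch, the management actor they return can be queried directly for the workflows it currently owns:
```python
# Internal-API sketch; assumes a running Ray cluster where the workflow manager exists.
import ray
from ray.workflow.workflow_access import get_or_create_management_actor

actor = get_or_create_management_actor()
running = ray.get(actor.list_running_workflow.remote())
print("running workflows:", running)
```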
#### File: algorithms/appo/utils.py
```python
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.models.modelv2 import ModelV2
POLICY_SCOPE = "func"
TARGET_POLICY_SCOPE = "target_func"
def make_appo_models(policy) -> ModelV2:
"""Builds model and target model for APPO.
Returns:
ModelV2: The Model for the Policy to use.
Note: The target model will not be returned, just assigned to
`policy.target_model`.
"""
# Get the num_outputs for the following model construction calls.
_, logit_dim = ModelCatalog.get_action_dist(
policy.action_space, policy.config["model"]
)
# Construct the (main) model.
policy.model = ModelCatalog.get_model_v2(
policy.observation_space,
policy.action_space,
logit_dim,
policy.config["model"],
name=POLICY_SCOPE,
framework=policy.framework,
)
policy.model_variables = policy.model.variables()
# Construct the target model.
policy.target_model = ModelCatalog.get_model_v2(
policy.observation_space,
policy.action_space,
logit_dim,
policy.config["model"],
name=TARGET_POLICY_SCOPE,
framework=policy.framework,
)
policy.target_model_variables = policy.target_model.variables()
# Return only the model (not the target model).
return policy.model
```
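`make_appo_models` needs a fully constructed policy; the sketch below repeats the same two `ModelCatalog` calls on hand-picked spaces instead (the spaces and the `framework="torch"` choice are assumptions, not part of the code above).
```python
# Hedged sketch of the ModelCatalog calls used above, with explicit spaces.
import gym

from ray.rllib.models import MODEL_DEFAULTS, ModelCatalog

obs_space = gym.spaces.Box(-1.0, 1.0, shape=(4,))
action_space = gym.spaces.Discrete(2)
model_config = dict(MODEL_DEFAULTS)

# Same pattern as make_appo_models: derive logit_dim, then build the model.
_, logit_dim = ModelCatalog.get_action_dist(action_space, model_config, framework="torch")
model = ModelCatalog.get_model_v2(
    obs_space,
    action_space,
    logit_dim,
    model_config,
    name="func",
    framework="torch",
)
print(type(model).__name__, logit_dim)
```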
#### File: algorithms/bandit/bandit_torch_policy.py
```python
import logging
import time
from gym import spaces
import ray
from ray.rllib.algorithms.bandit.bandit_torch_model import (
DiscreteLinearModelThompsonSampling,
DiscreteLinearModelUCB,
DiscreteLinearModel,
ParametricLinearModelThompsonSampling,
ParametricLinearModelUCB,
)
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.models.modelv2 import restore_original_dimensions
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.annotations import override
from ray.rllib.utils.metrics.learner_info import LEARNER_STATS_KEY
from ray.util.debug import log_once
from ray.rllib.policy.torch_policy_v2 import TorchPolicyV2
logger = logging.getLogger(__name__)
class BanditTorchPolicy(TorchPolicyV2):
def __init__(self, observation_space, action_space, config):
config = dict(ray.rllib.algorithms.bandit.bandit.DEFAULT_CONFIG, **config)
TorchPolicyV2.__init__(
self,
observation_space,
action_space,
config,
max_seq_len=config["model"]["max_seq_len"],
)
self.regrets = []
@override(TorchPolicyV2)
def make_model_and_action_dist(self):
dist_class, logit_dim = ModelCatalog.get_action_dist(
self.action_space, self.config["model"], framework="torch"
)
model_cls = DiscreteLinearModel
if hasattr(self.observation_space, "original_space"):
original_space = self.observation_space.original_space
else:
original_space = self.observation_space
exploration_config = self.config.get("exploration_config")
# Model is dependent on exploration strategy because of its implicitness
# TODO: Have a separate model catalogue for bandits
if exploration_config:
if exploration_config["type"] == "ThompsonSampling":
if isinstance(original_space, spaces.Dict):
assert (
"item" in original_space.spaces
), "Cannot find 'item' key in observation space"
model_cls = ParametricLinearModelThompsonSampling
else:
model_cls = DiscreteLinearModelThompsonSampling
elif exploration_config["type"] == "UpperConfidenceBound":
if isinstance(original_space, spaces.Dict):
assert (
"item" in original_space.spaces
), "Cannot find 'item' key in observation space"
model_cls = ParametricLinearModelUCB
else:
model_cls = DiscreteLinearModelUCB
model = model_cls(
self.observation_space,
self.action_space,
logit_dim,
self.config["model"],
name="LinearModel",
)
return model, dist_class
@override(TorchPolicyV2)
def learn_on_batch(self, postprocessed_batch):
train_batch = self._lazy_tensor_dict(postprocessed_batch)
unflattened_obs = restore_original_dimensions(
train_batch[SampleBatch.CUR_OBS], self.observation_space, self.framework
)
info = {}
start = time.time()
self.model.partial_fit(
unflattened_obs,
train_batch[SampleBatch.REWARDS],
train_batch[SampleBatch.ACTIONS],
)
infos = postprocessed_batch["infos"]
if "regret" in infos[0]:
regret = sum(row["infos"]["regret"] for row in postprocessed_batch.rows())
self.regrets.append(regret)
info["cumulative_regret"] = sum(self.regrets)
else:
if log_once("no_regrets"):
logger.warning(
"The env did not report `regret` values in "
"its `info` return, ignoring."
)
info["update_latency"] = time.time() - start
return {LEARNER_STATS_KEY: info}
```
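The model selection in `make_model_and_action_dist` above boils down to a small mapping from exploration type to model class; here is a plain-Python restatement for the non-Dict observation case (class names are taken from the imports above).
```python
# Plain-Python restatement of the model selection above (non-Dict observation case).
from ray.rllib.algorithms.bandit.bandit_torch_model import (
    DiscreteLinearModel,
    DiscreteLinearModelThompsonSampling,
    DiscreteLinearModelUCB,
)

_MODEL_FOR_EXPLORATION = {
    "ThompsonSampling": DiscreteLinearModelThompsonSampling,
    "UpperConfidenceBound": DiscreteLinearModelUCB,
}


def pick_model_cls(exploration_config: dict) -> type:
    """Return the bandit model class implied by the exploration config."""
    if not exploration_config:
        return DiscreteLinearModel
    return _MODEL_FOR_EXPLORATION.get(
        exploration_config.get("type"), DiscreteLinearModel
    )


assert pick_model_cls({"type": "UpperConfidenceBound"}) is DiscreteLinearModelUCB
```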
#### File: algorithms/maddpg/maddpg.py
```python
import logging
from typing import List, Optional, Type
from ray.rllib.algorithms.algorithm_config import AlgorithmConfig
from ray.rllib.algorithms.dqn.dqn import DQN
from ray.rllib.algorithms.maddpg.maddpg_tf_policy import MADDPGTFPolicy
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch, MultiAgentBatch
from ray.rllib.utils.annotations import Deprecated, override
from ray.rllib.utils.typing import AlgorithmConfigDict
from ray.rllib.utils.deprecation import DEPRECATED_VALUE
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class MADDPGConfig(AlgorithmConfig):
"""Defines a configuration class from which a MADDPG Algorithm can be built.
Example:
>>> from ray.rllib.algorithms.maddpg.maddpg import MADDPGConfig
>>> config = MADDPGConfig()
>>> print(config.replay_buffer_config)
>>> replay_config = config.replay_buffer_config.update(
>>> {
>>> "capacity": 100000,
>>> "prioritized_replay_alpha": 0.8,
>>> "prioritized_replay_beta": 0.45,
>>> "prioritized_replay_eps": 2e-6,
>>> }
>>> )
>>> config.training(replay_buffer_config=replay_config)\
>>> .resources(num_gpus=0)\
>>> .rollouts(num_rollout_workers=4)\
>>> .environment("CartPole-v1")
>>> algo = config.build()
>>> while True:
>>> algo.train()
Example:
>>> from ray.rllib.algorithms.maddpg.maddpg import MADDPGConfig
>>> from ray import tune
>>> config = MADDPGConfig()
>>> config.training(n_step=tune.grid_search([3, 5]))
>>> config.environment(env="CartPole-v1")
>>> tune.run(
>>> "MADDPG",
>>> stop={"episode_reward_mean":200},
>>> config=config.to_dict()
>>> )
"""
def __init__(self, algo_class=None):
"""Initializes a DQNConfig instance."""
super().__init__(algo_class=algo_class or MADDPG)
# fmt: off
# __sphinx_doc_begin__
# MADDPG specific config settings:
self.agent_id = None
self.use_local_critic = False
self.use_state_preprocessor = False
self.actor_hiddens = [64, 64]
self.actor_hidden_activation = "relu"
self.critic_hiddens = [64, 64]
self.critic_hidden_activation = "relu"
self.n_step = 1
self.good_policy = "maddpg"
self.adv_policy = "maddpg"
self.replay_buffer_config = {
"type": "MultiAgentReplayBuffer",
# Specify prioritized replay by supplying a buffer type that supports
# prioritization, for example: MultiAgentPrioritizedReplayBuffer.
"prioritized_replay": DEPRECATED_VALUE,
"capacity": int(1e6),
# How many steps of the model to sample before learning starts.
"learning_starts": 1024 * 25,
# Force lockstep replay mode for MADDPG.
"replay_mode": "lockstep",
}
self.training_intensity = None
self.critic_lr = 1e-2
self.actor_lr = 1e-2
self.target_network_update_freq = 0
self.tau = 0.01
self.actor_feature_reg = 0.001
self.grad_norm_clipping = 0.5
# Changes to Algorithm's default:
self.rollout_fragment_length = 100
self.train_batch_size = 1024
self.num_workers = 1
self.min_time_s_per_iteration = 0
# fmt: on
# __sphinx_doc_end__
@override(AlgorithmConfig)
def training(
self,
*,
agent_id: Optional[str] = None,
use_local_critic: Optional[bool] = None,
use_state_preprocessor: Optional[bool] = None,
actor_hiddens: Optional[List[int]] = None,
actor_hidden_activation: Optional[str] = None,
critic_hiddens: Optional[List[int]] = None,
critic_hidden_activation: Optional[str] = None,
n_step: Optional[int] = None,
good_policy: Optional[str] = None,
adv_policy: Optional[str] = None,
replay_buffer_config: Optional[dict] = None,
training_intensity: Optional[float] = None,
critic_lr: Optional[float] = None,
actor_lr: Optional[float] = None,
target_network_update_freq: Optional[int] = None,
tau: Optional[float] = None,
actor_feature_reg: Optional[float] = None,
grad_norm_clipping: Optional[float] = None,
**kwargs,
) -> "MADDPGConfig":
"""Sets the training related configuration.
Args:
agent_id: ID of the agent controlled by this policy.
use_local_critic: Use a local critic for this policy.
use_state_preprocessor: Apply a state preprocessor with spec given by the
"model" config option (like other RL algorithms). This is mostly useful
if you have a weird observation shape, like an image. Disabled by
default.
actor_hiddens: Postprocess the policy network model output with these hidden
layers. If `use_state_preprocessor` is False, then these will be the
*only* hidden layers in the network.
actor_hidden_activation: Hidden layers activation of the postprocessing
stage of the policy network.
critic_hiddens: Postprocess the critic network model output with these
hidden layers; again, if use_state_preprocessor is True, then the state
will be preprocessed by the model specified with the "model" config
option first.
critic_hidden_activation: Hidden layers activation of the postprocessing
state of the critic.
n_step: N-step for Q-learning.
good_policy: Algorithm for good policies.
adv_policy: Algorithm for adversary policies.
replay_buffer_config: Replay buffer config.
Examples:
{
"_enable_replay_buffer_api": True,
"type": "MultiAgentReplayBuffer",
"learning_starts": 1000,
"capacity": 50000,
"replay_sequence_length": 1,
}
- OR -
{
"_enable_replay_buffer_api": True,
"type": "MultiAgentPrioritizedReplayBuffer",
"capacity": 50000,
"prioritized_replay_alpha": 0.6,
"prioritized_replay_beta": 0.4,
"prioritized_replay_eps": 1e-6,
"replay_sequence_length": 1,
}
- Where -
                prioritized_replay_alpha: Alpha parameter controls the degree of
                    prioritization in the buffer. In other words, when a buffer
                    sample has a higher temporal-difference error, with how much
                    more probability should it be drawn for updating the
                    parametrized Q-network. 0.0 corresponds to uniform probability.
                    Setting this much above 1.0 may quickly make the sampling
                    distribution heavily "pointy" with low entropy.
prioritized_replay_beta: Beta parameter controls the degree of
importance sampling which suppresses the influence of gradient updates
from samples that have higher probability of being sampled via alpha
parameter and the temporal-difference error.
prioritized_replay_eps: Epsilon parameter sets the baseline probability
for sampling so that when the temporal-difference error of a sample is
zero, there is still a chance of drawing the sample.
training_intensity: If set, this will fix the ratio of replayed from a
buffer and learned on timesteps to sampled from an environment and
stored in the replay buffer timesteps. Otherwise, the replay will
proceed at the native ratio determined by
`(train_batch_size / rollout_fragment_length)`.
critic_lr: Learning rate for the critic (Q-function) optimizer.
actor_lr: Learning rate for the actor (policy) optimizer.
target_network_update_freq: Update the target network every
`target_network_update_freq` sample steps.
            tau: Update the target by tau * policy + (1 - tau) * target_policy.
actor_feature_reg: Weights for feature regularization for the actor.
grad_norm_clipping: If not None, clip gradients during optimization at this
value.
Returns:
This updated AlgorithmConfig object.
"""
# Pass kwargs onto super's `training()` method.
super().training(**kwargs)
if agent_id is not None:
self.agent_id = agent_id
if use_local_critic is not None:
self.use_local_critic = use_local_critic
if use_state_preprocessor is not None:
self.use_state_preprocessor = use_state_preprocessor
if actor_hiddens is not None:
self.actor_hiddens = actor_hiddens
if actor_hidden_activation is not None:
self.actor_hidden_activation = actor_hidden_activation
if critic_hiddens is not None:
self.critic_hiddens = critic_hiddens
if critic_hidden_activation is not None:
self.critic_hidden_activation = critic_hidden_activation
if n_step is not None:
self.n_step = n_step
if good_policy is not None:
self.good_policy = good_policy
if adv_policy is not None:
self.adv_policy = adv_policy
if replay_buffer_config is not None:
self.replay_buffer_config = replay_buffer_config
if training_intensity is not None:
self.training_intensity = training_intensity
if critic_lr is not None:
self.critic_lr = critic_lr
if actor_lr is not None:
self.actor_lr = actor_lr
if target_network_update_freq is not None:
self.target_network_update_freq = target_network_update_freq
if tau is not None:
self.tau = tau
if actor_feature_reg is not None:
self.actor_feature_reg = actor_feature_reg
if grad_norm_clipping is not None:
self.grad_norm_clipping = grad_norm_clipping
return self
def before_learn_on_batch(multi_agent_batch, policies, train_batch_size):
samples = {}
# Modify keys.
for pid, p in policies.items():
i = p.config["agent_id"]
keys = multi_agent_batch.policy_batches[pid].keys()
keys = ["_".join([k, str(i)]) for k in keys]
samples.update(dict(zip(keys, multi_agent_batch.policy_batches[pid].values())))
# Make ops and feed_dict to get "new_obs" from target action sampler.
new_obs_ph_n = [p.new_obs_ph for p in policies.values()]
new_obs_n = list()
for k, v in samples.items():
if "new_obs" in k:
new_obs_n.append(v)
for i, p in enumerate(policies.values()):
feed_dict = {new_obs_ph_n[i]: new_obs_n[i]}
new_act = p.get_session().run(p.target_act_sampler, feed_dict)
samples.update({"new_actions_%d" % i: new_act})
# Share samples among agents.
policy_batches = {pid: SampleBatch(samples) for pid in policies.keys()}
return MultiAgentBatch(policy_batches, train_batch_size)
class MADDPG(DQN):
@classmethod
@override(DQN)
def get_default_config(cls) -> AlgorithmConfigDict:
return MADDPGConfig().to_dict()
@override(DQN)
def validate_config(self, config: AlgorithmConfigDict) -> None:
"""Adds the `before_learn_on_batch` hook to the config.
This hook is called explicitly prior to TrainOneStep() in the execution
setups for DQN and APEX.
"""
# Call super's validation method.
super().validate_config(config)
def f(batch, workers, config):
policies = dict(
workers.local_worker().foreach_policy_to_train(lambda p, i: (i, p))
)
return before_learn_on_batch(batch, policies, config["train_batch_size"])
config["before_learn_on_batch"] = f
@override(DQN)
def get_default_policy_class(self, config: AlgorithmConfigDict) -> Type[Policy]:
return MADDPGTFPolicy
# Deprecated: Use ray.rllib.algorithms.maddpg.MADDPG instead!
class _deprecated_default_config(dict):
def __init__(self):
super().__init__(MADDPGConfig().to_dict())
@Deprecated(
old="ray.rllib.algorithms.maddpg.maddpg.DEFAULT_CONFIG",
new="ray.rllib.algorithms.maddpg.maddpg.MADDPGConfig(...)",
error=False,
)
def __getitem__(self, item):
return super().__getitem__(item)
DEFAULT_CONFIG = _deprecated_default_config()
```
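A short sketch of the fluent `training()` setter defined above; the values are illustrative, not tuned hyperparameters.
```python
# Illustrative values only; not recommended hyperparameters.
from ray.rllib.algorithms.maddpg.maddpg import MADDPGConfig

config = (
    MADDPGConfig()
    .training(critic_lr=5e-3, actor_lr=5e-3, tau=0.005, n_step=3)
    .rollouts(num_rollout_workers=2)
)
print(config.critic_lr, config.n_step)
```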
#### File: algorithms/pg/pg_torch_policy.py
```python
import logging
from typing import Dict, List, Type, Union, Optional, Tuple
import ray
from ray.rllib.evaluation.episode import Episode
from ray.rllib.utils.typing import AgentID
from ray.rllib.policy.torch_policy_v2 import TorchPolicyV2
from ray.rllib.utils.annotations import override
from ray.rllib.utils.numpy import convert_to_numpy
from ray.rllib.algorithms.pg.utils import post_process_advantages
from ray.rllib.evaluation.postprocessing import Postprocessing
from ray.rllib.models.torch.torch_action_dist import TorchDistributionWrapper
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.typing import TensorType
torch, nn = try_import_torch()
logger = logging.getLogger(__name__)
class PGTorchPolicy(TorchPolicyV2):
"""PyTorch policy class used with PGTrainer."""
def __init__(self, observation_space, action_space, config):
config = dict(ray.rllib.algorithms.pg.PGConfig().to_dict(), **config)
TorchPolicyV2.__init__(
self,
observation_space,
action_space,
config,
max_seq_len=config["model"]["max_seq_len"],
)
# TODO: Don't require users to call this manually.
self._initialize_loss_from_dummy_batch()
@override(TorchPolicyV2)
def loss(
self,
model: ModelV2,
dist_class: Type[TorchDistributionWrapper],
train_batch: SampleBatch,
) -> Union[TensorType, List[TensorType]]:
"""The basic policy gradients loss function.
Calculates the vanilla policy gradient loss based on:
L = -E[ log(pi(a|s)) * A]
Args:
model: The Model to calculate the loss for.
dist_class: The action distr. class.
train_batch: The training data.
Returns:
Union[TensorType, List[TensorType]]: A single loss tensor or a list
of loss tensors.
"""
# Pass the training data through our model to get distribution parameters.
dist_inputs, _ = model(train_batch)
# Create an action distribution object.
action_dist = dist_class(dist_inputs, model)
# Calculate the vanilla PG loss based on:
# L = -E[ log(pi(a|s)) * A]
log_probs = action_dist.logp(train_batch[SampleBatch.ACTIONS])
# Final policy loss.
policy_loss = -torch.mean(log_probs * train_batch[Postprocessing.ADVANTAGES])
# Store values for stats function in model (tower), such that for
# multi-GPU, we do not override them during the parallel loss phase.
model.tower_stats["policy_loss"] = policy_loss
return policy_loss
@override(TorchPolicyV2)
def stats_fn(self, train_batch: SampleBatch) -> Dict[str, TensorType]:
"""Returns the calculated loss in a stats dict.
Args:
            train_batch: The data used for training.
Returns:
Dict[str, TensorType]: The stats dict.
"""
return convert_to_numpy(
{
"policy_loss": torch.mean(
torch.stack(self.get_tower_stats("policy_loss"))
),
}
)
@override(TorchPolicyV2)
def postprocess_trajectory(
self,
sample_batch: SampleBatch,
other_agent_batches: Optional[
Dict[AgentID, Tuple["Policy", SampleBatch]]
] = None,
episode: Optional["Episode"] = None,
) -> SampleBatch:
sample_batch = super().postprocess_trajectory(
sample_batch, other_agent_batches, episode
)
return post_process_advantages(self, sample_batch, other_agent_batches, episode)
```
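The loss above is the plain REINFORCE objective L = -E[ log(pi(a|s)) * A ]; a standalone torch sketch of the same computation on dummy tensors:
```python
# Standalone sketch of L = -E[ log(pi(a|s)) * A ] on dummy per-timestep data.
import torch

log_probs = torch.log(torch.tensor([0.7, 0.2, 0.9]))  # log pi(a|s) per timestep
advantages = torch.tensor([1.0, -0.5, 2.0])           # advantage A per timestep

policy_loss = -torch.mean(log_probs * advantages)
print(policy_loss.item())
```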
#### File: algorithms/sac/rnnsac.py
```python
from typing import Type, Optional
from ray.rllib.algorithms.sac import (
SAC,
SACConfig,
)
from ray.rllib.algorithms.sac.rnnsac_torch_policy import RNNSACTorchPolicy
from ray.rllib.policy.policy import Policy
from ray.rllib.utils.annotations import override
from ray.rllib.utils.typing import AlgorithmConfigDict
from ray.rllib.utils.deprecation import DEPRECATED_VALUE, Deprecated
class RNNSACConfig(SACConfig):
"""Defines a configuration class from which an RNNSAC can be built.
Example:
>>> config = RNNSACConfig().training(gamma=0.9, lr=0.01)\
... .resources(num_gpus=0)\
... .rollouts(num_rollout_workers=4)
>>> print(config.to_dict())
>>> # Build a Algorithm object from the config and run 1 training iteration.
>>> algo = config.build(env="CartPole-v1")
>>> algo.train()
"""
def __init__(self, algo_class=None):
super().__init__(algo_class=algo_class or RNNSAC)
# fmt: off
# __sphinx_doc_begin__
self.batch_mode = "complete_episodes"
self.zero_init_states = True
self.replay_buffer_config = {
# This algorithm learns on sequences. We therefore require the replay buffer
# to slice sampled batches into sequences before replay. How sequences
# are sliced depends on the parameters `replay_sequence_length`,
# `replay_burn_in`, and `replay_zero_init_states`.
"storage_unit": "sequences",
# If > 0, use the `burn_in` first steps of each replay-sampled sequence
# (starting either from all 0.0-values if `zero_init_state=True` or
# from the already stored values) to calculate an even more accurate
# initial states for the actual sequence (starting after this burn-in
# window). In the burn-in case, the actual length of the sequence
# used for loss calculation is `n - burn_in` time steps
# (n=LSTM’s/attention net’s max_seq_len).
"replay_burn_in": 0,
# Set automatically: The number of contiguous environment steps to
# replay at once. Will be calculated via
# model->max_seq_len + burn_in.
# Do not set this to any valid value!
"replay_sequence_length": -1,
        }
self.burn_in = DEPRECATED_VALUE
# fmt: on
# __sphinx_doc_end__
@override(SACConfig)
def training(
self,
*,
zero_init_states: Optional[bool] = None,
**kwargs,
) -> "RNNSACConfig":
"""Sets the training related configuration.
Args:
zero_init_states: If True, assume a zero-initialized state input (no matter
where in the episode the sequence is located).
If False, store the initial states along with each SampleBatch, use
it (as initial state when running through the network for training),
and update that initial state during training (from the internal
state outputs of the immediately preceding sequence).
Returns:
This updated AlgorithmConfig object.
"""
super().training(**kwargs)
if zero_init_states is not None:
self.zero_init_states = zero_init_states
return self
class RNNSAC(SAC):
@classmethod
@override(SAC)
def get_default_config(cls) -> AlgorithmConfigDict:
return RNNSACConfig().to_dict()
@override(SAC)
def validate_config(self, config: AlgorithmConfigDict) -> None:
# Call super's validation method.
super().validate_config(config)
# Add the `burn_in` to the Model's max_seq_len.
replay_sequence_length = (
config["replay_buffer_config"]["replay_burn_in"]
+ config["model"]["max_seq_len"]
)
# Check if user tries to set replay_sequence_length (to anything
# other than the proper value)
if config["replay_buffer_config"].get("replay_sequence_length", None) not in [
None,
-1,
replay_sequence_length,
]:
raise ValueError(
"`replay_sequence_length` is calculated automatically to be "
"config['model']['max_seq_len'] + config['burn_in']. Leave "
"config['replay_sequence_length'] blank to avoid this error."
)
# Set the replay sequence length to the max_seq_len of the model.
config["replay_buffer_config"][
"replay_sequence_length"
] = replay_sequence_length
if config["framework"] != "torch":
raise ValueError("Only `framework=torch` supported so far for RNNSAC!")
@override(SAC)
def get_default_policy_class(self, config: AlgorithmConfigDict) -> Type[Policy]:
return RNNSACTorchPolicy
class _deprecated_default_config(dict):
def __init__(self):
super().__init__(RNNSACConfig().to_dict())
@Deprecated(
old="ray.rllib.algorithms.sac.rnnsac.DEFAULT_CONFIG",
new="ray.rllib.algorithms.sac.rnnsac.RNNSACConfig(...)",
error=False,
)
def __getitem__(self, item):
return super().__getitem__(item)
DEFAULT_CONFIG = _deprecated_default_config()
```
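`validate_config` above derives the replay sequence length automatically; a tiny worked example with assumed (non-default) settings:
```python
# Worked example with assumed settings.
replay_burn_in = 4  # config["replay_buffer_config"]["replay_burn_in"]
max_seq_len = 20    # config["model"]["max_seq_len"]

replay_sequence_length = replay_burn_in + max_seq_len
print(replay_sequence_length)  # 24 contiguous env steps stored/replayed per sequence
```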
#### File: algorithms/sac/sac_tf_policy.py
```python
import copy
import gym
from gym.spaces import Box, Discrete
from functools import partial
import logging
from typing import Dict, List, Optional, Tuple, Type, Union
import ray
import ray.experimental.tf_utils
from ray.rllib.algorithms.ddpg.ddpg_tf_policy import (
ComputeTDErrorMixin,
TargetNetworkMixin,
)
from ray.rllib.algorithms.dqn.dqn_tf_policy import (
postprocess_nstep_and_prio,
PRIO_WEIGHTS,
)
from ray.rllib.algorithms.sac.sac_tf_model import SACTFModel
from ray.rllib.algorithms.sac.sac_torch_model import SACTorchModel
from ray.rllib.evaluation.episode import Episode
from ray.rllib.models import ModelCatalog, MODEL_DEFAULTS
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.tf.tf_action_dist import (
Beta,
Categorical,
DiagGaussian,
Dirichlet,
SquashedGaussian,
TFActionDistribution,
)
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.tf_policy_template import build_tf_policy
from ray.rllib.utils.error import UnsupportedSpaceException
from ray.rllib.utils.framework import get_variable, try_import_tf
from ray.rllib.utils.spaces.simplex import Simplex
from ray.rllib.utils.tf_utils import huber_loss
from ray.rllib.utils.typing import (
AgentID,
LocalOptimizer,
ModelGradients,
TensorType,
AlgorithmConfigDict,
)
tf1, tf, tfv = try_import_tf()
logger = logging.getLogger(__name__)
def build_sac_model(
policy: Policy,
obs_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: AlgorithmConfigDict,
) -> ModelV2:
"""Constructs the necessary ModelV2 for the Policy and returns it.
Args:
policy: The TFPolicy that will use the models.
obs_space (gym.spaces.Space): The observation space.
action_space (gym.spaces.Space): The action space.
config: The SAC trainer's config dict.
Returns:
ModelV2: The ModelV2 to be used by the Policy. Note: An additional
target model will be created in this function and assigned to
`policy.target_model`.
"""
# Force-ignore any additionally provided hidden layer sizes.
# Everything should be configured using SAC's `q_model_config` and
# `policy_model_config` config settings.
policy_model_config = copy.deepcopy(MODEL_DEFAULTS)
policy_model_config.update(config["policy_model_config"])
q_model_config = copy.deepcopy(MODEL_DEFAULTS)
q_model_config.update(config["q_model_config"])
default_model_cls = SACTorchModel if config["framework"] == "torch" else SACTFModel
model = ModelCatalog.get_model_v2(
obs_space=obs_space,
action_space=action_space,
num_outputs=None,
model_config=config["model"],
framework=config["framework"],
default_model=default_model_cls,
name="sac_model",
policy_model_config=policy_model_config,
q_model_config=q_model_config,
twin_q=config["twin_q"],
initial_alpha=config["initial_alpha"],
target_entropy=config["target_entropy"],
)
assert isinstance(model, default_model_cls)
# Create an exact copy of the model and store it in `policy.target_model`.
# This will be used for tau-synched Q-target models that run behind the
# actual Q-networks and are used for target q-value calculations in the
# loss terms.
policy.target_model = ModelCatalog.get_model_v2(
obs_space=obs_space,
action_space=action_space,
num_outputs=None,
model_config=config["model"],
framework=config["framework"],
default_model=default_model_cls,
name="target_sac_model",
policy_model_config=policy_model_config,
q_model_config=q_model_config,
twin_q=config["twin_q"],
initial_alpha=config["initial_alpha"],
target_entropy=config["target_entropy"],
)
assert isinstance(policy.target_model, default_model_cls)
return model
def postprocess_trajectory(
policy: Policy,
sample_batch: SampleBatch,
other_agent_batches: Optional[Dict[AgentID, SampleBatch]] = None,
episode: Optional[Episode] = None,
) -> SampleBatch:
"""Postprocesses a trajectory and returns the processed trajectory.
The trajectory contains only data from one episode and from one agent.
- If `config.batch_mode=truncate_episodes` (default), sample_batch may
contain a truncated (at-the-end) episode, in case the
`config.rollout_fragment_length` was reached by the sampler.
- If `config.batch_mode=complete_episodes`, sample_batch will contain
exactly one episode (no matter how long).
New columns can be added to sample_batch and existing ones may be altered.
Args:
policy: The Policy used to generate the trajectory
(`sample_batch`)
sample_batch: The SampleBatch to postprocess.
other_agent_batches (Optional[Dict[AgentID, SampleBatch]]): Optional
dict of AgentIDs mapping to other agents' trajectory data (from the
same episode). NOTE: The other agents use the same policy.
episode (Optional[Episode]): Optional multi-agent episode
object in which the agents operated.
Returns:
SampleBatch: The postprocessed, modified SampleBatch (or a new one).
"""
return postprocess_nstep_and_prio(policy, sample_batch)
def _get_dist_class(
policy: Policy, config: AlgorithmConfigDict, action_space: gym.spaces.Space
) -> Type[TFActionDistribution]:
"""Helper function to return a dist class based on config and action space.
Args:
policy: The policy for which to return the action
dist class.
config: The Algorithm's config dict.
action_space (gym.spaces.Space): The action space used.
Returns:
Type[TFActionDistribution]: A TF distribution class.
"""
if hasattr(policy, "dist_class") and policy.dist_class is not None:
return policy.dist_class
elif config["model"].get("custom_action_dist"):
action_dist_class, _ = ModelCatalog.get_action_dist(
action_space, config["model"], framework="tf"
)
return action_dist_class
elif isinstance(action_space, Discrete):
return Categorical
elif isinstance(action_space, Simplex):
return Dirichlet
else:
assert isinstance(action_space, Box)
if config["normalize_actions"]:
return SquashedGaussian if not config["_use_beta_distribution"] else Beta
else:
return DiagGaussian
def get_distribution_inputs_and_class(
policy: Policy,
model: ModelV2,
obs_batch: TensorType,
*,
explore: bool = True,
**kwargs
) -> Tuple[TensorType, Type[TFActionDistribution], List[TensorType]]:
"""The action distribution function to be used the algorithm.
An action distribution function is used to customize the choice of action
distribution class and the resulting action distribution inputs (to
parameterize the distribution object).
After parameterizing the distribution, a `sample()` call
will be made on it to generate actions.
Args:
policy: The Policy being queried for actions and calling this
function.
model: The SAC specific Model to use to generate the
distribution inputs (see sac_tf|torch_model.py). Must support the
`get_action_model_outputs` method.
obs_batch: The observations to be used as inputs to the
model.
explore: Whether to activate exploration or not.
Returns:
Tuple[TensorType, Type[TFActionDistribution], List[TensorType]]: The
dist inputs, dist class, and a list of internal state outputs
(in the RNN case).
"""
# Get base-model (forward) output (this should be a noop call).
forward_out, state_out = model(
SampleBatch(obs=obs_batch, _is_training=policy._get_is_training_placeholder()),
[],
None,
)
# Use the base output to get the policy outputs from the SAC model's
# policy components.
distribution_inputs, _ = model.get_action_model_outputs(forward_out)
# Get a distribution class to be used with the just calculated dist-inputs.
action_dist_class = _get_dist_class(policy, policy.config, policy.action_space)
return distribution_inputs, action_dist_class, state_out
def sac_actor_critic_loss(
policy: Policy,
model: ModelV2,
dist_class: Type[TFActionDistribution],
train_batch: SampleBatch,
) -> Union[TensorType, List[TensorType]]:
"""Constructs the loss for the Soft Actor Critic.
Args:
policy: The Policy to calculate the loss for.
model (ModelV2): The Model to calculate the loss for.
        dist_class (Type[ActionDistribution]): The action distr. class.
train_batch: The training data.
Returns:
Union[TensorType, List[TensorType]]: A single loss tensor or a list
of loss tensors.
"""
# Should be True only for debugging purposes (e.g. test cases)!
deterministic = policy.config["_deterministic_loss"]
_is_training = policy._get_is_training_placeholder()
# Get the base model output from the train batch.
model_out_t, _ = model(
SampleBatch(obs=train_batch[SampleBatch.CUR_OBS], _is_training=_is_training),
[],
None,
)
# Get the base model output from the next observations in the train batch.
model_out_tp1, _ = model(
SampleBatch(obs=train_batch[SampleBatch.NEXT_OBS], _is_training=_is_training),
[],
None,
)
# Get the target model's base outputs from the next observations in the
# train batch.
target_model_out_tp1, _ = policy.target_model(
SampleBatch(obs=train_batch[SampleBatch.NEXT_OBS], _is_training=_is_training),
[],
None,
)
# Discrete actions case.
if model.discrete:
# Get all action probs directly from pi and form their logp.
action_dist_inputs_t, _ = model.get_action_model_outputs(model_out_t)
log_pis_t = tf.nn.log_softmax(action_dist_inputs_t, -1)
policy_t = tf.math.exp(log_pis_t)
action_dist_inputs_tp1, _ = model.get_action_model_outputs(model_out_tp1)
log_pis_tp1 = tf.nn.log_softmax(action_dist_inputs_tp1, -1)
policy_tp1 = tf.math.exp(log_pis_tp1)
# Q-values.
q_t, _ = model.get_q_values(model_out_t)
# Target Q-values.
q_tp1, _ = policy.target_model.get_q_values(target_model_out_tp1)
if policy.config["twin_q"]:
twin_q_t, _ = model.get_twin_q_values(model_out_t)
twin_q_tp1, _ = policy.target_model.get_twin_q_values(target_model_out_tp1)
q_tp1 = tf.reduce_min((q_tp1, twin_q_tp1), axis=0)
q_tp1 -= model.alpha * log_pis_tp1
# Actually selected Q-values (from the actions batch).
one_hot = tf.one_hot(
train_batch[SampleBatch.ACTIONS], depth=q_t.shape.as_list()[-1]
)
q_t_selected = tf.reduce_sum(q_t * one_hot, axis=-1)
if policy.config["twin_q"]:
twin_q_t_selected = tf.reduce_sum(twin_q_t * one_hot, axis=-1)
# Discrete case: "Best" means weighted by the policy (prob) outputs.
q_tp1_best = tf.reduce_sum(tf.multiply(policy_tp1, q_tp1), axis=-1)
q_tp1_best_masked = (
1.0 - tf.cast(train_batch[SampleBatch.DONES], tf.float32)
) * q_tp1_best
# Continuous actions case.
else:
        # Sample single actions from the distribution.
action_dist_class = _get_dist_class(policy, policy.config, policy.action_space)
action_dist_inputs_t, _ = model.get_action_model_outputs(model_out_t)
action_dist_t = action_dist_class(action_dist_inputs_t, policy.model)
policy_t = (
action_dist_t.sample()
if not deterministic
else action_dist_t.deterministic_sample()
)
log_pis_t = tf.expand_dims(action_dist_t.logp(policy_t), -1)
action_dist_inputs_tp1, _ = model.get_action_model_outputs(model_out_tp1)
action_dist_tp1 = action_dist_class(action_dist_inputs_tp1, policy.model)
policy_tp1 = (
action_dist_tp1.sample()
if not deterministic
else action_dist_tp1.deterministic_sample()
)
log_pis_tp1 = tf.expand_dims(action_dist_tp1.logp(policy_tp1), -1)
# Q-values for the actually selected actions.
q_t, _ = model.get_q_values(
model_out_t, tf.cast(train_batch[SampleBatch.ACTIONS], tf.float32)
)
if policy.config["twin_q"]:
twin_q_t, _ = model.get_twin_q_values(
model_out_t, tf.cast(train_batch[SampleBatch.ACTIONS], tf.float32)
)
# Q-values for current policy in given current state.
q_t_det_policy, _ = model.get_q_values(model_out_t, policy_t)
if policy.config["twin_q"]:
twin_q_t_det_policy, _ = model.get_twin_q_values(model_out_t, policy_t)
q_t_det_policy = tf.reduce_min(
(q_t_det_policy, twin_q_t_det_policy), axis=0
)
# target q network evaluation
q_tp1, _ = policy.target_model.get_q_values(target_model_out_tp1, policy_tp1)
if policy.config["twin_q"]:
twin_q_tp1, _ = policy.target_model.get_twin_q_values(
target_model_out_tp1, policy_tp1
)
# Take min over both twin-NNs.
q_tp1 = tf.reduce_min((q_tp1, twin_q_tp1), axis=0)
q_t_selected = tf.squeeze(q_t, axis=len(q_t.shape) - 1)
if policy.config["twin_q"]:
twin_q_t_selected = tf.squeeze(twin_q_t, axis=len(q_t.shape) - 1)
q_tp1 -= model.alpha * log_pis_tp1
q_tp1_best = tf.squeeze(input=q_tp1, axis=len(q_tp1.shape) - 1)
q_tp1_best_masked = (
1.0 - tf.cast(train_batch[SampleBatch.DONES], tf.float32)
) * q_tp1_best
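    # At this point, both the discrete and the continuous branch have computed
    # q_tp1_best_masked = (1 - done) * (min-over-twin Q_target(s', a') - alpha * log pi(a'|s')),
    # i.e. an estimate of the soft value of the next state under the current
    # policy. This feeds the n-step Bellman target below.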
# Compute RHS of bellman equation for the Q-loss (critic(s)).
q_t_selected_target = tf.stop_gradient(
tf.cast(train_batch[SampleBatch.REWARDS], tf.float32)
+ policy.config["gamma"] ** policy.config["n_step"] * q_tp1_best_masked
)
# Compute the TD-error (potentially clipped).
base_td_error = tf.math.abs(q_t_selected - q_t_selected_target)
if policy.config["twin_q"]:
twin_td_error = tf.math.abs(twin_q_t_selected - q_t_selected_target)
td_error = 0.5 * (base_td_error + twin_td_error)
else:
td_error = base_td_error
# Calculate one or two critic losses (2 in the twin_q case).
prio_weights = tf.cast(train_batch[PRIO_WEIGHTS], tf.float32)
critic_loss = [tf.reduce_mean(prio_weights * huber_loss(base_td_error))]
if policy.config["twin_q"]:
critic_loss.append(tf.reduce_mean(prio_weights * huber_loss(twin_td_error)))
# Alpha- and actor losses.
# Note: In the papers, alpha is used directly, here we take the log.
# Discrete case: Multiply the action probs as weights with the original
# loss terms (no expectations needed).
if model.discrete:
alpha_loss = tf.reduce_mean(
tf.reduce_sum(
tf.multiply(
tf.stop_gradient(policy_t),
-model.log_alpha
* tf.stop_gradient(log_pis_t + model.target_entropy),
),
axis=-1,
)
)
actor_loss = tf.reduce_mean(
tf.reduce_sum(
tf.multiply(
# NOTE: No stop_grad around policy output here
# (compare with q_t_det_policy for continuous case).
policy_t,
model.alpha * log_pis_t - tf.stop_gradient(q_t),
),
axis=-1,
)
)
else:
alpha_loss = -tf.reduce_mean(
model.log_alpha * tf.stop_gradient(log_pis_t + model.target_entropy)
)
actor_loss = tf.reduce_mean(model.alpha * log_pis_t - q_t_det_policy)
# Save for stats function.
policy.policy_t = policy_t
policy.q_t = q_t
policy.td_error = td_error
policy.actor_loss = actor_loss
policy.critic_loss = critic_loss
policy.alpha_loss = alpha_loss
policy.alpha_value = model.alpha
policy.target_entropy = model.target_entropy
# In a custom apply op we handle the losses separately, but return them
# combined in one loss here.
return actor_loss + tf.math.add_n(critic_loss) + alpha_loss
def compute_and_clip_gradients(
policy: Policy, optimizer: LocalOptimizer, loss: TensorType
) -> ModelGradients:
"""Gradients computing function (from loss tensor, using local optimizer).
Note: For SAC, optimizer and loss are ignored b/c we have 3
losses and 3 local optimizers (all stored in policy).
`optimizer` will be used, though, in the tf-eager case b/c it is then a
fake optimizer (OptimizerWrapper) object with a `tape` property to
generate a GradientTape object for gradient recording.
Args:
policy: The Policy object that generated the loss tensor and
that holds the given local optimizer.
optimizer: The tf (local) optimizer object to
calculate the gradients with.
loss: The loss tensor for which gradients should be
calculated.
Returns:
ModelGradients: List of the possibly clipped gradients- and variable
tuples.
"""
# Eager: Use GradientTape (which is a property of the `optimizer` object
# (an OptimizerWrapper): see rllib/policy/eager_tf_policy.py).
if policy.config["framework"] in ["tf2", "tfe"]:
tape = optimizer.tape
pol_weights = policy.model.policy_variables()
actor_grads_and_vars = list(
zip(tape.gradient(policy.actor_loss, pol_weights), pol_weights)
)
q_weights = policy.model.q_variables()
if policy.config["twin_q"]:
half_cutoff = len(q_weights) // 2
grads_1 = tape.gradient(policy.critic_loss[0], q_weights[:half_cutoff])
grads_2 = tape.gradient(policy.critic_loss[1], q_weights[half_cutoff:])
critic_grads_and_vars = list(zip(grads_1, q_weights[:half_cutoff])) + list(
zip(grads_2, q_weights[half_cutoff:])
)
else:
critic_grads_and_vars = list(
zip(tape.gradient(policy.critic_loss[0], q_weights), q_weights)
)
alpha_vars = [policy.model.log_alpha]
alpha_grads_and_vars = list(
zip(tape.gradient(policy.alpha_loss, alpha_vars), alpha_vars)
)
# Tf1.x: Use optimizer.compute_gradients()
else:
actor_grads_and_vars = policy._actor_optimizer.compute_gradients(
policy.actor_loss, var_list=policy.model.policy_variables()
)
q_weights = policy.model.q_variables()
if policy.config["twin_q"]:
half_cutoff = len(q_weights) // 2
base_q_optimizer, twin_q_optimizer = policy._critic_optimizer
critic_grads_and_vars = base_q_optimizer.compute_gradients(
policy.critic_loss[0], var_list=q_weights[:half_cutoff]
) + twin_q_optimizer.compute_gradients(
policy.critic_loss[1], var_list=q_weights[half_cutoff:]
)
else:
critic_grads_and_vars = policy._critic_optimizer[0].compute_gradients(
policy.critic_loss[0], var_list=q_weights
)
alpha_grads_and_vars = policy._alpha_optimizer.compute_gradients(
policy.alpha_loss, var_list=[policy.model.log_alpha]
)
# Clip if necessary.
if policy.config["grad_clip"]:
clip_func = partial(tf.clip_by_norm, clip_norm=policy.config["grad_clip"])
else:
clip_func = tf.identity
# Save grads and vars for later use in `build_apply_op`.
policy._actor_grads_and_vars = [
(clip_func(g), v) for (g, v) in actor_grads_and_vars if g is not None
]
policy._critic_grads_and_vars = [
(clip_func(g), v) for (g, v) in critic_grads_and_vars if g is not None
]
policy._alpha_grads_and_vars = [
(clip_func(g), v) for (g, v) in alpha_grads_and_vars if g is not None
]
grads_and_vars = (
policy._actor_grads_and_vars
+ policy._critic_grads_and_vars
+ policy._alpha_grads_and_vars
)
return grads_and_vars
def apply_gradients(
policy: Policy, optimizer: LocalOptimizer, grads_and_vars: ModelGradients
) -> Union["tf.Operation", None]:
"""Gradients applying function (from list of "grad_and_var" tuples).
Note: For SAC, optimizer and grads_and_vars are ignored b/c we have 3
losses and optimizers (stored in policy).
Args:
policy: The Policy object whose Model(s) the given gradients
should be applied to.
optimizer: The tf (local) optimizer object through
which to apply the gradients.
grads_and_vars: The list of grad_and_var tuples to
apply via the given optimizer.
Returns:
Union[tf.Operation, None]: The tf op to be used to run the apply
operation. None for eager mode.
"""
actor_apply_ops = policy._actor_optimizer.apply_gradients(
policy._actor_grads_and_vars
)
cgrads = policy._critic_grads_and_vars
half_cutoff = len(cgrads) // 2
if policy.config["twin_q"]:
critic_apply_ops = [
policy._critic_optimizer[0].apply_gradients(cgrads[:half_cutoff]),
policy._critic_optimizer[1].apply_gradients(cgrads[half_cutoff:]),
]
else:
critic_apply_ops = [policy._critic_optimizer[0].apply_gradients(cgrads)]
# Eager mode -> Just apply and return None.
if policy.config["framework"] in ["tf2", "tfe"]:
policy._alpha_optimizer.apply_gradients(policy._alpha_grads_and_vars)
return
# Tf static graph -> Return op.
else:
alpha_apply_ops = policy._alpha_optimizer.apply_gradients(
policy._alpha_grads_and_vars,
global_step=tf1.train.get_or_create_global_step(),
)
return tf.group([actor_apply_ops, alpha_apply_ops] + critic_apply_ops)
def stats(policy: Policy, train_batch: SampleBatch) -> Dict[str, TensorType]:
"""Stats function for SAC. Returns a dict with important loss stats.
Args:
policy: The Policy to generate stats for.
train_batch: The SampleBatch (already) used for training.
Returns:
Dict[str, TensorType]: The stats dict.
"""
return {
"mean_td_error": tf.reduce_mean(policy.td_error),
"actor_loss": tf.reduce_mean(policy.actor_loss),
"critic_loss": tf.reduce_mean(policy.critic_loss),
"alpha_loss": tf.reduce_mean(policy.alpha_loss),
"alpha_value": tf.reduce_mean(policy.alpha_value),
"target_entropy": tf.constant(policy.target_entropy),
"mean_q": tf.reduce_mean(policy.q_t),
"max_q": tf.reduce_max(policy.q_t),
"min_q": tf.reduce_min(policy.q_t),
}
class ActorCriticOptimizerMixin:
"""Mixin class to generate the necessary optimizers for actor-critic algos.
- Creates global step for counting the number of update operations.
- Creates separate optimizers for actor, critic, and alpha.
"""
def __init__(self, config):
# Eager mode.
if config["framework"] in ["tf2", "tfe"]:
self.global_step = get_variable(0, tf_name="global_step")
self._actor_optimizer = tf.keras.optimizers.Adam(
learning_rate=config["optimization"]["actor_learning_rate"]
)
self._critic_optimizer = [
tf.keras.optimizers.Adam(
learning_rate=config["optimization"]["critic_learning_rate"]
)
]
if config["twin_q"]:
self._critic_optimizer.append(
tf.keras.optimizers.Adam(
learning_rate=config["optimization"]["critic_learning_rate"]
)
)
self._alpha_optimizer = tf.keras.optimizers.Adam(
learning_rate=config["optimization"]["entropy_learning_rate"]
)
# Static graph mode.
else:
self.global_step = tf1.train.get_or_create_global_step()
self._actor_optimizer = tf1.train.AdamOptimizer(
learning_rate=config["optimization"]["actor_learning_rate"]
)
self._critic_optimizer = [
tf1.train.AdamOptimizer(
learning_rate=config["optimization"]["critic_learning_rate"]
)
]
if config["twin_q"]:
self._critic_optimizer.append(
tf1.train.AdamOptimizer(
learning_rate=config["optimization"]["critic_learning_rate"]
)
)
self._alpha_optimizer = tf1.train.AdamOptimizer(
learning_rate=config["optimization"]["entropy_learning_rate"]
)
def setup_early_mixins(
policy: Policy,
obs_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: AlgorithmConfigDict,
) -> None:
"""Call mixin classes' constructors before Policy's initialization.
Adds the necessary optimizers to the given Policy.
Args:
policy: The Policy object.
obs_space (gym.spaces.Space): The Policy's observation space.
action_space (gym.spaces.Space): The Policy's action space.
config: The Policy's config.
"""
ActorCriticOptimizerMixin.__init__(policy, config)
def setup_mid_mixins(
policy: Policy,
obs_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: AlgorithmConfigDict,
) -> None:
"""Call mixin classes' constructors before Policy's loss initialization.
Adds the `compute_td_error` method to the given policy.
Calling `compute_td_error` with batch data will re-calculate the loss
on that batch AND return the per-batch-item TD-error for prioritized
replay buffer record weight updating (in case a prioritized replay buffer
is used).
Args:
policy: The Policy object.
obs_space (gym.spaces.Space): The Policy's observation space.
action_space (gym.spaces.Space): The Policy's action space.
config: The Policy's config.
"""
ComputeTDErrorMixin.__init__(policy, sac_actor_critic_loss)
def setup_late_mixins(
policy: Policy,
obs_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: AlgorithmConfigDict,
) -> None:
"""Call mixin classes' constructors after Policy initialization.
Adds the `update_target` method to the given policy.
Calling `update_target` updates all target Q-networks' weights from their
respective "main" Q-metworks, based on tau (smooth, partial updating).
Args:
policy: The Policy object.
obs_space (gym.spaces.Space): The Policy's observation space.
action_space (gym.spaces.Space): The Policy's action space.
config: The Policy's config.
"""
TargetNetworkMixin.__init__(policy, config)
def validate_spaces(
policy: Policy,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: AlgorithmConfigDict,
) -> None:
"""Validates the observation- and action spaces used for the Policy.
Args:
policy: The policy, whose spaces are being validated.
observation_space (gym.spaces.Space): The observation space to
validate.
action_space (gym.spaces.Space): The action space to validate.
config: The Policy's config dict.
Raises:
UnsupportedSpaceException: If one of the spaces is not supported.
"""
# Only support single Box or single Discrete spaces.
if not isinstance(action_space, (Box, Discrete, Simplex)):
raise UnsupportedSpaceException(
"Action space ({}) of {} is not supported for "
"SAC. Must be [Box|Discrete|Simplex].".format(action_space, policy)
)
# If Box, make sure it's a 1D vector space.
elif isinstance(action_space, (Box, Simplex)) and len(action_space.shape) > 1:
raise UnsupportedSpaceException(
"Action space ({}) of {} has multiple dimensions "
"{}. ".format(action_space, policy, action_space.shape)
+ "Consider reshaping this into a single dimension, "
"using a Tuple action space, or the multi-agent API."
)
# Build a child class of `DynamicTFPolicy`, given the custom functions defined
# above.
SACTFPolicy = build_tf_policy(
name="SACTFPolicy",
get_default_config=lambda: ray.rllib.algorithms.sac.sac.DEFAULT_CONFIG,
make_model=build_sac_model,
postprocess_fn=postprocess_trajectory,
action_distribution_fn=get_distribution_inputs_and_class,
loss_fn=sac_actor_critic_loss,
stats_fn=stats,
compute_gradients_fn=compute_and_clip_gradients,
apply_gradients_fn=apply_gradients,
extra_learn_fetches_fn=lambda policy: {"td_error": policy.td_error},
mixins=[TargetNetworkMixin, ActorCriticOptimizerMixin, ComputeTDErrorMixin],
validate_spaces=validate_spaces,
before_init=setup_early_mixins,
before_loss_init=setup_mid_mixins,
after_init=setup_late_mixins,
)
```
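The `compute_and_clip_gradients` function above clips each gradient individually with `tf.clip_by_norm`, bound to the configured clip value via `functools.partial`. Below is a minimal standalone sketch of just that clipping step, with made-up gradient values and a made-up `grad_clip` setting; it is not RLlib code, only an illustration of the pattern.
```python
from functools import partial

import tensorflow as tf

# Hypothetical gradient/variable pair, as an optimizer would produce.
w = tf.Variable([3.0, 4.0])
grads_and_vars = [(tf.constant([30.0, 40.0]), w)]

grad_clip = 40.0  # stands in for policy.config["grad_clip"]
clip_func = partial(tf.clip_by_norm, clip_norm=grad_clip) if grad_clip else tf.identity

clipped = [(clip_func(g), v) for g, v in grads_and_vars if g is not None]
# The gradient's L2 norm is 50, so it is rescaled to norm 40.
print(clipped[0][0].numpy())  # -> [24. 32.]
```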
#### File: algorithms/simple_q/simple_q_tf_policy.py
```python
import logging
from typing import Dict, List, Tuple, Type, Union
import ray
from ray.rllib.algorithms.simple_q.utils import make_q_models
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.tf.tf_action_dist import Categorical, TFActionDistribution
from ray.rllib.policy.dynamic_tf_policy_v2 import DynamicTFPolicyV2
from ray.rllib.policy.eager_tf_policy_v2 import EagerTFPolicyV2
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.tf_mixins import TargetNetworkMixin, compute_gradients
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_tf
from ray.rllib.utils.tf_utils import huber_loss
from ray.rllib.utils.typing import (
LocalOptimizer,
ModelGradients,
TensorStructType,
TensorType,
)
tf1, tf, tfv = try_import_tf()
logger = logging.getLogger(__name__)
# We need this builder function because we want to share the same
# custom logics between TF1 dynamic and TF2 eager policies.
def get_simple_q_tf_policy(
base: Type[Union[DynamicTFPolicyV2, EagerTFPolicyV2]]
) -> Type:
"""Construct a SimpleQTFPolicy inheriting either dynamic or eager base policies.
Args:
base: Base class for this policy. DynamicTFPolicyV2 or EagerTFPolicyV2.
Returns:
        A SimpleQ TF Policy class, based on either DynamicTFPolicyV2 or EagerTFPolicyV2.
"""
class SimpleQTFPolicy(TargetNetworkMixin, base):
def __init__(
self,
obs_space,
action_space,
config,
existing_model=None,
existing_inputs=None,
):
# First thing first, enable eager execution if necessary.
base.enable_eager_execution_if_necessary()
config = dict(
ray.rllib.algorithms.simple_q.simple_q.SimpleQConfig().to_dict(),
**config,
)
# Initialize base class.
base.__init__(
self,
obs_space,
action_space,
config,
existing_inputs=existing_inputs,
existing_model=existing_model,
)
# Note: this is a bit ugly, but loss and optimizer initialization must
# happen after all the MixIns are initialized.
self.maybe_initialize_optimizer_and_loss()
TargetNetworkMixin.__init__(self, obs_space, action_space, config)
@override(base)
def make_model(self) -> ModelV2:
"""Builds Q-model and target Q-model for Simple Q learning."""
model, self.target_model = make_q_models(self)
return model
@override(base)
def action_distribution_fn(
self,
model: ModelV2,
*,
obs_batch: TensorType,
state_batches: TensorType,
**kwargs,
) -> Tuple[TensorType, type, List[TensorType]]:
# Compute the Q-values for each possible action, using our Q-value network.
q_vals = self._compute_q_values(self.model, obs_batch, is_training=False)
return q_vals, Categorical, state_batches
def xyz_compute_actions(
self,
*,
input_dict,
explore=True,
timestep=None,
episodes=None,
is_training=False,
**kwargs,
) -> Tuple[TensorStructType, List[TensorType], Dict[str, TensorStructType]]:
if timestep is None:
timestep = self.global_timestep
# Compute the Q-values for each possible action, using our Q-value network.
q_vals = self._compute_q_values(
self.model, input_dict[SampleBatch.OBS], is_training=is_training
)
# Use a Categorical distribution for the exploration component.
            # This way, it may either sample stochastically (e.g. when using SoftQ)
# or deterministically/greedily (e.g. when using EpsilonGreedy).
distribution = Categorical(q_vals, self.model)
# Call the exploration component's `get_exploration_action` method to
# explore, if necessary.
actions, logp = self.exploration.get_exploration_action(
action_distribution=distribution, timestep=timestep, explore=explore
)
# Return (exploration) actions, state_outs (empty list), and extra outs.
return (
actions,
[],
{
"q_values": q_vals,
SampleBatch.ACTION_LOGP: logp,
SampleBatch.ACTION_PROB: tf.exp(logp),
SampleBatch.ACTION_DIST_INPUTS: q_vals,
},
)
@override(base)
def loss(
self,
model: Union[ModelV2, "tf.keras.Model"],
dist_class: Type[TFActionDistribution],
train_batch: SampleBatch,
) -> Union[TensorType, List[TensorType]]:
# q network evaluation
q_t = self._compute_q_values(self.model, train_batch[SampleBatch.CUR_OBS])
            # target q network evaluation
q_tp1 = self._compute_q_values(
self.target_model,
train_batch[SampleBatch.NEXT_OBS],
)
if not hasattr(self, "q_func_vars"):
self.q_func_vars = model.variables()
self.target_q_func_vars = self.target_model.variables()
# q scores for actions which we know were selected in the given state.
one_hot_selection = tf.one_hot(
tf.cast(train_batch[SampleBatch.ACTIONS], tf.int32), self.action_space.n
)
q_t_selected = tf.reduce_sum(q_t * one_hot_selection, 1)
# compute estimate of best possible value starting from state at t + 1
dones = tf.cast(train_batch[SampleBatch.DONES], tf.float32)
q_tp1_best_one_hot_selection = tf.one_hot(
tf.argmax(q_tp1, 1), self.action_space.n
)
q_tp1_best = tf.reduce_sum(q_tp1 * q_tp1_best_one_hot_selection, 1)
q_tp1_best_masked = (1.0 - dones) * q_tp1_best
# compute RHS of bellman equation
q_t_selected_target = (
train_batch[SampleBatch.REWARDS]
+ self.config["gamma"] * q_tp1_best_masked
)
# compute the error (potentially clipped)
td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)
loss = tf.reduce_mean(huber_loss(td_error))
# save TD error as an attribute for outside access
self.td_error = td_error
return loss
@override(base)
def compute_gradients_fn(
self, optimizer: LocalOptimizer, loss: TensorType
) -> ModelGradients:
return compute_gradients(self, optimizer, loss)
@override(base)
def extra_learn_fetches_fn(self) -> Dict[str, TensorType]:
return {"td_error": self.td_error}
def _compute_q_values(
self, model: ModelV2, obs_batch: TensorType, is_training=None
) -> TensorType:
_is_training = (
is_training
if is_training is not None
else self._get_is_training_placeholder()
)
model_out, _ = model(
SampleBatch(obs=obs_batch, _is_training=_is_training), [], None
)
return model_out
return SimpleQTFPolicy
SimpleQTF1Policy = get_simple_q_tf_policy(DynamicTFPolicyV2)
SimpleQTF2Policy = get_simple_q_tf_policy(EagerTFPolicyV2)
```
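The `loss` method of `SimpleQTFPolicy` builds the standard one-step Q-learning target. A small numpy sketch of the same Bellman computation, detached from the model/policy machinery (all numbers and shapes are made up):
```python
import numpy as np

gamma = 0.99
rewards = np.array([1.0, 0.0, 1.0])
dones = np.array([0.0, 0.0, 1.0])
actions = np.array([1, 0, 1])

# Q-values from the online net at s_t and the target net at s_{t+1}
# (batch of 3 transitions, 2 discrete actions).
q_t = np.array([[0.5, 1.5], [2.0, 0.1], [0.3, 0.7]])
q_tp1 = np.array([[1.0, 2.0], [0.5, 0.4], [0.9, 0.2]])

q_t_selected = q_t[np.arange(3), actions]
q_tp1_best_masked = (1.0 - dones) * q_tp1.max(axis=1)
q_t_selected_target = rewards + gamma * q_tp1_best_masked

td_error = q_t_selected - q_t_selected_target
# Huber loss with delta=1, as used above.
loss = np.mean(np.where(np.abs(td_error) < 1.0,
                        0.5 * td_error ** 2,
                        np.abs(td_error) - 0.5))
```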
#### File: algorithms/simple_q/utils.py
```python
import gym
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.utils.error import UnsupportedSpaceException
Q_SCOPE = "q_func"
Q_TARGET_SCOPE = "target_q_func"
def make_q_models(policy):
if not isinstance(policy.action_space, gym.spaces.Discrete):
raise UnsupportedSpaceException(
f"Action space {policy.action_space} is not supported for DQN."
)
model = ModelCatalog.get_model_v2(
obs_space=policy.observation_space,
action_space=policy.action_space,
num_outputs=policy.action_space.n,
model_config=policy.config["model"],
framework=policy.config["framework"],
name=Q_SCOPE,
)
target_model = ModelCatalog.get_model_v2(
obs_space=policy.observation_space,
action_space=policy.action_space,
num_outputs=policy.action_space.n,
model_config=policy.config["model"],
framework=policy.config["framework"],
name=Q_TARGET_SCOPE,
)
return model, target_model
```
#### File: algorithms/td3/td3.py
```python
from ray.rllib.algorithms.ddpg.ddpg import DDPG, DDPGConfig
from ray.rllib.utils.annotations import override
from ray.rllib.utils.deprecation import Deprecated
from ray.rllib.utils.typing import AlgorithmConfigDict
from ray.rllib.utils.deprecation import DEPRECATED_VALUE
class TD3Config(DDPGConfig):
"""Defines a configuration class from which a TD3 Algorithm can be built.
Example:
>>> from ray.rllib.algorithms.ddpg.td3 import TD3Config
>>> config = TD3Config().training(lr=0.01).resources(num_gpus=1)
>>> print(config.to_dict())
>>> # Build a Algorithm object from the config and run one training iteration.
>>> trainer = config.build(env="Pendulum-v1")
>>> trainer.train()
Example:
>>> from ray.rllib.algorithms.ddpg.td3 import TD3Config
>>> from ray import tune
>>> config = TD3Config()
>>> # Print out some default values.
>>> print(config.lr)
0.0004
>>> # Update the config object.
>>> config.training(lr=tune.grid_search([0.001, 0.0001]))
>>> # Set the config object's env.
>>> config.environment(env="Pendulum-v1")
>>> # Use to_dict() to get the old-style python config dict
>>> # when running with tune.
>>> tune.run(
... "TD3",
... stop={"episode_reward_mean": 200},
... config=config.to_dict(),
... )
"""
def __init__(self, algo_class=None):
"""Initializes a TD3Config instance."""
super().__init__(algo_class=algo_class or TD3)
# fmt: off
# __sphinx_doc_begin__
# Override some of DDPG/SimpleQ/Algorithm's default values with TD3-specific
# values.
# .training()
# largest changes: twin Q functions, delayed policy updates, target
# smoothing, no l2-regularization.
self.twin_q = True
self.policy_delay = 2
        self.smooth_target_policy = True
self.l2_reg = 0.0
# Different tau (affecting target network update).
self.tau = 5e-3
# Different batch size.
self.train_batch_size = 100
# No prioritized replay by default (we may want to change this at some
# point).
self.replay_buffer_config = {
"type": "MultiAgentReplayBuffer",
# Specify prioritized replay by supplying a buffer type that supports
# prioritization, for example: MultiAgentPrioritizedReplayBuffer.
"prioritized_replay": DEPRECATED_VALUE,
"capacity": 1000000,
"learning_starts": 10000,
"worker_side_prioritization": False,
}
# .exploration()
# TD3 uses Gaussian Noise by default.
self.exploration_config = {
# TD3 uses simple Gaussian noise on top of deterministic NN-output
# actions (after a possible pure random phase of n timesteps).
"type": "GaussianNoise",
# For how many timesteps should we return completely random
# actions, before we start adding (scaled) noise?
"random_timesteps": 10000,
# Gaussian stddev of action noise for exploration.
"stddev": 0.1,
# Scaling settings by which the Gaussian noise is scaled before
# being added to the actions. NOTE: The scale timesteps start only
# after(!) any random steps have been finished.
# By default, do not anneal over time (fixed 1.0).
"initial_scale": 1.0,
"final_scale": 1.0,
"scale_timesteps": 1,
}
# __sphinx_doc_end__
# fmt: on
class TD3(DDPG):
@classmethod
@override(DDPG)
def get_default_config(cls) -> AlgorithmConfigDict:
return TD3Config().to_dict()
# Deprecated: Use ray.rllib.algorithms.td3.td3.TD3Config instead!
class _deprecated_default_config(dict):
def __init__(self):
super().__init__(TD3Config().to_dict())
@Deprecated(
old="ray.rllib.algorithms.ddpg.td3::TD3_DEFAULT_CONFIG",
new="ray.rllib.algorithms.td3.td3::TD3Config(...)",
error=False,
)
def __getitem__(self, item):
return super().__getitem__(item)
TD3_DEFAULT_CONFIG = _deprecated_default_config()
```
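`smooth_target_policy=True` refers to TD3's target-policy smoothing: clipped Gaussian noise is added to the target policy's action before the target Q-value is computed. A hedged numpy illustration of that step; the noise scale and clip value below are the ones from the original TD3 paper, not necessarily what this config ends up using.
```python
import numpy as np

def smoothed_target_action(target_action, low, high, noise_std=0.2, noise_clip=0.5):
    # Add zero-mean Gaussian noise, clip the noise, then clip the result
    # back into the valid Box range of the action space.
    noise = np.clip(
        np.random.normal(0.0, noise_std, size=np.shape(target_action)),
        -noise_clip, noise_clip,
    )
    return np.clip(target_action + noise, low, high)

# Example: a 1-D continuous action in [-2, 2] (e.g. Pendulum torque).
print(smoothed_target_action(np.array([1.9]), low=-2.0, high=2.0))
```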
#### File: connectors/action/lambdas.py
```python
from typing import Any, Callable, Dict, List, Type
from ray.rllib.connectors.connector import (
ConnectorContext,
ActionConnector,
register_connector,
)
from ray.rllib.utils.annotations import DeveloperAPI
from ray.rllib.utils.numpy import convert_to_numpy
from ray.rllib.utils.spaces.space_utils import unbatch
from ray.rllib.utils.typing import (
ActionConnectorDataType,
PolicyOutputType,
StateBatches,
TensorStructType,
)
@DeveloperAPI
def register_lambda_action_connector(
name: str, fn: Callable[[TensorStructType, StateBatches, Dict], PolicyOutputType]
) -> Type[ActionConnector]:
"""A util to register any function transforming PolicyOutputType as an ActionConnector.
The only requirement is that fn should take actions, states, and fetches as input,
and return transformed actions, states, and fetches.
Args:
        name: Name of the resulting action connector.
fn: The function that transforms PolicyOutputType.
Returns:
A new ActionConnector class that transforms PolicyOutputType using fn.
"""
class LambdaActionConnector(ActionConnector):
def __call__(self, ac_data: ActionConnectorDataType) -> ActionConnectorDataType:
assert isinstance(
ac_data.output, tuple
), "Action connector requires PolicyOutputType data."
actions, states, fetches = ac_data.output
return ActionConnectorDataType(
ac_data.env_id,
ac_data.agent_id,
fn(actions, states, fetches),
)
def to_config(self):
return name, None
@staticmethod
def from_config(ctx: ConnectorContext, params: List[Any]):
return LambdaActionConnector(ctx)
LambdaActionConnector.__name__ = name
LambdaActionConnector.__qualname__ = name
register_connector(name, LambdaActionConnector)
return LambdaActionConnector
# Convert actions and states into numpy arrays if necessary.
ConvertToNumpyConnector = register_lambda_action_connector(
"ConvertToNumpyConnector",
lambda actions, states, fetches: (
convert_to_numpy(actions),
convert_to_numpy(states),
fetches,
),
)
# Split action-component batches into single action rows.
UnbatchActionsConnector = register_lambda_action_connector(
"UnbatchActionsConnector",
lambda actions, states, fetches: (unbatch(actions), states, fetches),
)
```
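Following the same pattern as the two built-in connectors above, additional stateless transforms can be registered. A hedged sketch of a hypothetical clipping connector, assuming a flat `Box` action space (the connector name and clip range are made up):
```python
import numpy as np

from ray.rllib.connectors.action.lambdas import register_lambda_action_connector

# Hypothetical connector: clip every action component into [-1, 1].
ClipActionsConnector = register_lambda_action_connector(
    "ClipActionsConnector",
    lambda actions, states, fetches: (np.clip(actions, -1.0, 1.0), states, fetches),
)
```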
#### File: connectors/action/normalize.py
```python
from typing import Any, List
from ray.rllib.connectors.connector import (
ConnectorContext,
ActionConnector,
register_connector,
)
from ray.rllib.utils.annotations import DeveloperAPI
from ray.rllib.utils.spaces.space_utils import (
get_base_struct_from_space,
unsquash_action,
)
from ray.rllib.utils.typing import ActionConnectorDataType
@DeveloperAPI
class NormalizeActionsConnector(ActionConnector):
def __init__(self, ctx: ConnectorContext):
super().__init__(ctx)
self._action_space_struct = get_base_struct_from_space(ctx.action_space)
def __call__(self, ac_data: ActionConnectorDataType) -> ActionConnectorDataType:
assert isinstance(
ac_data.output, tuple
), "Action connector requires PolicyOutputType data."
actions, states, fetches = ac_data.output
return ActionConnectorDataType(
ac_data.env_id,
ac_data.agent_id,
(unsquash_action(actions, self._action_space_struct), states, fetches),
)
def to_config(self):
return NormalizeActionsConnector.__name__, None
@staticmethod
def from_config(ctx: ConnectorContext, params: List[Any]):
return NormalizeActionsConnector(ctx)
register_connector(NormalizeActionsConnector.__name__, NormalizeActionsConnector)
```
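The connector above delegates to `unsquash_action` to map normalized actions back into the environment's `Box` bounds. A plain-numpy sketch of that kind of mapping for a single bounded `Box` component (illustrative only; the real utility also handles nested spaces):
```python
import numpy as np

def unsquash_box(action, low, high):
    # Map an action from the normalized [-1, 1] range back into [low, high].
    return low + (np.clip(action, -1.0, 1.0) + 1.0) * 0.5 * (high - low)

print(unsquash_box(np.array([0.0, 1.0]), low=-2.0, high=2.0))  # -> [0. 2.]
```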
#### File: connectors/agent/env_to_agent.py
```python
from typing import Any, List
from ray.rllib.connectors.connector import (
ConnectorContext,
AgentConnector,
register_connector,
)
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.annotations import DeveloperAPI
from ray.rllib.utils.typing import AgentConnectorDataType
@DeveloperAPI
class EnvToAgentDataConnector(AgentConnector):
"""Converts per environment multi-agent obs into per agent SampleBatches."""
def __init__(self, ctx: ConnectorContext):
super().__init__(ctx)
self._view_requirements = ctx.view_requirements
def __call__(self, ac_data: AgentConnectorDataType) -> List[AgentConnectorDataType]:
if ac_data.agent_id:
# data is already for a single agent.
return [ac_data]
assert isinstance(ac_data.data, (tuple, list)) and len(ac_data.data) == 5, (
"EnvToPerAgentDataConnector expects a tuple of "
+ "(obs, rewards, dones, infos, episode_infos)."
)
# episode_infos contains additional training related data bits
# for each agent, such as SampleBatch.T, SampleBatch.AGENT_INDEX,
# SampleBatch.ACTIONS, SampleBatch.DONES (if hitting horizon),
# and is usually empty in inference mode.
obs, rewards, dones, infos, training_episode_infos = ac_data.data
for var, name in zip(
(obs, rewards, dones, infos, training_episode_infos),
("obs", "rewards", "dones", "infos", "training_episode_infos"),
):
assert isinstance(var, dict), (
f"EnvToPerAgentDataConnector expects {name} "
+ "to be a MultiAgentDict."
)
env_id = ac_data.env_id
per_agent_data = []
for agent_id, obs in obs.items():
input_dict = {
SampleBatch.ENV_ID: env_id,
SampleBatch.REWARDS: rewards[agent_id],
# SampleBatch.DONES may be overridden by data from
# training_episode_infos next.
SampleBatch.DONES: dones[agent_id],
SampleBatch.NEXT_OBS: obs,
}
if SampleBatch.INFOS in self._view_requirements:
input_dict[SampleBatch.INFOS] = infos[agent_id]
if agent_id in training_episode_infos:
input_dict.update(training_episode_infos[agent_id])
per_agent_data.append(AgentConnectorDataType(env_id, agent_id, input_dict))
return per_agent_data
def to_config(self):
return EnvToAgentDataConnector.__name__, None
@staticmethod
def from_config(ctx: ConnectorContext, params: List[Any]):
return EnvToAgentDataConnector(ctx)
register_connector(EnvToAgentDataConnector.__name__, EnvToAgentDataConnector)
```
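The connector above fans one per-environment multi-agent step result out into one input dict per agent. A tiny plain-Python sketch of the data shapes involved; the agent IDs and dict keys here are illustrative stand-ins for the `SampleBatch` column names used in the real implementation:
```python
# Per-env multi-agent data, as one env.step() call would return it.
env_id = "env_0"
obs = {"agent_1": [0.1, 0.2], "agent_2": [0.3, 0.4]}
rewards = {"agent_1": 1.0, "agent_2": -1.0}
dones = {"agent_1": False, "agent_2": False}

# What EnvToAgentDataConnector conceptually emits: one record per agent.
per_agent = [
    (env_id, agent_id, {"rewards": rewards[agent_id],
                        "dones": dones[agent_id],
                        "new_obs": ob})
    for agent_id, ob in obs.items()
]
```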
#### File: connectors/agent/state_buffer.py
```python
from collections import defaultdict
import numpy as np
import tree # dm_tree
from typing import Any, List
from ray.rllib.connectors.connector import (
ConnectorContext,
AgentConnector,
register_connector,
)
from ray.rllib.utils.annotations import DeveloperAPI
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.numpy import convert_to_numpy
from ray.rllib.utils.spaces.space_utils import get_base_struct_from_space
from ray.rllib.utils.typing import (
AgentConnectorDataType,
PolicyOutputType,
)
@DeveloperAPI
class _AgentState(object):
def __init__(self):
self.t = 0
self.action = None
self.states = None
@DeveloperAPI
class StateBufferConnector(AgentConnector):
def __init__(self, ctx: ConnectorContext):
super().__init__(ctx)
self._initial_states = ctx.initial_states
self._action_space_struct = get_base_struct_from_space(ctx.action_space)
self._states = defaultdict(lambda: defaultdict(_AgentState))
def reset(self, env_id: str):
del self._states[env_id]
def on_policy_output(self, env_id: str, agent_id: str, output: PolicyOutputType):
# Buffer latest output states for next input __call__.
action, states, _ = output
agent_state = self._states[env_id][agent_id]
agent_state.action = convert_to_numpy(action)
agent_state.states = convert_to_numpy(states)
def __call__(
self, ctx: ConnectorContext, ac_data: AgentConnectorDataType
) -> List[AgentConnectorDataType]:
d = ac_data.data
assert (
type(d) == dict
), "Single agent data must be of type Dict[str, TensorStructType]"
env_id = ac_data.env_id
agent_id = ac_data.agent_id
assert env_id and agent_id, "StateBufferConnector requires env_id and agent_id"
agent_state = self._states[env_id][agent_id]
d.update(
{
SampleBatch.T: agent_state.t,
SampleBatch.ENV_ID: env_id,
}
)
if agent_state.states is not None:
states = agent_state.states
else:
states = self._initial_states
for i, v in enumerate(states):
d["state_out_{}".format(i)] = v
if agent_state.action is not None:
d[SampleBatch.ACTIONS] = agent_state.action # Last action
else:
# Default zero action.
d[SampleBatch.ACTIONS] = tree.map_structure(
lambda s: np.zeros_like(s.sample(), s.dtype)
if hasattr(s, "dtype")
else np.zeros_like(s.sample()),
self._action_space_struct,
)
agent_state.t += 1
return [ac_data]
def to_config(self):
return StateBufferConnector.__name__, None
@staticmethod
def from_config(ctx: ConnectorContext, params: List[Any]):
return StateBufferConnector(ctx)
register_connector(StateBufferConnector.__name__, StateBufferConnector)
```
#### File: rllib/connectors/connector.py
```python
import abc
import gym
import logging
from typing import Any, Dict, List, Tuple
from ray.tune.registry import RLLIB_CONNECTOR, _global_registry
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.view_requirement import ViewRequirement
from ray.rllib.utils.annotations import DeveloperAPI
from ray.rllib.utils.typing import (
ActionConnectorDataType,
AgentConnectorDataType,
TensorType,
TrainerConfigDict,
)
logger = logging.getLogger(__name__)
@DeveloperAPI
class ConnectorContext:
"""Data bits that may be needed for running connectors.
Note(jungong) : we need to be really careful with the data fields here.
E.g., everything needs to be serializable, in case we need to fetch them
in a remote setting.
"""
# TODO(jungong) : figure out how to fetch these in a remote setting.
# Probably from a policy server when initializing a policy client.
def __init__(
self,
config: TrainerConfigDict = None,
model_initial_states: List[TensorType] = None,
observation_space: gym.Space = None,
action_space: gym.Space = None,
view_requirements: Dict[str, ViewRequirement] = None,
):
"""Construct a ConnectorContext instance.
Args:
model_initial_states: States that are used for constructing
the initial input dict for RNN models. [] if a model is not recurrent.
action_space_struct: a policy's action space, in python
data format. E.g., python dict instead of DictSpace, python tuple
instead of TupleSpace.
"""
self.config = config
self.initial_states = model_initial_states or []
self.observation_space = observation_space
self.action_space = action_space
self.view_requirements = view_requirements
@staticmethod
def from_policy(policy: Policy) -> "ConnectorContext":
"""Build ConnectorContext from a given policy.
Args:
policy: Policy
Returns:
A ConnectorContext instance.
"""
return ConnectorContext(
policy.config,
policy.get_initial_state(),
policy.observation_space,
policy.action_space,
policy.view_requirements,
)
@DeveloperAPI
class Connector(abc.ABC):
"""Connector base class.
    A connector is a step of transformation, applied either to environment data
    before it reaches a policy, or to policy output before it is sent back to
    the environment.
Connectors may be training-aware, for example, behave slightly differently
during training and inference.
All connectors are required to be serializable and implement to_config().
"""
def __init__(self, ctx: ConnectorContext):
# This gets flipped to False for inference.
self.is_training = True
def is_training(self, is_training: bool):
self.is_training = is_training
def to_config(self) -> Tuple[str, List[Any]]:
"""Serialize a connector into a JSON serializable Tuple.
to_config is required, so that all Connectors are serializable.
Returns:
A tuple of connector's name and its serialized states.
"""
        # Must be implemented by each connector.
        raise NotImplementedError
    @staticmethod
    def from_config(ctx: ConnectorContext, params: List[Any]) -> "Connector":
"""De-serialize a JSON params back into a Connector.
from_config is required, so that all Connectors are serializable.
Args:
ctx: Context for constructing this connector.
params: Serialized states of the connector to be recovered.
Returns:
De-serialized connector.
"""
        # Must be implemented by each connector.
        raise NotImplementedError
@DeveloperAPI
class AgentConnector(Connector):
"""Connector connecting user environments to RLlib policies.
An agent connector transforms a single piece of data in AgentConnectorDataType
format into a list of data in the same AgentConnectorDataTypes format.
The API is designed so multi-agent observations can be broken and emitted as
multiple single agent observations.
    AgentConnectorDataTypes can be used to carry arbitrary types of env data.
Example:
.. code-block:: python
# A dict of multi-agent data from one env step() call.
ac = AgentConnectorDataType(
env_id="env_1",
agent_id=None,
data={
"agent_1": np.array(...),
"agent_2": np.array(...),
}
)
Example:
.. code-block:: python
# Single agent data ready to be preprocessed.
ac = AgentConnectorDataType(
env_id="env_1",
agent_id="agent_1",
data=np.array(...)
)
We can adapt a simple stateless function into an agent connector by using
register_lambda_agent_connector:
.. code-block:: python
TimesTwoAgentConnector = register_lambda_agent_connector(
"TimesTwoAgentConnector", lambda data: data * 2
)
More complicated agent connectors can be implemented by extending this
AgentConnector class:
Example:
.. code-block:: python
class FrameSkippingAgentConnector(AgentConnector):
def __init__(self, n):
self._n = n
                self._frame_count = defaultdict(lambda: defaultdict(int))
def reset(self, env_id: str):
del self._frame_count[env_id]
def __call__(
self, ac_data: AgentConnectorDataType
) -> List[AgentConnectorDataType]:
assert ac_data.env_id and ac_data.agent_id, (
"Frame skipping works per agent")
count = self._frame_count[ac_data.env_id][ac_data.agent_id]
self._frame_count[ac_data.env_id][ac_data.agent_id] = count + 1
return [ac_data] if count % self._n == 0 else []
As shown, an agent connector may choose to emit an empty list to stop input
    observations from being processed further.
"""
def reset(self, env_id: str):
"""Reset connector state for a specific environment.
For example, at the end of an episode.
Args:
            env_id: ID of a user environment. Required.
"""
pass
def on_policy_output(self, output: ActionConnectorDataType):
"""Callback on agent connector of policy output.
        This is useful for certain connectors, for example RNN state buffering,
        where the agent connector needs to be aware of the output of a policy
        forward pass.
        Args:
            output: Env and agent IDs, plus data output from policy forward pass.
"""
pass
def __call__(self, ac_data: AgentConnectorDataType) -> List[AgentConnectorDataType]:
"""Transform incoming data from environment before they reach policy.
        Args:
            ac_data: Env and agent IDs, plus arbitrary data from an environment or
                upstream agent connectors.
Returns:
A list of transformed data in AgentConnectorDataType format.
The return type is a list because an AgentConnector may choose to
derive multiple outputs for a single input data, for example
multi-agent obs -> multiple single agent obs.
Agent connectors may also return an empty list for certain input,
useful for connectors such as frame skipping.
"""
raise NotImplementedError
@DeveloperAPI
class ActionConnector(Connector):
"""Action connector connects policy outputs including actions,
to user environments.
An action connector transforms a single piece of policy output in
ActionConnectorDataType format, which is basically PolicyOutputType
plus env and agent IDs.
    Any function that operates directly on PolicyOutputType can be
    easily adapted into an ActionConnector by using register_lambda_action_connector.
Example:
.. code-block:: python
ZeroActionConnector = register_lambda_action_connector(
"ZeroActionsConnector",
lambda actions, states, fetches: (
np.zeros_like(actions), states, fetches
)
)
More complicated action connectors can also be implemented by sub-classing
this ActionConnector class.
"""
def __call__(self, ac_data: ActionConnectorDataType) -> ActionConnectorDataType:
"""Transform policy output before they are sent to a user environment.
Args:
ctx: Context for running this connector call.
ac_data: Env and agent IDs, plus policy output.
Returns:
The processed action connector data.
"""
raise NotImplementedError
@DeveloperAPI
class ConnectorPipeline:
"""Utility class for quick manipulation of a connector pipeline."""
def remove(self, name: str):
"""Remove a connector by <name>
Args:
name: name of the connector to be removed.
"""
idx = -1
for idx, c in enumerate(self.connectors):
if c.__class__.__name__ == name:
break
if idx < 0:
raise ValueError(f"Can not find connector {name}")
del self.connectors[idx]
def insert_before(self, name: str, connector: Connector):
"""Insert a new connector before connector <name>
Args:
name: name of the connector before which a new connector
will get inserted.
connector: a new connector to be inserted.
"""
idx = -1
for idx, c in enumerate(self.connectors):
if c.__class__.__name__ == name:
break
if idx < 0:
raise ValueError(f"Can not find connector {name}")
self.connectors.insert(idx, connector)
def insert_after(self, name: str, connector: Connector):
"""Insert a new connector after connector <name>
Args:
name: name of the connector after which a new connector
will get inserted.
connector: a new connector to be inserted.
"""
idx = -1
for idx, c in enumerate(self.connectors):
if c.__class__.__name__ == name:
break
if idx < 0:
raise ValueError(f"Can not find connector {name}")
self.connectors.insert(idx + 1, connector)
def prepend(self, connector: Connector):
"""Append a new connector at the beginning of a connector pipeline.
Args:
connector: a new connector to be appended.
"""
self.connectors.insert(0, connector)
def append(self, connector: Connector):
"""Append a new connector at the end of a connector pipeline.
Args:
connector: a new connector to be appended.
"""
self.connectors.append(connector)
@DeveloperAPI
def register_connector(name: str, cls: Connector):
"""Register a connector for use with RLlib.
Args:
name: Name to register.
        cls: The Connector class to register under the given name.
"""
if not issubclass(cls, Connector):
raise TypeError("Can only register Connector type.", cls)
_global_registry.register(RLLIB_CONNECTOR, name, cls)
@DeveloperAPI
def get_connector(ctx: ConnectorContext, name: str, params: Tuple[Any]) -> Connector:
"""Get a connector by its name and serialized config.
Args:
name: name of the connector.
params: serialized parameters of the connector.
Returns:
Constructed connector.
"""
if not _global_registry.contains(RLLIB_CONNECTOR, name):
raise NameError("connector not found.", name)
cls = _global_registry.get(RLLIB_CONNECTOR, name)
return cls.from_config(ctx, params)
```
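The registry helpers at the bottom of this file let connectors be rebuilt from their serialized `(name, params)` form. A hedged sketch of that round trip with a trivial, hypothetical no-op connector; it assumes Ray/Tune's global registry is importable in the current process:
```python
from ray.rllib.connectors.connector import (
    ActionConnector,
    ConnectorContext,
    get_connector,
    register_connector,
)

class NoOpActionConnector(ActionConnector):
    """Illustrative connector that passes policy output through unchanged."""

    def __call__(self, ac_data):
        return ac_data

    def to_config(self):
        return NoOpActionConnector.__name__, None

    @staticmethod
    def from_config(ctx, params):
        return NoOpActionConnector(ctx)

register_connector(NoOpActionConnector.__name__, NoOpActionConnector)

# Later (e.g. when restoring from a checkpoint), rebuild it by name.
ctx = ConnectorContext()
connector = get_connector(ctx, "NoOpActionConnector", None)
```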
#### File: rllib/connectors/util.py
```python
from ray.rllib.connectors.connector import (
Connector,
get_connector,
)
from typing import Dict
def get_connectors_from_cfg(config: dict) -> Dict[str, Connector]:
return {k: get_connector(*v) for k, v in config.items()}
```
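A hedged usage sketch of the helper above. It assumes the referenced connector class has already been registered (importing its defining module does that), and that each config value is the `(ctx, name, params)` triple that `get_connector` expects:
```python
from ray.rllib.connectors.action.lambdas import ConvertToNumpyConnector  # noqa: F401 (registers it)
from ray.rllib.connectors.connector import ConnectorContext
from ray.rllib.connectors.util import get_connectors_from_cfg

ctx = ConnectorContext()
cfg = {"to_numpy": (ctx, "ConvertToNumpyConnector", None)}
connectors = get_connectors_from_cfg(cfg)  # {"to_numpy": <ConvertToNumpyConnector>}
```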
#### File: evaluation/tests/test_envs_that_crash.py
```python
import unittest
import ray
from ray.rllib.algorithms.pg import pg
from ray.rllib.examples.env.cartpole_crashing import CartPoleCrashing
from ray.rllib.utils.error import EnvError
class TestEnvsThatCrash(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
ray.init(num_cpus=4)
@classmethod
def tearDownClass(cls) -> None:
ray.shutdown()
def test_crash_during_env_pre_checking(self):
"""Expect the env pre-checking to fail on each worker."""
config = (
pg.PGConfig()
.rollouts(num_rollout_workers=2, num_envs_per_worker=4)
.environment(
env=CartPoleCrashing,
env_config={
# Crash prob=100% (during pre-checking's `step()` test calls).
"p_crash": 1.0,
"init_time_s": 0.5,
},
)
)
# Expect ValueError due to pre-checking failing (our pre-checker module
# raises a ValueError if `step()` fails).
self.assertRaisesRegex(
ValueError,
"Simulated env crash!",
lambda: config.build(),
)
def test_crash_during_sampling(self):
"""Expect some sub-envs to fail (and not recover)."""
config = (
pg.PGConfig()
.rollouts(num_rollout_workers=2, num_envs_per_worker=3)
.environment(
env=CartPoleCrashing,
env_config={
# Crash prob=20%.
"p_crash": 0.2,
"init_time_s": 0.3,
# Make sure nothing happens during pre-checks.
"skip_env_checking": True,
},
)
)
        # Pre-checking is disabled, so building the Algorithm is safe.
algo = config.build()
# Expect EnvError due to the sub-env(s) crashing on the different workers
# and `ignore_worker_failures=False` (so the original EnvError should
# just be bubbled up by RLlib Algorithm and tune.Trainable during the `step()`
# call).
self.assertRaisesRegex(EnvError, "Simulated env crash!", lambda: algo.train())
def test_crash_only_one_worker_during_sampling_but_ignore(self):
"""Expect some sub-envs to fail (and not recover), but ignore."""
config = (
pg.PGConfig()
.rollouts(
num_rollout_workers=2,
num_envs_per_worker=3,
# Ignore worker failures (continue with worker #2).
ignore_worker_failures=True,
)
.environment(
env=CartPoleCrashing,
env_config={
# Crash prob=80%.
"p_crash": 0.8,
# Only crash on worker with index 1.
"crash_on_worker_indices": [1],
# Make sure nothing happens during pre-checks.
"skip_env_checking": True,
},
)
)
        # Pre-checking is disabled, so building the Algorithm is safe.
algo = config.build()
# Expect some errors being logged here, but in general, should continue
# as we ignore worker failures.
algo.train()
# One worker has been removed -> Only one left.
self.assertTrue(len(algo.workers.remote_workers()) == 1)
algo.stop()
def test_crash_only_one_worker_during_sampling_but_recreate(self):
"""Expect some sub-envs to fail (and not recover), but re-create worker."""
config = (
pg.PGConfig()
.rollouts(
num_rollout_workers=2,
rollout_fragment_length=10,
num_envs_per_worker=3,
# Re-create failed workers (then continue).
recreate_failed_workers=True,
)
.training(train_batch_size=20)
.environment(
env=CartPoleCrashing,
env_config={
# Crash prob=1%.
"p_crash": 0.01,
# Only crash on worker with index 2.
"crash_on_worker_indices": [2],
# Make sure nothing happens during pre-checks.
"skip_env_checking": True,
},
)
)
        # Pre-checking is disabled, so building the Algorithm is safe.
algo = config.build()
# Try to re-create for infinite amount of times.
# The worker recreation/ignore tolerance used to be hard-coded to 3, but this
# has now been
for _ in range(10):
# Expect some errors being logged here, but in general, should continue
# as we recover from all worker failures.
algo.train()
# One worker has been removed, then re-created -> Still 2 left.
self.assertTrue(len(algo.workers.remote_workers()) == 2)
algo.stop()
def test_crash_sub_envs_during_sampling_but_restart_sub_envs(self):
"""Expect sub-envs to fail (and not recover), but re-start them individually."""
config = (
pg.PGConfig()
.rollouts(
num_rollout_workers=2,
num_envs_per_worker=3,
# Re-start failed individual sub-envs (then continue).
# This means no workers will ever fail due to individual env errors
# (only maybe for reasons other than the env).
restart_failed_sub_environments=True,
# If the worker was affected by an error (other than the env error),
# allow it to be removed, but training will continue.
ignore_worker_failures=True,
)
.environment(
env=CartPoleCrashing,
env_config={
# Crash prob=1%.
"p_crash": 0.01,
# Make sure nothing happens during pre-checks.
"skip_env_checking": True,
},
)
)
        # Pre-checking is disabled, so building the Algorithm is safe.
algo = config.build()
        # Try to re-create the sub-envs an unlimited number of times.
        # The worker recreation/ignore tolerance used to be hard-coded to 3, but this
        # limit has since been lifted.
for _ in range(10):
# Expect some errors being logged here, but in general, should continue
# as we recover from all sub-env failures.
algo.train()
# No worker has been removed. Still 2 left.
self.assertTrue(len(algo.workers.remote_workers()) == 2)
algo.stop()
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
```
#### File: offline/estimators/doubly_robust.py
```python
from ray.rllib.offline.estimators.off_policy_estimator import OffPolicyEstimate
from ray.rllib.offline.estimators.direct_method import DirectMethod, k_fold_cv
from ray.rllib.utils.annotations import DeveloperAPI, override
from ray.rllib.utils.typing import SampleBatchType
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.numpy import convert_to_numpy
import numpy as np
@DeveloperAPI
class DoublyRobust(DirectMethod):
"""The Doubly Robust (DR) estimator.
DR estimator described in https://arxiv.org/pdf/1511.03722.pdf"""
@override(DirectMethod)
def estimate(
self, batch: SampleBatchType, should_train: bool = True
) -> OffPolicyEstimate:
self.check_can_estimate_for(batch)
estimates = []
# Split data into train and test using k-fold cross validation
for train_episodes, test_episodes in k_fold_cv(batch, self.k, should_train):
# Train Q-function
if train_episodes:
# Reinitialize model
self.model.reset()
train_batch = SampleBatch.concat_samples(train_episodes)
losses = self.train(train_batch)
self.losses.append(losses)
# Calculate doubly robust OPE estimates
for episode in test_episodes:
rewards, old_prob = episode["rewards"], episode["action_prob"]
new_prob = np.exp(self.action_log_likelihood(episode))
v_old = 0.0
v_new = 0.0
q_values = self.model.estimate_q(
episode[SampleBatch.OBS], episode[SampleBatch.ACTIONS]
)
q_values = convert_to_numpy(q_values)
all_actions = np.zeros([episode.count, self.policy.action_space.n])
all_actions[:] = np.arange(self.policy.action_space.n)
# Two transposes required for torch.distributions to work
tmp_episode = episode.copy()
tmp_episode[SampleBatch.ACTIONS] = all_actions.T
action_probs = np.exp(self.action_log_likelihood(tmp_episode)).T
v_values = self.model.estimate_v(episode[SampleBatch.OBS], action_probs)
v_values = convert_to_numpy(v_values)
for t in reversed(range(episode.count)):
v_old = rewards[t] + self.gamma * v_old
v_new = v_values[t] + (new_prob[t] / old_prob[t]) * (
rewards[t] + self.gamma * v_new - q_values[t]
)
v_new = v_new.item()
estimates.append(
OffPolicyEstimate(
self.name,
{
"v_old": v_old,
"v_new": v_new,
"v_gain": v_new / max(1e-8, v_old),
},
)
)
return estimates
```
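The backward loop in `estimate` is the standard per-episode doubly-robust recursion. A small numpy sketch of just that recursion, with made-up per-step rewards, importance ratios, and Q-/V-model estimates:
```python
import numpy as np

gamma = 0.99
rewards = np.array([1.0, 0.0, 1.0])
rho = np.array([1.2, 0.8, 1.1])    # new_prob / old_prob per step
q_hat = np.array([0.9, 0.5, 0.8])  # model estimate of Q(s_t, a_t)
v_hat = np.array([1.0, 0.6, 0.7])  # model estimate of V(s_t)

v_old, v_new = 0.0, 0.0
for t in reversed(range(len(rewards))):
    v_old = rewards[t] + gamma * v_old  # plain behavior-policy return
    v_new = v_hat[t] + rho[t] * (rewards[t] + gamma * v_new - q_hat[t])

print(v_old, v_new, v_new / max(1e-8, v_old))
```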
#### File: offline/estimators/weighted_importance_sampling.py
```python
from ray.rllib.offline.estimators.off_policy_estimator import (
OffPolicyEstimator,
OffPolicyEstimate,
)
from ray.rllib.policy import Policy
from ray.rllib.utils.annotations import override, DeveloperAPI
from ray.rllib.utils.typing import SampleBatchType
import numpy as np
@DeveloperAPI
class WeightedImportanceSampling(OffPolicyEstimator):
"""The weighted step-wise IS estimator.
Step-wise WIS estimator in https://arxiv.org/pdf/1511.03722.pdf,
https://arxiv.org/pdf/1911.06854.pdf"""
@override(OffPolicyEstimator)
def __init__(self, name: str, policy: Policy, gamma: float):
super().__init__(name, policy, gamma)
self.filter_values = []
self.filter_counts = []
@override(OffPolicyEstimator)
def estimate(self, batch: SampleBatchType) -> OffPolicyEstimate:
self.check_can_estimate_for(batch)
estimates = []
for sub_batch in batch.split_by_episode():
rewards, old_prob = sub_batch["rewards"], sub_batch["action_prob"]
new_prob = np.exp(self.action_log_likelihood(sub_batch))
# calculate importance ratios
p = []
for t in range(sub_batch.count):
if t == 0:
pt_prev = 1.0
else:
pt_prev = p[t - 1]
p.append(pt_prev * new_prob[t] / old_prob[t])
for t, v in enumerate(p):
if t >= len(self.filter_values):
self.filter_values.append(v)
self.filter_counts.append(1.0)
else:
self.filter_values[t] += v
self.filter_counts[t] += 1.0
# calculate stepwise weighted IS estimate
v_old = 0.0
v_new = 0.0
for t in range(sub_batch.count):
v_old += rewards[t] * self.gamma ** t
w_t = self.filter_values[t] / self.filter_counts[t]
v_new += p[t] / w_t * rewards[t] * self.gamma ** t
estimates.append(
OffPolicyEstimate(
self.name,
{
"v_old": v_old,
"v_new": v_new,
"v_gain": v_new / max(1e-8, v_old),
},
)
)
return estimates
```
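A compact numpy sketch of the step-wise weighted IS estimate computed above, for a single episode; with only one episode seen, the running per-step averages `w_t` equal the episode's own ratios (all numbers are illustrative):
```python
import numpy as np

gamma = 0.99
rewards = np.array([1.0, 0.0, 1.0])
new_prob = np.array([0.6, 0.5, 0.7])
old_prob = np.array([0.5, 0.5, 0.5])

# Cumulative importance ratios p_t = prod_{t' <= t} new_prob / old_prob.
p = np.cumprod(new_prob / old_prob)

# Per-step weights w_t: average of p_t over all episodes seen so far.
filter_values, filter_counts = p.copy(), np.ones_like(p)
w = filter_values / filter_counts

discounts = gamma ** np.arange(len(rewards))
v_old = np.sum(rewards * discounts)
v_new = np.sum(p / w * rewards * discounts)
```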
#### File: tests/backward_compat/test_backward_compat.py
```python
import unittest
class TestBackwardCompatibility(unittest.TestCase):
def test_register_all(self):
"""Tests the old (1.10) way of registering all Trainers.
Uses the old 1.10 registry.py file and thus makes sure all Trainers can still
be imported using their old paths (albeit this will create a warning).
"""
# Try importing old Trainer class (this is just an Alias now to the `Algorithm`
# class).
from ray.rllib.agents.trainer import Trainer # noqa
# Old registry code.
from ray.rllib.tests.backward_compat.old_registry import (
ALGORITHMS,
_get_trainer_class,
)
from ray.rllib.contrib.registry import CONTRIBUTED_ALGORITHMS
# Test the old `_get_trainer_class()` utility that was used to pull Trainer
# class and default config.
for key in (
list(ALGORITHMS.keys())
+ list(CONTRIBUTED_ALGORITHMS.keys())
+ ["__fake", "__sigmoid_fake_data", "__parameter_tuning"]
):
_get_trainer_class(key)
def test_old_configs(self):
"""Tests creating various Trainers (Algorithms) using 1.10 config dicts."""
from ray.rllib.tests.backward_compat.old_ppo import DEFAULT_CONFIG
from ray.rllib.agents.ppo import PPOTrainer
config = DEFAULT_CONFIG.copy()
trainer = PPOTrainer(config=config, env="CartPole-v0")
trainer.train()
trainer.stop()
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
```
#### File: rllib/tests/test_nested_action_spaces.py
```python
from gym.spaces import Box, Dict, Discrete, MultiDiscrete, Tuple
import numpy as np
import os
import shutil
import tree # pip install dm_tree
import unittest
import ray
from ray.rllib.algorithms.bc import BC
from ray.rllib.algorithms.pg import PG, DEFAULT_CONFIG
from ray.rllib.examples.env.random_env import RandomEnv
from ray.rllib.offline.json_reader import JsonReader
from ray.rllib.utils.test_utils import framework_iterator
SPACES = {
"dict": Dict(
{
"a": Dict(
{
"aa": Box(-1.0, 1.0, shape=(3,)),
"ab": MultiDiscrete([4, 3]),
}
),
"b": Discrete(3),
"c": Tuple([Box(0, 10, (2,), dtype=np.int32), Discrete(2)]),
"d": Box(0, 3, (), dtype=np.int64),
}
),
"tuple": Tuple(
[
Tuple(
[
Box(-1.0, 1.0, shape=(2,)),
Discrete(3),
]
),
MultiDiscrete([4, 3]),
Dict(
{
"a": Box(0, 100, (), dtype=np.int32),
"b": Discrete(2),
}
),
]
),
"multidiscrete": MultiDiscrete([2, 3, 4]),
"intbox": Box(0, 100, (2,), dtype=np.int32),
}
class NestedActionSpacesTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
ray.init(num_cpus=5)
@classmethod
def tearDownClass(cls):
ray.shutdown()
def test_nested_action_spaces(self):
config = DEFAULT_CONFIG.copy()
config["env"] = RandomEnv
# Write output to check, whether actions are written correctly.
tmp_dir = os.popen("mktemp -d").read()[:-1]
if not os.path.exists(tmp_dir):
            # Last resort: Resolve via the underlying tempdir (and cut the leading "/tmp").
tmp_dir = ray._private.utils.tempfile.gettempdir() + tmp_dir[4:]
assert os.path.exists(tmp_dir), f"'{tmp_dir}' not found!"
config["output"] = tmp_dir
# Switch off OPE as we don't write action-probs.
# TODO: We should probably always write those if `output` is given.
config["off_policy_estimation_methods"] = {}
# Pretend actions in offline files are already normalized.
config["actions_in_input_normalized"] = True
for _ in framework_iterator(config):
for name, action_space in SPACES.items():
config["env_config"] = {
"action_space": action_space,
}
for flatten in [True, False]:
print(f"A={action_space} flatten={flatten}")
shutil.rmtree(config["output"])
config["_disable_action_flattening"] = not flatten
pg = PG(config)
pg.train()
pg.stop()
# Check actions in output file (whether properly flattened
# or not).
reader = JsonReader(
inputs=config["output"],
ioctx=pg.workers.local_worker().io_context,
)
sample_batch = reader.next()
if flatten:
assert isinstance(sample_batch["actions"], np.ndarray)
assert len(sample_batch["actions"].shape) == 2
assert sample_batch["actions"].shape[0] == len(sample_batch)
else:
tree.assert_same_structure(
pg.get_policy().action_space_struct,
sample_batch["actions"],
)
# Test, whether offline data can be properly read by
# BC, configured accordingly.
config["input"] = config["output"]
del config["output"]
bc = BC(config=config)
bc.train()
bc.stop()
config["output"] = tmp_dir
config["input"] = "sampler"
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
```
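The flatten check in the test above distinguishes between actions written as flat arrays and actions kept in their nested space structure. A tiny `dm_tree` sketch of what such a nested action looks like as leaves (sample values are made up):
```python
import numpy as np
import tree  # pip install dm_tree

nested_action = {
    "a": {"aa": np.array([0.1, -0.2, 0.3]), "ab": np.array([2, 1])},
    "b": 1,
    "c": (np.array([3, 7], dtype=np.int32), 0),
    "d": np.int64(2),
}

leaves = tree.flatten(nested_action)              # depth-first list of leaves
rebuilt = tree.unflatten_as(nested_action, leaves)
tree.assert_same_structure(nested_action, rebuilt)
```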
#### File: exploration/tests/test_random_encoder.py
```python
import sys
import unittest
import pytest
import ray
import ray.rllib.algorithms.ppo as ppo
import ray.rllib.algorithms.sac as sac
from ray.rllib.algorithms.callbacks import RE3UpdateCallbacks
class TestRE3(unittest.TestCase):
"""Tests for RE3 exploration algorithm."""
@classmethod
def setUpClass(cls):
ray.init()
@classmethod
def tearDownClass(cls):
ray.shutdown()
def run_re3(self, rl_algorithm):
"""Tests RE3 for PPO and SAC.
Both the on-policy and off-policy setups are validated.
"""
if rl_algorithm == "PPO":
config = ppo.PPOConfig().to_dict()
algo_cls = ppo.PPO
beta_schedule = "constant"
elif rl_algorithm == "SAC":
config = sac.SACConfig().to_dict()
algo_cls = sac.SAC
beta_schedule = "linear_decay"
class RE3Callbacks(RE3UpdateCallbacks, config["callbacks"]):
pass
config["env"] = "Pendulum-v1"
config["callbacks"] = RE3Callbacks
config["exploration_config"] = {
"type": "RE3",
"embeds_dim": 128,
"beta_schedule": beta_schedule,
"sub_exploration": {
"type": "StochasticSampling",
},
}
num_iterations = 30
algo = algo_cls(config=config)
learnt = False
for i in range(num_iterations):
result = algo.train()
print(result)
if result["episode_reward_max"] > -900.0:
print("Reached goal after {} iters!".format(i))
learnt = True
break
algo.stop()
self.assertTrue(learnt)
def test_re3_ppo(self):
"""Tests RE3 with PPO."""
self.run_re3("PPO")
def test_re3_sac(self):
"""Tests RE3 with SAC."""
self.run_re3("SAC")
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
```
#### File: utils/pre_checks/env.py
```python
from copy import copy
import logging
import gym
import numpy as np
import traceback
from typing import TYPE_CHECKING, Set
from ray.actor import ActorHandle
from ray.rllib.utils.annotations import DeveloperAPI
from ray.rllib.utils.spaces.space_utils import convert_element_to_space_type
from ray.rllib.utils.typing import EnvType
from ray.util import log_once
if TYPE_CHECKING:
from ray.rllib.env import BaseEnv, MultiAgentEnv
logger = logging.getLogger(__name__)
@DeveloperAPI
def check_env(env: EnvType) -> None:
"""Run pre-checks on env that uncover common errors in environments.
Args:
env: Environment to be checked.
Raises:
ValueError: If env is not an instance of SUPPORTED_ENVIRONMENT_TYPES.
ValueError: See check_gym_env docstring for details.
"""
from ray.rllib.env import (
BaseEnv,
MultiAgentEnv,
RemoteBaseEnv,
VectorEnv,
ExternalMultiAgentEnv,
ExternalEnv,
)
if hasattr(env, "_skip_env_checking") and env._skip_env_checking:
        # This is a workaround for some environments that we already have in RLlib
# that we want to skip checking for now until we have the time to fix them.
if log_once("skip_env_checking"):
logger.warning("Skipping env checking for this experiment")
return
try:
if not isinstance(
env,
(
BaseEnv,
gym.Env,
MultiAgentEnv,
RemoteBaseEnv,
VectorEnv,
ExternalMultiAgentEnv,
ExternalEnv,
ActorHandle,
),
):
raise ValueError(
"Env must be of one of the following supported types: BaseEnv, "
"gym.Env, "
"MultiAgentEnv, VectorEnv, RemoteBaseEnv, ExternalMultiAgentEnv, "
f"ExternalEnv, but instead is of type {type(env)}."
)
if isinstance(env, MultiAgentEnv):
check_multiagent_environments(env)
elif isinstance(env, gym.Env):
check_gym_environments(env)
elif isinstance(env, BaseEnv):
check_base_env(env)
else:
logger.warning(
"Env checking isn't implemented for VectorEnvs, RemoteBaseEnvs, "
"ExternalMultiAgentEnv, ExternalEnvs or environments that are "
"Ray actors."
)
except Exception:
actual_error = traceback.format_exc()
raise ValueError(
f"{actual_error}\n"
"The above error has been found in your environment! "
"We've added a module for checking your custom environments. It "
"may cause your experiment to fail if your environment is not set up"
"correctly. You can disable this behavior by setting "
"`disable_env_checking=True` in your environment config "
"dictionary. You can run the environment checking module "
"standalone by calling ray.rllib.utils.check_env([env])."
)
@DeveloperAPI
def check_gym_environments(env: gym.Env) -> None:
"""Checking for common errors in gym environments.
Args:
env: Environment to be checked.
Warning:
        If env has no `spec` attribute with a `max_episode_steps`
        sub-attribute.
Raises:
AttributeError: If env has no observation space.
AttributeError: If env has no action space.
ValueError: Observation space must be a gym.spaces.Space.
ValueError: Action space must be a gym.spaces.Space.
ValueError: Observation sampled from observation space must be
contained in the observation space.
ValueError: Action sampled from action space must be
            contained in the action space.
        ValueError: If env cannot be reset.
ValueError: If an observation collected from a call to env.reset().
is not contained in the observation_space.
ValueError: If env cannot be stepped via a call to env.step().
ValueError: If the observation collected from env.step() is not
contained in the observation_space.
AssertionError: If env.step() returns a reward that is not an
int or float.
        AssertionError: If env.step() returns a done that is not a bool.
AssertionError: If env.step() returns an env_info that is not a dict.
"""
# check that env has observation and action spaces
if not hasattr(env, "observation_space"):
raise AttributeError("Env must have observation_space.")
if not hasattr(env, "action_space"):
raise AttributeError("Env must have action_space.")
# check that observation and action spaces are gym.spaces
if not isinstance(env.observation_space, gym.spaces.Space):
raise ValueError("Observation space must be a gym.space")
if not isinstance(env.action_space, gym.spaces.Space):
raise ValueError("Action space must be a gym.space")
# Raise a warning if there isn't a max_episode_steps attribute.
if not hasattr(env, "spec") or not hasattr(env.spec, "max_episode_steps"):
if log_once("max_episode_steps"):
logger.warning(
"Your env doesn't have a .spec.max_episode_steps "
"attribute. This is fine if you have set 'horizon' "
"in your config dictionary, or `soft_horizon`. "
"However, if you haven't, 'horizon' will default "
"to infinity, and your environment will not be "
"reset."
)
# check if sampled actions and observations are contained within their
# respective action and observation spaces.
def get_type(var):
return var.dtype if hasattr(var, "dtype") else type(var)
sampled_action = env.action_space.sample()
sampled_observation = env.observation_space.sample()
# check if observation generated from stepping the environment is
# contained within the observation space
reset_obs = env.reset()
if not env.observation_space.contains(reset_obs):
reset_obs_type = get_type(reset_obs)
space_type = env.observation_space.dtype
error = (
f"The observation collected from env.reset() was not "
f"contained within your env's observation space. Its possible "
f"that There was a type mismatch, or that one of the "
f"sub-observations was out of bounds: \n\n reset_obs: "
f"{reset_obs}\n\n env.observation_space: "
f"{env.observation_space}\n\n reset_obs's dtype: "
f"{reset_obs_type}\n\n env.observation_space's dtype: "
f"{space_type}"
)
temp_sampled_reset_obs = convert_element_to_space_type(
reset_obs, sampled_observation
)
if not env.observation_space.contains(temp_sampled_reset_obs):
raise ValueError(error)
# check if env.step can run, and generates observations rewards, done
# signals and infos that are within their respective spaces and are of
# the correct dtypes
next_obs, reward, done, info = env.step(sampled_action)
if not env.observation_space.contains(next_obs):
next_obs_type = get_type(next_obs)
space_type = env.observation_space.dtype
error = (
f"The observation collected from env.step(sampled_action) was "
f"not contained within your env's observation space. Its "
f"possible that There was a type mismatch, or that one of the "
f"sub-observations was out of bounds:\n\n next_obs: {next_obs}"
f"\n\n env.observation_space: {env.observation_space}"
f"\n\n next_obs's dtype: {next_obs_type}"
f"\n\n env.observation_space's dtype: {space_type}"
)
temp_sampled_next_obs = convert_element_to_space_type(
next_obs, sampled_observation
)
if not env.observation_space.contains(temp_sampled_next_obs):
raise ValueError(error)
_check_done(done)
_check_reward(reward)
_check_info(info)
@DeveloperAPI
def check_multiagent_environments(env: "MultiAgentEnv") -> None:
"""Checking for common errors in RLlib MultiAgentEnvs.
Args:
env: The env to be checked.
"""
from ray.rllib.env import MultiAgentEnv
if not isinstance(env, MultiAgentEnv):
raise ValueError("The passed env is not a MultiAgentEnv.")
elif not (
hasattr(env, "observation_space")
and hasattr(env, "action_space")
and hasattr(env, "_agent_ids")
and hasattr(env, "_spaces_in_preferred_format")
):
if log_once("ma_env_super_ctor_called"):
logger.warning(
f"Your MultiAgentEnv {env} does not have some or all of the needed "
"base-class attributes! Make sure you call `super().__init__` from "
"within your MutiAgentEnv's constructor. "
"This will raise an error in the future."
)
return
reset_obs = env.reset()
sampled_obs = env.observation_space_sample()
_check_if_element_multi_agent_dict(env, reset_obs, "reset()")
_check_if_element_multi_agent_dict(
env, sampled_obs, "env.observation_space_sample()"
)
try:
env.observation_space_contains(reset_obs)
except Exception as e:
raise ValueError(
"Your observation_space_contains function has some error "
) from e
if not env.observation_space_contains(reset_obs):
error = (
_not_contained_error("env.reset", "observation")
+ f"\n\n reset_obs: {reset_obs}\n\n env.observation_space_sample():"
f" {sampled_obs}\n\n "
)
raise ValueError(error)
if not env.observation_space_contains(sampled_obs):
error = (
_not_contained_error("observation_space_sample", "observation")
+ f"\n\n env.observation_space_sample():"
f" {sampled_obs}\n\n "
)
raise ValueError(error)
sampled_action = env.action_space_sample()
_check_if_element_multi_agent_dict(env, sampled_action, "action_space_sample")
try:
env.action_space_contains(sampled_action)
except Exception as e:
raise ValueError("Your action_space_contains function has some error ") from e
if not env.action_space_contains(sampled_action):
error = (
_not_contained_error("action_space_sample", "action")
+ f"\n\n sampled_action {sampled_action}\n\n"
)
raise ValueError(error)
next_obs, reward, done, info = env.step(sampled_action)
_check_if_element_multi_agent_dict(env, next_obs, "step, next_obs")
_check_if_element_multi_agent_dict(env, reward, "step, reward")
_check_if_element_multi_agent_dict(env, done, "step, done")
_check_if_element_multi_agent_dict(env, info, "step, info")
_check_reward(
{"dummy_env_id": reward}, base_env=True, agent_ids=env.get_agent_ids()
)
_check_done({"dummy_env_id": done}, base_env=True, agent_ids=env.get_agent_ids())
_check_info({"dummy_env_id": info}, base_env=True, agent_ids=env.get_agent_ids())
if not env.observation_space_contains(next_obs):
error = (
_not_contained_error("env.step(sampled_action)", "observation")
+ f":\n\n next_obs: {next_obs} \n\n sampled_obs: {sampled_obs}"
)
raise ValueError(error)
@DeveloperAPI
def check_base_env(env: "BaseEnv") -> None:
"""Checking for common errors in RLlib BaseEnvs.
Args:
env: The env to be checked.
"""
from ray.rllib.env import BaseEnv
if not isinstance(env, BaseEnv):
raise ValueError("The passed env is not a BaseEnv.")
reset_obs = env.try_reset()
sampled_obs = env.observation_space_sample()
_check_if_multi_env_dict(env, reset_obs, "try_reset")
_check_if_multi_env_dict(env, sampled_obs, "observation_space_sample()")
try:
env.observation_space_contains(reset_obs)
except Exception as e:
raise ValueError(
"Your observation_space_contains function has some error "
) from e
if not env.observation_space_contains(reset_obs):
error = (
_not_contained_error("try_reset", "observation")
+ f": \n\n reset_obs: {reset_obs}\n\n "
f"env.observation_space_sample(): {sampled_obs}\n\n "
)
raise ValueError(error)
if not env.observation_space_contains(sampled_obs):
error = (
_not_contained_error("observation_space_sample", "observation")
+ f": \n\n sampled_obs: {sampled_obs}\n\n "
)
raise ValueError(error)
sampled_action = env.action_space_sample()
try:
env.action_space_contains(sampled_action)
except Exception as e:
raise ValueError("Your action_space_contains function has some error ") from e
if not env.action_space_contains(sampled_action):
error = (
_not_contained_error("action_space_sample", "action")
+ f": \n\n sampled_action {sampled_action}\n\n"
)
raise ValueError(error)
_check_if_multi_env_dict(env, sampled_action, "action_space_sample()")
env.send_actions(sampled_action)
next_obs, reward, done, info, _ = env.poll()
_check_if_multi_env_dict(env, next_obs, "step, next_obs")
_check_if_multi_env_dict(env, reward, "step, reward")
_check_if_multi_env_dict(env, done, "step, done")
_check_if_multi_env_dict(env, info, "step, info")
if not env.observation_space_contains(next_obs):
error = (
_not_contained_error("poll", "observation")
+ f": \n\n reset_obs: {reset_obs}\n\n env.step():{next_obs}\n\n"
)
raise ValueError(error)
_check_reward(reward, base_env=True, agent_ids=env.get_agent_ids())
_check_done(done, base_env=True, agent_ids=env.get_agent_ids())
_check_info(info, base_env=True, agent_ids=env.get_agent_ids())
def _check_reward(reward, base_env=False, agent_ids=None):
if base_env:
for _, multi_agent_dict in reward.items():
for agent_id, rew in multi_agent_dict.items():
if not (
np.isreal(rew)
and not isinstance(rew, bool)
and (
np.isscalar(rew)
or (isinstance(rew, np.ndarray) and rew.shape == ())
)
):
error = (
"Your step function must return rewards that are"
f" integer or float. reward: {rew}. Instead it was a "
f"{type(rew)}"
)
raise ValueError(error)
if not (agent_id in agent_ids or agent_id == "__all__"):
error = (
f"Your reward dictionary must have agent ids that belong to "
f"the environment. Agent_ids recieved from "
f"env.get_agent_ids() are: {agent_ids}"
)
raise ValueError(error)
elif not (
np.isreal(reward)
and not isinstance(reward, bool)
and (
np.isscalar(reward)
or (isinstance(reward, np.ndarray) and reward.shape == ())
)
):
error = (
"Your step function must return a reward that is integer or float. "
"Instead it was a {}".format(type(reward))
)
raise ValueError(error)
def _check_done(done, base_env=False, agent_ids=None):
if base_env:
for _, multi_agent_dict in done.items():
for agent_id, done_ in multi_agent_dict.items():
                if not isinstance(done_, (bool, np.bool_)):
                    raise ValueError(
                        "Your step function must return dones that are boolean. But "
                        f"instead was a {type(done_)}"
)
if not (agent_id in agent_ids or agent_id == "__all__"):
error = (
f"Your dones dictionary must have agent ids that belong to "
f"the environment. Agent_ids recieved from "
f"env.get_agent_ids() are: {agent_ids}"
)
raise ValueError(error)
elif not isinstance(done, (bool, np.bool_)):
error = (
"Your step function must return a done that is a boolean. But instead "
f"was a {type(done)}"
)
raise ValueError(error)
def _check_info(info, base_env=False, agent_ids=None):
if base_env:
for _, multi_agent_dict in info.items():
for agent_id, inf in multi_agent_dict.items():
if not isinstance(inf, dict):
raise ValueError(
"Your step function must return infos that are a dict. "
f"instead was a {type(inf)}: element: {inf}"
)
if not (agent_id in agent_ids or agent_id == "__all__"):
error = (
f"Your dones dictionary must have agent ids that belong to "
f"the environment. Agent_ids recieved from "
f"env.get_agent_ids() are: {agent_ids}"
)
raise ValueError(error)
elif not isinstance(info, dict):
error = (
"Your step function must return a info that "
f"is a dict. element type: {type(info)}. element: {info}"
)
raise ValueError(error)
def _not_contained_error(func_name, _type):
_error = (
f"The {_type} collected from {func_name} was not contained within"
f" your env's {_type} space. Its possible that there was a type"
f"mismatch (for example {_type}s of np.float32 and a space of"
f"np.float64 {_type}s), or that one of the sub-{_type}s was"
f"out of bounds"
)
return _error
def _check_if_multi_env_dict(env, element, function_string):
if not isinstance(element, dict):
raise ValueError(
f"The element returned by {function_string} is not a "
f"MultiEnvDict. Instead, it is of type: {type(element)}"
)
env_ids = env.get_sub_environments(as_dict=True).keys()
if not all(k in env_ids for k in element):
raise ValueError(
f"The element returned by {function_string} "
f"has dict keys that don't correspond to "
f"environment ids for this env "
f"{list(env_ids)}"
)
for _, multi_agent_dict in element.items():
_check_if_element_multi_agent_dict(
env, multi_agent_dict, function_string, base_env=True
)
def _check_if_element_multi_agent_dict(env, element, function_string, base_env=False):
if not isinstance(element, dict):
if base_env:
error = (
f"The element returned by {function_string} contains values "
f"that are not MultiAgentDicts. Instead, they are of "
f"type: {type(element)}"
)
else:
error = (
f"The element returned by {function_string} is not a "
f"MultiAgentDict. Instead, it is of type: "
f" {type(element)}"
)
raise ValueError(error)
agent_ids: Set = copy(env.get_agent_ids())
agent_ids.add("__all__")
if not all(k in agent_ids for k in element):
if base_env:
error = (
f"The element returned by {function_string} has agent_ids"
f" that are not the names of the agents in the env."
f"agent_ids in this\nMultiEnvDict:"
f" {list(element.keys())}\nAgent_ids in this env:"
f"{list(env.get_agent_ids())}"
)
else:
error = (
f"The element returned by {function_string} has agent_ids"
f" that are not the names of the agents in the env. "
f"\nAgent_ids in this MultiAgentDict: "
f"{list(element.keys())}\nAgent_ids in this env:"
f"{list(env.get_agent_ids())}. You likely need to add the private "
f"attribute `_agent_ids` to your env, which is a set containing the "
f"ids of agents supported by your env."
)
raise ValueError(error)
```
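The three checkers above share one pattern: sample from the declared spaces, exercise `reset()`/`step()` once, and fail loudly on any mismatch. A minimal usage sketch follows; the import path is an assumption and may need adjusting to wherever these helpers live in your Ray installation.
```python
# Minimal usage sketch for the single-agent checker above.
# The import path is an assumption; adjust it to your Ray version.
import gym

from ray.rllib.utils.pre_checks.env import check_gym_environments

env = gym.make("CartPole-v1")

# Raises AttributeError/ValueError on malformed spaces or on reset()/step()
# outputs that fall outside the declared spaces; otherwise it only logs a
# warning when spec.max_episode_steps is missing.
check_gym_environments(env)
```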
#### File: replay_buffers/tests/test_prioritized_replay_buffer_replay_buffer_api.py
```python
from collections import Counter
import numpy as np
import unittest
from ray.rllib.utils.replay_buffers.prioritized_replay_buffer import (
PrioritizedReplayBuffer,
)
from ray.rllib.policy.sample_batch import SampleBatch, MultiAgentBatch
from ray.rllib.utils.test_utils import check
class TestPrioritizedReplayBuffer(unittest.TestCase):
"""
Tests insertion and (weighted) sampling of the PrioritizedReplayBuffer.
"""
capacity = 10
alpha = 1.0
beta = 1.0
def _generate_data(self):
return SampleBatch(
{
SampleBatch.T: [np.random.random((4,))],
SampleBatch.ACTIONS: [np.random.choice([0, 1])],
SampleBatch.REWARDS: [np.random.rand()],
SampleBatch.OBS: [np.random.random((4,))],
SampleBatch.NEXT_OBS: [np.random.random((4,))],
SampleBatch.DONES: [np.random.choice([False, True])],
}
)
def test_multi_agent_batches(self):
"""Tests buffer with storage of MultiAgentBatches."""
self.batch_id = 0
def _add_multi_agent_batch_to_buffer(
buffer, num_policies, num_batches=5, seq_lens=False, **kwargs
):
def _generate_data(policy_id):
batch = SampleBatch(
{
SampleBatch.T: [0, 1],
SampleBatch.ACTIONS: 2 * [np.random.choice([0, 1])],
SampleBatch.REWARDS: 2 * [np.random.rand()],
SampleBatch.OBS: 2 * [np.random.random((4,))],
SampleBatch.NEXT_OBS: 2 * [np.random.random((4,))],
SampleBatch.DONES: [False, True],
SampleBatch.EPS_ID: 2 * [self.batch_id],
SampleBatch.AGENT_INDEX: 2 * [0],
SampleBatch.SEQ_LENS: [2],
"batch_id": 2 * [self.batch_id],
"policy_id": 2 * [policy_id],
}
)
if not seq_lens:
del batch[SampleBatch.SEQ_LENS]
self.batch_id += 1
return batch
for i in range(num_batches):
                # generate a few policy batches
policy_batches = {
idx: _generate_data(idx)
for idx, _ in enumerate(range(num_policies))
}
batch = MultiAgentBatch(policy_batches, num_batches * 2)
buffer.add(batch, **kwargs)
buffer = PrioritizedReplayBuffer(
capacity=100, storage_unit="fragments", alpha=0.5
)
# Test add/sample
_add_multi_agent_batch_to_buffer(buffer, num_policies=2, num_batches=2)
        # After adding these two batches, the buffer should not be full yet
assert len(buffer) == 2
assert buffer._num_timesteps_added == 8
assert buffer._num_timesteps_added_wrap == 8
assert buffer._next_idx == 2
assert buffer._eviction_started is False
        # Sampling 3 items should yield 12 timesteps (each stored batch counts 4)
buffer.sample(3, beta=0.5)
assert buffer._num_timesteps_sampled == 12
_add_multi_agent_batch_to_buffer(
buffer, batch_size=100, num_policies=3, num_batches=3
)
        # After adding three more batches, the buffer holds 5 items (26 timesteps)
assert len(buffer) == 5
assert buffer._num_timesteps_added == 26
assert buffer._num_timesteps_added_wrap == 26
assert buffer._next_idx == 5
def test_sequence_size(self):
# Seq-len=1.
buffer = PrioritizedReplayBuffer(
capacity=100, alpha=0.1, storage_unit="fragments"
)
for _ in range(200):
buffer.add(self._generate_data())
assert len(buffer._storage) == 100, len(buffer._storage)
assert buffer.stats()["added_count"] == 200, buffer.stats()
# Test get_state/set_state.
state = buffer.get_state()
new_memory = PrioritizedReplayBuffer(capacity=100, alpha=0.1)
new_memory.set_state(state)
assert len(new_memory._storage) == 100, len(new_memory._storage)
assert new_memory.stats()["added_count"] == 200, new_memory.stats()
# Seq-len=5.
buffer = PrioritizedReplayBuffer(
capacity=100, alpha=0.1, storage_unit="fragments"
)
for _ in range(40):
buffer.add(
SampleBatch.concat_samples([self._generate_data() for _ in range(5)])
)
assert len(buffer._storage) == 20, len(buffer._storage)
assert buffer.stats()["added_count"] == 200, buffer.stats()
# Test get_state/set_state.
state = buffer.get_state()
new_memory = PrioritizedReplayBuffer(capacity=100, alpha=0.1)
new_memory.set_state(state)
assert len(new_memory._storage) == 20, len(new_memory._storage)
assert new_memory.stats()["added_count"] == 200, new_memory.stats()
def test_add(self):
buffer = PrioritizedReplayBuffer(capacity=2, alpha=self.alpha)
# Assert indices 0 before insert.
self.assertEqual(len(buffer), 0)
self.assertEqual(buffer._next_idx, 0)
# Insert single record.
data = self._generate_data()
buffer.add(data, weight=0.5)
self.assertTrue(len(buffer) == 1)
self.assertTrue(buffer._next_idx == 1)
# Insert single record.
data = self._generate_data()
buffer.add(data, weight=0.1)
self.assertTrue(len(buffer) == 2)
self.assertTrue(buffer._next_idx == 0)
# Insert over capacity.
data = self._generate_data()
buffer.add(data, weight=1.0)
self.assertTrue(len(buffer) == 2)
self.assertTrue(buffer._next_idx == 1)
# Test get_state/set_state.
state = buffer.get_state()
new_memory = PrioritizedReplayBuffer(capacity=2, alpha=self.alpha)
new_memory.set_state(state)
self.assertTrue(len(new_memory) == 2)
self.assertTrue(new_memory._next_idx == 1)
def test_update_priorities(self):
buffer = PrioritizedReplayBuffer(self.capacity, alpha=self.alpha)
# Insert n samples.
num_records = 5
for i in range(num_records):
data = self._generate_data()
buffer.add(data, weight=1.0)
self.assertTrue(len(buffer) == i + 1)
self.assertTrue(buffer._next_idx == i + 1)
# Test get_state/set_state.
state = buffer.get_state()
new_memory = PrioritizedReplayBuffer(self.capacity, alpha=self.alpha)
new_memory.set_state(state)
self.assertTrue(len(new_memory) == num_records)
self.assertTrue(new_memory._next_idx == num_records)
# Fetch records, their indices and weights.
batch = buffer.sample(3, beta=self.beta)
weights = batch["weights"]
indices = batch["batch_indexes"]
check(weights, np.ones(shape=(3,)))
self.assertEqual(3, len(indices))
self.assertTrue(len(buffer) == num_records)
self.assertTrue(buffer._next_idx == num_records)
# Update weight of indices 0, 2, 3, 4 to very small.
buffer.update_priorities(
np.array([0, 2, 3, 4]), np.array([0.01, 0.01, 0.01, 0.01])
)
# Expect to sample almost only index 1
# (which still has a weight of 1.0).
for _ in range(10):
batch = buffer.sample(1000, beta=self.beta)
indices = batch["batch_indexes"]
self.assertTrue(970 < np.sum(indices) < 1100)
# Test get_state/set_state.
state = buffer.get_state()
new_memory = PrioritizedReplayBuffer(self.capacity, alpha=self.alpha)
new_memory.set_state(state)
batch = new_memory.sample(1000, beta=self.beta)
indices = batch["batch_indexes"]
self.assertTrue(970 < np.sum(indices) < 1100)
# Update weight of indices 0 and 1 to >> 0.01.
# Expect to sample 0 and 1 equally (and some 2s, 3s, and 4s).
for _ in range(10):
rand = np.random.random() + 0.2
buffer.update_priorities(np.array([0, 1]), np.array([rand, rand]))
batch = buffer.sample(1000, beta=self.beta)
indices = batch["batch_indexes"]
# Expect biased to higher values due to some 2s, 3s, and 4s.
self.assertTrue(400 < np.sum(indices) < 800)
# Test get_state/set_state.
state = buffer.get_state()
new_memory = PrioritizedReplayBuffer(self.capacity, alpha=self.alpha)
new_memory.set_state(state)
batch = new_memory.sample(1000, beta=self.beta)
indices = batch["batch_indexes"]
self.assertTrue(400 < np.sum(indices) < 800)
# Update weights to be 1:2.
# Expect to sample double as often index 1 over index 0
# plus very few times indices 2, 3, or 4.
for _ in range(10):
rand = np.random.random() + 0.2
buffer.update_priorities(np.array([0, 1]), np.array([rand, rand * 2]))
batch = buffer.sample(1000, beta=self.beta)
indices = batch["batch_indexes"]
self.assertTrue(600 < np.sum(indices) < 850)
# Test get_state/set_state.
state = buffer.get_state()
new_memory = PrioritizedReplayBuffer(self.capacity, alpha=self.alpha)
new_memory.set_state(state)
batch = new_memory.sample(1000, beta=self.beta)
indices = batch["batch_indexes"]
self.assertTrue(600 < np.sum(indices) < 850)
# Update weights to be 1:4.
# Expect to sample quadruple as often index 1 over index 0
# plus very few times indices 2, 3, or 4.
for _ in range(10):
rand = np.random.random() + 0.2
buffer.update_priorities(np.array([0, 1]), np.array([rand, rand * 4]))
batch = buffer.sample(1000, beta=self.beta)
indices = batch["batch_indexes"]
self.assertTrue(750 < np.sum(indices) < 950)
# Test get_state/set_state.
state = buffer.get_state()
new_memory = PrioritizedReplayBuffer(self.capacity, alpha=self.alpha)
new_memory.set_state(state)
batch = new_memory.sample(1000, beta=self.beta)
indices = batch["batch_indexes"]
self.assertTrue(750 < np.sum(indices) < 950)
# Update weights to be 1:9.
# Expect to sample 9 times as often index 1 over index 0.
# plus very few times indices 2, 3, or 4.
for _ in range(10):
rand = np.random.random() + 0.2
buffer.update_priorities(np.array([0, 1]), np.array([rand, rand * 9]))
batch = buffer.sample(1000, beta=self.beta)
indices = batch["batch_indexes"]
self.assertTrue(850 < np.sum(indices) < 1100)
# Test get_state/set_state.
state = buffer.get_state()
new_memory = PrioritizedReplayBuffer(self.capacity, alpha=self.alpha)
new_memory.set_state(state)
batch = new_memory.sample(1000, beta=self.beta)
indices = batch["batch_indexes"]
self.assertTrue(850 < np.sum(indices) < 1100)
# Insert n more samples.
num_records = 5
for i in range(num_records):
data = self._generate_data()
buffer.add(data, weight=1.0)
self.assertTrue(len(buffer) == i + 6)
self.assertTrue(buffer._next_idx == (i + 6) % self.capacity)
# Update all weights to be 1.0 to 10.0 and sample a >100 batch.
buffer.update_priorities(
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
np.array([0.001, 0.1, 2.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0]),
)
counts = Counter()
for _ in range(10):
batch = buffer.sample(np.random.randint(100, 600), beta=self.beta)
indices = batch["batch_indexes"]
for i in indices:
counts[i] += 1
# Expect an approximately correct distribution of indices.
self.assertTrue(
counts[9]
>= counts[8]
>= counts[7]
>= counts[6]
>= counts[5]
>= counts[4]
>= counts[3]
>= counts[2]
>= counts[1]
>= counts[0]
)
# Test get_state/set_state.
state = buffer.get_state()
new_memory = PrioritizedReplayBuffer(self.capacity, alpha=self.alpha)
new_memory.set_state(state)
counts = Counter()
for _ in range(10):
batch = new_memory.sample(np.random.randint(100, 600), beta=self.beta)
indices = batch["batch_indexes"]
for i in indices:
counts[i] += 1
self.assertTrue(
counts[9]
>= counts[8]
>= counts[7]
>= counts[6]
>= counts[5]
>= counts[4]
>= counts[3]
>= counts[2]
>= counts[1]
>= counts[0]
)
def test_alpha_parameter(self):
# Test sampling from a PR with a very small alpha (should behave just
# like a regular ReplayBuffer).
buffer = PrioritizedReplayBuffer(self.capacity, alpha=0.01)
# Insert n samples.
num_records = 5
for i in range(num_records):
data = self._generate_data()
buffer.add(data, weight=float(np.random.rand()))
self.assertTrue(len(buffer) == i + 1)
self.assertTrue(buffer._next_idx == i + 1)
# Test get_state/set_state.
state = buffer.get_state()
new_memory = PrioritizedReplayBuffer(self.capacity, alpha=0.01)
new_memory.set_state(state)
self.assertTrue(len(new_memory) == num_records)
self.assertTrue(new_memory._next_idx == num_records)
# Fetch records, their indices and weights.
batch = buffer.sample(1000, beta=self.beta)
indices = batch["batch_indexes"]
counts = Counter()
for i in indices:
counts[i] += 1
# Expect an approximately uniform distribution of indices.
self.assertTrue(any(100 < i < 300 for i in counts.values()))
# Test get_state/set_state.
state = buffer.get_state()
new_memory = PrioritizedReplayBuffer(self.capacity, alpha=0.01)
new_memory.set_state(state)
batch = new_memory.sample(1000, beta=self.beta)
indices = batch["batch_indexes"]
counts = Counter()
for i in indices:
counts[i] += 1
self.assertTrue(any(100 < i < 300 for i in counts.values()))
def test_sequences_unit(self):
"""Tests adding, sampling and eviction of sequences."""
# We do not test the mathematical correctness of our prioritization
# here but rather if it works together with sequence mode at all
buffer = PrioritizedReplayBuffer(capacity=10, storage_unit="sequences")
batches = [
SampleBatch(
{
SampleBatch.T: i * [np.random.random((4,))],
SampleBatch.ACTIONS: i * [np.random.choice([0, 1])],
SampleBatch.REWARDS: i * [np.random.rand()],
SampleBatch.DONES: i * [np.random.choice([False, True])],
SampleBatch.SEQ_LENS: [i],
"batch_id": i * [i],
}
)
for i in range(1, 4)
]
# Add some batches with sequences of low priority
for batch in batches:
buffer.add(batch, weight=0.01)
# Add two high priority sequences
buffer.add(
SampleBatch(
{
SampleBatch.T: 4 * [np.random.random((4,))],
SampleBatch.ACTIONS: 4 * [np.random.choice([0, 1])],
SampleBatch.REWARDS: 4 * [np.random.rand()],
SampleBatch.DONES: 4 * [np.random.choice([False, True])],
SampleBatch.SEQ_LENS: [2, 2],
"batch_id": 4 * [4],
}
),
weight=1,
)
num_sampled_dict = {_id: 0 for _id in range(1, 5)}
num_samples = 200
for i in range(num_samples):
sample = buffer.sample(1, beta=self.beta)
_id = sample["batch_id"][0]
assert len(sample[SampleBatch.SEQ_LENS]) == 1
num_sampled_dict[_id] += 1
        # Out of the five sequences, we want the sequences from the last batch
        # to be sampled almost always
assert np.allclose(
np.array(list(num_sampled_dict.values())) / num_samples,
[0.1, 0.1, 0.1, 0.8],
atol=0.2,
)
# Add another batch to evict
buffer.add(
SampleBatch(
{
SampleBatch.T: 5 * [np.random.random((4,))],
SampleBatch.ACTIONS: 5 * [np.random.choice([0, 1])],
SampleBatch.REWARDS: 5 * [np.random.rand()],
SampleBatch.DONES: 5 * [np.random.choice([False, True])],
SampleBatch.SEQ_LENS: [5],
"batch_id": 5 * [5],
}
),
weight=1,
)
# After adding 1 more batch, eviction has started with 15
# timesteps added in total
assert len(buffer) == 5
assert buffer._num_timesteps_added == sum(range(1, 6))
assert buffer._num_timesteps_added_wrap == 5
assert buffer._next_idx == 1
assert buffer._eviction_started is True
num_sampled_dict = {_id: 0 for _id in range(1, 6)}
num_samples = 200
for i in range(num_samples):
sample = buffer.sample(1, beta=self.beta)
_id = sample["batch_id"][0]
assert len(sample[SampleBatch.SEQ_LENS]) == 1
num_sampled_dict[_id] += 1
# Out of all six sequences, we want sequences from batches 4 and 5
# to be sampled with equal probability
assert np.allclose(
np.array(list(num_sampled_dict.values())) / num_samples,
[0, 0, 0, 0.5, 0.5],
atol=0.25,
)
def test_episodes_unit(self):
"""Tests adding, sampling, and eviction of episodes."""
# We do not test the mathematical correctness of our prioritization
# here but rather if it works together with episode mode at all
buffer = PrioritizedReplayBuffer(capacity=18, storage_unit="episodes")
batches = [
SampleBatch(
{
SampleBatch.T: [0, 1, 2, 3],
SampleBatch.ACTIONS: 4 * [np.random.choice([0, 1])],
SampleBatch.REWARDS: 4 * [np.random.rand()],
SampleBatch.DONES: [False, False, False, True],
SampleBatch.SEQ_LENS: [4],
SampleBatch.EPS_ID: 4 * [i],
}
)
for i in range(3)
]
# Add some batches with episodes of low priority
for batch in batches:
buffer.add(batch, weight=0.01)
# Add two high priority episodes
buffer.add(
SampleBatch(
{
SampleBatch.T: [0, 1, 0, 1],
SampleBatch.ACTIONS: 4 * [np.random.choice([0, 1])],
SampleBatch.REWARDS: 4 * [np.random.rand()],
SampleBatch.DONES: [False, True, False, True],
SampleBatch.SEQ_LENS: [2, 2],
SampleBatch.EPS_ID: [3, 3, 4, 4],
}
),
weight=1,
)
num_sampled_dict = {_id: 0 for _id in range(5)}
num_samples = 200
for i in range(num_samples):
sample = buffer.sample(1, beta=self.beta)
_id = sample[SampleBatch.EPS_ID][0]
assert len(sample[SampleBatch.SEQ_LENS]) == 1
num_sampled_dict[_id] += 1
# All episodes, even though in different batches should be sampled
# equally often
assert np.allclose(
np.array(list(num_sampled_dict.values())) / num_samples,
[0, 0, 0, 0.5, 0.5],
atol=0.1,
)
        # Episode 6 does not end inside this batch, so it should not be added
        # to the buffer
buffer.add(
SampleBatch(
{
SampleBatch.T: [0, 1, 0, 1],
SampleBatch.ACTIONS: 4 * [np.random.choice([0, 1])],
SampleBatch.REWARDS: 4 * [np.random.rand()],
SampleBatch.DONES: [False, True, False, False],
SampleBatch.SEQ_LENS: [2, 2],
SampleBatch.EPS_ID: [5, 5, 6, 6],
}
),
weight=1,
)
num_sampled_dict = {_id: 0 for _id in range(7)}
num_samples = 200
for i in range(num_samples):
sample = buffer.sample(1, beta=self.beta)
_id = sample[SampleBatch.EPS_ID][0]
assert len(sample[SampleBatch.SEQ_LENS]) == 1
num_sampled_dict[_id] += 1
        # Episode 6 should be dropped for not ending inside the batch
assert np.allclose(
np.array(list(num_sampled_dict.values())) / num_samples,
[0, 0, 0, 1 / 3, 1 / 3, 1 / 3, 0],
atol=0.1,
)
# Add another batch to evict the first batch
buffer.add(
SampleBatch(
{
SampleBatch.T: [0, 1, 2, 3],
SampleBatch.ACTIONS: 4 * [np.random.choice([0, 1])],
SampleBatch.REWARDS: 4 * [np.random.rand()],
SampleBatch.DONES: [False, False, False, True],
SampleBatch.SEQ_LENS: [4],
SampleBatch.EPS_ID: 4 * [7],
}
),
weight=0.01,
)
# After adding 1 more batch, eviction has started with 24
# timesteps added in total, 2 of which were discarded
assert len(buffer) == 6
assert buffer._num_timesteps_added == 4 * 6 - 2
assert buffer._num_timesteps_added_wrap == 4
assert buffer._next_idx == 1
assert buffer._eviction_started is True
num_sampled_dict = {_id: 0 for _id in range(8)}
num_samples = 200
for i in range(num_samples):
sample = buffer.sample(1, beta=self.beta)
_id = sample[SampleBatch.EPS_ID][0]
assert len(sample[SampleBatch.SEQ_LENS]) == 1
num_sampled_dict[_id] += 1
assert np.allclose(
np.array(list(num_sampled_dict.values())) / num_samples,
[0, 0, 0, 1 / 3, 1 / 3, 1 / 3, 0, 0],
atol=0.1,
)
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
``` |
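The tests above exercise the `alpha` (prioritization strength) and `beta` (importance-sampling correction) parameters. The sketch below is an illustrative stand-alone computation of those two quantities, not the RLlib implementation; the helper name is made up.
```python
# Illustrative sketch of prioritized-replay math (not the RLlib implementation).
import numpy as np

def prioritized_probs_and_weights(priorities, alpha=1.0, beta=1.0):
    """P(i) ~ p_i**alpha; importance weights w_i = (N * P(i))**-beta, max-normalized."""
    p = np.asarray(priorities, dtype=np.float64) ** alpha
    probs = p / p.sum()
    weights = (len(p) * probs) ** (-beta)
    return probs, weights / weights.max()

# alpha -> 0 approaches uniform sampling (cf. test_alpha_parameter above);
# beta = 1 fully corrects the sampling bias, as the weight checks assume.
probs, weights = prioritized_probs_and_weights([0.01, 1.0, 0.01, 0.01], alpha=0.5, beta=1.0)
```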
{
"source": "jianpengz/MB-DCNN",
"score": 2
} |
#### File: MB-DCNN/dataset/extractPatch_cls_train.py
```python
import os
import numpy as np
from PIL import Image
import pandas as pd
img_size = 224
def data_arg_15(WW, HH, img, train_labels, path_new, index):
crop_num = 1
label = []
train_name = []
    # proportional crops (keep the original aspect ratio)
p_center = [int(WW / 2), int(HH / 2)]
p = [p_center]
    # scales: 4/5, 3.5/5, 3.0/5, 2.5/5, 2.0/5, 1.5/5, 1.0/5 of the image size
scale1_WW = int(4. / 5 * WW)
scale1_HH = int(4. / 5 * HH)
scale2_WW = int(3.5 / 5 * WW)
scale2_HH = int(3.5 / 5 * HH)
scale3_WW = int(3. / 5 * WW)
scale3_HH = int(3. / 5 * HH)
scale4_WW = int(2.5 / 5 * WW)
scale4_HH = int(2.5 / 5 * HH)
scale5_WW = int(2. / 5 * WW)
scale5_HH = int(2. / 5 * HH)
scale6_WW = int(1.5 / 5 * WW)
scale6_HH = int(1.5 / 5 * HH)
scale7_WW = int(1. / 5 * WW)
scale7_HH = int(1. / 5 * HH)
scale_WW = [scale1_WW, scale2_WW, scale3_WW, scale4_WW, scale5_WW, scale6_WW, scale7_WW]
scale_HH = [scale1_HH, scale2_HH, scale3_HH, scale4_HH, scale5_HH, scale6_HH, scale7_HH]
for i in range(1): # 1 point
for j in range(7): # 7 scale
point = p[i]
scale_j_WW = scale_WW[j]
scale_j_HH = scale_HH[j]
rectangle = (point[0] - scale_j_WW / 2, point[1] - scale_j_HH / 2, point[0] + scale_j_WW / 2,
point[1] + scale_j_HH / 2)
img_i_j = img.crop(rectangle)
img_i_j_re = img_i_j.resize((img_size, img_size), Image.BICUBIC)
label.append(train_labels)
train_name.append(index[:-4] + '_15_' + str(crop_num) + '.png')
img_i_j_re.save(path_new + index[:-4] + '_15_' + str(crop_num) + '.png')
crop_num = crop_num + 1
    # non-proportional crops (square crops based on the shorter side)
WH = min(WW, HH)
p_center = [int(WW / 2), int(HH / 2)]
p = [p_center]
    # scales: 4/5, 3.5/5, 3.0/5, 2.5/5, 2.0/5, 1.5/5, 1.0/5 of the shorter side
scale1_WH = int(4.0 / 5 * WH)
scale2_WH = int(3.5 / 5 * WH)
scale3_WH = int(3.0 / 5 * WH)
scale4_WH = int(2.5 / 5 * WH)
scale5_WH = int(2.0 / 5 * WH)
scale6_WH = int(1.5 / 5 * WH)
scale7_WH = int(1.0 / 5 * WH)
scale_WH = [scale1_WH, scale2_WH, scale3_WH, scale4_WH, scale5_WH, scale6_WH, scale7_WH]
for i in range(1): # 1 point
        for j in range(7):  # 7 scales
point = p[i]
scale_j_WH = scale_WH[j]
rectangle = (point[0] - scale_j_WH / 2, point[1] - scale_j_WH / 2, point[0] + scale_j_WH / 2,
point[1] + scale_j_WH / 2)
img_i_j = img.crop(rectangle)
img_i_j_re = img_i_j.resize((img_size, img_size), Image.BICUBIC)
label.append(train_labels)
train_name.append(index[:-4] + '_15_' + str(crop_num) + '.png')
img_i_j_re.save(path_new + index[:-4] + '_15_' + str(crop_num) + '.png')
crop_num = crop_num + 1
return train_name, label
def data_arg_9(WW, HH, img, train_labels, path_new, index):
crop_num = 1
label = []
train_name = []
    # proportional crops (keep the original aspect ratio)
p_center = [int(WW / 2), int(HH / 2)]
p = [p_center]
scale1_WW = int(4. / 5 * WW) # scale 4/5, 3/5, 2/5, 1/5
scale1_HH = int(4. / 5 * HH)
scale2_WW = int(3. / 5 * WW)
scale2_HH = int(3. / 5 * HH)
scale3_WW = int(2. / 5 * WW)
scale3_HH = int(2. / 5 * HH)
scale4_WW = int(1. / 5 * WW)
scale4_HH = int(1. / 5 * HH)
scale_WW = [scale1_WW, scale2_WW, scale3_WW, scale4_WW]
scale_HH = [scale1_HH, scale2_HH, scale3_HH, scale4_HH]
for i in range(1): # 1 point
for j in range(4): # 4 scale
point = p[i]
scale_j_WW = scale_WW[j]
scale_j_HH = scale_HH[j]
rectangle = (point[0] - scale_j_WW / 2, point[1] - scale_j_HH / 2, point[0] + scale_j_WW / 2, point[1] + scale_j_HH / 2)
img_i_j = img.crop(rectangle)
img_i_j_re = img_i_j.resize((img_size, img_size), Image.BICUBIC)
label.append(train_labels)
train_name.append(index[:-4] + '_9_' + str(crop_num) + '.png')
img_i_j_re.save(path_new + index[:-4] + '_9_' + str(crop_num) + '.png')
crop_num = crop_num + 1
    # non-proportional crops (square crops based on the shorter side)
WH = min(WW, HH)
p_center = [int(WW / 2), int(HH / 2)]
p = [p_center]
scale1_WH = int(4. / 5 * WH) # scale 4/5, 3/5, 2/5, 1/5
scale2_WH = int(3. / 5 * WH)
scale3_WH = int(2. / 5 * WH)
scale4_WH = int(1. / 5 * WH)
scale_WH = [scale1_WH, scale2_WH, scale3_WH, scale4_WH]
for i in range(1): # 1 point
for j in range(4): # 4 scale
point = p[i]
scale_j_WH = scale_WH[j]
rectangle = (point[0] - scale_j_WH / 2, point[1] - scale_j_WH / 2, point[0] + scale_j_WH / 2,
point[1] + scale_j_WH / 2)
img_i_j = img.crop(rectangle)
img_i_j_re = img_i_j.resize((img_size, img_size), Image.BICUBIC)
label.append(train_labels)
train_name.append(index[:-4] + '_9_' + str(crop_num) + '.png')
img_i_j_re.save(path_new + index[:-4] + '_9_' + str(crop_num) + '.png')
crop_num = crop_num + 1
return train_name, label
class_p = 'Training_Add'
num_resize = 224
data_labels = pd.read_csv('data/ISIC-2017_'+class_p+'_Part3_GroundTruth.csv') # number: 3320
labels_ori = np.stack([data_labels.nodiease, data_labels.melanoma, data_labels.seborrheic_keratosis],axis=-1)
labels_ori = np.argmax(labels_ori,axis=-1)
imagePathDir = os.listdir('data/ISIC-2017_'+class_p+'_Data/Images/')
imagePathDir.sort()
path_new = 'cls_data/'+class_p+'_resize_crop_cls/'
if not os.path.isdir(path_new):
os.makedirs(path_new)
num = 0
labels = []
train_names = []
for index in imagePathDir:  # class 0 -> 9 crops, classes 1/2 -> 15 crops
print(num)
# read img
img = Image.open('data/ISIC-2017_'+class_p+'_Data/Images/'+index)
img_re = img.resize((num_resize, num_resize), Image.BICUBIC)
labels.append(labels_ori[num])
train_names.append(index[:-4] + '.png')
img_re.save(path_new + index[:-4] + '.png')
[WW, HH] = img.size
if labels_ori[num] == 0:
train_name_crop, labels_crop = data_arg_9(WW, HH, img, labels_ori[num], path_new, index)
else:
train_name_crop, labels_crop = data_arg_15(WW, HH, img, labels_ori[num], path_new, index)
train_names.extend(train_name_crop)
labels.extend(labels_crop)
num = num + 1
dataframe = pd.DataFrame({'ID':train_names,'Labels':labels})
dataframe.to_csv("cls_data/ISIC-2017_"+class_p+"_Part3_GroundTruth_crop_cls.csv",index=False)
```
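Both `data_arg_9` and `data_arg_15` above reduce to the same primitive: crop a centered box whose sides are a fraction of the image (proportional branch) or of its shorter side (square branch), then resize to 224. A small hypothetical helper capturing that primitive:
```python
# Hypothetical helper mirroring the crop geometry used in data_arg_9/data_arg_15.
from PIL import Image

def center_crop_scaled(img, frac, keep_aspect=True, out_size=224):
    """Crop a centered box covering `frac` of the image and resize to out_size.

    keep_aspect=True  -> box of size (frac*W, frac*H)      (proportional branch)
    keep_aspect=False -> square box of side frac*min(W, H) (square branch)
    """
    w, h = img.size
    cx, cy = w / 2, h / 2
    bw, bh = (frac * w, frac * h) if keep_aspect else (frac * min(w, h),) * 2
    box = (cx - bw / 2, cy - bh / 2, cx + bw / 2, cy + bh / 2)
    return img.crop(box).resize((out_size, out_size), Image.BICUBIC)
```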
#### File: MB-DCNN/dataset/my_datasets.py
```python
import numpy as np
import torchvision.transforms.functional as tf
import random
from torch.utils import data
from torchvision import transforms
from PIL import Image
################# Dataset for Seg
class MyDataSet_seg(data.Dataset):
def __init__(self, root_path, list_path, root_path_coarsemask=None, crop_size=(224, 224), max_iters=None):
self.root_path = root_path
self.root_path_coarsemask = root_path_coarsemask
self.list_path = list_path
self.crop_w, self.crop_h = crop_size
self.img_ids = [i_id.strip() for i_id in open(list_path)]
if not max_iters==None:
self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
self.files = []
for name in self.img_ids:
img_file = name[0:name.find(' ')]
label_file = name[name.find(' ')+1:]
self.files.append({
"img": img_file,
"label": label_file,
"name": name
})
self.train_augmentation = transforms.Compose(
[transforms.RandomAffine(degrees=10, translate=(0, 0.1), scale=(0.9, 1.1), shear=5.729578),
transforms.RandomVerticalFlip(p=0.5),
transforms.RandomHorizontalFlip(p=0.5),
transforms.ToTensor(),
transforms.ToPILImage(),
transforms.Resize(224)
])
self.train_coarsemask_augmentation = transforms.Compose(
[transforms.RandomAffine(degrees=10, translate=(0, 0.1), scale=(0.9, 1.1), shear=5.729578),
transforms.RandomVerticalFlip(p=0.5),
transforms.RandomHorizontalFlip(p=0.5),
transforms.ToTensor(),
transforms.ToPILImage(),
transforms.Resize(224)
])
self.train_gt_augmentation = transforms.Compose(
[transforms.RandomAffine(degrees=10, translate=(0, 0.1), scale=(0.9, 1.1), shear=5.729578),
transforms.RandomVerticalFlip(p=0.5),
transforms.RandomHorizontalFlip(p=0.5),
transforms.ToTensor(),
transforms.ToPILImage(),
transforms.Resize(224)
])
def __len__(self):
return len(self.files)
def __getitem__(self, index):
datafiles = self.files[index]
if self.root_path_coarsemask is None:
image = Image.open(self.root_path + datafiles["img"])
label = Image.open(self.root_path + datafiles["label"])
is_crop = [0,1]
random.shuffle(is_crop)
if is_crop[0] == 0:
[WW, HH] = image.size
p_center = [int(WW / 2), int(HH / 2)]
crop_num = np.array(range(30, int(np.mean(p_center) / 2), 30))
random.shuffle(crop_num)
crop_p = crop_num[0]
rectangle = (crop_p, crop_p, WW - crop_p, HH - crop_p)
image = image.crop(rectangle)
label = label.crop(rectangle)
image = image.resize((self.crop_w, self.crop_h), Image.BICUBIC)
label = label.resize((self.crop_w, self.crop_h), Image.NEAREST)
else:
image = image.resize((self.crop_w, self.crop_h), Image.BICUBIC)
label = label.resize((self.crop_w, self.crop_h), Image.NEAREST)
seed = np.random.randint(2147483647)
random.seed(seed)
image = self.train_augmentation(image)
random.seed(seed)
label = self.train_gt_augmentation(label)
image = np.array(image) / 255.
image = image.transpose((2, 0, 1))
image = image.astype(np.float32)
label = np.array(label)
label = np.float32(label > 0)
name = datafiles["img"][7:23]
return image.copy(), label.copy(), name
else:
image = Image.open(self.root_path + datafiles["img"])
coarsemask = Image.open(self.root_path_coarsemask + datafiles["img"][7::])
label = Image.open(self.root_path + datafiles["label"])
assert coarsemask.size == label.size
is_crop = [0,1]
random.shuffle(is_crop)
if is_crop[0] == 0:
[WW, HH] = image.size
p_center = [int(WW / 2), int(HH / 2)]
crop_num = np.array(range(30, int(np.mean(p_center) / 2), 30))
random.shuffle(crop_num)
crop_p = crop_num[0]
rectangle = (crop_p, crop_p, WW - crop_p, HH - crop_p)
image = image.crop(rectangle)
coarsemask = coarsemask.crop(rectangle)
label = label.crop(rectangle)
image = image.resize((self.crop_w, self.crop_h), Image.BICUBIC)
coarsemask = coarsemask.resize((self.crop_w, self.crop_h), Image.NEAREST)
label = label.resize((self.crop_w, self.crop_h), Image.NEAREST)
else:
image = image.resize((self.crop_w, self.crop_h), Image.BICUBIC)
coarsemask = coarsemask.resize((self.crop_w, self.crop_h), Image.NEAREST)
label = label.resize((self.crop_w, self.crop_h), Image.NEAREST)
seed = np.random.randint(2147483647)
random.seed(seed)
image = self.train_augmentation(image)
random.seed(seed)
coarsemask = self.train_coarsemask_augmentation(coarsemask)
random.seed(seed)
label = self.train_gt_augmentation(label)
image = np.array(image) / 255.
image = image.transpose((2, 0, 1))
image = image.astype(np.float32)
coarsemask = np.array(coarsemask)
coarsemask = np.float32(coarsemask > 0)
label = np.array(label)
label = np.float32(label > 0)
name = datafiles["img"][7:23]
return image.copy(), coarsemask.copy(), label.copy(), name
class MyValDataSet_seg(data.Dataset):
def __init__(self, root_path, list_path, root_path_coarsemask=None, crop_size=(224, 224)):
self.root_path = root_path
self.root_path_coarsemask = root_path_coarsemask
self.list_path = list_path
self.crop_h, self.crop_w = crop_size
self.img_ids = [i_id.strip() for i_id in open(list_path)]
self.files = []
for name in self.img_ids:
img_file = name[0:name.find(' ')]
label_file = name[name.find(' ')+1:]
self.files.append({
"img": img_file,
"label": label_file,
"name": name
})
def __len__(self):
return len(self.files)
def __getitem__(self, index):
datafiles = self.files[index]
if self.root_path_coarsemask is None:
image = Image.open(self.root_path + datafiles["img"])
label = Image.open(self.root_path + datafiles["label"])
image = image.resize((self.crop_h, self.crop_w), Image.BICUBIC)
label = label.resize((self.crop_h, self.crop_w), Image.NEAREST)
image = np.array(image) / 255.
image = image.transpose(2, 0, 1)
image = image.astype(np.float32)
label = np.array(label)
name = datafiles["img"][7:23]
return image.copy(), label.copy(), name
else:
image = Image.open(self.root_path + datafiles["img"])
coarsemask = Image.open(self.root_path_coarsemask + datafiles["img"][7::])
label = Image.open(self.root_path + datafiles["label"])
assert coarsemask.size == label.size
image = image.resize((self.crop_h, self.crop_w), Image.BICUBIC)
coarsemask = coarsemask.resize((self.crop_h, self.crop_w), Image.NEAREST)
label = label.resize((self.crop_h, self.crop_w), Image.NEAREST)
image = np.array(image) / 255.
image = image.transpose(2, 0, 1)
image = image.astype(np.float32)
coarsemask = np.array(coarsemask)
coarsemask = np.float32(coarsemask > 0)
label = np.array(label)
name = datafiles["img"][7:23]
return image.copy(), coarsemask.copy(), label.copy(), name
class MyTestDataSet_seg(data.Dataset):
def __init__(self, root_path, list_path, root_path_coarsemask=None, crop_size=(224, 224)):
self.root_path = root_path
self.root_path_coarsemask = root_path_coarsemask
self.list_path = list_path
self.crop_h, self.crop_w = crop_size
self.img_ids = [i_id.strip() for i_id in open(list_path)]
self.files = []
for name in self.img_ids:
img_file = name[0:name.find(' ')]
label_file = name[name.find(' ')+1:]
self.files.append({
"img": img_file,
"label": label_file,
"name": name
})
def __len__(self):
return len(self.files)
def __getitem__(self, index):
datafiles = self.files[index]
if self.root_path_coarsemask is None:
image = Image.open(self.root_path + datafiles["img"])
label = Image.open(self.root_path + datafiles["label"])
image0 = image.resize((self.crop_h, self.crop_w), Image.BICUBIC)
image0 = np.array(image0) / 255.
image0 = image0.transpose(2, 0, 1).astype(np.float32)
image1 = image.resize((self.crop_h + 32, self.crop_w + 32), Image.BICUBIC)
image1 = np.array(image1) / 255.
image1 = image1.transpose(2, 0, 1).astype(np.float32)
image2 = image.resize((self.crop_h + 64, self.crop_w + 64), Image.BICUBIC)
image2 = np.array(image2) / 255.
image2 = image2.transpose(2, 0, 1).astype(np.float32)
label = np.array(label)
name = datafiles["img"][7:23]
return image0.copy(), image1.copy(), image2.copy(), label.copy(), name
else:
image = Image.open(self.root_path + datafiles["img"])
coarsemask = Image.open(self.root_path_coarsemask + datafiles["img"][7::])
label = Image.open(self.root_path + datafiles["label"])
assert coarsemask.size == label.size
image0 = image.resize((self.crop_h, self.crop_w), Image.BICUBIC)
image0 = np.array(image0) / 255.
image0 = image0.transpose(2, 0, 1).astype(np.float32)
coarsemask0 = coarsemask.resize((self.crop_h, self.crop_w), Image.NEAREST)
coarsemask0 = np.float32(np.array(coarsemask0) > 0)
image1 = image.resize((self.crop_h + 32, self.crop_w + 32), Image.BICUBIC)
image1 = np.array(image1) / 255.
image1 = image1.transpose(2, 0, 1).astype(np.float32)
coarsemask1 = coarsemask.resize((self.crop_h + 32, self.crop_w + 32), Image.NEAREST)
coarsemask1 = np.float32(np.array(coarsemask1) > 0)
image2 = image.resize((self.crop_h + 64, self.crop_w + 64), Image.BICUBIC)
image2 = np.array(image2) / 255.
image2 = image2.transpose(2, 0, 1).astype(np.float32)
coarsemask2 = coarsemask.resize((self.crop_h + 64, self.crop_w + 64), Image.NEAREST)
coarsemask2 = np.float32(np.array(coarsemask2) > 0)
label = np.array(label)
name = datafiles["img"][7:23]
return image0.copy(), image1.copy(), image2.copy(), coarsemask0.copy(), coarsemask1.copy(), coarsemask2.copy(), label.copy(), name
################# Dataset for generating Coarsemask
class MyGenDataSet(data.Dataset):
def __init__(self, root_path, list_path, mode=0, crop_size=(224, 224)):
self.root_path = root_path
self.list_path = list_path
self.mode = mode
self.crop_h, self.crop_w = crop_size
self.img_ids = [i_id.strip() for i_id in open(list_path)]
self.files = []
for name in self.img_ids:
img_file = name[0:name.find(' ')]
self.files.append({
"img": img_file,
"name": name
})
def __len__(self):
return len(self.files)
def __getitem__(self, index):
datafiles = self.files[index]
image_ori = Image.open(self.root_path + datafiles["img"])
if self.mode == 0:
image = np.array(image_ori) / 255.
image = image.transpose(2, 0, 1)
image = image.astype(np.float32)
name = datafiles["img"]
return image.copy(), name
else:
image = image_ori.resize((self.crop_h, self.crop_w), Image.BICUBIC)
image = np.array(image) / 255.
image = image.transpose(2, 0, 1)
image = image.astype(np.float32)
image_ori = np.array(image_ori)
name = datafiles["img"][7:23]
return image_ori.copy(), image.copy(), name
################# Dataset for MaskCN
class MyDataSet_cls(data.Dataset):
def __init__(self, root_path, root_path_coarsemask, list_path, max_iters=None):
self.root_path = root_path
self.root_path_coarsemask = root_path_coarsemask
self.list_path = list_path
self.img_ids = [i_id.strip() for i_id in open(list_path)]
if not max_iters==None:
self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
self.files = []
for name in self.img_ids:
img_file = name[0:name.find(' ')]
label_file = name[name.find(' ')+1:]
self.files.append({
"img": img_file,
"label": label_file,
"name": name
})
self.train_augmentation = transforms.Compose(
[transforms.RandomAffine(degrees=10, translate=(0, 0.1), scale=(0.9, 1.1), shear=5.729578),
transforms.RandomVerticalFlip(p=0.5),
transforms.RandomHorizontalFlip(p=0.5),
transforms.ToTensor(),
transforms.ToPILImage(),
transforms.Resize(224)
])
self.train_coarsemask_augmentation = transforms.Compose(
[transforms.RandomAffine(degrees=10, translate=(0, 0.1), scale=(0.9, 1.1), shear=5.729578),
transforms.RandomVerticalFlip(p=0.5),
transforms.RandomHorizontalFlip(p=0.5),
transforms.ToTensor(),
transforms.ToPILImage(),
transforms.Resize(224)
])
def __len__(self):
return len(self.files)
def __getitem__(self, index):
datafiles = self.files[index]
image = Image.open(self.root_path + datafiles["img"])
coarsemask = Image.open(self.root_path_coarsemask + datafiles["img"])
        label = np.array(int(datafiles["label"]))
seed = np.random.randint(2147483647)
random.seed(seed)
image = self.train_augmentation(image)
random.seed(seed)
coarsemask = self.train_coarsemask_augmentation(coarsemask)
image = np.array(image) / 255.
image = image.transpose((2, 0, 1))
image = image.astype(np.float32)
coarsemask = np.array(coarsemask)
coarsemask = np.float32(coarsemask > 0)
name = datafiles["img"]
return image.copy(), coarsemask.copy(), label, name
class MyValDataSet_cls(data.Dataset):
def __init__(self, root_path, root_path_coarsemask, list_path, crop_size=(224, 224)):
self.root_path = root_path
self.root_path_coarsemask = root_path_coarsemask
self.list_path = list_path
self.crop_h, self.crop_w = crop_size
self.img_ids = [i_id.strip() for i_id in open(list_path)]
self.files = []
for name in self.img_ids:
img_file = name[0:name.find(' ')]
label_file = name[name.find(' ') + 1:]
self.files.append({
"img": img_file,
"label": label_file,
"name": name
})
def __len__(self):
return len(self.files)
def __getitem__(self, index):
datafiles = self.files[index]
image = Image.open(self.root_path + datafiles["img"])
coarsemask = Image.open(self.root_path_coarsemask + datafiles["img"])
        label = np.array(int(datafiles["label"]))
image = np.array(image) / 255.
image = image.transpose(2, 0, 1)
image = image.astype(np.float32)
coarsemask = np.array(coarsemask)
coarsemask = np.float32(coarsemask > 0)
name = datafiles["img"]
return image.copy(), coarsemask.copy(), label, name
```
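A minimal sketch of how these dataset classes are typically wired into PyTorch loaders; the paths, list files, and batch sizes below are placeholders, not values taken from this repository.
```python
# Usage sketch; all paths, list files and batch sizes are placeholders.
from torch.utils import data

from dataset.my_datasets import MyDataSet_seg, MyValDataSet_seg

train_loader = data.DataLoader(
    MyDataSet_seg(root_path="dataset/seg_data/Training/",
                  list_path="dataset/ISIC/Training_seg.txt",
                  crop_size=(224, 224)),
    batch_size=8, shuffle=True, num_workers=4, pin_memory=True)

val_loader = data.DataLoader(
    MyValDataSet_seg(root_path="dataset/seg_data/Validation/",
                     list_path="dataset/ISIC/Validation_seg.txt"),
    batch_size=1, shuffle=False, num_workers=4, pin_memory=True)

for image, label, name in train_loader:
    # image: (B, 3, 224, 224) float32, label: (B, 224, 224) float32 in {0, 1}
    break
```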
#### File: jianpengz/MB-DCNN/eval_EnhancedSN.py
```python
import torch
import numpy as np
import torch.backends.cudnn as cudnn
from tqdm import tqdm
from net.models import Xception_dilation, deeplabv3plus_en
from sklearn.metrics import accuracy_score
from apex import amp
from dataset.my_datasets import MyTestDataSet_seg
from torch.utils import data
import torch.nn.functional as F
def val_pred(MaskCN, EnhanceSN, image, coarsemask):
rot_90 = torch.rot90(image, 1, [2, 3])
rot_180 = torch.rot90(image, 2, [2, 3])
rot_270 = torch.rot90(image, 3, [2, 3])
hor_flip = torch.flip(image, [-1])
ver_flip = torch.flip(image, [-2])
image = torch.cat([image, rot_90, rot_180, rot_270, hor_flip, ver_flip], dim=0)
rot_90_cm = torch.rot90(coarsemask, 1, [2, 3])
rot_180_cm = torch.rot90(coarsemask, 2, [2, 3])
rot_270_cm = torch.rot90(coarsemask, 3, [2, 3])
hor_flip_cm = torch.flip(coarsemask, [-1])
ver_flip_cm = torch.flip(coarsemask, [-2])
coarsemask = torch.cat([coarsemask, rot_90_cm, rot_180_cm, rot_270_cm, hor_flip_cm, ver_flip_cm], dim=0)
EnhanceSN.eval()
with torch.no_grad():
data_cla = torch.cat((image, coarsemask), dim=1)
cla_cam = cam(MaskCN, data_cla)
cla_cam = torch.from_numpy(np.stack(cla_cam)).unsqueeze(1).cuda()
pred = EnhanceSN(image, cla_cam)
pred = pred[0:1] + torch.rot90(pred[1:2], 3, [2, 3]) + torch.rot90(pred[2:3], 2, [2, 3]) + torch.rot90(pred[3:4], 1, [2, 3]) + torch.flip(pred[4:5], [-1]) + torch.flip(pred[5:6], [-2])
return pred
def val_mode_seg(valloader, MaskCN, EnhanceSN):
dice = []
sen = []
spe = []
acc = []
jac_score = []
for index, batch in tqdm(enumerate(valloader)):
image0, image1, image2, coarsemask0, coarsemask1, coarsemask2, mask, name = batch
image0 = image0.cuda()
image1 = image1.cuda()
image2 = image2.cuda()
coarsemask0 = coarsemask0.unsqueeze(1).cuda()
coarsemask1 = coarsemask1.unsqueeze(1).cuda()
coarsemask2 = coarsemask2.unsqueeze(1).cuda()
mask = mask[0].data.numpy()
test_mask = np.int64(mask > 0)
# print(name)
pred0 = val_pred(MaskCN, EnhanceSN, image0, coarsemask0)
pred1 = val_pred(MaskCN, EnhanceSN, image1, coarsemask1)
pred2 = val_pred(MaskCN, EnhanceSN, image2, coarsemask2)
pred0 = F.interpolate(pred0, size=(mask.shape[0], mask.shape[1]), mode='bicubic')
pred1 = F.interpolate(pred1, size=(mask.shape[0], mask.shape[1]), mode='bicubic')
pred2 = F.interpolate(pred2, size=(mask.shape[0], mask.shape[1]), mode='bicubic')
pred = pred0 + pred1 + pred2
pred = torch.softmax(pred[0], dim=0).cpu().data.numpy()
pred_arg = np.int16(np.argmax(pred, axis=0))
# y_pred
y_true_f = test_mask.reshape(test_mask.shape[0] * test_mask.shape[1], order='F')
y_pred_f = pred_arg.reshape(pred_arg.shape[0] * pred_arg.shape[1], order='F')
        intersection = float(np.sum(y_true_f * y_pred_f))
dice.append((2. * intersection) / (np.sum(y_true_f) + np.sum(y_pred_f)))
sen.append(intersection / np.sum(y_true_f))
        intersection0 = float(np.sum((1 - y_true_f) * (1 - y_pred_f)))
spe.append(intersection0 / np.sum(1 - y_true_f))
acc.append(accuracy_score(y_true_f, y_pred_f))
jac_score.append(intersection / (np.sum(y_true_f) + np.sum(y_pred_f) - intersection))
return np.array(acc), np.array(dice), np.array(sen), np.array(spe), np.array(jac_score)
def cam(model, inputs):
with torch.no_grad():
preds = model(inputs)
class_idx = preds.argmax(dim=1)
model_layers = model.get_layers()
params = list(model.parameters())
weights = np.squeeze(params[-2].data.cpu().numpy())
bz, nc, h, w = model_layers[0].shape
output_cam = []
for idx in range(bz):
cam = np.zeros((h, w), dtype=np.float32)
for i, weight in enumerate(weights[class_idx[idx]]):
cam += weight * model_layers[0][idx][i].data.cpu().numpy()
cam_img = np.maximum(cam, 0)
            cam_img = cam_img / np.max(cam_img)  # normalize the ReLU'd activation map
output_cam.append(cam_img)
return output_cam
model_urls = {'MaskCN': 'models/MaskCN/MaskCN.pth', 'EnhancedSN': 'models/DR_EnhanceSN/CoarseSN.pth'}
INPUT_CHANNEL = 4
NUM_CLASSES_SEG = 2
NUM_CLASSES_CLS = 3
cudnn.enabled = True
############# Load mask-guided classification network and pretrained weights
MaskCN = Xception_dilation(num_classes=NUM_CLASSES_CLS, input_channel=INPUT_CHANNEL)
MaskCN.cuda()
pretrained_dict = torch.load(model_urls['MaskCN'])
MaskCN.load_state_dict(pretrained_dict)
MaskCN.eval()
############# Load enhanced segmentation network and pretrained weights
EnhanceSN = deeplabv3plus_en(num_classes=NUM_CLASSES_SEG)
EnhanceSN.cuda()
# EnhanceSN = amp.initialize(EnhanceSN, opt_level="O1")
EnhanceSN = torch.nn.DataParallel(EnhanceSN)
pretrained_dict = torch.load(model_urls['EnhancedSN'])
EnhanceSN.load_state_dict(pretrained_dict)
EnhanceSN.eval()
############# Load testing data
data_test_root = 'dataset/seg_data/ISIC-2017_Testing_Data/'
data_test_root_mask = 'Coarse_masks/Testing_EnhancedSN/'
data_test_list = 'dataset/ISIC/Testing_seg.txt'
testloader = data.DataLoader(MyTestDataSet_seg(data_test_root, data_test_list, root_path_coarsemask=data_test_root_mask), batch_size=1, shuffle=False,
num_workers=8,
pin_memory=True)
############# Start the testing
[tacc, tdice, tsen, tspe, tjac_score] = val_mode_seg(testloader, MaskCN, EnhanceSN)
line_test = "test: tacc=%f, tdice=%f, tsensitivity=%f, tspecifity=%f, tjac=%f \n" % \
(np.nanmean(tacc), np.nanmean(tdice), np.nanmean(tsen), np.nanmean(tspe),
np.nanmean(tjac_score))
print(line_test)
```
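`val_pred` above averages predictions over three rotations and two flips and undoes each transform before summing. A stripped-down sketch of that test-time-augmentation pattern is shown below; `model` is a placeholder for any network mapping (B, C, H, W) to per-pixel logits.
```python
# Stripped-down TTA sketch mirroring val_pred; `model` is a placeholder.
import torch

def tta_predict(model, image):
    views = [
        (image, lambda p: p),
        (torch.rot90(image, 1, [2, 3]), lambda p: torch.rot90(p, 3, [2, 3])),
        (torch.rot90(image, 2, [2, 3]), lambda p: torch.rot90(p, 2, [2, 3])),
        (torch.rot90(image, 3, [2, 3]), lambda p: torch.rot90(p, 1, [2, 3])),
        (torch.flip(image, [-1]), lambda p: torch.flip(p, [-1])),
        (torch.flip(image, [-2]), lambda p: torch.flip(p, [-2])),
    ]
    with torch.no_grad():
        # Run each transformed view, undo the transform on the prediction, sum.
        return sum(undo(model(view)) for view, undo in views)
```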
#### File: MB-DCNN/net/models.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import net.xception as xception
class SeparableConv2d(nn.Module):
def __init__(self,in_channels,out_channels,kernel_size=1,stride=1,padding=0,dilation=1,bias=False):
super(SeparableConv2d,self).__init__()
self.conv1 = nn.Conv2d(in_channels,in_channels,kernel_size,stride,padding,dilation,groups=in_channels,bias=bias)
self.pointwise = nn.Conv2d(in_channels,out_channels,1,1,0,1,1,bias=bias)
def forward(self,x):
x = self.conv1(x)
x = self.pointwise(x)
return x
class Block(nn.Module):
def __init__(self,in_filters,out_filters,reps,strides=1,start_with_relu=True,grow_first=True):
super(Block, self).__init__()
if out_filters != in_filters or strides!=1:
self.skip = nn.Conv2d(in_filters,out_filters,1,stride=strides, bias=False)
self.skipbn = nn.BatchNorm2d(out_filters)
else:
self.skip=None
rep=[]
filters=in_filters
if grow_first:
rep.append(nn.ReLU(inplace=True))
rep.append(SeparableConv2d(in_filters,out_filters,3,stride=1,padding=1,bias=False))
rep.append(nn.BatchNorm2d(out_filters))
filters = out_filters
for i in range(reps-1):
rep.append(nn.ReLU(inplace=True))
rep.append(SeparableConv2d(filters,filters,3,stride=1,padding=1,bias=False))
rep.append(nn.BatchNorm2d(filters))
if not grow_first:
rep.append(nn.ReLU(inplace=True))
rep.append(SeparableConv2d(in_filters,out_filters,3,stride=1,padding=1,bias=False))
rep.append(nn.BatchNorm2d(out_filters))
if not start_with_relu:
rep = rep[1:]
else:
rep[0] = nn.ReLU(inplace=True)
if strides != 1:
rep.append(nn.MaxPool2d(3,strides,1))
self.rep = nn.Sequential(*rep)
def forward(self,inp):
x = self.rep(inp)
if self.skip is not None:
skip = self.skip(inp)
skip = self.skipbn(skip)
else:
skip = inp
x+=skip
return x
class ASPP(nn.Module):
def __init__(self, dim_in, dim_out, rate=1, bn_mom=0.1):
super(ASPP, self).__init__()
self.branch1 = nn.Sequential(
nn.Conv2d(dim_in, dim_out, 1, 1, padding=0, dilation=rate, bias=True),
nn.BatchNorm2d(dim_out),
nn.ReLU(inplace=True),
)
self.branch2 = nn.Sequential(
nn.Conv2d(dim_in, dim_out, 3, 1, padding=6 * rate, dilation=6 * rate, bias=True),
nn.BatchNorm2d(dim_out),
nn.ReLU(inplace=True),
)
self.branch3 = nn.Sequential(
nn.Conv2d(dim_in, dim_out, 3, 1, padding=12 * rate, dilation=12 * rate, bias=True),
nn.BatchNorm2d(dim_out),
nn.ReLU(inplace=True),
)
self.branch4 = nn.Sequential(
nn.Conv2d(dim_in, dim_out, 3, 1, padding=18 * rate, dilation=18 * rate, bias=True),
nn.BatchNorm2d(dim_out),
nn.ReLU(inplace=True),
)
self.branch5_conv = nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=True)
self.branch5_bn = nn.BatchNorm2d(dim_out)
self.branch5_relu = nn.ReLU(inplace=True)
self.conv_cat = nn.Sequential(
nn.Conv2d(dim_out * 5, dim_out, 1, 1, padding=0, bias=True),
nn.BatchNorm2d(dim_out),
nn.ReLU(inplace=True),
)
def forward(self, x):
[b, c, row, col] = x.size()
conv1x1 = self.branch1(x)
conv3x3_1 = self.branch2(x)
conv3x3_2 = self.branch3(x)
conv3x3_3 = self.branch4(x)
global_feature = torch.mean(x, 2, True)
global_feature = torch.mean(global_feature, 3, True)
global_feature = self.branch5_conv(global_feature)
global_feature = self.branch5_bn(global_feature)
global_feature = self.branch5_relu(global_feature)
global_feature = F.interpolate(global_feature, (row, col), None, 'bilinear', True)
feature_cat = torch.cat([conv1x1, conv3x3_1, conv3x3_2, conv3x3_3, global_feature], dim=1)
result = self.conv_cat(feature_cat)
return result
class deeplabv3plus(nn.Module):
def __init__(self, num_classes=None):
super(deeplabv3plus, self).__init__()
self.MODEL_NUM_CLASSES = num_classes
self.backbone = None
self.backbone_layers = None
self.aspp = ASPP(dim_in=2048, dim_out=256, rate=16//16, bn_mom = 0.99)
self.dropout1 = nn.Dropout(0.5)
self.upsample_sub = nn.UpsamplingBilinear2d(scale_factor=16//4)
self.upsample4 = nn.UpsamplingBilinear2d(scale_factor=4)
self.shortcut_conv = nn.Sequential(nn.Conv2d(256, 48, 1, 1, padding=1//2, bias=True),
nn.BatchNorm2d(48),
nn.ReLU(inplace=True),
)
self.cat_conv = nn.Sequential(
nn.Conv2d(256+48, 256, 3, 1, padding=1, bias=True),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Dropout(0.5),
nn.Conv2d(256, 256, 3, 1, padding=1, bias=True),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Dropout(0.1),
)
self.cls_conv = nn.Conv2d(256, self.MODEL_NUM_CLASSES, 1, 1, padding=0)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
self.backbone = xception.Xception(os=16)
self.backbone_layers = self.backbone.get_layers()
def forward(self, x):
x_bottom = self.backbone(x)
layers = self.backbone.get_layers()
feature_aspp = self.aspp(layers[-1])
feature_aspp = self.dropout1(feature_aspp)
feature_aspp = self.upsample_sub(feature_aspp)
feature_shallow = self.shortcut_conv(layers[0])
feature_cat = torch.cat([feature_aspp,feature_shallow],1)
result = self.cat_conv(feature_cat)
result = self.cls_conv(result)
result = self.upsample4(result)
return result
class deeplabv3plus_en(nn.Module):
def __init__(self, num_classes=None):
super(deeplabv3plus_en, self).__init__()
self.MODEL_NUM_CLASSES = num_classes
self.backbone = None
self.backbone_layers = None
self.aspp = ASPP(dim_in=2048, dim_out=256, rate=16//16, bn_mom = 0.99)
self.dropout1 = nn.Dropout(0.5)
self.cam_conv = nn.Sequential(nn.Conv2d(256+1, 256, 1, 1, padding=1//2, bias=True),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
)
self.upsample_sub = nn.UpsamplingBilinear2d(scale_factor=16//4)
self.upsample4 = nn.UpsamplingBilinear2d(scale_factor=4)
self.shortcut_conv = nn.Sequential(nn.Conv2d(256, 48, 1, 1, padding=1//2, bias=True),
nn.BatchNorm2d(48),
nn.ReLU(inplace=True),
)
self.cat_conv = nn.Sequential(
nn.Conv2d(256+48, 256, 3, 1, padding=1, bias=True),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Dropout(0.5),
nn.Conv2d(256, 256, 3, 1, padding=1, bias=True),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Dropout(0.1),
)
self.cls_conv = nn.Conv2d(256, self.MODEL_NUM_CLASSES, 1, 1, padding=0)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
self.backbone = xception.Xception(os=16)
self.backbone_layers = self.backbone.get_layers()
def forward(self, x, cla_cam):
x_bottom = self.backbone(x)
layers = self.backbone.get_layers()
feature_aspp = self.aspp(layers[-1])
feature_aspp = self.dropout1(feature_aspp)
feature_cat0 = torch.cat([feature_aspp, cla_cam], 1)
feature_cam = self.cam_conv(feature_cat0)
feature_cam = self.upsample_sub(feature_cam)
feature_shallow = self.shortcut_conv(layers[0])
feature_cat1 = torch.cat([feature_cam, feature_shallow], 1)
result = self.cat_conv(feature_cat1)
result = self.cls_conv(result)
result = self.upsample4(result)
return result
class Xception_dilation(nn.Module):
def __init__(self, input_channel=None, num_classes=None):
super(Xception_dilation, self).__init__()
self.num_classes = num_classes
self.conv1 = nn.Conv2d(input_channel, 32, 3, 2, 0, bias=False)
self.bn1 = nn.BatchNorm2d(32)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(32, 64, 3, bias=False)
self.bn2 = nn.BatchNorm2d(64)
self.relu2 = nn.ReLU(inplace=True)
# do relu here
self.block1 = Block(64, 128, 2, 2, start_with_relu=False, grow_first=True)
self.block2 = Block(128, 256, 2, 2, start_with_relu=True, grow_first=True)
self.block3 = Block(256, 728, 2, 2, start_with_relu=True, grow_first=True)
self.block4 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
self.block5 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
self.block6 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
self.block7 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
self.block8 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
self.block9 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
self.block10 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
self.block11 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
self.residual = nn.Sequential(
nn.Conv2d(728, 1024, 1, 1, dilation=2, bias=False),
nn.BatchNorm2d(1024),
)
self.SepConv1 = nn.Sequential(
nn.ReLU(inplace=False),
SeparableConv2d(728, 728, 3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(728)
)
self.SepConv2 = nn.Sequential(
nn.ReLU(inplace=False),
SeparableConv2d(728, 1024, 3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(1024)
)
self.SepConv3 = nn.Sequential(
SeparableConv2d(1024, 1536, 3, dilation=2, stride=1, padding=2, bias=False),
nn.BatchNorm2d(1536),
nn.ReLU(inplace=False)
)
self.SepConv4 = nn.Sequential(
SeparableConv2d(1536, 2048, 3, dilation=2, stride=1, padding=2, bias=False),
nn.BatchNorm2d(2048),
nn.ReLU(inplace=False)
)
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.cls = nn.Linear(2048, num_classes)
def get_layers(self):
return self.layers
def forward(self, x):
self.layers = []
x = self.conv1(x)
x = self.bn1(x)
x = self.relu1(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu2(x)
x = self.block1(x)
x = self.block2(x)
x = self.block3(x)
x = self.block4(x)
x = self.block5(x)
x = self.block6(x)
x = self.block7(x)
x = self.block8(x)
x = self.block9(x)
x = self.block10(x)
x = self.block11(x)
res = self.residual(x)
x = self.SepConv1(x)
x = self.SepConv2(x)
x += res
x = self.SepConv3(x)
x = self.SepConv4(x)
self.layers.append(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.cls(x)
self.layers.append(x)
return x
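# Hypothetical usage sketch for the dilated Xception classifier above; the 299x299
# input size is only an assumption:
#   net = Xception_dilation(input_channel=3, num_classes=2)
#   scores = net(torch.randn(1, 3, 299, 299))  # -> torch.Size([1, 2])
#   feature_map, logits = net.get_layers()     # the two tensors saved during forward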
``` |
{
"source": "jianpingbadao/leetcode",
"score": 3
} |
#### File: problems/first-bad-version/first_bad_version.py
```python
class Solution:
    def firstBadVersion(self, n):
        """
        :type n: int
        :rtype: int
        """
        # Minimal completion of the stub: standard binary search over versions.
        # isBadVersion(version) is the API provided by the problem statement.
        left, right = 1, n
        while left < right:
            mid = (left + right) // 2
            if isBadVersion(mid):
                right = mid
            else:
                left = mid + 1
        return left
```
#### File: problems/populating-next-right-pointers-in-each-node/populating_next_right_pointers_in_each_node.py
```python
class Solution:
def connect(self, root: 'Node') -> 'Node':
``` |
{
"source": "JianpingChen/azure-sdk-for-python",
"score": 2
} |
#### File: scripts/devops_tasks/build_conda_artifacts.py
```python
import argparse
import sys
import os
import shutil
import re
import yaml
from common_tasks import process_glob_string, run_check_call, str_to_bool, parse_setup
from subprocess import check_call
from distutils.dir_util import copy_tree
VERSION_REGEX = re.compile(r"\s*AZURESDK_CONDA_VERSION\s*:\s*[\'](.*)[\']\s*")
SUMMARY_TEMPLATE = " - Generated from {}."
NAMESPACE_EXTENSION_TEMPLATE = """__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: str
"""
MANIFEST_TEMPLATE = """include *.md
{namespace_includes}
recursive-include tests *.py
recursive-include samples *.py *.md
"""
SETUP_CFG = """
[bdist_wheel]
universal=1
"""
CONDA_PKG_SETUP_TEMPLATE = """from setuptools import find_packages, setup
setup(
name=\"{conda_package_name}\",
version=\"{version}\",
description='Microsoft Azure SDK For Python {service} Combined Conda Library',
long_description_content_type='text/markdown',
license='MIT License',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/{service}/',
classifiers=[
"Development Status :: 5 - Production/Stable",
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
packages=find_packages(),
install_requires=[]
)
"""
def create_package(pkg_directory, output_directory):
check_call(
[
sys.executable,
"setup.py",
"sdist",
"--format",
"zip",
"-d",
output_directory,
],
cwd=pkg_directory,
)
def create_namespace_extension(target_directory):
with open(os.path.join(target_directory, "__init__.py"), "w") as f:
f.write(NAMESPACE_EXTENSION_TEMPLATE)
def get_pkgs_from_build_directory(build_directory, artifact_name):
return [
os.path.join(build_directory, p)
for p in os.listdir(build_directory)
if p != artifact_name
]
def create_sdist_skeleton(build_directory, artifact_name, common_root):
sdist_directory = os.path.join(build_directory, artifact_name)
if os.path.exists(sdist_directory):
shutil.rmtree(sdist_directory)
os.makedirs(sdist_directory)
namespaces = common_root.split("/")
# after the loop below, ns_dir will be the target destination for copying from pkgs_for_consumption
ns_dir = sdist_directory
for ns in namespaces:
ns_dir = os.path.join(ns_dir, ns)
if not os.path.exists(ns_dir):
os.mkdir(ns_dir)
create_namespace_extension(ns_dir)
# get all the directories in the build folder, we will pull in all of them
pkgs_for_consumption = get_pkgs_from_build_directory(build_directory, artifact_name)
print("I see the following packages in the build directory")
print(pkgs_for_consumption)
for pkg in pkgs_for_consumption:
pkg_till_common_root = os.path.join(pkg, common_root)
if os.path.exists(pkg_till_common_root):
directories_for_copy = [
file
for file in os.listdir(pkg_till_common_root)
if os.path.isdir(os.path.join(pkg_till_common_root, file))
]
for directory in directories_for_copy:
src = os.path.join(pkg_till_common_root, directory)
dest = os.path.join(ns_dir, directory)
shutil.copytree(src, dest)
def get_version_from_config(environment_config):
with open(os.path.abspath((environment_config)), "r") as f:
lines = f.readlines()
for line in lines:
result = VERSION_REGEX.match(line)
if result:
return result.group(1)
return "0.0.0"
def get_manifest_includes(common_root):
levels = common_root.split("/")
breadcrumbs = []
breadcrumb_string = ""
for ns in levels:
breadcrumb_string += ns + "/"
breadcrumbs.append(breadcrumb_string + "__init__.py")
return breadcrumbs
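# Illustrative example (values assumed): get_manifest_includes("azure/storage")
# returns ["azure/__init__.py", "azure/storage/__init__.py"], which MANIFEST_TEMPLATE
# above turns into one "include" line per namespace __init__.py.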
def create_setup_files(
build_directory, common_root, artifact_name, service, meta_yaml, environment_config
):
sdist_directory = os.path.join(build_directory, artifact_name)
setup_location = os.path.join(sdist_directory, "setup.py")
manifest_location = os.path.join(sdist_directory, "MANIFEST.in")
cfg_location = os.path.join(sdist_directory, "setup.cfg")
setup_template = CONDA_PKG_SETUP_TEMPLATE.format(
conda_package_name=artifact_name,
version=get_version_from_config(environment_config),
service=service,
package_excludes="'azure', 'tests', '{}'".format(common_root.replace("/", ".")),
)
with open(setup_location, "w") as f:
f.write(setup_template)
manifest_template = MANIFEST_TEMPLATE.format(
namespace_includes="\n".join(
["include " + ns for ns in get_manifest_includes(common_root)]
)
)
with open(manifest_location, "w") as f:
f.write(manifest_template)
with open(cfg_location, "w") as f:
f.write(SETUP_CFG)
def create_combined_sdist(
output_directory,
build_directory,
artifact_name,
common_root,
service,
meta_yaml,
environment_config,
):
singular_dependency = (
len(get_pkgs_from_build_directory(build_directory, artifact_name)) == 0
)
if not singular_dependency:
create_sdist_skeleton(build_directory, artifact_name, common_root)
create_setup_files(
build_directory,
common_root,
artifact_name,
service,
meta_yaml,
environment_config,
)
sdist_location = os.path.join(build_directory, artifact_name)
output_sdist_location = os.path.join(output_directory, "sdist", artifact_name)
create_package(sdist_location, output_sdist_location)
output_location = os.path.join(
output_sdist_location, os.listdir(output_sdist_location)[0]
)
print(
"Generated Sdist for artifact {} is present at {}".format(
artifact_name, output_location
)
)
return output_location
def get_summary(ci_yml, artifact_name):
pkg_list = []
with open(ci_yml, "r") as f:
data = f.read()
config = yaml.safe_load(data)
conda_artifact = [
conda_artifact
for conda_artifact in config["extends"]["parameters"]["CondaArtifacts"]
if conda_artifact["name"] == artifact_name
]
if conda_artifact:
dependencies = conda_artifact[0]["checkout"]
for dep in dependencies:
pkg_list.append("{}=={}".format(dep["package"], dep["version"]))
return SUMMARY_TEMPLATE.format(", ".join(pkg_list))
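# Illustrative result (package names and versions are placeholders): for an artifact
# whose "checkout" list contains azure-storage-blob 12.8.0 and azure-storage-queue 12.1.5,
# this returns " - Generated from azure-storage-blob==12.8.0, azure-storage-queue==12.1.5."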
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Build a Conda Package, given a properly formatted build directory, and input configuration. This script assumes that the build directory has been set up w/ the necessary sdists in each location."
)
parser.add_argument(
"-d",
"--distribution-directory",
dest="distribution_directory",
help="The output conda sdist will be dropped into this directory under a folder named the same as argument artifact_name.",
required=True,
)
parser.add_argument(
"-b",
"--build-directory",
dest="build_directory",
help="The 'working' directory. This top level path will contain all the necessary sdist code from the appropriate historical tag. EG: <build-directory>/azure-storage-blob, <build-directory/azure-storage-queue",
required=True,
)
parser.add_argument(
"-m",
"--meta-yml",
dest="meta_yml",
help="The path to the meta yaml that will be used to generate this conda distribution.",
required=True,
)
parser.add_argument(
"-r",
"--common-root",
dest="common_root",
help="The common root namespace. For instance, when outputting the artifact 'azure-storage', the common root will be azure/storage.",
required=False,
)
parser.add_argument(
"-n",
"--artifact-name",
dest="artifact_name",
help="The name of the output conda package.",
required=True,
)
parser.add_argument(
"-s",
"--service-name",
dest="service",
help="The name of the service this package is being generated for.",
required=True,
)
parser.add_argument(
"-e",
"--environment_config",
dest="environment_config",
help="The location of the yml config file used to create the conda environments. This file has necessary common configuration information within.",
required=True,
)
parser.add_argument(
"-c",
"--ci_yml",
dest="ci_yml",
help="The location of the ci.yml that is used to define our conda artifacts. Used when to easily grab summary information.",
required=True,
)
args = parser.parse_args()
output_source_location = create_combined_sdist(
args.distribution_directory,
args.build_directory,
args.artifact_name,
args.common_root,
args.service,
args.meta_yml,
args.environment_config,
)
summary = get_summary(args.ci_yml, args.artifact_name)
if output_source_location:
print(
"##vso[task.setvariable variable={}]{}".format(
args.service.upper() + "_SOURCE_DISTRIBUTION", output_source_location
)
)
if summary:
print(
"##vso[task.setvariable variable={}]{}".format(
args.service.upper() + "_SUMMARY", summary
)
)
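# Example invocation (paths and names are illustrative only):
#   python build_conda_artifacts.py -d /tmp/dist -b /tmp/build -m sdk/storage/meta.yml \
#       -r azure/storage -n azure-storage -s storage \
#       -e eng/conda_env.yml -c sdk/storage/ci.yml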
```
#### File: scripts/devops_tasks/test_regression.py
```python
import argparse
import glob
import sys
import os
import logging
from common_tasks import (
process_glob_string,
parse_setup,
run_check_call,
parse_require,
install_package_from_whl,
filter_dev_requirements,
find_packages_missing_on_pypi,
find_whl,
find_tools_packages,
get_installed_packages,
extend_dev_requirements,
str_to_bool
)
from git_helper import get_release_tag, git_checkout_tag, git_checkout_branch, clone_repo
AZURE_GLOB_STRING = "azure*"
root_dir = os.path.abspath(os.path.join(os.path.abspath(__file__), "..", "..", ".."))
test_tools_req_file = os.path.abspath(os.path.join(root_dir, "eng", "test_tools.txt"))
GIT_REPO_NAME = "azure-sdk-for-python"
GIT_MASTER_BRANCH = "main"
VENV_NAME = "regressionenv"
AZURE_SDK_FOR_PYTHON_GIT_URL = "https://github.com/Azure/azure-sdk-for-python.git"
TEMP_FOLDER_NAME = ".tmp_code_path"
OLDEST_EXTENSION_PKGS = ['msrestazure','adal']
logging.getLogger().setLevel(logging.INFO)
class CustomVirtualEnv:
def __init__(self, path):
self.path = os.path.join(path, VENV_NAME)
def create(self):
logging.info("Creating virtual environment [{}]".format(self.path))
run_check_call([sys.executable, "-m", "venv", "ENV_DIR", self.path], root_dir)
self.python_executable = self._find_python_executable()
self.lib_paths = self._find_lib_paths()
def clear_venv(self):
# clear any previously installed packages
run_check_call(
[sys.executable, "-m", "venv", "--clear", "ENV_DIR", self.path], root_dir
)
def _find_python_executable(self):
paths = glob.glob(os.path.join(self.path, "*", "python")) + glob.glob(os.path.join(self.path, "*", "python.exe"))
if not paths:
logging.error("Failed to find path to python executable in virtual env:{}".format(self.path))
sys.exit(1)
return paths[0]
def _find_lib_paths(self):
paths = glob.glob(os.path.join(self.path, "*", "site-packages")) + glob.glob(os.path.join(self.path, "lib", "*", "site-packages"))
if not paths:
logging.error("Failed to find site-packages directory in virtual env:{}".format(self.path))
sys.exit(1)
return paths
class RegressionContext:
def __init__(self, whl_dir, tmp_path, is_latest, pytest_mark_arg):
self.whl_directory = whl_dir
self.temp_path = tmp_path
self.is_latest_depend_test = is_latest
self.venv = CustomVirtualEnv(self.temp_path)
self.pytest_mark_arg = pytest_mark_arg
self.venv.create()
def init_for_pkg(self, pkg_root):
# This method is called each time context is switched to test regression for new package
self.package_root_path = pkg_root
self.package_name, self.pkg_version, _, _ = parse_setup(self.package_root_path)
def initialize(self, dep_pkg_root_path):
self.dep_pkg_root_path = dep_pkg_root_path
self.venv.clear_venv()
def deinitialize(self, dep_pkg_root_path):
# This function can be used to reset code repo to master branch
# Revert to master branch
run_check_call(["git", "clean", "-fd"], dep_pkg_root_path)
run_check_call(["git", "checkout", GIT_MASTER_BRANCH], dep_pkg_root_path)
class RegressionTest:
def __init__(self, context, package_dependency_dict):
self.context = context
self.package_dependency_dict = package_dependency_dict
def run(self):
pkg_name = self.context.package_name
if pkg_name in self.package_dependency_dict:
logging.info("Running regression test for {}".format(pkg_name))
self.whl_path = os.path.join(self.context.whl_directory, find_whl(pkg_name, self.context.pkg_version, self.context.whl_directory))
if find_packages_missing_on_pypi(self.whl_path):
logging.error("Required packages are not available on PyPI. Skipping regression test")
exit(0)
dep_packages = self.package_dependency_dict[pkg_name]
logging.info("Dependent packages for [{0}]: {1}".format(pkg_name, dep_packages))
for dep_pkg_path in dep_packages:
dep_pkg_name, _, _, _ = parse_setup(dep_pkg_path)
logging.info(
"Starting regression test of {0} against released {1}".format(
pkg_name, dep_pkg_name
)
)
self._run_test(dep_pkg_path)
logging.info(
"Completed regression test of {0} against released {1}".format(
pkg_name, dep_pkg_name
)
)
logging.info("Completed regression test for {}".format(pkg_name))
else:
logging.info(
"Package {} is not added as required by any package".format(pkg_name)
)
def _run_test(self, dep_pkg_path):
self.context.initialize(dep_pkg_path)
# find GA released tags for package and run test using that code base
dep_pkg_name, version, _, _ = parse_setup(dep_pkg_path)
release_tag = get_release_tag(dep_pkg_name, self.context.is_latest_depend_test)
if not release_tag:
logging.error("Release tag is not available. Skipping package {} from test".format(dep_pkg_name))
return
test_branch_name = "{0}_tests".format(release_tag)
try:
git_checkout_branch(test_branch_name, dep_pkg_path)
except:
# If git checkout failed for "tests" branch then checkout branch with release tag
logging.info("Failed to checkout branch {}. Checking out release tagged git repo".format(test_branch_name))
git_checkout_tag(release_tag, dep_pkg_path)
try:
# install packages required to run tests
run_check_call(
[
self.context.venv.python_executable,
"-m",
"pip",
"install",
"-r",
test_tools_req_file,
],
dep_pkg_path
)
# Install pre-built whl for current package.
install_package_from_whl(
self.whl_path,
self.context.temp_path,
self.context.venv.python_executable,
)
# install dependent package from source
self._install_packages(dep_pkg_path, self.context.package_name)
# try install of pre-built whl for current package again. if unnecessary, pip does nothing.
# we do this to ensure that the correct development version is installed. on non-dev builds
# this step will just skip through.
install_package_from_whl(
self.whl_path,
self.context.temp_path,
self.context.venv.python_executable,
)
self._execute_test(dep_pkg_path)
finally:
self.context.deinitialize(dep_pkg_path)
def _execute_test(self, dep_pkg_path):
# Ensure correct version of package is installed
if not self._is_package_installed(self.context.package_name, self.context.pkg_version):
logging.error("Incorrect version of package {0} is installed. Expected version {1}".format(self.context.package_name, self.context.pkg_version))
sys.exit(1)
logging.info("Running test for {}".format(dep_pkg_path))
commands = [
self.context.venv.python_executable,
"-m",
"pytest",
"--verbose",
"--durations",
"10",
]
# add any pytest mark arg if present. for e.g. 'not cosmosEmulator'
if self.context.pytest_mark_arg:
commands.extend(["-m", self.context.pytest_mark_arg])
test_dir = self._get_package_test_dir(dep_pkg_path)
if test_dir:
commands.append(test_dir)
run_check_call(commands, self.context.temp_path)
else:
logging.info("Test directory is not found in package root. Skipping {} from regression test.".format(self.context.package_name))
def _get_package_test_dir(self, pkg_root_path):
# Returns path to test or tests folder within package root directory.
paths = glob.glob(os.path.join(pkg_root_path, "test")) + glob.glob(os.path.join(pkg_root_path, "tests"))
if not paths:
# We will run into this situation only if both the test and tests folders are missing in the repo.
# For now, run tests from the package root itself to keep it the same as regular CI in such cases
logging.error("'test' folder is not found in {}".format(pkg_root_path))
return
return paths[0]
def _install_packages(self, dependent_pkg_path, pkg_to_exclude):
python_executable = self.context.venv.python_executable
working_dir = self.context.package_root_path
temp_dir = self.context.temp_path
list_to_exclude = [pkg_to_exclude, 'azure-sdk-tools', 'azure-devtools' ]
installed_pkgs = [p.split('==')[0] for p in get_installed_packages(self.context.venv.lib_paths) if p.startswith('azure-')]
logging.info("Installed azure sdk packages:{}".format(installed_pkgs))
# Do not exclude packages in the tools directory, so those tools packages will be reinstalled from the repo branch we are testing
root_path = os.path.abspath(os.path.join(dependent_pkg_path, "..", "..", ".."))
tools_packages = find_tools_packages(root_path)
installed_pkgs = [req for req in installed_pkgs if req not in tools_packages]
list_to_exclude.extend(installed_pkgs)
# install dev requirements, but skip any already installed package which is being tested or is present in dev requirements
filtered_dev_req_path = filter_dev_requirements(
dependent_pkg_path, list_to_exclude, dependent_pkg_path
)
# early versions of azure-sdk-tools had an unpinned version of azure-mgmt packages.
# that unpinned version hits a code path in azure-sdk-tools that triggers this error.
if filtered_dev_req_path and self.context.is_latest_depend_test == False:
logging.info(
"Extending dev requirements with {}".format(OLDEST_EXTENSION_PKGS)
)
extend_dev_requirements(
filtered_dev_req_path, OLDEST_EXTENSION_PKGS
)
else:
logging.info("Not extending dev requirements {} {}".format(filtered_dev_req_path, self.context.is_latest_depend_test))
if filtered_dev_req_path:
logging.info("Extending dev requirement to include azure-sdk-tools")
extend_dev_requirements(
filtered_dev_req_path, [
"../../../tools/azure-sdk-tools",
"../../../tools/azure-devtools"
]
)
logging.info(
"Installing filtered dev requirements from {}".format(filtered_dev_req_path)
)
run_check_call(
[python_executable, "-m", "pip", "install", "-r", filtered_dev_req_path],
dependent_pkg_path,
)
else:
logging.info("dev requirements is not found to install")
# install dependent package which is being verified
run_check_call(
[python_executable, "-m", "pip", "install", dependent_pkg_path], temp_dir
)
def _is_package_installed(self, package, version):
# find env root and package locations
venv_root = self.context.venv.path
site_packages = self.context.venv.lib_paths
logging.info("Searching for packages in :{}".format(site_packages))
installed_pkgs = get_installed_packages(site_packages)
logging.info("Installed packages: {}".format(installed_pkgs))
# Verify installed package version
# Search for exact version or alpha build version of current version.
pkg_search_string = "{0}=={1}".format(package, version)
alpha_build_search_string = "{0}=={1}a".format(package, version)
return any(p == pkg_search_string or p.startswith(alpha_build_search_string) for p in installed_pkgs)
# This method identifies the package dependency map for all packages in the azure sdk
def find_package_dependency(glob_string, repo_root_dir):
package_paths = process_glob_string(
glob_string, repo_root_dir, "", "Regression"
)
dependency_map = {}
for pkg_root in package_paths:
_, _, _, requires = parse_setup(pkg_root)
# Get a list of package names from install requires
required_pkgs = [parse_require(r)[0] for r in requires]
required_pkgs = [p for p in required_pkgs if p.startswith("azure")]
for req_pkg in required_pkgs:
if req_pkg not in dependency_map:
dependency_map[req_pkg] = []
dependency_map[req_pkg].append(pkg_root)
logging.info("Package dependency: {}".format(dependency_map))
return dependency_map
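# Illustrative shape of the returned map (paths and names are assumptions):
#   {"azure-core": ["<repo>/sdk/storage/azure-storage-blob", ...],
#    "azure-mgmt-core": ["<repo>/sdk/resources/azure-mgmt-resource", ...]}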
# This is the main function: it identifies packages to test, finds the dependency matrix and triggers the tests
def run_main(args):
temp_dir = ""
if args.temp_dir:
temp_dir = args.temp_dir
else:
temp_dir = os.path.abspath(os.path.join(root_dir, "..", TEMP_FOLDER_NAME))
code_repo_root = os.path.join(temp_dir, GIT_REPO_NAME)
# Make sure the root_dir where the script is running is not the same as the code repo, which will be reverted to an old released branch to run tests
if root_dir == code_repo_root:
logging.error(
"Invalid path to clone github code repo. Temporary path can not be same as current source root directory"
)
exit(1)
# Make sure temp path exists
if not os.path.exists(temp_dir):
os.mkdir(temp_dir)
if args.service:
service_dir = os.path.join("sdk", args.service)
target_dir = os.path.join(root_dir, service_dir)
else:
target_dir = root_dir
targeted_packages = process_glob_string(args.glob_string, target_dir, "", "Regression")
if len(targeted_packages) == 0:
exit(0)
# clone code repo only if it doesn't exist
if not os.path.exists(code_repo_root):
clone_repo(temp_dir, AZURE_SDK_FOR_PYTHON_GIT_URL)
else:
logging.info(
"Path {} already exists. Skipping step to clone github repo".format(
code_repo_root
)
)
# find package dependency map for azure sdk
pkg_dependency = find_package_dependency(AZURE_GLOB_STRING, code_repo_root)
# Create regression test context. One context object will be reused for all packages
context = RegressionContext(
args.whl_dir, temp_dir, str_to_bool(args.verify_latest), args.mark_arg
)
for pkg_path in targeted_packages:
context.init_for_pkg(pkg_path)
RegressionTest(context, pkg_dependency).run()
logging.info("Regression test is completed successfully")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Run regression test for a package against released dependent packages"
)
parser.add_argument(
"glob_string",
nargs="?",
help=(
"A comma separated list of glob strings that will target the top level directories that contain packages."
'Examples: All = "azure*", Single = "azure-keyvault", Targeted Multiple = "azure-keyvault,azure-mgmt-resource"'
),
)
parser.add_argument(
"--service",
help=(
"Name of service directory (under sdk/) to test."
"Example: --service applicationinsights"
),
)
parser.add_argument(
"--whl-dir",
required=True,
help=("Directory in which whl is pre built for all eligible package"),
)
parser.add_argument(
"--verify-latest",
default=True,
help=(
"Set this parameter to true to verify regression against latest released version."
"Default behavior is to test regression for oldest released version of dependent packages"
),
)
parser.add_argument(
"--temp-dir",
help=(
"Temporary path to clone github repo of azure-sdk-for-python to run tests. Any changes in this path will be overwritten"
),
)
parser.add_argument(
"--mark-arg",
dest="mark_arg",
help=(
'The complete argument for `pytest -m "<input>"`. This can be used to exclude or include specific pytest markers.'
' Example: --mark_arg="not cosmosEmulator"'
),
)
args = parser.parse_args()
run_main(args)
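# Example invocation (values are illustrative only):
#   python test_regression.py azure-storage-blob --service storage \
#       --whl-dir /tmp/whl --temp-dir /tmp/.regression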
```
#### File: v2020_06_01/models/_models_py3.py
```python
import datetime
from typing import Any, Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._web_site_management_client_enums import *
class AbnormalTimePeriod(msrest.serialization.Model):
"""Class representing Abnormal Time Period identified in diagnosis.
:param start_time: Start time of the downtime.
:type start_time: ~datetime.datetime
:param end_time: End time of the downtime.
:type end_time: ~datetime.datetime
:param events: List of Possible Cause of downtime.
:type events: list[~azure.mgmt.web.v2020_06_01.models.DetectorAbnormalTimePeriod]
:param solutions: List of proposed solutions.
:type solutions: list[~azure.mgmt.web.v2020_06_01.models.Solution]
"""
_attribute_map = {
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'events': {'key': 'events', 'type': '[DetectorAbnormalTimePeriod]'},
'solutions': {'key': 'solutions', 'type': '[Solution]'},
}
def __init__(
self,
*,
start_time: Optional[datetime.datetime] = None,
end_time: Optional[datetime.datetime] = None,
events: Optional[List["DetectorAbnormalTimePeriod"]] = None,
solutions: Optional[List["Solution"]] = None,
**kwargs
):
super(AbnormalTimePeriod, self).__init__(**kwargs)
self.start_time = start_time
self.end_time = end_time
self.events = events
self.solutions = solutions
class Address(msrest.serialization.Model):
"""Address information for domain registration.
All required parameters must be populated in order to send to Azure.
:param address1: Required. First line of an Address.
:type address1: str
:param address2: The second line of the Address. Optional.
:type address2: str
:param city: Required. The city for the address.
:type city: str
:param country: Required. The country for the address.
:type country: str
:param postal_code: Required. The postal code for the address.
:type postal_code: str
:param state: Required. The state or province for the address.
:type state: str
"""
_validation = {
'address1': {'required': True},
'city': {'required': True},
'country': {'required': True},
'postal_code': {'required': True},
'state': {'required': True},
}
_attribute_map = {
'address1': {'key': 'address1', 'type': 'str'},
'address2': {'key': 'address2', 'type': 'str'},
'city': {'key': 'city', 'type': 'str'},
'country': {'key': 'country', 'type': 'str'},
'postal_code': {'key': 'postalCode', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
}
def __init__(
self,
*,
address1: str,
city: str,
country: str,
postal_code: str,
state: str,
address2: Optional[str] = None,
**kwargs
):
super(Address, self).__init__(**kwargs)
self.address1 = address1
self.address2 = address2
self.city = city
self.country = country
self.postal_code = postal_code
self.state = state
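# Hypothetical construction example (all values are placeholders):
#   addr = Address(address1="1 Example Way", city="Redmond", country="US",
#                  postal_code="98052", state="WA")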
class ProxyOnlyResource(msrest.serialization.Model):
"""Azure proxy only resource. This resource is not tracked by Azure Resource Manager.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(ProxyOnlyResource, self).__init__(**kwargs)
self.id = None
self.name = None
self.kind = kind
self.type = None
class AddressResponse(ProxyOnlyResource):
"""Describes main public IP address and any extra virtual IPs.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param service_ip_address: Main public virtual IP.
:type service_ip_address: str
:param internal_ip_address: Virtual Network internal IP address of the App Service Environment
if it is in internal load-balancing mode.
:type internal_ip_address: str
:param outbound_ip_addresses: IP addresses appearing on outbound connections.
:type outbound_ip_addresses: list[str]
:param vip_mappings: Additional virtual IPs.
:type vip_mappings: list[~azure.mgmt.web.v2020_06_01.models.VirtualIPMapping]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'service_ip_address': {'key': 'properties.serviceIpAddress', 'type': 'str'},
'internal_ip_address': {'key': 'properties.internalIpAddress', 'type': 'str'},
'outbound_ip_addresses': {'key': 'properties.outboundIpAddresses', 'type': '[str]'},
'vip_mappings': {'key': 'properties.vipMappings', 'type': '[VirtualIPMapping]'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
service_ip_address: Optional[str] = None,
internal_ip_address: Optional[str] = None,
outbound_ip_addresses: Optional[List[str]] = None,
vip_mappings: Optional[List["VirtualIPMapping"]] = None,
**kwargs
):
super(AddressResponse, self).__init__(kind=kind, **kwargs)
self.service_ip_address = service_ip_address
self.internal_ip_address = internal_ip_address
self.outbound_ip_addresses = outbound_ip_addresses
self.vip_mappings = vip_mappings
class AllowedAudiencesValidation(ProxyOnlyResource):
"""AllowedAudiencesValidation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param allowed_audiences:
:type allowed_audiences: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'allowed_audiences': {'key': 'properties.allowedAudiences', 'type': '[str]'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
allowed_audiences: Optional[List[str]] = None,
**kwargs
):
super(AllowedAudiencesValidation, self).__init__(kind=kind, **kwargs)
self.allowed_audiences = allowed_audiences
class AnalysisData(msrest.serialization.Model):
"""Class Representing Detector Evidence used for analysis.
:param source: Name of the Detector.
:type source: str
:param detector_definition: Detector Definition.
:type detector_definition: ~azure.mgmt.web.v2020_06_01.models.DetectorDefinition
:param metrics: Source Metrics.
:type metrics: list[~azure.mgmt.web.v2020_06_01.models.DiagnosticMetricSet]
:param data: Additional Source Data.
:type data: list[list[~azure.mgmt.web.v2020_06_01.models.NameValuePair]]
:param detector_meta_data: Detector Meta Data.
:type detector_meta_data: ~azure.mgmt.web.v2020_06_01.models.ResponseMetaData
"""
_attribute_map = {
'source': {'key': 'source', 'type': 'str'},
'detector_definition': {'key': 'detectorDefinition', 'type': 'DetectorDefinition'},
'metrics': {'key': 'metrics', 'type': '[DiagnosticMetricSet]'},
'data': {'key': 'data', 'type': '[[NameValuePair]]'},
'detector_meta_data': {'key': 'detectorMetaData', 'type': 'ResponseMetaData'},
}
def __init__(
self,
*,
source: Optional[str] = None,
detector_definition: Optional["DetectorDefinition"] = None,
metrics: Optional[List["DiagnosticMetricSet"]] = None,
data: Optional[List[List["NameValuePair"]]] = None,
detector_meta_data: Optional["ResponseMetaData"] = None,
**kwargs
):
super(AnalysisData, self).__init__(**kwargs)
self.source = source
self.detector_definition = detector_definition
self.metrics = metrics
self.data = data
self.detector_meta_data = detector_meta_data
class AnalysisDefinition(ProxyOnlyResource):
"""Definition of Analysis.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar description: Description of the Analysis.
:vartype description: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'description': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(AnalysisDefinition, self).__init__(kind=kind, **kwargs)
self.description = None
class ApiDefinitionInfo(msrest.serialization.Model):
"""Information about the formal API definition for the app.
:param url: The URL of the API definition.
:type url: str
"""
_attribute_map = {
'url': {'key': 'url', 'type': 'str'},
}
def __init__(
self,
*,
url: Optional[str] = None,
**kwargs
):
super(ApiDefinitionInfo, self).__init__(**kwargs)
self.url = url
class ApiManagementConfig(msrest.serialization.Model):
"""Azure API management (APIM) configuration linked to the app.
:param id: APIM-Api Identifier.
:type id: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
**kwargs
):
super(ApiManagementConfig, self).__init__(**kwargs)
self.id = id
class ApplicationLogsConfig(msrest.serialization.Model):
"""Application logs configuration.
:param file_system: Application logs to file system configuration.
:type file_system: ~azure.mgmt.web.v2020_06_01.models.FileSystemApplicationLogsConfig
:param azure_table_storage: Application logs to azure table storage configuration.
:type azure_table_storage:
~azure.mgmt.web.v2020_06_01.models.AzureTableStorageApplicationLogsConfig
:param azure_blob_storage: Application logs to blob storage configuration.
:type azure_blob_storage:
~azure.mgmt.web.v2020_06_01.models.AzureBlobStorageApplicationLogsConfig
"""
_attribute_map = {
'file_system': {'key': 'fileSystem', 'type': 'FileSystemApplicationLogsConfig'},
'azure_table_storage': {'key': 'azureTableStorage', 'type': 'AzureTableStorageApplicationLogsConfig'},
'azure_blob_storage': {'key': 'azureBlobStorage', 'type': 'AzureBlobStorageApplicationLogsConfig'},
}
def __init__(
self,
*,
file_system: Optional["FileSystemApplicationLogsConfig"] = None,
azure_table_storage: Optional["AzureTableStorageApplicationLogsConfig"] = None,
azure_blob_storage: Optional["AzureBlobStorageApplicationLogsConfig"] = None,
**kwargs
):
super(ApplicationLogsConfig, self).__init__(**kwargs)
self.file_system = file_system
self.azure_table_storage = azure_table_storage
self.azure_blob_storage = azure_blob_storage
class ApplicationStack(msrest.serialization.Model):
"""Application stack.
:param name: Application stack name.
:type name: str
:param display: Application stack display name.
:type display: str
:param dependency: Application stack dependency.
:type dependency: str
:param major_versions: List of major versions available.
:type major_versions: list[~azure.mgmt.web.v2020_06_01.models.StackMajorVersion]
:param frameworks: List of frameworks associated with application stack.
:type frameworks: list[~azure.mgmt.web.v2020_06_01.models.ApplicationStack]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'str'},
'dependency': {'key': 'dependency', 'type': 'str'},
'major_versions': {'key': 'majorVersions', 'type': '[StackMajorVersion]'},
'frameworks': {'key': 'frameworks', 'type': '[ApplicationStack]'},
}
def __init__(
self,
*,
name: Optional[str] = None,
display: Optional[str] = None,
dependency: Optional[str] = None,
major_versions: Optional[List["StackMajorVersion"]] = None,
frameworks: Optional[List["ApplicationStack"]] = None,
**kwargs
):
super(ApplicationStack, self).__init__(**kwargs)
self.name = name
self.display = display
self.dependency = dependency
self.major_versions = major_versions
self.frameworks = frameworks
class ApplicationStackCollection(msrest.serialization.Model):
"""Collection of Application Stacks.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.ApplicationStackResource]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[ApplicationStackResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["ApplicationStackResource"],
**kwargs
):
super(ApplicationStackCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class ApplicationStackResource(ProxyOnlyResource):
"""ARM resource for a ApplicationStack.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param name_properties_name: Application stack name.
:type name_properties_name: str
:param display: Application stack display name.
:type display: str
:param dependency: Application stack dependency.
:type dependency: str
:param major_versions: List of major versions available.
:type major_versions: list[~azure.mgmt.web.v2020_06_01.models.StackMajorVersion]
:param frameworks: List of frameworks associated with application stack.
:type frameworks: list[~azure.mgmt.web.v2020_06_01.models.ApplicationStack]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'name_properties_name': {'key': 'properties.name', 'type': 'str'},
'display': {'key': 'properties.display', 'type': 'str'},
'dependency': {'key': 'properties.dependency', 'type': 'str'},
'major_versions': {'key': 'properties.majorVersions', 'type': '[StackMajorVersion]'},
'frameworks': {'key': 'properties.frameworks', 'type': '[ApplicationStack]'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
name_properties_name: Optional[str] = None,
display: Optional[str] = None,
dependency: Optional[str] = None,
major_versions: Optional[List["StackMajorVersion"]] = None,
frameworks: Optional[List["ApplicationStack"]] = None,
**kwargs
):
super(ApplicationStackResource, self).__init__(kind=kind, **kwargs)
self.name_properties_name = name_properties_name
self.display = display
self.dependency = dependency
self.major_versions = major_versions
self.frameworks = frameworks
class AppRegistration(ProxyOnlyResource):
"""AppRegistration.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param app_id:
:type app_id: str
:param app_secret_setting_name:
:type app_secret_setting_name: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'app_id': {'key': 'properties.appId', 'type': 'str'},
'app_secret_setting_name': {'key': 'properties.appSecretSettingName', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
app_id: Optional[str] = None,
app_secret_setting_name: Optional[str] = None,
**kwargs
):
super(AppRegistration, self).__init__(kind=kind, **kwargs)
self.app_id = app_id
self.app_secret_setting_name = app_secret_setting_name
class AppServiceCertificate(msrest.serialization.Model):
"""Key Vault container for a certificate that is purchased through Azure.
Variables are only populated by the server, and will be ignored when sending a request.
:param key_vault_id: Key Vault resource Id.
:type key_vault_id: str
:param key_vault_secret_name: Key Vault secret name.
:type key_vault_secret_name: str
:ivar provisioning_state: Status of the Key Vault secret. Possible values include:
"Initialized", "WaitingOnCertificateOrder", "Succeeded", "CertificateOrderFailed",
"OperationNotPermittedOnKeyVault", "AzureServiceUnauthorizedToAccessKeyVault",
"KeyVaultDoesNotExist", "KeyVaultSecretDoesNotExist", "UnknownError", "ExternalPrivateKey",
"Unknown".
:vartype provisioning_state: str or ~azure.mgmt.web.v2020_06_01.models.KeyVaultSecretStatus
"""
_validation = {
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'key_vault_id': {'key': 'keyVaultId', 'type': 'str'},
'key_vault_secret_name': {'key': 'keyVaultSecretName', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
key_vault_id: Optional[str] = None,
key_vault_secret_name: Optional[str] = None,
**kwargs
):
super(AppServiceCertificate, self).__init__(**kwargs)
self.key_vault_id = key_vault_id
self.key_vault_secret_name = key_vault_secret_name
self.provisioning_state = None
class AppServiceCertificateCollection(msrest.serialization.Model):
"""Collection of certificate order certificates.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.AppServiceCertificateResource]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[AppServiceCertificateResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["AppServiceCertificateResource"],
**kwargs
):
super(AppServiceCertificateCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class Resource(msrest.serialization.Model):
"""Azure resource. This resource is tracked in Azure Resource Manager.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:param location: Required. Resource Location.
:type location: str
:ivar type: Resource type.
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'location': {'required': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
location: str,
kind: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.kind = kind
self.location = location
self.type = None
self.tags = tags
class AppServiceCertificateOrder(Resource):
"""SSL certificate purchase order.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:param location: Required. Resource Location.
:type location: str
:ivar type: Resource type.
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param certificates: State of the Key Vault secret.
:type certificates: dict[str, ~azure.mgmt.web.v2020_06_01.models.AppServiceCertificate]
:param distinguished_name: Certificate distinguished name.
:type distinguished_name: str
:ivar domain_verification_token: Domain verification token.
:vartype domain_verification_token: str
:param validity_in_years: Duration in years (must be between 1 and 3).
:type validity_in_years: int
:param key_size: Certificate key size.
:type key_size: int
:param product_type: Certificate product type. Possible values include:
"StandardDomainValidatedSsl", "StandardDomainValidatedWildCardSsl".
:type product_type: str or ~azure.mgmt.web.v2020_06_01.models.CertificateProductType
:param auto_renew: :code:`<code>true</code>` if the certificate should be automatically renewed
when it expires; otherwise, :code:`<code>false</code>`.
:type auto_renew: bool
:ivar provisioning_state: Status of certificate order. Possible values include: "Succeeded",
"Failed", "Canceled", "InProgress", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.web.v2020_06_01.models.ProvisioningState
:ivar status: Current order status. Possible values include: "Pendingissuance", "Issued",
"Revoked", "Canceled", "Denied", "Pendingrevocation", "PendingRekey", "Unused", "Expired",
"NotSubmitted".
:vartype status: str or ~azure.mgmt.web.v2020_06_01.models.CertificateOrderStatus
:ivar signed_certificate: Signed certificate.
:vartype signed_certificate: ~azure.mgmt.web.v2020_06_01.models.CertificateDetails
:param csr: Last CSR that was created for this order.
:type csr: str
:ivar intermediate: Intermediate certificate.
:vartype intermediate: ~azure.mgmt.web.v2020_06_01.models.CertificateDetails
:ivar root: Root certificate.
:vartype root: ~azure.mgmt.web.v2020_06_01.models.CertificateDetails
:ivar serial_number: Current serial number of the certificate.
:vartype serial_number: str
:ivar last_certificate_issuance_time: Certificate last issuance time.
:vartype last_certificate_issuance_time: ~datetime.datetime
:ivar expiration_time: Certificate expiration time.
:vartype expiration_time: ~datetime.datetime
:ivar is_private_key_external: :code:`<code>true</code>` if private key is external; otherwise,
:code:`<code>false</code>`.
:vartype is_private_key_external: bool
:ivar app_service_certificate_not_renewable_reasons: Reasons why App Service Certificate is not
renewable at the current moment.
:vartype app_service_certificate_not_renewable_reasons: list[str or
~azure.mgmt.web.v2020_06_01.models.AppServiceCertificateOrderPropertiesAppServiceCertificateNotRenewableReasonsItem]
:ivar next_auto_renewal_time_stamp: Time stamp when the certificate would be auto renewed next.
:vartype next_auto_renewal_time_stamp: ~datetime.datetime
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'location': {'required': True},
'type': {'readonly': True},
'domain_verification_token': {'readonly': True},
'validity_in_years': {'maximum': 3, 'minimum': 1},
'provisioning_state': {'readonly': True},
'status': {'readonly': True},
'signed_certificate': {'readonly': True},
'intermediate': {'readonly': True},
'root': {'readonly': True},
'serial_number': {'readonly': True},
'last_certificate_issuance_time': {'readonly': True},
'expiration_time': {'readonly': True},
'is_private_key_external': {'readonly': True},
'app_service_certificate_not_renewable_reasons': {'readonly': True},
'next_auto_renewal_time_stamp': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'certificates': {'key': 'properties.certificates', 'type': '{AppServiceCertificate}'},
'distinguished_name': {'key': 'properties.distinguishedName', 'type': 'str'},
'domain_verification_token': {'key': 'properties.domainVerificationToken', 'type': 'str'},
'validity_in_years': {'key': 'properties.validityInYears', 'type': 'int'},
'key_size': {'key': 'properties.keySize', 'type': 'int'},
'product_type': {'key': 'properties.productType', 'type': 'str'},
'auto_renew': {'key': 'properties.autoRenew', 'type': 'bool'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'str'},
'signed_certificate': {'key': 'properties.signedCertificate', 'type': 'CertificateDetails'},
'csr': {'key': 'properties.csr', 'type': 'str'},
'intermediate': {'key': 'properties.intermediate', 'type': 'CertificateDetails'},
'root': {'key': 'properties.root', 'type': 'CertificateDetails'},
'serial_number': {'key': 'properties.serialNumber', 'type': 'str'},
'last_certificate_issuance_time': {'key': 'properties.lastCertificateIssuanceTime', 'type': 'iso-8601'},
'expiration_time': {'key': 'properties.expirationTime', 'type': 'iso-8601'},
'is_private_key_external': {'key': 'properties.isPrivateKeyExternal', 'type': 'bool'},
'app_service_certificate_not_renewable_reasons': {'key': 'properties.appServiceCertificateNotRenewableReasons', 'type': '[str]'},
'next_auto_renewal_time_stamp': {'key': 'properties.nextAutoRenewalTimeStamp', 'type': 'iso-8601'},
}
def __init__(
self,
*,
location: str,
kind: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
certificates: Optional[Dict[str, "AppServiceCertificate"]] = None,
distinguished_name: Optional[str] = None,
validity_in_years: Optional[int] = 1,
key_size: Optional[int] = 2048,
product_type: Optional[Union[str, "CertificateProductType"]] = None,
auto_renew: Optional[bool] = True,
csr: Optional[str] = None,
**kwargs
):
super(AppServiceCertificateOrder, self).__init__(kind=kind, location=location, tags=tags, **kwargs)
self.certificates = certificates
self.distinguished_name = distinguished_name
self.domain_verification_token = None
self.validity_in_years = validity_in_years
self.key_size = key_size
self.product_type = product_type
self.auto_renew = auto_renew
self.provisioning_state = None
self.status = None
self.signed_certificate = None
self.csr = csr
self.intermediate = None
self.root = None
self.serial_number = None
self.last_certificate_issuance_time = None
self.expiration_time = None
self.is_private_key_external = None
self.app_service_certificate_not_renewable_reasons = None
self.next_auto_renewal_time_stamp = None
class AppServiceCertificateOrderCollection(msrest.serialization.Model):
"""Collection of certificate orders.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.AppServiceCertificateOrder]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[AppServiceCertificateOrder]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["AppServiceCertificateOrder"],
**kwargs
):
super(AppServiceCertificateOrderCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class AppServiceCertificateOrderPatchResource(ProxyOnlyResource):
"""ARM resource for a certificate order that is purchased through Azure.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param certificates: State of the Key Vault secret.
:type certificates: dict[str, ~azure.mgmt.web.v2020_06_01.models.AppServiceCertificate]
:param distinguished_name: Certificate distinguished name.
:type distinguished_name: str
:ivar domain_verification_token: Domain verification token.
:vartype domain_verification_token: str
:param validity_in_years: Duration in years (must be between 1 and 3).
:type validity_in_years: int
:param key_size: Certificate key size.
:type key_size: int
:param product_type: Certificate product type. Possible values include:
"StandardDomainValidatedSsl", "StandardDomainValidatedWildCardSsl".
:type product_type: str or ~azure.mgmt.web.v2020_06_01.models.CertificateProductType
:param auto_renew: :code:`<code>true</code>` if the certificate should be automatically renewed
when it expires; otherwise, :code:`<code>false</code>`.
:type auto_renew: bool
:ivar provisioning_state: Status of certificate order. Possible values include: "Succeeded",
"Failed", "Canceled", "InProgress", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.web.v2020_06_01.models.ProvisioningState
:ivar status: Current order status. Possible values include: "Pendingissuance", "Issued",
"Revoked", "Canceled", "Denied", "Pendingrevocation", "PendingRekey", "Unused", "Expired",
"NotSubmitted".
:vartype status: str or ~azure.mgmt.web.v2020_06_01.models.CertificateOrderStatus
:ivar signed_certificate: Signed certificate.
:vartype signed_certificate: ~azure.mgmt.web.v2020_06_01.models.CertificateDetails
:param csr: Last CSR that was created for this order.
:type csr: str
:ivar intermediate: Intermediate certificate.
:vartype intermediate: ~azure.mgmt.web.v2020_06_01.models.CertificateDetails
:ivar root: Root certificate.
:vartype root: ~azure.mgmt.web.v2020_06_01.models.CertificateDetails
:ivar serial_number: Current serial number of the certificate.
:vartype serial_number: str
:ivar last_certificate_issuance_time: Certificate last issuance time.
:vartype last_certificate_issuance_time: ~datetime.datetime
:ivar expiration_time: Certificate expiration time.
:vartype expiration_time: ~datetime.datetime
:ivar is_private_key_external: :code:`<code>true</code>` if private key is external; otherwise,
:code:`<code>false</code>`.
:vartype is_private_key_external: bool
:ivar app_service_certificate_not_renewable_reasons: Reasons why App Service Certificate is not
renewable at the current moment.
:vartype app_service_certificate_not_renewable_reasons: list[str or
~azure.mgmt.web.v2020_06_01.models.AppServiceCertificateOrderPatchResourcePropertiesAppServiceCertificateNotRenewableReasonsItem]
:ivar next_auto_renewal_time_stamp: Time stamp when the certificate would be auto renewed next.
:vartype next_auto_renewal_time_stamp: ~datetime.datetime
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'domain_verification_token': {'readonly': True},
'validity_in_years': {'maximum': 3, 'minimum': 1},
'provisioning_state': {'readonly': True},
'status': {'readonly': True},
'signed_certificate': {'readonly': True},
'intermediate': {'readonly': True},
'root': {'readonly': True},
'serial_number': {'readonly': True},
'last_certificate_issuance_time': {'readonly': True},
'expiration_time': {'readonly': True},
'is_private_key_external': {'readonly': True},
'app_service_certificate_not_renewable_reasons': {'readonly': True},
'next_auto_renewal_time_stamp': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'certificates': {'key': 'properties.certificates', 'type': '{AppServiceCertificate}'},
'distinguished_name': {'key': 'properties.distinguishedName', 'type': 'str'},
'domain_verification_token': {'key': 'properties.domainVerificationToken', 'type': 'str'},
'validity_in_years': {'key': 'properties.validityInYears', 'type': 'int'},
'key_size': {'key': 'properties.keySize', 'type': 'int'},
'product_type': {'key': 'properties.productType', 'type': 'str'},
'auto_renew': {'key': 'properties.autoRenew', 'type': 'bool'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'str'},
'signed_certificate': {'key': 'properties.signedCertificate', 'type': 'CertificateDetails'},
'csr': {'key': 'properties.csr', 'type': 'str'},
'intermediate': {'key': 'properties.intermediate', 'type': 'CertificateDetails'},
'root': {'key': 'properties.root', 'type': 'CertificateDetails'},
'serial_number': {'key': 'properties.serialNumber', 'type': 'str'},
'last_certificate_issuance_time': {'key': 'properties.lastCertificateIssuanceTime', 'type': 'iso-8601'},
'expiration_time': {'key': 'properties.expirationTime', 'type': 'iso-8601'},
'is_private_key_external': {'key': 'properties.isPrivateKeyExternal', 'type': 'bool'},
'app_service_certificate_not_renewable_reasons': {'key': 'properties.appServiceCertificateNotRenewableReasons', 'type': '[str]'},
'next_auto_renewal_time_stamp': {'key': 'properties.nextAutoRenewalTimeStamp', 'type': 'iso-8601'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
certificates: Optional[Dict[str, "AppServiceCertificate"]] = None,
distinguished_name: Optional[str] = None,
validity_in_years: Optional[int] = 1,
key_size: Optional[int] = 2048,
product_type: Optional[Union[str, "CertificateProductType"]] = None,
auto_renew: Optional[bool] = True,
csr: Optional[str] = None,
**kwargs
):
super(AppServiceCertificateOrderPatchResource, self).__init__(kind=kind, **kwargs)
self.certificates = certificates
self.distinguished_name = distinguished_name
self.domain_verification_token = None
self.validity_in_years = validity_in_years
self.key_size = key_size
self.product_type = product_type
self.auto_renew = auto_renew
self.provisioning_state = None
self.status = None
self.signed_certificate = None
self.csr = csr
self.intermediate = None
self.root = None
self.serial_number = None
self.last_certificate_issuance_time = None
self.expiration_time = None
self.is_private_key_external = None
self.app_service_certificate_not_renewable_reasons = None
self.next_auto_renewal_time_stamp = None
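# Illustrative sketch: a PATCH payload that disables auto-renewal on an existing order.
# Note the validation above: validity_in_years must stay between 1 and 3, and key_size
# defaults to 2048 when not supplied.
def _example_certificate_order_patch():  # pragma: no cover
    return AppServiceCertificateOrderPatchResource(
        validity_in_years=1,
        auto_renew=False,
    )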
class AppServiceCertificatePatchResource(ProxyOnlyResource):
"""Key Vault container ARM resource for a certificate that is purchased through Azure.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param key_vault_id: Key Vault resource Id.
:type key_vault_id: str
:param key_vault_secret_name: Key Vault secret name.
:type key_vault_secret_name: str
:ivar provisioning_state: Status of the Key Vault secret. Possible values include:
"Initialized", "WaitingOnCertificateOrder", "Succeeded", "CertificateOrderFailed",
"OperationNotPermittedOnKeyVault", "AzureServiceUnauthorizedToAccessKeyVault",
"KeyVaultDoesNotExist", "KeyVaultSecretDoesNotExist", "UnknownError", "ExternalPrivateKey",
"Unknown".
:vartype provisioning_state: str or ~azure.mgmt.web.v2020_06_01.models.KeyVaultSecretStatus
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'key_vault_id': {'key': 'properties.keyVaultId', 'type': 'str'},
'key_vault_secret_name': {'key': 'properties.keyVaultSecretName', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
key_vault_id: Optional[str] = None,
key_vault_secret_name: Optional[str] = None,
**kwargs
):
super(AppServiceCertificatePatchResource, self).__init__(kind=kind, **kwargs)
self.key_vault_id = key_vault_id
self.key_vault_secret_name = key_vault_secret_name
self.provisioning_state = None
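# Illustrative sketch: a PATCH payload that points an App Service certificate at a Key Vault
# secret. The Key Vault resource ID and secret name below are hypothetical placeholders.
def _example_certificate_patch():  # pragma: no cover
    return AppServiceCertificatePatchResource(
        key_vault_id=(
            "/subscriptions/00000000-0000-0000-0000-000000000000"
            "/resourceGroups/example-rg/providers/Microsoft.KeyVault/vaults/example-kv"
        ),
        key_vault_secret_name="example-cert-secret",
    )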
class AppServiceCertificateResource(Resource):
"""Key Vault container ARM resource for a certificate that is purchased through Azure.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:param location: Required. Resource Location.
:type location: str
:ivar type: Resource type.
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param key_vault_id: Key Vault resource Id.
:type key_vault_id: str
:param key_vault_secret_name: Key Vault secret name.
:type key_vault_secret_name: str
:ivar provisioning_state: Status of the Key Vault secret. Possible values include:
"Initialized", "WaitingOnCertificateOrder", "Succeeded", "CertificateOrderFailed",
"OperationNotPermittedOnKeyVault", "AzureServiceUnauthorizedToAccessKeyVault",
"KeyVaultDoesNotExist", "KeyVaultSecretDoesNotExist", "UnknownError", "ExternalPrivateKey",
"Unknown".
:vartype provisioning_state: str or ~azure.mgmt.web.v2020_06_01.models.KeyVaultSecretStatus
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'location': {'required': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'key_vault_id': {'key': 'properties.keyVaultId', 'type': 'str'},
'key_vault_secret_name': {'key': 'properties.keyVaultSecretName', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
kind: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
key_vault_id: Optional[str] = None,
key_vault_secret_name: Optional[str] = None,
**kwargs
):
super(AppServiceCertificateResource, self).__init__(kind=kind, location=location, tags=tags, **kwargs)
self.key_vault_id = key_vault_id
self.key_vault_secret_name = key_vault_secret_name
self.provisioning_state = None
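# Illustrative sketch: the full ARM resource variant additionally requires a location.
# All concrete values are hypothetical placeholders.
def _example_certificate_resource():  # pragma: no cover
    return AppServiceCertificateResource(
        location="West US",
        tags={"env": "example"},
        key_vault_id=(
            "/subscriptions/00000000-0000-0000-0000-000000000000"
            "/resourceGroups/example-rg/providers/Microsoft.KeyVault/vaults/example-kv"
        ),
        key_vault_secret_name="example-cert-secret",
    )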
class AppServiceEnvironment(msrest.serialization.Model):
"""Description of an App Service Environment.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param name: Required. Name of the App Service Environment.
:type name: str
:param location: Required. Location of the App Service Environment, e.g. "West US".
:type location: str
:ivar provisioning_state: Provisioning state of the App Service Environment. Possible values
include: "Succeeded", "Failed", "Canceled", "InProgress", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.web.v2020_06_01.models.ProvisioningState
:ivar status: Current status of the App Service Environment. Possible values include:
"Preparing", "Ready", "Scaling", "Deleting".
:vartype status: str or ~azure.mgmt.web.v2020_06_01.models.HostingEnvironmentStatus
:param vnet_name: Name of the Virtual Network for the App Service Environment.
:type vnet_name: str
:param vnet_resource_group_name: Resource group of the Virtual Network.
:type vnet_resource_group_name: str
:param vnet_subnet_name: Subnet of the Virtual Network.
:type vnet_subnet_name: str
:param virtual_network: Required. Description of the Virtual Network.
:type virtual_network: ~azure.mgmt.web.v2020_06_01.models.VirtualNetworkProfile
:param internal_load_balancing_mode: Specifies which endpoints to serve internally in the
Virtual Network for the App Service Environment. Possible values include: "None", "Web",
"Publishing", "Web,Publishing".
:type internal_load_balancing_mode: str or ~azure.mgmt.web.v2020_06_01.models.LoadBalancingMode
:param multi_size: Front-end VM size, e.g. "Medium", "Large".
:type multi_size: str
:param multi_role_count: Number of front-end instances.
:type multi_role_count: int
:param worker_pools: Required. Description of worker pools with worker size IDs, VM sizes, and
number of workers in each pool.
:type worker_pools: list[~azure.mgmt.web.v2020_06_01.models.WorkerPool]
:param ipssl_address_count: Number of IP SSL addresses reserved for the App Service
Environment.
:type ipssl_address_count: int
:ivar database_edition: Edition of the metadata database for the App Service Environment, e.g.
"Standard".
:vartype database_edition: str
:ivar database_service_objective: Service objective of the metadata database for the App
Service Environment, e.g. "S0".
:vartype database_service_objective: str
:ivar upgrade_domains: Number of upgrade domains of the App Service Environment.
:vartype upgrade_domains: int
:ivar subscription_id: Subscription of the App Service Environment.
:vartype subscription_id: str
:param dns_suffix: DNS suffix of the App Service Environment.
:type dns_suffix: str
:ivar last_action: Last deployment action on the App Service Environment.
:vartype last_action: str
:ivar last_action_result: Result of the last deployment action on the App Service Environment.
:vartype last_action_result: str
:ivar allowed_multi_sizes: List of comma separated strings describing which VM sizes are
allowed for front-ends.
:vartype allowed_multi_sizes: str
:ivar allowed_worker_sizes: List of comma separated strings describing which VM sizes are
allowed for workers.
:vartype allowed_worker_sizes: str
:ivar maximum_number_of_machines: Maximum number of VMs in the App Service Environment.
:vartype maximum_number_of_machines: int
:ivar vip_mappings: Description of IP SSL mapping for the App Service Environment.
:vartype vip_mappings: list[~azure.mgmt.web.v2020_06_01.models.VirtualIPMapping]
:ivar environment_capacities: Current total, used, and available worker capacities.
:vartype environment_capacities: list[~azure.mgmt.web.v2020_06_01.models.StampCapacity]
:param network_access_control_list: Access control list for controlling traffic to the App
Service Environment.
:type network_access_control_list:
list[~azure.mgmt.web.v2020_06_01.models.NetworkAccessControlEntry]
:ivar environment_is_healthy: True/false indicating whether the App Service Environment is
healthy.
:vartype environment_is_healthy: bool
    :ivar environment_status: Detailed message with the results of the last check of the App
     Service Environment.
    :vartype environment_status: str
:ivar resource_group: Resource group of the App Service Environment.
:vartype resource_group: str
:param front_end_scale_factor: Scale factor for front-ends.
:type front_end_scale_factor: int
:ivar default_front_end_scale_factor: Default Scale Factor for FrontEnds.
:vartype default_front_end_scale_factor: int
:param api_management_account_id: API Management Account associated with the App Service
Environment.
:type api_management_account_id: str
:param suspended: :code:`<code>true</code>` if the App Service Environment is suspended;
otherwise, :code:`<code>false</code>`. The environment can be suspended, e.g. when the
management endpoint is no longer available
(most likely because NSG blocked the incoming traffic).
:type suspended: bool
    :param dynamic_cache_enabled: True/false indicating whether dynamic cache is enabled for the
     App Service Environment.
    :type dynamic_cache_enabled: bool
:param cluster_settings: Custom settings for changing the behavior of the App Service
Environment.
:type cluster_settings: list[~azure.mgmt.web.v2020_06_01.models.NameValuePair]
    :param user_whitelisted_ip_ranges: User-added IP ranges to whitelist on the ASE database.
    :type user_whitelisted_ip_ranges: list[str]
    :param has_linux_workers: Flag indicating whether the ASE has Linux workers.
    :type has_linux_workers: bool
:param ssl_cert_key_vault_id: Key Vault ID for ILB App Service Environment default SSL
certificate.
:type ssl_cert_key_vault_id: str
:param ssl_cert_key_vault_secret_name: Key Vault Secret Name for ILB App Service Environment
default SSL certificate.
:type ssl_cert_key_vault_secret_name: str
"""
_validation = {
'name': {'required': True},
'location': {'required': True},
'provisioning_state': {'readonly': True},
'status': {'readonly': True},
'virtual_network': {'required': True},
'worker_pools': {'required': True},
'database_edition': {'readonly': True},
'database_service_objective': {'readonly': True},
'upgrade_domains': {'readonly': True},
'subscription_id': {'readonly': True},
'last_action': {'readonly': True},
'last_action_result': {'readonly': True},
'allowed_multi_sizes': {'readonly': True},
'allowed_worker_sizes': {'readonly': True},
'maximum_number_of_machines': {'readonly': True},
'vip_mappings': {'readonly': True},
'environment_capacities': {'readonly': True},
'environment_is_healthy': {'readonly': True},
'environment_status': {'readonly': True},
'resource_group': {'readonly': True},
'default_front_end_scale_factor': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'vnet_name': {'key': 'vnetName', 'type': 'str'},
'vnet_resource_group_name': {'key': 'vnetResourceGroupName', 'type': 'str'},
'vnet_subnet_name': {'key': 'vnetSubnetName', 'type': 'str'},
'virtual_network': {'key': 'virtualNetwork', 'type': 'VirtualNetworkProfile'},
'internal_load_balancing_mode': {'key': 'internalLoadBalancingMode', 'type': 'str'},
'multi_size': {'key': 'multiSize', 'type': 'str'},
'multi_role_count': {'key': 'multiRoleCount', 'type': 'int'},
'worker_pools': {'key': 'workerPools', 'type': '[WorkerPool]'},
'ipssl_address_count': {'key': 'ipsslAddressCount', 'type': 'int'},
'database_edition': {'key': 'databaseEdition', 'type': 'str'},
'database_service_objective': {'key': 'databaseServiceObjective', 'type': 'str'},
'upgrade_domains': {'key': 'upgradeDomains', 'type': 'int'},
'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
'dns_suffix': {'key': 'dnsSuffix', 'type': 'str'},
'last_action': {'key': 'lastAction', 'type': 'str'},
'last_action_result': {'key': 'lastActionResult', 'type': 'str'},
'allowed_multi_sizes': {'key': 'allowedMultiSizes', 'type': 'str'},
'allowed_worker_sizes': {'key': 'allowedWorkerSizes', 'type': 'str'},
'maximum_number_of_machines': {'key': 'maximumNumberOfMachines', 'type': 'int'},
'vip_mappings': {'key': 'vipMappings', 'type': '[VirtualIPMapping]'},
'environment_capacities': {'key': 'environmentCapacities', 'type': '[StampCapacity]'},
'network_access_control_list': {'key': 'networkAccessControlList', 'type': '[NetworkAccessControlEntry]'},
'environment_is_healthy': {'key': 'environmentIsHealthy', 'type': 'bool'},
'environment_status': {'key': 'environmentStatus', 'type': 'str'},
'resource_group': {'key': 'resourceGroup', 'type': 'str'},
'front_end_scale_factor': {'key': 'frontEndScaleFactor', 'type': 'int'},
'default_front_end_scale_factor': {'key': 'defaultFrontEndScaleFactor', 'type': 'int'},
'api_management_account_id': {'key': 'apiManagementAccountId', 'type': 'str'},
'suspended': {'key': 'suspended', 'type': 'bool'},
'dynamic_cache_enabled': {'key': 'dynamicCacheEnabled', 'type': 'bool'},
'cluster_settings': {'key': 'clusterSettings', 'type': '[NameValuePair]'},
'user_whitelisted_ip_ranges': {'key': 'userWhitelistedIpRanges', 'type': '[str]'},
'has_linux_workers': {'key': 'hasLinuxWorkers', 'type': 'bool'},
'ssl_cert_key_vault_id': {'key': 'sslCertKeyVaultId', 'type': 'str'},
'ssl_cert_key_vault_secret_name': {'key': 'sslCertKeyVaultSecretName', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
location: str,
virtual_network: "VirtualNetworkProfile",
worker_pools: List["WorkerPool"],
vnet_name: Optional[str] = None,
vnet_resource_group_name: Optional[str] = None,
vnet_subnet_name: Optional[str] = None,
internal_load_balancing_mode: Optional[Union[str, "LoadBalancingMode"]] = None,
multi_size: Optional[str] = None,
multi_role_count: Optional[int] = None,
ipssl_address_count: Optional[int] = None,
dns_suffix: Optional[str] = None,
network_access_control_list: Optional[List["NetworkAccessControlEntry"]] = None,
front_end_scale_factor: Optional[int] = None,
api_management_account_id: Optional[str] = None,
suspended: Optional[bool] = None,
dynamic_cache_enabled: Optional[bool] = None,
cluster_settings: Optional[List["NameValuePair"]] = None,
user_whitelisted_ip_ranges: Optional[List[str]] = None,
has_linux_workers: Optional[bool] = None,
ssl_cert_key_vault_id: Optional[str] = None,
ssl_cert_key_vault_secret_name: Optional[str] = None,
**kwargs
):
super(AppServiceEnvironment, self).__init__(**kwargs)
self.name = name
self.location = location
self.provisioning_state = None
self.status = None
self.vnet_name = vnet_name
self.vnet_resource_group_name = vnet_resource_group_name
self.vnet_subnet_name = vnet_subnet_name
self.virtual_network = virtual_network
self.internal_load_balancing_mode = internal_load_balancing_mode
self.multi_size = multi_size
self.multi_role_count = multi_role_count
self.worker_pools = worker_pools
self.ipssl_address_count = ipssl_address_count
self.database_edition = None
self.database_service_objective = None
self.upgrade_domains = None
self.subscription_id = None
self.dns_suffix = dns_suffix
self.last_action = None
self.last_action_result = None
self.allowed_multi_sizes = None
self.allowed_worker_sizes = None
self.maximum_number_of_machines = None
self.vip_mappings = None
self.environment_capacities = None
self.network_access_control_list = network_access_control_list
self.environment_is_healthy = None
self.environment_status = None
self.resource_group = None
self.front_end_scale_factor = front_end_scale_factor
self.default_front_end_scale_factor = None
self.api_management_account_id = api_management_account_id
self.suspended = suspended
self.dynamic_cache_enabled = dynamic_cache_enabled
self.cluster_settings = cluster_settings
self.user_whitelisted_ip_ranges = user_whitelisted_ip_ranges
self.has_linux_workers = has_linux_workers
self.ssl_cert_key_vault_id = ssl_cert_key_vault_id
self.ssl_cert_key_vault_secret_name = ssl_cert_key_vault_secret_name
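# Illustrative sketch: the minimum required inputs for an AppServiceEnvironment are name,
# location, virtual_network and worker_pools. VirtualNetworkProfile and WorkerPool are other
# models in this module; the constructor arguments used for them here (id, worker_size_id,
# worker_size, worker_count) are assumptions, as are all concrete values.
def _example_app_service_environment():  # pragma: no cover
    return AppServiceEnvironment(
        name="example-ase",
        location="West US",
        virtual_network=VirtualNetworkProfile(
            id=(
                "/subscriptions/00000000-0000-0000-0000-000000000000"
                "/resourceGroups/example-rg/providers/Microsoft.Network"
                "/virtualNetworks/example-vnet/subnets/ase-subnet"
            ),
        ),
        worker_pools=[
            WorkerPool(worker_size_id=0, worker_size="Small", worker_count=2),
        ],
        internal_load_balancing_mode="Web,Publishing",
    )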
class AppServiceEnvironmentCollection(msrest.serialization.Model):
"""Collection of App Service Environments.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.AppServiceEnvironmentResource]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[AppServiceEnvironmentResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["AppServiceEnvironmentResource"],
**kwargs
):
super(AppServiceEnvironmentCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class AppServiceEnvironmentPatchResource(ProxyOnlyResource):
"""ARM resource for a app service environment.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param name_properties_name: Name of the App Service Environment.
:type name_properties_name: str
:param location: Location of the App Service Environment, e.g. "West US".
:type location: str
:ivar provisioning_state: Provisioning state of the App Service Environment. Possible values
include: "Succeeded", "Failed", "Canceled", "InProgress", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.web.v2020_06_01.models.ProvisioningState
:ivar status: Current status of the App Service Environment. Possible values include:
"Preparing", "Ready", "Scaling", "Deleting".
:vartype status: str or ~azure.mgmt.web.v2020_06_01.models.HostingEnvironmentStatus
:param vnet_name: Name of the Virtual Network for the App Service Environment.
:type vnet_name: str
:param vnet_resource_group_name: Resource group of the Virtual Network.
:type vnet_resource_group_name: str
:param vnet_subnet_name: Subnet of the Virtual Network.
:type vnet_subnet_name: str
:param virtual_network: Description of the Virtual Network.
:type virtual_network: ~azure.mgmt.web.v2020_06_01.models.VirtualNetworkProfile
:param internal_load_balancing_mode: Specifies which endpoints to serve internally in the
Virtual Network for the App Service Environment. Possible values include: "None", "Web",
"Publishing", "Web,Publishing".
:type internal_load_balancing_mode: str or ~azure.mgmt.web.v2020_06_01.models.LoadBalancingMode
:param multi_size: Front-end VM size, e.g. "Medium", "Large".
:type multi_size: str
:param multi_role_count: Number of front-end instances.
:type multi_role_count: int
:param worker_pools: Description of worker pools with worker size IDs, VM sizes, and number of
workers in each pool.
:type worker_pools: list[~azure.mgmt.web.v2020_06_01.models.WorkerPool]
:param ipssl_address_count: Number of IP SSL addresses reserved for the App Service
Environment.
:type ipssl_address_count: int
:ivar database_edition: Edition of the metadata database for the App Service Environment, e.g.
"Standard".
:vartype database_edition: str
:ivar database_service_objective: Service objective of the metadata database for the App
Service Environment, e.g. "S0".
:vartype database_service_objective: str
:ivar upgrade_domains: Number of upgrade domains of the App Service Environment.
:vartype upgrade_domains: int
:ivar subscription_id: Subscription of the App Service Environment.
:vartype subscription_id: str
:param dns_suffix: DNS suffix of the App Service Environment.
:type dns_suffix: str
:ivar last_action: Last deployment action on the App Service Environment.
:vartype last_action: str
:ivar last_action_result: Result of the last deployment action on the App Service Environment.
:vartype last_action_result: str
:ivar allowed_multi_sizes: List of comma separated strings describing which VM sizes are
allowed for front-ends.
:vartype allowed_multi_sizes: str
:ivar allowed_worker_sizes: List of comma separated strings describing which VM sizes are
allowed for workers.
:vartype allowed_worker_sizes: str
:ivar maximum_number_of_machines: Maximum number of VMs in the App Service Environment.
:vartype maximum_number_of_machines: int
:ivar vip_mappings: Description of IP SSL mapping for the App Service Environment.
:vartype vip_mappings: list[~azure.mgmt.web.v2020_06_01.models.VirtualIPMapping]
:ivar environment_capacities: Current total, used, and available worker capacities.
:vartype environment_capacities: list[~azure.mgmt.web.v2020_06_01.models.StampCapacity]
:param network_access_control_list: Access control list for controlling traffic to the App
Service Environment.
:type network_access_control_list:
list[~azure.mgmt.web.v2020_06_01.models.NetworkAccessControlEntry]
:ivar environment_is_healthy: True/false indicating whether the App Service Environment is
healthy.
:vartype environment_is_healthy: bool
    :ivar environment_status: Detailed message with the results of the last check of the App
     Service Environment.
    :vartype environment_status: str
:ivar resource_group: Resource group of the App Service Environment.
:vartype resource_group: str
:param front_end_scale_factor: Scale factor for front-ends.
:type front_end_scale_factor: int
:ivar default_front_end_scale_factor: Default Scale Factor for FrontEnds.
:vartype default_front_end_scale_factor: int
:param api_management_account_id: API Management Account associated with the App Service
Environment.
:type api_management_account_id: str
:param suspended: :code:`<code>true</code>` if the App Service Environment is suspended;
otherwise, :code:`<code>false</code>`. The environment can be suspended, e.g. when the
management endpoint is no longer available
(most likely because NSG blocked the incoming traffic).
:type suspended: bool
    :param dynamic_cache_enabled: True/false indicating whether dynamic cache is enabled for the
     App Service Environment.
    :type dynamic_cache_enabled: bool
:param cluster_settings: Custom settings for changing the behavior of the App Service
Environment.
:type cluster_settings: list[~azure.mgmt.web.v2020_06_01.models.NameValuePair]
    :param user_whitelisted_ip_ranges: User-added IP ranges to whitelist on the ASE database.
    :type user_whitelisted_ip_ranges: list[str]
    :param has_linux_workers: Flag indicating whether the ASE has Linux workers.
    :type has_linux_workers: bool
:param ssl_cert_key_vault_id: Key Vault ID for ILB App Service Environment default SSL
certificate.
:type ssl_cert_key_vault_id: str
:param ssl_cert_key_vault_secret_name: Key Vault Secret Name for ILB App Service Environment
default SSL certificate.
:type ssl_cert_key_vault_secret_name: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
'status': {'readonly': True},
'database_edition': {'readonly': True},
'database_service_objective': {'readonly': True},
'upgrade_domains': {'readonly': True},
'subscription_id': {'readonly': True},
'last_action': {'readonly': True},
'last_action_result': {'readonly': True},
'allowed_multi_sizes': {'readonly': True},
'allowed_worker_sizes': {'readonly': True},
'maximum_number_of_machines': {'readonly': True},
'vip_mappings': {'readonly': True},
'environment_capacities': {'readonly': True},
'environment_is_healthy': {'readonly': True},
'environment_status': {'readonly': True},
'resource_group': {'readonly': True},
'default_front_end_scale_factor': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'name_properties_name': {'key': 'properties.name', 'type': 'str'},
'location': {'key': 'properties.location', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'str'},
'vnet_name': {'key': 'properties.vnetName', 'type': 'str'},
'vnet_resource_group_name': {'key': 'properties.vnetResourceGroupName', 'type': 'str'},
'vnet_subnet_name': {'key': 'properties.vnetSubnetName', 'type': 'str'},
'virtual_network': {'key': 'properties.virtualNetwork', 'type': 'VirtualNetworkProfile'},
'internal_load_balancing_mode': {'key': 'properties.internalLoadBalancingMode', 'type': 'str'},
'multi_size': {'key': 'properties.multiSize', 'type': 'str'},
'multi_role_count': {'key': 'properties.multiRoleCount', 'type': 'int'},
'worker_pools': {'key': 'properties.workerPools', 'type': '[WorkerPool]'},
'ipssl_address_count': {'key': 'properties.ipsslAddressCount', 'type': 'int'},
'database_edition': {'key': 'properties.databaseEdition', 'type': 'str'},
'database_service_objective': {'key': 'properties.databaseServiceObjective', 'type': 'str'},
'upgrade_domains': {'key': 'properties.upgradeDomains', 'type': 'int'},
'subscription_id': {'key': 'properties.subscriptionId', 'type': 'str'},
'dns_suffix': {'key': 'properties.dnsSuffix', 'type': 'str'},
'last_action': {'key': 'properties.lastAction', 'type': 'str'},
'last_action_result': {'key': 'properties.lastActionResult', 'type': 'str'},
'allowed_multi_sizes': {'key': 'properties.allowedMultiSizes', 'type': 'str'},
'allowed_worker_sizes': {'key': 'properties.allowedWorkerSizes', 'type': 'str'},
'maximum_number_of_machines': {'key': 'properties.maximumNumberOfMachines', 'type': 'int'},
'vip_mappings': {'key': 'properties.vipMappings', 'type': '[VirtualIPMapping]'},
'environment_capacities': {'key': 'properties.environmentCapacities', 'type': '[StampCapacity]'},
'network_access_control_list': {'key': 'properties.networkAccessControlList', 'type': '[NetworkAccessControlEntry]'},
'environment_is_healthy': {'key': 'properties.environmentIsHealthy', 'type': 'bool'},
'environment_status': {'key': 'properties.environmentStatus', 'type': 'str'},
'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'},
'front_end_scale_factor': {'key': 'properties.frontEndScaleFactor', 'type': 'int'},
'default_front_end_scale_factor': {'key': 'properties.defaultFrontEndScaleFactor', 'type': 'int'},
'api_management_account_id': {'key': 'properties.apiManagementAccountId', 'type': 'str'},
'suspended': {'key': 'properties.suspended', 'type': 'bool'},
'dynamic_cache_enabled': {'key': 'properties.dynamicCacheEnabled', 'type': 'bool'},
'cluster_settings': {'key': 'properties.clusterSettings', 'type': '[NameValuePair]'},
'user_whitelisted_ip_ranges': {'key': 'properties.userWhitelistedIpRanges', 'type': '[str]'},
'has_linux_workers': {'key': 'properties.hasLinuxWorkers', 'type': 'bool'},
'ssl_cert_key_vault_id': {'key': 'properties.sslCertKeyVaultId', 'type': 'str'},
'ssl_cert_key_vault_secret_name': {'key': 'properties.sslCertKeyVaultSecretName', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
name_properties_name: Optional[str] = None,
location: Optional[str] = None,
vnet_name: Optional[str] = None,
vnet_resource_group_name: Optional[str] = None,
vnet_subnet_name: Optional[str] = None,
virtual_network: Optional["VirtualNetworkProfile"] = None,
internal_load_balancing_mode: Optional[Union[str, "LoadBalancingMode"]] = None,
multi_size: Optional[str] = None,
multi_role_count: Optional[int] = None,
worker_pools: Optional[List["WorkerPool"]] = None,
ipssl_address_count: Optional[int] = None,
dns_suffix: Optional[str] = None,
network_access_control_list: Optional[List["NetworkAccessControlEntry"]] = None,
front_end_scale_factor: Optional[int] = None,
api_management_account_id: Optional[str] = None,
suspended: Optional[bool] = None,
dynamic_cache_enabled: Optional[bool] = None,
cluster_settings: Optional[List["NameValuePair"]] = None,
user_whitelisted_ip_ranges: Optional[List[str]] = None,
has_linux_workers: Optional[bool] = None,
ssl_cert_key_vault_id: Optional[str] = None,
ssl_cert_key_vault_secret_name: Optional[str] = None,
**kwargs
):
super(AppServiceEnvironmentPatchResource, self).__init__(kind=kind, **kwargs)
self.name_properties_name = name_properties_name
self.location = location
self.provisioning_state = None
self.status = None
self.vnet_name = vnet_name
self.vnet_resource_group_name = vnet_resource_group_name
self.vnet_subnet_name = vnet_subnet_name
self.virtual_network = virtual_network
self.internal_load_balancing_mode = internal_load_balancing_mode
self.multi_size = multi_size
self.multi_role_count = multi_role_count
self.worker_pools = worker_pools
self.ipssl_address_count = ipssl_address_count
self.database_edition = None
self.database_service_objective = None
self.upgrade_domains = None
self.subscription_id = None
self.dns_suffix = dns_suffix
self.last_action = None
self.last_action_result = None
self.allowed_multi_sizes = None
self.allowed_worker_sizes = None
self.maximum_number_of_machines = None
self.vip_mappings = None
self.environment_capacities = None
self.network_access_control_list = network_access_control_list
self.environment_is_healthy = None
self.environment_status = None
self.resource_group = None
self.front_end_scale_factor = front_end_scale_factor
self.default_front_end_scale_factor = None
self.api_management_account_id = api_management_account_id
self.suspended = suspended
self.dynamic_cache_enabled = dynamic_cache_enabled
self.cluster_settings = cluster_settings
self.user_whitelisted_ip_ranges = user_whitelisted_ip_ranges
self.has_linux_workers = has_linux_workers
self.ssl_cert_key_vault_id = ssl_cert_key_vault_id
self.ssl_cert_key_vault_secret_name = ssl_cert_key_vault_secret_name
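# Illustrative sketch: because every property on the patch resource is optional, a PATCH
# payload can carry only the fields being changed, e.g. just the front-end scale factor.
def _example_ase_patch():  # pragma: no cover
    return AppServiceEnvironmentPatchResource(front_end_scale_factor=10)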
class AppServiceEnvironmentResource(Resource):
"""App Service Environment ARM resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:param location: Required. Resource Location.
:type location: str
:ivar type: Resource type.
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param name_properties_name: Name of the App Service Environment.
:type name_properties_name: str
:param location_properties_location: Location of the App Service Environment, e.g. "West US".
:type location_properties_location: str
:ivar provisioning_state: Provisioning state of the App Service Environment. Possible values
include: "Succeeded", "Failed", "Canceled", "InProgress", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.web.v2020_06_01.models.ProvisioningState
:ivar status: Current status of the App Service Environment. Possible values include:
"Preparing", "Ready", "Scaling", "Deleting".
:vartype status: str or ~azure.mgmt.web.v2020_06_01.models.HostingEnvironmentStatus
:param vnet_name: Name of the Virtual Network for the App Service Environment.
:type vnet_name: str
:param vnet_resource_group_name: Resource group of the Virtual Network.
:type vnet_resource_group_name: str
:param vnet_subnet_name: Subnet of the Virtual Network.
:type vnet_subnet_name: str
:param virtual_network: Description of the Virtual Network.
:type virtual_network: ~azure.mgmt.web.v2020_06_01.models.VirtualNetworkProfile
:param internal_load_balancing_mode: Specifies which endpoints to serve internally in the
Virtual Network for the App Service Environment. Possible values include: "None", "Web",
"Publishing", "Web,Publishing".
:type internal_load_balancing_mode: str or ~azure.mgmt.web.v2020_06_01.models.LoadBalancingMode
:param multi_size: Front-end VM size, e.g. "Medium", "Large".
:type multi_size: str
:param multi_role_count: Number of front-end instances.
:type multi_role_count: int
:param worker_pools: Description of worker pools with worker size IDs, VM sizes, and number of
workers in each pool.
:type worker_pools: list[~azure.mgmt.web.v2020_06_01.models.WorkerPool]
:param ipssl_address_count: Number of IP SSL addresses reserved for the App Service
Environment.
:type ipssl_address_count: int
:ivar database_edition: Edition of the metadata database for the App Service Environment, e.g.
"Standard".
:vartype database_edition: str
:ivar database_service_objective: Service objective of the metadata database for the App
Service Environment, e.g. "S0".
:vartype database_service_objective: str
:ivar upgrade_domains: Number of upgrade domains of the App Service Environment.
:vartype upgrade_domains: int
:ivar subscription_id: Subscription of the App Service Environment.
:vartype subscription_id: str
:param dns_suffix: DNS suffix of the App Service Environment.
:type dns_suffix: str
:ivar last_action: Last deployment action on the App Service Environment.
:vartype last_action: str
:ivar last_action_result: Result of the last deployment action on the App Service Environment.
:vartype last_action_result: str
:ivar allowed_multi_sizes: List of comma separated strings describing which VM sizes are
allowed for front-ends.
:vartype allowed_multi_sizes: str
:ivar allowed_worker_sizes: List of comma separated strings describing which VM sizes are
allowed for workers.
:vartype allowed_worker_sizes: str
:ivar maximum_number_of_machines: Maximum number of VMs in the App Service Environment.
:vartype maximum_number_of_machines: int
:ivar vip_mappings: Description of IP SSL mapping for the App Service Environment.
:vartype vip_mappings: list[~azure.mgmt.web.v2020_06_01.models.VirtualIPMapping]
:ivar environment_capacities: Current total, used, and available worker capacities.
:vartype environment_capacities: list[~azure.mgmt.web.v2020_06_01.models.StampCapacity]
:param network_access_control_list: Access control list for controlling traffic to the App
Service Environment.
:type network_access_control_list:
list[~azure.mgmt.web.v2020_06_01.models.NetworkAccessControlEntry]
:ivar environment_is_healthy: True/false indicating whether the App Service Environment is
healthy.
:vartype environment_is_healthy: bool
    :ivar environment_status: Detailed message with the results of the last check of the App
     Service Environment.
    :vartype environment_status: str
:ivar resource_group: Resource group of the App Service Environment.
:vartype resource_group: str
:param front_end_scale_factor: Scale factor for front-ends.
:type front_end_scale_factor: int
:ivar default_front_end_scale_factor: Default Scale Factor for FrontEnds.
:vartype default_front_end_scale_factor: int
:param api_management_account_id: API Management Account associated with the App Service
Environment.
:type api_management_account_id: str
:param suspended: :code:`<code>true</code>` if the App Service Environment is suspended;
otherwise, :code:`<code>false</code>`. The environment can be suspended, e.g. when the
management endpoint is no longer available
(most likely because NSG blocked the incoming traffic).
:type suspended: bool
    :param dynamic_cache_enabled: True/false indicating whether dynamic cache is enabled for the
     App Service Environment.
    :type dynamic_cache_enabled: bool
:param cluster_settings: Custom settings for changing the behavior of the App Service
Environment.
:type cluster_settings: list[~azure.mgmt.web.v2020_06_01.models.NameValuePair]
    :param user_whitelisted_ip_ranges: User-added IP ranges to whitelist on the ASE database.
    :type user_whitelisted_ip_ranges: list[str]
    :param has_linux_workers: Flag indicating whether the ASE has Linux workers.
    :type has_linux_workers: bool
:param ssl_cert_key_vault_id: Key Vault ID for ILB App Service Environment default SSL
certificate.
:type ssl_cert_key_vault_id: str
:param ssl_cert_key_vault_secret_name: Key Vault Secret Name for ILB App Service Environment
default SSL certificate.
:type ssl_cert_key_vault_secret_name: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'location': {'required': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
'status': {'readonly': True},
'database_edition': {'readonly': True},
'database_service_objective': {'readonly': True},
'upgrade_domains': {'readonly': True},
'subscription_id': {'readonly': True},
'last_action': {'readonly': True},
'last_action_result': {'readonly': True},
'allowed_multi_sizes': {'readonly': True},
'allowed_worker_sizes': {'readonly': True},
'maximum_number_of_machines': {'readonly': True},
'vip_mappings': {'readonly': True},
'environment_capacities': {'readonly': True},
'environment_is_healthy': {'readonly': True},
'environment_status': {'readonly': True},
'resource_group': {'readonly': True},
'default_front_end_scale_factor': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'name_properties_name': {'key': 'properties.name', 'type': 'str'},
'location_properties_location': {'key': 'properties.location', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'str'},
'vnet_name': {'key': 'properties.vnetName', 'type': 'str'},
'vnet_resource_group_name': {'key': 'properties.vnetResourceGroupName', 'type': 'str'},
'vnet_subnet_name': {'key': 'properties.vnetSubnetName', 'type': 'str'},
'virtual_network': {'key': 'properties.virtualNetwork', 'type': 'VirtualNetworkProfile'},
'internal_load_balancing_mode': {'key': 'properties.internalLoadBalancingMode', 'type': 'str'},
'multi_size': {'key': 'properties.multiSize', 'type': 'str'},
'multi_role_count': {'key': 'properties.multiRoleCount', 'type': 'int'},
'worker_pools': {'key': 'properties.workerPools', 'type': '[WorkerPool]'},
'ipssl_address_count': {'key': 'properties.ipsslAddressCount', 'type': 'int'},
'database_edition': {'key': 'properties.databaseEdition', 'type': 'str'},
'database_service_objective': {'key': 'properties.databaseServiceObjective', 'type': 'str'},
'upgrade_domains': {'key': 'properties.upgradeDomains', 'type': 'int'},
'subscription_id': {'key': 'properties.subscriptionId', 'type': 'str'},
'dns_suffix': {'key': 'properties.dnsSuffix', 'type': 'str'},
'last_action': {'key': 'properties.lastAction', 'type': 'str'},
'last_action_result': {'key': 'properties.lastActionResult', 'type': 'str'},
'allowed_multi_sizes': {'key': 'properties.allowedMultiSizes', 'type': 'str'},
'allowed_worker_sizes': {'key': 'properties.allowedWorkerSizes', 'type': 'str'},
'maximum_number_of_machines': {'key': 'properties.maximumNumberOfMachines', 'type': 'int'},
'vip_mappings': {'key': 'properties.vipMappings', 'type': '[VirtualIPMapping]'},
'environment_capacities': {'key': 'properties.environmentCapacities', 'type': '[StampCapacity]'},
'network_access_control_list': {'key': 'properties.networkAccessControlList', 'type': '[NetworkAccessControlEntry]'},
'environment_is_healthy': {'key': 'properties.environmentIsHealthy', 'type': 'bool'},
'environment_status': {'key': 'properties.environmentStatus', 'type': 'str'},
'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'},
'front_end_scale_factor': {'key': 'properties.frontEndScaleFactor', 'type': 'int'},
'default_front_end_scale_factor': {'key': 'properties.defaultFrontEndScaleFactor', 'type': 'int'},
'api_management_account_id': {'key': 'properties.apiManagementAccountId', 'type': 'str'},
'suspended': {'key': 'properties.suspended', 'type': 'bool'},
'dynamic_cache_enabled': {'key': 'properties.dynamicCacheEnabled', 'type': 'bool'},
'cluster_settings': {'key': 'properties.clusterSettings', 'type': '[NameValuePair]'},
'user_whitelisted_ip_ranges': {'key': 'properties.userWhitelistedIpRanges', 'type': '[str]'},
'has_linux_workers': {'key': 'properties.hasLinuxWorkers', 'type': 'bool'},
'ssl_cert_key_vault_id': {'key': 'properties.sslCertKeyVaultId', 'type': 'str'},
'ssl_cert_key_vault_secret_name': {'key': 'properties.sslCertKeyVaultSecretName', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
kind: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
name_properties_name: Optional[str] = None,
location_properties_location: Optional[str] = None,
vnet_name: Optional[str] = None,
vnet_resource_group_name: Optional[str] = None,
vnet_subnet_name: Optional[str] = None,
virtual_network: Optional["VirtualNetworkProfile"] = None,
internal_load_balancing_mode: Optional[Union[str, "LoadBalancingMode"]] = None,
multi_size: Optional[str] = None,
multi_role_count: Optional[int] = None,
worker_pools: Optional[List["WorkerPool"]] = None,
ipssl_address_count: Optional[int] = None,
dns_suffix: Optional[str] = None,
network_access_control_list: Optional[List["NetworkAccessControlEntry"]] = None,
front_end_scale_factor: Optional[int] = None,
api_management_account_id: Optional[str] = None,
suspended: Optional[bool] = None,
dynamic_cache_enabled: Optional[bool] = None,
cluster_settings: Optional[List["NameValuePair"]] = None,
user_whitelisted_ip_ranges: Optional[List[str]] = None,
has_linux_workers: Optional[bool] = None,
ssl_cert_key_vault_id: Optional[str] = None,
ssl_cert_key_vault_secret_name: Optional[str] = None,
**kwargs
):
super(AppServiceEnvironmentResource, self).__init__(kind=kind, location=location, tags=tags, **kwargs)
self.name_properties_name = name_properties_name
self.location_properties_location = location_properties_location
self.provisioning_state = None
self.status = None
self.vnet_name = vnet_name
self.vnet_resource_group_name = vnet_resource_group_name
self.vnet_subnet_name = vnet_subnet_name
self.virtual_network = virtual_network
self.internal_load_balancing_mode = internal_load_balancing_mode
self.multi_size = multi_size
self.multi_role_count = multi_role_count
self.worker_pools = worker_pools
self.ipssl_address_count = ipssl_address_count
self.database_edition = None
self.database_service_objective = None
self.upgrade_domains = None
self.subscription_id = None
self.dns_suffix = dns_suffix
self.last_action = None
self.last_action_result = None
self.allowed_multi_sizes = None
self.allowed_worker_sizes = None
self.maximum_number_of_machines = None
self.vip_mappings = None
self.environment_capacities = None
self.network_access_control_list = network_access_control_list
self.environment_is_healthy = None
self.environment_status = None
self.resource_group = None
self.front_end_scale_factor = front_end_scale_factor
self.default_front_end_scale_factor = None
self.api_management_account_id = api_management_account_id
self.suspended = suspended
self.dynamic_cache_enabled = dynamic_cache_enabled
self.cluster_settings = cluster_settings
self.user_whitelisted_ip_ranges = user_whitelisted_ip_ranges
self.has_linux_workers = has_linux_workers
self.ssl_cert_key_vault_id = ssl_cert_key_vault_id
self.ssl_cert_key_vault_secret_name = ssl_cert_key_vault_secret_name
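# Illustrative sketch: the ARM wrapper adds the top-level location and tags, while the inner
# App Service Environment name and location travel as properties (name_properties_name,
# location_properties_location). All concrete values are hypothetical placeholders.
def _example_ase_resource():  # pragma: no cover
    return AppServiceEnvironmentResource(
        location="West US",
        tags={"env": "example"},
        name_properties_name="example-ase",
        location_properties_location="West US",
    )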
class AppServicePlan(Resource):
"""App Service plan.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:param location: Required. Resource Location.
:type location: str
:ivar type: Resource type.
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param sku: Description of a SKU for a scalable resource.
:type sku: ~azure.mgmt.web.v2020_06_01.models.SkuDescription
:param worker_tier_name: Target worker tier assigned to the App Service plan.
:type worker_tier_name: str
:ivar status: App Service plan status. Possible values include: "Ready", "Pending", "Creating".
:vartype status: str or ~azure.mgmt.web.v2020_06_01.models.StatusOptions
:ivar subscription: App Service plan subscription.
:vartype subscription: str
:param hosting_environment_profile: Specification for the App Service Environment to use for
the App Service plan.
:type hosting_environment_profile: ~azure.mgmt.web.v2020_06_01.models.HostingEnvironmentProfile
:ivar maximum_number_of_workers: Maximum number of instances that can be assigned to this App
Service plan.
:vartype maximum_number_of_workers: int
:ivar geo_region: Geographical location for the App Service plan.
:vartype geo_region: str
:param per_site_scaling: If :code:`<code>true</code>`, apps assigned to this App Service plan
can be scaled independently.
If :code:`<code>false</code>`, apps assigned to this App Service plan will scale to all
instances of the plan.
:type per_site_scaling: bool
:param maximum_elastic_worker_count: Maximum number of total workers allowed for this
ElasticScaleEnabled App Service Plan.
:type maximum_elastic_worker_count: int
:ivar number_of_sites: Number of apps assigned to this App Service plan.
:vartype number_of_sites: int
:param is_spot: If :code:`<code>true</code>`, this App Service Plan owns spot instances.
:type is_spot: bool
:param spot_expiration_time: The time when the server farm expires. Valid only if it is a spot
server farm.
:type spot_expiration_time: ~datetime.datetime
:param free_offer_expiration_time: The time when the server farm free offer expires.
:type free_offer_expiration_time: ~datetime.datetime
:ivar resource_group: Resource group of the App Service plan.
:vartype resource_group: str
    :param reserved: :code:`<code>true</code>` if this is a Linux App Service plan;
     :code:`<code>false</code>` otherwise.
    :type reserved: bool
    :param is_xenon: Obsolete. :code:`<code>true</code>` if this is a Hyper-V container App Service
     plan; :code:`<code>false</code>` otherwise.
    :type is_xenon: bool
    :param hyper_v: :code:`<code>true</code>` if this is a Hyper-V container App Service plan;
     :code:`<code>false</code>` otherwise.
    :type hyper_v: bool
:param target_worker_count: Scaling worker count.
:type target_worker_count: int
:param target_worker_size_id: Scaling worker size ID.
:type target_worker_size_id: int
:ivar provisioning_state: Provisioning state of the App Service Environment. Possible values
include: "Succeeded", "Failed", "Canceled", "InProgress", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.web.v2020_06_01.models.ProvisioningState
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'location': {'required': True},
'type': {'readonly': True},
'status': {'readonly': True},
'subscription': {'readonly': True},
'maximum_number_of_workers': {'readonly': True},
'geo_region': {'readonly': True},
'number_of_sites': {'readonly': True},
'resource_group': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'SkuDescription'},
'worker_tier_name': {'key': 'properties.workerTierName', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'str'},
'subscription': {'key': 'properties.subscription', 'type': 'str'},
'hosting_environment_profile': {'key': 'properties.hostingEnvironmentProfile', 'type': 'HostingEnvironmentProfile'},
'maximum_number_of_workers': {'key': 'properties.maximumNumberOfWorkers', 'type': 'int'},
'geo_region': {'key': 'properties.geoRegion', 'type': 'str'},
'per_site_scaling': {'key': 'properties.perSiteScaling', 'type': 'bool'},
'maximum_elastic_worker_count': {'key': 'properties.maximumElasticWorkerCount', 'type': 'int'},
'number_of_sites': {'key': 'properties.numberOfSites', 'type': 'int'},
'is_spot': {'key': 'properties.isSpot', 'type': 'bool'},
'spot_expiration_time': {'key': 'properties.spotExpirationTime', 'type': 'iso-8601'},
'free_offer_expiration_time': {'key': 'properties.freeOfferExpirationTime', 'type': 'iso-8601'},
'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'},
'reserved': {'key': 'properties.reserved', 'type': 'bool'},
'is_xenon': {'key': 'properties.isXenon', 'type': 'bool'},
'hyper_v': {'key': 'properties.hyperV', 'type': 'bool'},
'target_worker_count': {'key': 'properties.targetWorkerCount', 'type': 'int'},
'target_worker_size_id': {'key': 'properties.targetWorkerSizeId', 'type': 'int'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
kind: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
sku: Optional["SkuDescription"] = None,
worker_tier_name: Optional[str] = None,
hosting_environment_profile: Optional["HostingEnvironmentProfile"] = None,
per_site_scaling: Optional[bool] = False,
maximum_elastic_worker_count: Optional[int] = None,
is_spot: Optional[bool] = None,
spot_expiration_time: Optional[datetime.datetime] = None,
free_offer_expiration_time: Optional[datetime.datetime] = None,
reserved: Optional[bool] = False,
is_xenon: Optional[bool] = False,
hyper_v: Optional[bool] = False,
target_worker_count: Optional[int] = None,
target_worker_size_id: Optional[int] = None,
**kwargs
):
super(AppServicePlan, self).__init__(kind=kind, location=location, tags=tags, **kwargs)
self.sku = sku
self.worker_tier_name = worker_tier_name
self.status = None
self.subscription = None
self.hosting_environment_profile = hosting_environment_profile
self.maximum_number_of_workers = None
self.geo_region = None
self.per_site_scaling = per_site_scaling
self.maximum_elastic_worker_count = maximum_elastic_worker_count
self.number_of_sites = None
self.is_spot = is_spot
self.spot_expiration_time = spot_expiration_time
self.free_offer_expiration_time = free_offer_expiration_time
self.resource_group = None
self.reserved = reserved
self.is_xenon = is_xenon
self.hyper_v = hyper_v
self.target_worker_count = target_worker_count
self.target_worker_size_id = target_worker_size_id
self.provisioning_state = None
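# Illustrative sketch: creating a Linux App Service plan. SkuDescription is another model in
# this module; its constructor arguments (name, tier, size, family, capacity) are assumptions,
# as are the concrete values. Per the docstrings below, a Linux plan needs reserved=True and
# kind set to "Linux".
def _example_app_service_plan():  # pragma: no cover
    return AppServicePlan(
        location="West US",
        kind="Linux",
        reserved=True,
        sku=SkuDescription(name="P1v2", tier="PremiumV2", size="P1v2", family="Pv2", capacity=1),
    )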
class AppServicePlanCollection(msrest.serialization.Model):
"""Collection of App Service plans.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.AppServicePlan]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[AppServicePlan]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["AppServicePlan"],
**kwargs
):
super(AppServicePlanCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class AppServicePlanPatchResource(ProxyOnlyResource):
"""ARM resource for a app service plan.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param worker_tier_name: Target worker tier assigned to the App Service plan.
:type worker_tier_name: str
:ivar status: App Service plan status. Possible values include: "Ready", "Pending", "Creating".
:vartype status: str or ~azure.mgmt.web.v2020_06_01.models.StatusOptions
:ivar subscription: App Service plan subscription.
:vartype subscription: str
:param hosting_environment_profile: Specification for the App Service Environment to use for
the App Service plan.
:type hosting_environment_profile: ~azure.mgmt.web.v2020_06_01.models.HostingEnvironmentProfile
:ivar maximum_number_of_workers: Maximum number of instances that can be assigned to this App
Service plan.
:vartype maximum_number_of_workers: int
:ivar geo_region: Geographical location for the App Service plan.
:vartype geo_region: str
:param per_site_scaling: If :code:`<code>true</code>`, apps assigned to this App Service plan
can be scaled independently.
If :code:`<code>false</code>`, apps assigned to this App Service plan will scale to all
instances of the plan.
:type per_site_scaling: bool
:param maximum_elastic_worker_count: Maximum number of total workers allowed for this
ElasticScaleEnabled App Service Plan.
:type maximum_elastic_worker_count: int
:ivar number_of_sites: Number of apps assigned to this App Service plan.
:vartype number_of_sites: int
:param is_spot: If :code:`<code>true</code>`, this App Service Plan owns spot instances.
:type is_spot: bool
:param spot_expiration_time: The time when the server farm expires. Valid only if it is a spot
server farm.
:type spot_expiration_time: ~datetime.datetime
:param free_offer_expiration_time: The time when the server farm free offer expires.
:type free_offer_expiration_time: ~datetime.datetime
:ivar resource_group: Resource group of the App Service plan.
:vartype resource_group: str
:param reserved: This needs to be set to :code:`<code>true</code>` when creating a Linux App
Service Plan, along with :code:`<code>kind</code>` set to :code:`<code>Linux</code>`. It should
be :code:`<code>false</code>` otherwise.
:type reserved: bool
:param is_xenon: Obsolete: If Hyper-V container app service plan :code:`<code>true</code>`,
:code:`<code>false</code>` otherwise.
:type is_xenon: bool
:param hyper_v: If Hyper-V container app service plan :code:`<code>true</code>`,
:code:`<code>false</code>` otherwise.
:type hyper_v: bool
:param target_worker_count: Scaling worker count.
:type target_worker_count: int
:param target_worker_size_id: Scaling worker size ID.
:type target_worker_size_id: int
:ivar provisioning_state: Provisioning state of the App Service Environment. Possible values
include: "Succeeded", "Failed", "Canceled", "InProgress", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.web.v2020_06_01.models.ProvisioningState
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'status': {'readonly': True},
'subscription': {'readonly': True},
'maximum_number_of_workers': {'readonly': True},
'geo_region': {'readonly': True},
'number_of_sites': {'readonly': True},
'resource_group': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'worker_tier_name': {'key': 'properties.workerTierName', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'str'},
'subscription': {'key': 'properties.subscription', 'type': 'str'},
'hosting_environment_profile': {'key': 'properties.hostingEnvironmentProfile', 'type': 'HostingEnvironmentProfile'},
'maximum_number_of_workers': {'key': 'properties.maximumNumberOfWorkers', 'type': 'int'},
'geo_region': {'key': 'properties.geoRegion', 'type': 'str'},
'per_site_scaling': {'key': 'properties.perSiteScaling', 'type': 'bool'},
'maximum_elastic_worker_count': {'key': 'properties.maximumElasticWorkerCount', 'type': 'int'},
'number_of_sites': {'key': 'properties.numberOfSites', 'type': 'int'},
'is_spot': {'key': 'properties.isSpot', 'type': 'bool'},
'spot_expiration_time': {'key': 'properties.spotExpirationTime', 'type': 'iso-8601'},
'free_offer_expiration_time': {'key': 'properties.freeOfferExpirationTime', 'type': 'iso-8601'},
'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'},
'reserved': {'key': 'properties.reserved', 'type': 'bool'},
'is_xenon': {'key': 'properties.isXenon', 'type': 'bool'},
'hyper_v': {'key': 'properties.hyperV', 'type': 'bool'},
'target_worker_count': {'key': 'properties.targetWorkerCount', 'type': 'int'},
'target_worker_size_id': {'key': 'properties.targetWorkerSizeId', 'type': 'int'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
worker_tier_name: Optional[str] = None,
hosting_environment_profile: Optional["HostingEnvironmentProfile"] = None,
per_site_scaling: Optional[bool] = False,
maximum_elastic_worker_count: Optional[int] = None,
is_spot: Optional[bool] = None,
spot_expiration_time: Optional[datetime.datetime] = None,
free_offer_expiration_time: Optional[datetime.datetime] = None,
reserved: Optional[bool] = False,
is_xenon: Optional[bool] = False,
hyper_v: Optional[bool] = False,
target_worker_count: Optional[int] = None,
target_worker_size_id: Optional[int] = None,
**kwargs
):
super(AppServicePlanPatchResource, self).__init__(kind=kind, **kwargs)
self.worker_tier_name = worker_tier_name
self.status = None
self.subscription = None
self.hosting_environment_profile = hosting_environment_profile
self.maximum_number_of_workers = None
self.geo_region = None
self.per_site_scaling = per_site_scaling
self.maximum_elastic_worker_count = maximum_elastic_worker_count
self.number_of_sites = None
self.is_spot = is_spot
self.spot_expiration_time = spot_expiration_time
self.free_offer_expiration_time = free_offer_expiration_time
self.resource_group = None
self.reserved = reserved
self.is_xenon = is_xenon
self.hyper_v = hyper_v
self.target_worker_count = target_worker_count
self.target_worker_size_id = target_worker_size_id
self.provisioning_state = None
class ArmIdWrapper(msrest.serialization.Model):
"""A wrapper for an ARM resource id.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id:
:vartype id: str
"""
_validation = {
'id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ArmIdWrapper, self).__init__(**kwargs)
self.id = None
class AuthPlatform(ProxyOnlyResource):
"""AuthPlatform.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param enabled:
:type enabled: bool
:param runtime_version:
:type runtime_version: str
:param config_file_path:
:type config_file_path: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
'runtime_version': {'key': 'properties.runtimeVersion', 'type': 'str'},
'config_file_path': {'key': 'properties.configFilePath', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
enabled: Optional[bool] = None,
runtime_version: Optional[str] = None,
config_file_path: Optional[str] = None,
**kwargs
):
super(AuthPlatform, self).__init__(kind=kind, **kwargs)
self.enabled = enabled
self.runtime_version = runtime_version
self.config_file_path = config_file_path
class AutoHealActions(msrest.serialization.Model):
"""Actions which to take by the auto-heal module when a rule is triggered.
:param action_type: Predefined action to be taken. Possible values include: "Recycle",
"LogEvent", "CustomAction".
:type action_type: str or ~azure.mgmt.web.v2020_06_01.models.AutoHealActionType
:param custom_action: Custom action to be taken.
:type custom_action: ~azure.mgmt.web.v2020_06_01.models.AutoHealCustomAction
:param min_process_execution_time: Minimum time the process must execute
before taking the action.
:type min_process_execution_time: str
"""
_attribute_map = {
'action_type': {'key': 'actionType', 'type': 'str'},
'custom_action': {'key': 'customAction', 'type': 'AutoHealCustomAction'},
'min_process_execution_time': {'key': 'minProcessExecutionTime', 'type': 'str'},
}
def __init__(
self,
*,
action_type: Optional[Union[str, "AutoHealActionType"]] = None,
custom_action: Optional["AutoHealCustomAction"] = None,
min_process_execution_time: Optional[str] = None,
**kwargs
):
super(AutoHealActions, self).__init__(**kwargs)
self.action_type = action_type
self.custom_action = custom_action
self.min_process_execution_time = min_process_execution_time
class AutoHealCustomAction(msrest.serialization.Model):
"""Custom action to be executed
when an auto heal rule is triggered.
:param exe: Executable to be run.
:type exe: str
:param parameters: Parameters for the executable.
:type parameters: str
"""
_attribute_map = {
'exe': {'key': 'exe', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': 'str'},
}
def __init__(
self,
*,
exe: Optional[str] = None,
parameters: Optional[str] = None,
**kwargs
):
super(AutoHealCustomAction, self).__init__(**kwargs)
self.exe = exe
self.parameters = parameters
class AutoHealRules(msrest.serialization.Model):
"""Rules that can be defined for auto-heal.
:param triggers: Conditions that describe when to execute the auto-heal actions.
:type triggers: ~azure.mgmt.web.v2020_06_01.models.AutoHealTriggers
:param actions: Actions to be executed when a rule is triggered.
:type actions: ~azure.mgmt.web.v2020_06_01.models.AutoHealActions
"""
_attribute_map = {
'triggers': {'key': 'triggers', 'type': 'AutoHealTriggers'},
'actions': {'key': 'actions', 'type': 'AutoHealActions'},
}
def __init__(
self,
*,
triggers: Optional["AutoHealTriggers"] = None,
actions: Optional["AutoHealActions"] = None,
**kwargs
):
super(AutoHealRules, self).__init__(**kwargs)
self.triggers = triggers
self.actions = actions
class AutoHealTriggers(msrest.serialization.Model):
"""Triggers for auto-heal.
:param requests: A rule based on total requests.
:type requests: ~azure.mgmt.web.v2020_06_01.models.RequestsBasedTrigger
:param private_bytes_in_kb: A rule based on private bytes.
:type private_bytes_in_kb: int
:param status_codes: A rule based on status codes.
:type status_codes: list[~azure.mgmt.web.v2020_06_01.models.StatusCodesBasedTrigger]
:param slow_requests: A rule based on request execution time.
:type slow_requests: ~azure.mgmt.web.v2020_06_01.models.SlowRequestsBasedTrigger
"""
_attribute_map = {
'requests': {'key': 'requests', 'type': 'RequestsBasedTrigger'},
'private_bytes_in_kb': {'key': 'privateBytesInKB', 'type': 'int'},
'status_codes': {'key': 'statusCodes', 'type': '[StatusCodesBasedTrigger]'},
'slow_requests': {'key': 'slowRequests', 'type': 'SlowRequestsBasedTrigger'},
}
def __init__(
self,
*,
requests: Optional["RequestsBasedTrigger"] = None,
private_bytes_in_kb: Optional[int] = None,
status_codes: Optional[List["StatusCodesBasedTrigger"]] = None,
slow_requests: Optional["SlowRequestsBasedTrigger"] = None,
**kwargs
):
super(AutoHealTriggers, self).__init__(**kwargs)
self.requests = requests
self.private_bytes_in_kb = private_bytes_in_kb
self.status_codes = status_codes
self.slow_requests = slow_requests
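# Usage sketch (illustrative): composing auto-heal rules from the trigger and action
# models above. The threshold value and the hh:mm:ss duration format are assumptions,
# not values taken from this module.
#
#   rules = AutoHealRules(
#       triggers=AutoHealTriggers(private_bytes_in_kb=512000),  # trigger at roughly 500 MB of private bytes
#       actions=AutoHealActions(
#           action_type="Recycle",
#           min_process_execution_time="00:01:00",  # assumed duration string format
#       ),
#   )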
class AzureActiveDirectory(ProxyOnlyResource):
"""AzureActiveDirectory.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param enabled:
:type enabled: bool
:param registration:
:type registration: ~azure.mgmt.web.v2020_06_01.models.AzureActiveDirectoryRegistration
:param login:
:type login: ~azure.mgmt.web.v2020_06_01.models.AzureActiveDirectoryLogin
:param validation:
:type validation: ~azure.mgmt.web.v2020_06_01.models.AzureActiveDirectoryValidation
:param is_auto_provisioned:
:type is_auto_provisioned: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
'registration': {'key': 'properties.registration', 'type': 'AzureActiveDirectoryRegistration'},
'login': {'key': 'properties.login', 'type': 'AzureActiveDirectoryLogin'},
'validation': {'key': 'properties.validation', 'type': 'AzureActiveDirectoryValidation'},
'is_auto_provisioned': {'key': 'properties.isAutoProvisioned', 'type': 'bool'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
enabled: Optional[bool] = None,
registration: Optional["AzureActiveDirectoryRegistration"] = None,
login: Optional["AzureActiveDirectoryLogin"] = None,
validation: Optional["AzureActiveDirectoryValidation"] = None,
is_auto_provisioned: Optional[bool] = None,
**kwargs
):
super(AzureActiveDirectory, self).__init__(kind=kind, **kwargs)
self.enabled = enabled
self.registration = registration
self.login = login
self.validation = validation
self.is_auto_provisioned = is_auto_provisioned
class AzureActiveDirectoryLogin(ProxyOnlyResource):
"""AzureActiveDirectoryLogin.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param disable_www_authenticate:
:type disable_www_authenticate: bool
:param login_parameters:
:type login_parameters: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'disable_www_authenticate': {'key': 'properties.disableWWWAuthenticate', 'type': 'bool'},
'login_parameters': {'key': 'properties.loginParameters', 'type': '[str]'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
disable_www_authenticate: Optional[bool] = None,
login_parameters: Optional[List[str]] = None,
**kwargs
):
super(AzureActiveDirectoryLogin, self).__init__(kind=kind, **kwargs)
self.disable_www_authenticate = disable_www_authenticate
self.login_parameters = login_parameters
class AzureActiveDirectoryRegistration(ProxyOnlyResource):
"""AzureActiveDirectoryRegistration.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param open_id_issuer:
:type open_id_issuer: str
:param client_id:
:type client_id: str
:param client_secret_setting_name:
:type client_secret_setting_name: str
:param client_secret_certificate_thumbprint:
:type client_secret_certificate_thumbprint: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'open_id_issuer': {'key': 'properties.openIdIssuer', 'type': 'str'},
'client_id': {'key': 'properties.clientId', 'type': 'str'},
'client_secret_setting_name': {'key': 'properties.clientSecretSettingName', 'type': 'str'},
'client_secret_certificate_thumbprint': {'key': 'properties.clientSecretCertificateThumbprint', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
open_id_issuer: Optional[str] = None,
client_id: Optional[str] = None,
client_secret_setting_name: Optional[str] = None,
client_secret_certificate_thumbprint: Optional[str] = None,
**kwargs
):
super(AzureActiveDirectoryRegistration, self).__init__(kind=kind, **kwargs)
self.open_id_issuer = open_id_issuer
self.client_id = client_id
self.client_secret_setting_name = client_secret_setting_name
self.client_secret_certificate_thumbprint = client_secret_certificate_thumbprint
class AzureActiveDirectoryValidation(ProxyOnlyResource):
"""AzureActiveDirectoryValidation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param jwt_claim_checks:
:type jwt_claim_checks: ~azure.mgmt.web.v2020_06_01.models.JwtClaimChecks
:param allowed_audiences:
:type allowed_audiences: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'jwt_claim_checks': {'key': 'properties.jwtClaimChecks', 'type': 'JwtClaimChecks'},
'allowed_audiences': {'key': 'properties.allowedAudiences', 'type': '[str]'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
jwt_claim_checks: Optional["JwtClaimChecks"] = None,
allowed_audiences: Optional[List[str]] = None,
**kwargs
):
super(AzureActiveDirectoryValidation, self).__init__(kind=kind, **kwargs)
self.jwt_claim_checks = jwt_claim_checks
self.allowed_audiences = allowed_audiences
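# Usage sketch (illustrative): wiring the Azure Active Directory auth models together.
# The issuer, client, and audience values are placeholders, not defaults from this module.
#
#   aad = AzureActiveDirectory(
#       enabled=True,
#       registration=AzureActiveDirectoryRegistration(
#           open_id_issuer="https://login.microsoftonline.com/<tenant-id>/v2.0",
#           client_id="<app-client-id>",
#           client_secret_setting_name="AAD_CLIENT_SECRET",
#       ),
#       login=AzureActiveDirectoryLogin(disable_www_authenticate=False),
#       validation=AzureActiveDirectoryValidation(allowed_audiences=["api://<app-client-id>"]),
#   )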
class AzureBlobStorageApplicationLogsConfig(msrest.serialization.Model):
"""Application logs azure blob storage configuration.
:param level: Log level. Possible values include: "Off", "Verbose", "Information", "Warning",
"Error".
:type level: str or ~azure.mgmt.web.v2020_06_01.models.LogLevel
:param sas_url: SAS URL to an Azure Blob Storage container with read/write/list/delete permissions.
:type sas_url: str
:param retention_in_days: Retention in days.
Remove blobs older than X days.
0 or lower means no retention.
:type retention_in_days: int
"""
_attribute_map = {
'level': {'key': 'level', 'type': 'str'},
'sas_url': {'key': 'sasUrl', 'type': 'str'},
'retention_in_days': {'key': 'retentionInDays', 'type': 'int'},
}
def __init__(
self,
*,
level: Optional[Union[str, "LogLevel"]] = None,
sas_url: Optional[str] = None,
retention_in_days: Optional[int] = None,
**kwargs
):
super(AzureBlobStorageApplicationLogsConfig, self).__init__(**kwargs)
self.level = level
self.sas_url = sas_url
self.retention_in_days = retention_in_days
class AzureBlobStorageHttpLogsConfig(msrest.serialization.Model):
"""Http logs to azure blob storage configuration.
:param sas_url: SAS URL to an Azure Blob Storage container with read/write/list/delete permissions.
:type sas_url: str
:param retention_in_days: Retention in days.
Remove blobs older than X days.
0 or lower means no retention.
:type retention_in_days: int
:param enabled: True if configuration is enabled, false if it is disabled and null if
configuration is not set.
:type enabled: bool
"""
_attribute_map = {
'sas_url': {'key': 'sasUrl', 'type': 'str'},
'retention_in_days': {'key': 'retentionInDays', 'type': 'int'},
'enabled': {'key': 'enabled', 'type': 'bool'},
}
def __init__(
self,
*,
sas_url: Optional[str] = None,
retention_in_days: Optional[int] = None,
enabled: Optional[bool] = None,
**kwargs
):
super(AzureBlobStorageHttpLogsConfig, self).__init__(**kwargs)
self.sas_url = sas_url
self.retention_in_days = retention_in_days
self.enabled = enabled
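# Usage sketch (illustrative): HTTP logs shipped to blob storage with a 7-day retention.
# The SAS URL is a placeholder.
#
#   http_logs_blob = AzureBlobStorageHttpLogsConfig(
#       sas_url="https://<account>.blob.core.windows.net/<container>?<sas-token>",
#       retention_in_days=7,
#       enabled=True,
#   )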
class AzureStorageInfoValue(msrest.serialization.Model):
"""Azure Files or Blob Storage access information value for dictionary storage.
Variables are only populated by the server, and will be ignored when sending a request.
:param type: Type of storage. Possible values include: "AzureFiles", "AzureBlob".
:type type: str or ~azure.mgmt.web.v2020_06_01.models.AzureStorageType
:param account_name: Name of the storage account.
:type account_name: str
:param share_name: Name of the file share (container name, for Blob storage).
:type share_name: str
:param access_key: Access key for the storage account.
:type access_key: str
:param mount_path: Path to mount the storage within the site's runtime environment.
:type mount_path: str
:ivar state: State of the storage account. Possible values include: "Ok", "InvalidCredentials",
"InvalidShare".
:vartype state: str or ~azure.mgmt.web.v2020_06_01.models.AzureStorageState
"""
_validation = {
'state': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'account_name': {'key': 'accountName', 'type': 'str'},
'share_name': {'key': 'shareName', 'type': 'str'},
'access_key': {'key': 'accessKey', 'type': 'str'},
'mount_path': {'key': 'mountPath', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
}
def __init__(
self,
*,
type: Optional[Union[str, "AzureStorageType"]] = None,
account_name: Optional[str] = None,
share_name: Optional[str] = None,
access_key: Optional[str] = None,
mount_path: Optional[str] = None,
**kwargs
):
super(AzureStorageInfoValue, self).__init__(**kwargs)
self.type = type
self.account_name = account_name
self.share_name = share_name
self.access_key = access_key
self.mount_path = mount_path
self.state = None
class AzureStoragePropertyDictionaryResource(ProxyOnlyResource):
"""AzureStorageInfo dictionary resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param properties: Azure storage accounts.
:type properties: dict[str, ~azure.mgmt.web.v2020_06_01.models.AzureStorageInfoValue]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{AzureStorageInfoValue}'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
properties: Optional[Dict[str, "AzureStorageInfoValue"]] = None,
**kwargs
):
super(AzureStoragePropertyDictionaryResource, self).__init__(kind=kind, **kwargs)
self.properties = properties
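# Usage sketch (illustrative): declaring an Azure Files mount for a site via the
# dictionary resource above. Account, share, key, and path values are placeholders.
#
#   mounts = AzureStoragePropertyDictionaryResource(
#       properties={
#           "media": AzureStorageInfoValue(
#               type="AzureFiles",
#               account_name="<storage-account>",
#               share_name="media-share",
#               access_key="<storage-key>",
#               mount_path="/mounts/media",
#           ),
#       },
#   )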
class AzureTableStorageApplicationLogsConfig(msrest.serialization.Model):
"""Application logs to Azure table storage configuration.
All required parameters must be populated in order to send to Azure.
:param level: Log level. Possible values include: "Off", "Verbose", "Information", "Warning",
"Error".
:type level: str or ~azure.mgmt.web.v2020_06_01.models.LogLevel
:param sas_url: Required. SAS URL to an Azure table with add/query/delete permissions.
:type sas_url: str
"""
_validation = {
'sas_url': {'required': True},
}
_attribute_map = {
'level': {'key': 'level', 'type': 'str'},
'sas_url': {'key': 'sasUrl', 'type': 'str'},
}
def __init__(
self,
*,
sas_url: str,
level: Optional[Union[str, "LogLevel"]] = None,
**kwargs
):
super(AzureTableStorageApplicationLogsConfig, self).__init__(**kwargs)
self.level = level
self.sas_url = sas_url
class BackupItem(ProxyOnlyResource):
"""Backup description.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar backup_id: Id of the backup.
:vartype backup_id: int
:ivar storage_account_url: SAS URL for the storage account container which contains this
backup.
:vartype storage_account_url: str
:ivar blob_name: Name of the blob which contains data for this backup.
:vartype blob_name: str
:ivar name_properties_name: Name of this backup.
:vartype name_properties_name: str
:ivar status: Backup status. Possible values include: "InProgress", "Failed", "Succeeded",
"TimedOut", "Created", "Skipped", "PartiallySucceeded", "DeleteInProgress", "DeleteFailed",
"Deleted".
:vartype status: str or ~azure.mgmt.web.v2020_06_01.models.BackupItemStatus
:ivar size_in_bytes: Size of the backup in bytes.
:vartype size_in_bytes: long
:ivar created: Timestamp of the backup creation.
:vartype created: ~datetime.datetime
:ivar log: Details regarding this backup. Might contain an error message.
:vartype log: str
:ivar databases: List of databases included in the backup.
:vartype databases: list[~azure.mgmt.web.v2020_06_01.models.DatabaseBackupSetting]
:ivar scheduled: True if this backup has been created due to a schedule being triggered.
:vartype scheduled: bool
:ivar last_restore_time_stamp: Timestamp of a last restore operation which used this backup.
:vartype last_restore_time_stamp: ~datetime.datetime
:ivar finished_time_stamp: Timestamp when this backup finished.
:vartype finished_time_stamp: ~datetime.datetime
:ivar correlation_id: Unique correlation identifier. Please use this along with the timestamp
while communicating with Azure support.
:vartype correlation_id: str
:ivar website_size_in_bytes: Size of the original web app which has been backed up.
:vartype website_size_in_bytes: long
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'backup_id': {'readonly': True},
'storage_account_url': {'readonly': True},
'blob_name': {'readonly': True},
'name_properties_name': {'readonly': True},
'status': {'readonly': True},
'size_in_bytes': {'readonly': True},
'created': {'readonly': True},
'log': {'readonly': True},
'databases': {'readonly': True},
'scheduled': {'readonly': True},
'last_restore_time_stamp': {'readonly': True},
'finished_time_stamp': {'readonly': True},
'correlation_id': {'readonly': True},
'website_size_in_bytes': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'backup_id': {'key': 'properties.id', 'type': 'int'},
'storage_account_url': {'key': 'properties.storageAccountUrl', 'type': 'str'},
'blob_name': {'key': 'properties.blobName', 'type': 'str'},
'name_properties_name': {'key': 'properties.name', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'str'},
'size_in_bytes': {'key': 'properties.sizeInBytes', 'type': 'long'},
'created': {'key': 'properties.created', 'type': 'iso-8601'},
'log': {'key': 'properties.log', 'type': 'str'},
'databases': {'key': 'properties.databases', 'type': '[DatabaseBackupSetting]'},
'scheduled': {'key': 'properties.scheduled', 'type': 'bool'},
'last_restore_time_stamp': {'key': 'properties.lastRestoreTimeStamp', 'type': 'iso-8601'},
'finished_time_stamp': {'key': 'properties.finishedTimeStamp', 'type': 'iso-8601'},
'correlation_id': {'key': 'properties.correlationId', 'type': 'str'},
'website_size_in_bytes': {'key': 'properties.websiteSizeInBytes', 'type': 'long'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(BackupItem, self).__init__(kind=kind, **kwargs)
self.backup_id = None
self.storage_account_url = None
self.blob_name = None
self.name_properties_name = None
self.status = None
self.size_in_bytes = None
self.created = None
self.log = None
self.databases = None
self.scheduled = None
self.last_restore_time_stamp = None
self.finished_time_stamp = None
self.correlation_id = None
self.website_size_in_bytes = None
class BackupItemCollection(msrest.serialization.Model):
"""Collection of backup items.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.BackupItem]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[BackupItem]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["BackupItem"],
**kwargs
):
super(BackupItemCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class BackupRequest(ProxyOnlyResource):
"""Description of a backup which will be performed.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param backup_name: Name of the backup.
:type backup_name: str
:param enabled: True if the backup schedule is enabled (must be included in that case), false
if the backup schedule should be disabled.
:type enabled: bool
:param storage_account_url: SAS URL to the container.
:type storage_account_url: str
:param backup_schedule: Schedule for the backup if it is executed periodically.
:type backup_schedule: ~azure.mgmt.web.v2020_06_01.models.BackupSchedule
:param databases: Databases included in the backup.
:type databases: list[~azure.mgmt.web.v2020_06_01.models.DatabaseBackupSetting]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'backup_name': {'key': 'properties.backupName', 'type': 'str'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
'storage_account_url': {'key': 'properties.storageAccountUrl', 'type': 'str'},
'backup_schedule': {'key': 'properties.backupSchedule', 'type': 'BackupSchedule'},
'databases': {'key': 'properties.databases', 'type': '[DatabaseBackupSetting]'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
backup_name: Optional[str] = None,
enabled: Optional[bool] = None,
storage_account_url: Optional[str] = None,
backup_schedule: Optional["BackupSchedule"] = None,
databases: Optional[List["DatabaseBackupSetting"]] = None,
**kwargs
):
super(BackupRequest, self).__init__(kind=kind, **kwargs)
self.backup_name = backup_name
self.enabled = enabled
self.storage_account_url = storage_account_url
self.backup_schedule = backup_schedule
self.databases = databases
class BackupSchedule(msrest.serialization.Model):
"""Description of a backup schedule. Describes how often should be the backup performed and what should be the retention policy.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param frequency_interval: Required. How often the backup should be executed (e.g. for weekly
backup, this should be set to 7 and FrequencyUnit should be set to Day).
:type frequency_interval: int
:param frequency_unit: Required. The unit of time for how often the backup should be executed
(e.g. for weekly backup, this should be set to Day and FrequencyInterval should be set to 7).
Possible values include: "Day", "Hour". Default value: "Day".
:type frequency_unit: str or ~azure.mgmt.web.v2020_06_01.models.FrequencyUnit
:param keep_at_least_one_backup: Required. True if the retention policy should always keep at
least one backup in the storage account, regardless of how old it is; false otherwise.
:type keep_at_least_one_backup: bool
:param retention_period_in_days: Required. After how many days backups should be deleted.
:type retention_period_in_days: int
:param start_time: When the schedule should start working.
:type start_time: ~datetime.datetime
:ivar last_execution_time: Last time when this schedule was triggered.
:vartype last_execution_time: ~datetime.datetime
"""
_validation = {
'frequency_interval': {'required': True},
'frequency_unit': {'required': True},
'keep_at_least_one_backup': {'required': True},
'retention_period_in_days': {'required': True},
'last_execution_time': {'readonly': True},
}
_attribute_map = {
'frequency_interval': {'key': 'frequencyInterval', 'type': 'int'},
'frequency_unit': {'key': 'frequencyUnit', 'type': 'str'},
'keep_at_least_one_backup': {'key': 'keepAtLeastOneBackup', 'type': 'bool'},
'retention_period_in_days': {'key': 'retentionPeriodInDays', 'type': 'int'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'last_execution_time': {'key': 'lastExecutionTime', 'type': 'iso-8601'},
}
def __init__(
self,
*,
frequency_interval: int = 7,
frequency_unit: Union[str, "FrequencyUnit"] = "Day",
keep_at_least_one_backup: bool = True,
retention_period_in_days: int = 30,
start_time: Optional[datetime.datetime] = None,
**kwargs
):
super(BackupSchedule, self).__init__(**kwargs)
self.frequency_interval = frequency_interval
self.frequency_unit = frequency_unit
self.keep_at_least_one_backup = keep_at_least_one_backup
self.retention_period_in_days = retention_period_in_days
self.start_time = start_time
self.last_execution_time = None
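# Usage sketch (illustrative): a weekly backup request that keeps backups for 30 days
# (frequency_interval=7 with frequency_unit="Day" means weekly, per the docstring above).
# The storage SAS URL is a placeholder.
#
#   backup = BackupRequest(
#       backup_name="weekly",
#       enabled=True,
#       storage_account_url="https://<account>.blob.core.windows.net/<container>?<sas-token>",
#       backup_schedule=BackupSchedule(
#           frequency_interval=7,
#           frequency_unit="Day",
#           keep_at_least_one_backup=True,
#           retention_period_in_days=30,
#       ),
#   )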
class BillingMeter(ProxyOnlyResource):
"""App Service billing entity that contains information about meter which the Azure billing system utilizes to charge users for services.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param meter_id: Meter GUID onboarded in Commerce.
:type meter_id: str
:param billing_location: Azure Location of billable resource.
:type billing_location: str
:param short_name: Short Name from App Service Azure pricing Page.
:type short_name: str
:param friendly_name: Friendly name of the meter.
:type friendly_name: str
:param resource_type: App Service ResourceType meter used for.
:type resource_type: str
:param os_type: App Service OS type meter used for.
:type os_type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'meter_id': {'key': 'properties.meterId', 'type': 'str'},
'billing_location': {'key': 'properties.billingLocation', 'type': 'str'},
'short_name': {'key': 'properties.shortName', 'type': 'str'},
'friendly_name': {'key': 'properties.friendlyName', 'type': 'str'},
'resource_type': {'key': 'properties.resourceType', 'type': 'str'},
'os_type': {'key': 'properties.osType', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
meter_id: Optional[str] = None,
billing_location: Optional[str] = None,
short_name: Optional[str] = None,
friendly_name: Optional[str] = None,
resource_type: Optional[str] = None,
os_type: Optional[str] = None,
**kwargs
):
super(BillingMeter, self).__init__(kind=kind, **kwargs)
self.meter_id = meter_id
self.billing_location = billing_location
self.short_name = short_name
self.friendly_name = friendly_name
self.resource_type = resource_type
self.os_type = os_type
class BillingMeterCollection(msrest.serialization.Model):
"""Collection of Billing Meters.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.BillingMeter]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[BillingMeter]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["BillingMeter"],
**kwargs
):
super(BillingMeterCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class BlobStorageTokenStore(ProxyOnlyResource):
"""BlobStorageTokenStore.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param sas_url_setting_name:
:type sas_url_setting_name: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'sas_url_setting_name': {'key': 'properties.sasUrlSettingName', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
sas_url_setting_name: Optional[str] = None,
**kwargs
):
super(BlobStorageTokenStore, self).__init__(kind=kind, **kwargs)
self.sas_url_setting_name = sas_url_setting_name
class Capability(msrest.serialization.Model):
"""Describes the capabilities/features allowed for a specific SKU.
:param name: Name of the SKU capability.
:type name: str
:param value: Value of the SKU capability.
:type value: str
:param reason: Reason of the SKU capability.
:type reason: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
'reason': {'key': 'reason', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
value: Optional[str] = None,
reason: Optional[str] = None,
**kwargs
):
super(Capability, self).__init__(**kwargs)
self.name = name
self.value = value
self.reason = reason
class Certificate(Resource):
"""SSL certificate for an app.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:param location: Required. Resource Location.
:type location: str
:ivar type: Resource type.
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:ivar friendly_name: Friendly name of the certificate.
:vartype friendly_name: str
:ivar subject_name: Subject name of the certificate.
:vartype subject_name: str
:param host_names: Host names the certificate applies to.
:type host_names: list[str]
:param pfx_blob: Pfx blob.
:type pfx_blob: bytearray
:ivar site_name: App name.
:vartype site_name: str
:ivar self_link: Self link.
:vartype self_link: str
:ivar issuer: Certificate issuer.
:vartype issuer: str
:ivar issue_date: Certificate issue Date.
:vartype issue_date: ~datetime.datetime
:ivar expiration_date: Certificate expiration date.
:vartype expiration_date: ~datetime.datetime
:param password: Certificate password.
:type password: str
:ivar thumbprint: Certificate thumbprint.
:vartype thumbprint: str
:ivar valid: Is the certificate valid?.
:vartype valid: bool
:ivar cer_blob: Raw bytes of .cer file.
:vartype cer_blob: bytearray
:ivar public_key_hash: Public key hash.
:vartype public_key_hash: str
:ivar hosting_environment_profile: Specification for the App Service Environment to use for the
certificate.
:vartype hosting_environment_profile:
~azure.mgmt.web.v2020_06_01.models.HostingEnvironmentProfile
:param key_vault_id: Key Vault Csm resource Id.
:type key_vault_id: str
:param key_vault_secret_name: Key Vault secret name.
:type key_vault_secret_name: str
:ivar key_vault_secret_status: Status of the Key Vault secret. Possible values include:
"Initialized", "WaitingOnCertificateOrder", "Succeeded", "CertificateOrderFailed",
"OperationNotPermittedOnKeyVault", "AzureServiceUnauthorizedToAccessKeyVault",
"KeyVaultDoesNotExist", "KeyVaultSecretDoesNotExist", "UnknownError", "ExternalPrivateKey",
"Unknown".
:vartype key_vault_secret_status: str or
~azure.mgmt.web.v2020_06_01.models.KeyVaultSecretStatus
:param server_farm_id: Resource ID of the associated App Service plan, formatted as:
"/subscriptions/{subscriptionID}/resourceGroups/{groupName}/providers/Microsoft.Web/serverfarms/{appServicePlanName}".
:type server_farm_id: str
:param canonical_name: CNAME of the certificate to be issued via free certificate.
:type canonical_name: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'location': {'required': True},
'type': {'readonly': True},
'friendly_name': {'readonly': True},
'subject_name': {'readonly': True},
'site_name': {'readonly': True},
'self_link': {'readonly': True},
'issuer': {'readonly': True},
'issue_date': {'readonly': True},
'expiration_date': {'readonly': True},
'thumbprint': {'readonly': True},
'valid': {'readonly': True},
'cer_blob': {'readonly': True},
'public_key_hash': {'readonly': True},
'hosting_environment_profile': {'readonly': True},
'key_vault_secret_status': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'friendly_name': {'key': 'properties.friendlyName', 'type': 'str'},
'subject_name': {'key': 'properties.subjectName', 'type': 'str'},
'host_names': {'key': 'properties.hostNames', 'type': '[str]'},
'pfx_blob': {'key': 'properties.pfxBlob', 'type': 'bytearray'},
'site_name': {'key': 'properties.siteName', 'type': 'str'},
'self_link': {'key': 'properties.selfLink', 'type': 'str'},
'issuer': {'key': 'properties.issuer', 'type': 'str'},
'issue_date': {'key': 'properties.issueDate', 'type': 'iso-8601'},
'expiration_date': {'key': 'properties.expirationDate', 'type': 'iso-8601'},
'password': {'key': 'properties.password', 'type': 'str'},
'thumbprint': {'key': 'properties.thumbprint', 'type': 'str'},
'valid': {'key': 'properties.valid', 'type': 'bool'},
'cer_blob': {'key': 'properties.cerBlob', 'type': 'bytearray'},
'public_key_hash': {'key': 'properties.publicKeyHash', 'type': 'str'},
'hosting_environment_profile': {'key': 'properties.hostingEnvironmentProfile', 'type': 'HostingEnvironmentProfile'},
'key_vault_id': {'key': 'properties.keyVaultId', 'type': 'str'},
'key_vault_secret_name': {'key': 'properties.keyVaultSecretName', 'type': 'str'},
'key_vault_secret_status': {'key': 'properties.keyVaultSecretStatus', 'type': 'str'},
'server_farm_id': {'key': 'properties.serverFarmId', 'type': 'str'},
'canonical_name': {'key': 'properties.canonicalName', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
kind: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
host_names: Optional[List[str]] = None,
pfx_blob: Optional[bytearray] = None,
password: Optional[str] = None,
key_vault_id: Optional[str] = None,
key_vault_secret_name: Optional[str] = None,
server_farm_id: Optional[str] = None,
canonical_name: Optional[str] = None,
**kwargs
):
super(Certificate, self).__init__(kind=kind, location=location, tags=tags, **kwargs)
self.friendly_name = None
self.subject_name = None
self.host_names = host_names
self.pfx_blob = pfx_blob
self.site_name = None
self.self_link = None
self.issuer = None
self.issue_date = None
self.expiration_date = None
self.password = password
self.thumbprint = None
self.valid = None
self.cer_blob = None
self.public_key_hash = None
self.hosting_environment_profile = None
self.key_vault_id = key_vault_id
self.key_vault_secret_name = key_vault_secret_name
self.key_vault_secret_status = None
self.server_farm_id = server_farm_id
self.canonical_name = canonical_name
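# Usage sketch (illustrative): importing a certificate from Key Vault for a set of
# hostnames. Subscription, resource group, vault, and plan names are placeholders.
#
#   cert = Certificate(
#       location="West US",
#       host_names=["www.contoso.com"],
#       key_vault_id="/subscriptions/<sub-id>/resourceGroups/<rg>/providers/Microsoft.KeyVault/vaults/<vault>",
#       key_vault_secret_name="contoso-tls",
#       server_farm_id="/subscriptions/<sub-id>/resourceGroups/<rg>/providers/Microsoft.Web/serverfarms/<plan>",
#   )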
class CertificateCollection(msrest.serialization.Model):
"""Collection of certificates.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.Certificate]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Certificate]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["Certificate"],
**kwargs
):
super(CertificateCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class CertificateDetails(msrest.serialization.Model):
"""SSL certificate details.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar version: Certificate Version.
:vartype version: int
:ivar serial_number: Certificate Serial Number.
:vartype serial_number: str
:ivar thumbprint: Certificate Thumbprint.
:vartype thumbprint: str
:ivar subject: Certificate Subject.
:vartype subject: str
:ivar not_before: Date Certificate is valid from.
:vartype not_before: ~datetime.datetime
:ivar not_after: Date Certificate is valid to.
:vartype not_after: ~datetime.datetime
:ivar signature_algorithm: Certificate Signature algorithm.
:vartype signature_algorithm: str
:ivar issuer: Certificate Issuer.
:vartype issuer: str
:ivar raw_data: Raw certificate data.
:vartype raw_data: str
"""
_validation = {
'version': {'readonly': True},
'serial_number': {'readonly': True},
'thumbprint': {'readonly': True},
'subject': {'readonly': True},
'not_before': {'readonly': True},
'not_after': {'readonly': True},
'signature_algorithm': {'readonly': True},
'issuer': {'readonly': True},
'raw_data': {'readonly': True},
}
_attribute_map = {
'version': {'key': 'version', 'type': 'int'},
'serial_number': {'key': 'serialNumber', 'type': 'str'},
'thumbprint': {'key': 'thumbprint', 'type': 'str'},
'subject': {'key': 'subject', 'type': 'str'},
'not_before': {'key': 'notBefore', 'type': 'iso-8601'},
'not_after': {'key': 'notAfter', 'type': 'iso-8601'},
'signature_algorithm': {'key': 'signatureAlgorithm', 'type': 'str'},
'issuer': {'key': 'issuer', 'type': 'str'},
'raw_data': {'key': 'rawData', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CertificateDetails, self).__init__(**kwargs)
self.version = None
self.serial_number = None
self.thumbprint = None
self.subject = None
self.not_before = None
self.not_after = None
self.signature_algorithm = None
self.issuer = None
self.raw_data = None
class CertificateEmail(ProxyOnlyResource):
"""SSL certificate email.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param email_id: Email id.
:type email_id: str
:param time_stamp: Time stamp.
:type time_stamp: ~datetime.datetime
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'email_id': {'key': 'properties.emailId', 'type': 'str'},
'time_stamp': {'key': 'properties.timeStamp', 'type': 'iso-8601'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
email_id: Optional[str] = None,
time_stamp: Optional[datetime.datetime] = None,
**kwargs
):
super(CertificateEmail, self).__init__(kind=kind, **kwargs)
self.email_id = email_id
self.time_stamp = time_stamp
class CertificateOrderAction(ProxyOnlyResource):
"""Certificate order action.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar action_type: Action type. Possible values include: "CertificateIssued",
"CertificateOrderCanceled", "CertificateOrderCreated", "CertificateRevoked",
"DomainValidationComplete", "FraudDetected", "OrgNameChange", "OrgValidationComplete",
"SanDrop", "FraudCleared", "CertificateExpired", "CertificateExpirationWarning",
"FraudDocumentationRequired", "Unknown".
:vartype action_type: str or ~azure.mgmt.web.v2020_06_01.models.CertificateOrderActionType
:ivar created_at: Time at which the certificate action was performed.
:vartype created_at: ~datetime.datetime
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'action_type': {'readonly': True},
'created_at': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'action_type': {'key': 'properties.actionType', 'type': 'str'},
'created_at': {'key': 'properties.createdAt', 'type': 'iso-8601'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(CertificateOrderAction, self).__init__(kind=kind, **kwargs)
self.action_type = None
self.created_at = None
class CertificatePatchResource(ProxyOnlyResource):
"""ARM resource for a certificate.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar friendly_name: Friendly name of the certificate.
:vartype friendly_name: str
:ivar subject_name: Subject name of the certificate.
:vartype subject_name: str
:param host_names: Host names the certificate applies to.
:type host_names: list[str]
:param pfx_blob: Pfx blob.
:type pfx_blob: bytearray
:ivar site_name: App name.
:vartype site_name: str
:ivar self_link: Self link.
:vartype self_link: str
:ivar issuer: Certificate issuer.
:vartype issuer: str
:ivar issue_date: Certificate issue Date.
:vartype issue_date: ~datetime.datetime
:ivar expiration_date: Certificate expiration date.
:vartype expiration_date: ~datetime.datetime
:param password: Certificate password.
:type password: str
:ivar thumbprint: Certificate thumbprint.
:vartype thumbprint: str
:ivar valid: Is the certificate valid?.
:vartype valid: bool
:ivar cer_blob: Raw bytes of .cer file.
:vartype cer_blob: bytearray
:ivar public_key_hash: Public key hash.
:vartype public_key_hash: str
:ivar hosting_environment_profile: Specification for the App Service Environment to use for the
certificate.
:vartype hosting_environment_profile:
~azure.mgmt.web.v2020_06_01.models.HostingEnvironmentProfile
:param key_vault_id: Key Vault Csm resource Id.
:type key_vault_id: str
:param key_vault_secret_name: Key Vault secret name.
:type key_vault_secret_name: str
:ivar key_vault_secret_status: Status of the Key Vault secret. Possible values include:
"Initialized", "WaitingOnCertificateOrder", "Succeeded", "CertificateOrderFailed",
"OperationNotPermittedOnKeyVault", "AzureServiceUnauthorizedToAccessKeyVault",
"KeyVaultDoesNotExist", "KeyVaultSecretDoesNotExist", "UnknownError", "ExternalPrivateKey",
"Unknown".
:vartype key_vault_secret_status: str or
~azure.mgmt.web.v2020_06_01.models.KeyVaultSecretStatus
:param server_farm_id: Resource ID of the associated App Service plan, formatted as:
"/subscriptions/{subscriptionID}/resourceGroups/{groupName}/providers/Microsoft.Web/serverfarms/{appServicePlanName}".
:type server_farm_id: str
:param canonical_name: CNAME of the certificate to be issued via free certificate.
:type canonical_name: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'friendly_name': {'readonly': True},
'subject_name': {'readonly': True},
'site_name': {'readonly': True},
'self_link': {'readonly': True},
'issuer': {'readonly': True},
'issue_date': {'readonly': True},
'expiration_date': {'readonly': True},
'thumbprint': {'readonly': True},
'valid': {'readonly': True},
'cer_blob': {'readonly': True},
'public_key_hash': {'readonly': True},
'hosting_environment_profile': {'readonly': True},
'key_vault_secret_status': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'friendly_name': {'key': 'properties.friendlyName', 'type': 'str'},
'subject_name': {'key': 'properties.subjectName', 'type': 'str'},
'host_names': {'key': 'properties.hostNames', 'type': '[str]'},
'pfx_blob': {'key': 'properties.pfxBlob', 'type': 'bytearray'},
'site_name': {'key': 'properties.siteName', 'type': 'str'},
'self_link': {'key': 'properties.selfLink', 'type': 'str'},
'issuer': {'key': 'properties.issuer', 'type': 'str'},
'issue_date': {'key': 'properties.issueDate', 'type': 'iso-8601'},
'expiration_date': {'key': 'properties.expirationDate', 'type': 'iso-8601'},
'password': {'key': 'properties.password', 'type': 'str'},
'thumbprint': {'key': 'properties.thumbprint', 'type': 'str'},
'valid': {'key': 'properties.valid', 'type': 'bool'},
'cer_blob': {'key': 'properties.cerBlob', 'type': 'bytearray'},
'public_key_hash': {'key': 'properties.publicKeyHash', 'type': 'str'},
'hosting_environment_profile': {'key': 'properties.hostingEnvironmentProfile', 'type': 'HostingEnvironmentProfile'},
'key_vault_id': {'key': 'properties.keyVaultId', 'type': 'str'},
'key_vault_secret_name': {'key': 'properties.keyVaultSecretName', 'type': 'str'},
'key_vault_secret_status': {'key': 'properties.keyVaultSecretStatus', 'type': 'str'},
'server_farm_id': {'key': 'properties.serverFarmId', 'type': 'str'},
'canonical_name': {'key': 'properties.canonicalName', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
host_names: Optional[List[str]] = None,
pfx_blob: Optional[bytearray] = None,
password: Optional[str] = None,
key_vault_id: Optional[str] = None,
key_vault_secret_name: Optional[str] = None,
server_farm_id: Optional[str] = None,
canonical_name: Optional[str] = None,
**kwargs
):
super(CertificatePatchResource, self).__init__(kind=kind, **kwargs)
self.friendly_name = None
self.subject_name = None
self.host_names = host_names
self.pfx_blob = pfx_blob
self.site_name = None
self.self_link = None
self.issuer = None
self.issue_date = None
self.expiration_date = None
self.password = password
self.thumbprint = None
self.valid = None
self.cer_blob = None
self.public_key_hash = None
self.hosting_environment_profile = None
self.key_vault_id = key_vault_id
self.key_vault_secret_name = key_vault_secret_name
self.key_vault_secret_status = None
self.server_farm_id = server_farm_id
self.canonical_name = canonical_name
class ClientRegistration(ProxyOnlyResource):
"""ClientRegistration.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param client_id:
:type client_id: str
:param client_secret_setting_name:
:type client_secret_setting_name: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'client_id': {'key': 'properties.clientId', 'type': 'str'},
'client_secret_setting_name': {'key': 'properties.clientSecretSettingName', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
client_id: Optional[str] = None,
client_secret_setting_name: Optional[str] = None,
**kwargs
):
super(ClientRegistration, self).__init__(kind=kind, **kwargs)
self.client_id = client_id
self.client_secret_setting_name = client_secret_setting_name
class CloningInfo(msrest.serialization.Model):
"""Information needed for cloning operation.
All required parameters must be populated in order to send to Azure.
:param correlation_id: Correlation ID of cloning operation. This ID ties multiple cloning
operations
together to use the same snapshot.
:type correlation_id: str
:param overwrite: :code:`<code>true</code>` to overwrite destination app; otherwise,
:code:`<code>false</code>`.
:type overwrite: bool
:param clone_custom_host_names: :code:`<code>true</code>` to clone custom hostnames from source
app; otherwise, :code:`<code>false</code>`.
:type clone_custom_host_names: bool
:param clone_source_control: :code:`<code>true</code>` to clone source control from source app;
otherwise, :code:`<code>false</code>`.
:type clone_source_control: bool
:param source_web_app_id: Required. ARM resource ID of the source app. App resource ID is of
the form
/subscriptions/{subId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}
for production slots and
/subscriptions/{subId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slotName}
for other slots.
:type source_web_app_id: str
:param source_web_app_location: Location of source app, e.g. West US or North Europe.
:type source_web_app_location: str
:param hosting_environment: App Service Environment.
:type hosting_environment: str
:param app_settings_overrides: Application setting overrides for cloned app. If specified,
these settings override the settings cloned
from source app. Otherwise, application settings from source app are retained.
:type app_settings_overrides: dict[str, str]
:param configure_load_balancing: :code:`<code>true</code>` to configure load balancing for
source and destination app.
:type configure_load_balancing: bool
:param traffic_manager_profile_id: ARM resource ID of the Traffic Manager profile to use, if it
exists. Traffic Manager resource ID is of the form
/subscriptions/{subId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficManagerProfiles/{profileName}.
:type traffic_manager_profile_id: str
:param traffic_manager_profile_name: Name of Traffic Manager profile to create. This is only
needed if Traffic Manager profile does not already exist.
:type traffic_manager_profile_name: str
"""
_validation = {
'source_web_app_id': {'required': True},
}
_attribute_map = {
'correlation_id': {'key': 'correlationId', 'type': 'str'},
'overwrite': {'key': 'overwrite', 'type': 'bool'},
'clone_custom_host_names': {'key': 'cloneCustomHostNames', 'type': 'bool'},
'clone_source_control': {'key': 'cloneSourceControl', 'type': 'bool'},
'source_web_app_id': {'key': 'sourceWebAppId', 'type': 'str'},
'source_web_app_location': {'key': 'sourceWebAppLocation', 'type': 'str'},
'hosting_environment': {'key': 'hostingEnvironment', 'type': 'str'},
'app_settings_overrides': {'key': 'appSettingsOverrides', 'type': '{str}'},
'configure_load_balancing': {'key': 'configureLoadBalancing', 'type': 'bool'},
'traffic_manager_profile_id': {'key': 'trafficManagerProfileId', 'type': 'str'},
'traffic_manager_profile_name': {'key': 'trafficManagerProfileName', 'type': 'str'},
}
def __init__(
self,
*,
source_web_app_id: str,
correlation_id: Optional[str] = None,
overwrite: Optional[bool] = None,
clone_custom_host_names: Optional[bool] = None,
clone_source_control: Optional[bool] = None,
source_web_app_location: Optional[str] = None,
hosting_environment: Optional[str] = None,
app_settings_overrides: Optional[Dict[str, str]] = None,
configure_load_balancing: Optional[bool] = None,
traffic_manager_profile_id: Optional[str] = None,
traffic_manager_profile_name: Optional[str] = None,
**kwargs
):
super(CloningInfo, self).__init__(**kwargs)
self.correlation_id = correlation_id
self.overwrite = overwrite
self.clone_custom_host_names = clone_custom_host_names
self.clone_source_control = clone_source_control
self.source_web_app_id = source_web_app_id
self.source_web_app_location = source_web_app_location
self.hosting_environment = hosting_environment
self.app_settings_overrides = app_settings_overrides
self.configure_load_balancing = configure_load_balancing
self.traffic_manager_profile_id = traffic_manager_profile_id
self.traffic_manager_profile_name = traffic_manager_profile_name
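# Illustrative sketch (not part of the generated model code): a minimal CloningInfo
# with its one required field. The subscription, resource group, and site names in
# the ARM resource ID are placeholders.
def _example_cloning_info() -> CloningInfo:
    return CloningInfo(
        source_web_app_id=(
            "/subscriptions/00000000-0000-0000-0000-000000000000"
            "/resourceGroups/example-rg/providers/Microsoft.Web/sites/example-app"
        ),
        overwrite=False,
        clone_custom_host_names=True,
        # Overrides replace the corresponding settings cloned from the source app.
        app_settings_overrides={"ENVIRONMENT": "staging"},
    )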
class Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties(msrest.serialization.Model):
"""Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar principal_id: Principal Id of user assigned identity.
:vartype principal_id: str
:ivar client_id: Client Id of user assigned identity.
:vartype client_id: str
"""
_validation = {
'principal_id': {'readonly': True},
'client_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'client_id': {'key': 'clientId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties, self).__init__(**kwargs)
self.principal_id = None
self.client_id = None
class ConnectionStringDictionary(ProxyOnlyResource):
"""String dictionary resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param properties: Connection strings.
:type properties: dict[str, ~azure.mgmt.web.v2020_06_01.models.ConnStringValueTypePair]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{ConnStringValueTypePair}'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
properties: Optional[Dict[str, "ConnStringValueTypePair"]] = None,
**kwargs
):
super(ConnectionStringDictionary, self).__init__(kind=kind, **kwargs)
self.properties = properties
class ConnStringInfo(msrest.serialization.Model):
"""Database connection string information.
:param name: Name of connection string.
:type name: str
:param connection_string: Connection string value.
:type connection_string: str
:param type: Type of database. Possible values include: "MySql", "SQLServer", "SQLAzure",
"Custom", "NotificationHub", "ServiceBus", "EventHub", "ApiHub", "DocDb", "RedisCache",
"PostgreSQL".
:type type: str or ~azure.mgmt.web.v2020_06_01.models.ConnectionStringType
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'connection_string': {'key': 'connectionString', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
connection_string: Optional[str] = None,
type: Optional[Union[str, "ConnectionStringType"]] = None,
**kwargs
):
super(ConnStringInfo, self).__init__(**kwargs)
self.name = name
self.connection_string = connection_string
self.type = type
class ConnStringValueTypePair(msrest.serialization.Model):
"""Database connection string value to type pair.
All required parameters must be populated in order to send to Azure.
:param value: Required. Value of pair.
:type value: str
:param type: Required. Type of database. Possible values include: "MySql", "SQLServer",
"SQLAzure", "Custom", "NotificationHub", "ServiceBus", "EventHub", "ApiHub", "DocDb",
"RedisCache", "PostgreSQL".
:type type: str or ~azure.mgmt.web.v2020_06_01.models.ConnectionStringType
"""
_validation = {
'value': {'required': True},
'type': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
value: str,
type: Union[str, "ConnectionStringType"],
**kwargs
):
super(ConnStringValueTypePair, self).__init__(**kwargs)
self.value = value
self.type = type
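# Illustrative sketch (not part of the generated model code): pairing a connection
# string value with its type and wrapping it in a ConnectionStringDictionary. The
# connection string and its key name are placeholders. msrest models expose
# serialize(), which emits the camelCase wire keys declared in _attribute_map.
def _example_connection_strings() -> ConnectionStringDictionary:
    pair = ConnStringValueTypePair(
        value="Server=tcp:example.database.windows.net;Database=appdb;",
        type="SQLAzure",  # one of the documented ConnectionStringType values
    )
    return ConnectionStringDictionary(properties={"DefaultConnection": pair})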
class Contact(msrest.serialization.Model):
"""Contact information for domain registration. If 'Domain Privacy' option is not selected then the contact information is made publicly available through the Whois
directories as per ICANN requirements.
All required parameters must be populated in order to send to Azure.
:param address_mailing: Mailing address.
:type address_mailing: ~azure.mgmt.web.v2020_06_01.models.Address
:param email: Required. Email address.
:type email: str
:param fax: Fax number.
:type fax: str
:param job_title: Job title.
:type job_title: str
:param name_first: Required. First name.
:type name_first: str
:param name_last: Required. Last name.
:type name_last: str
:param name_middle: Middle name.
:type name_middle: str
:param organization: Organization contact belongs to.
:type organization: str
:param phone: Required. Phone number.
:type phone: str
"""
_validation = {
'email': {'required': True},
'name_first': {'required': True},
'name_last': {'required': True},
'phone': {'required': True},
}
_attribute_map = {
'address_mailing': {'key': 'addressMailing', 'type': 'Address'},
'email': {'key': 'email', 'type': 'str'},
'fax': {'key': 'fax', 'type': 'str'},
'job_title': {'key': 'jobTitle', 'type': 'str'},
'name_first': {'key': 'nameFirst', 'type': 'str'},
'name_last': {'key': 'nameLast', 'type': 'str'},
'name_middle': {'key': 'nameMiddle', 'type': 'str'},
'organization': {'key': 'organization', 'type': 'str'},
'phone': {'key': 'phone', 'type': 'str'},
}
def __init__(
self,
*,
email: str,
name_first: str,
name_last: str,
phone: str,
address_mailing: Optional["Address"] = None,
fax: Optional[str] = None,
job_title: Optional[str] = None,
name_middle: Optional[str] = None,
organization: Optional[str] = None,
**kwargs
):
super(Contact, self).__init__(**kwargs)
self.address_mailing = address_mailing
self.email = email
self.fax = fax
self.job_title = job_title
self.name_first = name_first
self.name_last = name_last
self.name_middle = name_middle
self.organization = organization
self.phone = phone
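# Illustrative sketch (not part of the generated model code): a Contact with the
# four required fields populated. All values are placeholders; address_mailing is
# omitted because the Address model is defined elsewhere in this module.
def _example_contact() -> Contact:
    return Contact(
        email="admin@example.com",
        name_first="Jane",
        name_last="Doe",
        phone="+1.5555550100",
        organization="Example Ltd",
    )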
class ContainerCpuStatistics(msrest.serialization.Model):
"""ContainerCpuStatistics.
:param cpu_usage:
:type cpu_usage: ~azure.mgmt.web.v2020_06_01.models.ContainerCpuUsage
:param system_cpu_usage:
:type system_cpu_usage: long
:param online_cpu_count:
:type online_cpu_count: int
:param throttling_data:
:type throttling_data: ~azure.mgmt.web.v2020_06_01.models.ContainerThrottlingData
"""
_attribute_map = {
'cpu_usage': {'key': 'cpuUsage', 'type': 'ContainerCpuUsage'},
'system_cpu_usage': {'key': 'systemCpuUsage', 'type': 'long'},
'online_cpu_count': {'key': 'onlineCpuCount', 'type': 'int'},
'throttling_data': {'key': 'throttlingData', 'type': 'ContainerThrottlingData'},
}
def __init__(
self,
*,
cpu_usage: Optional["ContainerCpuUsage"] = None,
system_cpu_usage: Optional[int] = None,
online_cpu_count: Optional[int] = None,
throttling_data: Optional["ContainerThrottlingData"] = None,
**kwargs
):
super(ContainerCpuStatistics, self).__init__(**kwargs)
self.cpu_usage = cpu_usage
self.system_cpu_usage = system_cpu_usage
self.online_cpu_count = online_cpu_count
self.throttling_data = throttling_data
class ContainerCpuUsage(msrest.serialization.Model):
"""ContainerCpuUsage.
:param total_usage:
:type total_usage: long
:param per_cpu_usage:
:type per_cpu_usage: list[long]
:param kernel_mode_usage:
:type kernel_mode_usage: long
:param user_mode_usage:
:type user_mode_usage: long
"""
_attribute_map = {
'total_usage': {'key': 'totalUsage', 'type': 'long'},
'per_cpu_usage': {'key': 'perCpuUsage', 'type': '[long]'},
'kernel_mode_usage': {'key': 'kernelModeUsage', 'type': 'long'},
'user_mode_usage': {'key': 'userModeUsage', 'type': 'long'},
}
def __init__(
self,
*,
total_usage: Optional[int] = None,
per_cpu_usage: Optional[List[int]] = None,
kernel_mode_usage: Optional[int] = None,
user_mode_usage: Optional[int] = None,
**kwargs
):
super(ContainerCpuUsage, self).__init__(**kwargs)
self.total_usage = total_usage
self.per_cpu_usage = per_cpu_usage
self.kernel_mode_usage = kernel_mode_usage
self.user_mode_usage = user_mode_usage
class ContainerInfo(msrest.serialization.Model):
"""ContainerInfo.
:param current_time_stamp:
:type current_time_stamp: ~datetime.datetime
:param previous_time_stamp:
:type previous_time_stamp: ~datetime.datetime
:param current_cpu_stats:
:type current_cpu_stats: ~azure.mgmt.web.v2020_06_01.models.ContainerCpuStatistics
:param previous_cpu_stats:
:type previous_cpu_stats: ~azure.mgmt.web.v2020_06_01.models.ContainerCpuStatistics
:param memory_stats:
:type memory_stats: ~azure.mgmt.web.v2020_06_01.models.ContainerMemoryStatistics
:param name:
:type name: str
:param id:
:type id: str
:param eth0:
:type eth0: ~azure.mgmt.web.v2020_06_01.models.ContainerNetworkInterfaceStatistics
"""
_attribute_map = {
'current_time_stamp': {'key': 'currentTimeStamp', 'type': 'iso-8601'},
'previous_time_stamp': {'key': 'previousTimeStamp', 'type': 'iso-8601'},
'current_cpu_stats': {'key': 'currentCpuStats', 'type': 'ContainerCpuStatistics'},
'previous_cpu_stats': {'key': 'previousCpuStats', 'type': 'ContainerCpuStatistics'},
'memory_stats': {'key': 'memoryStats', 'type': 'ContainerMemoryStatistics'},
'name': {'key': 'name', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'eth0': {'key': 'eth0', 'type': 'ContainerNetworkInterfaceStatistics'},
}
def __init__(
self,
*,
current_time_stamp: Optional[datetime.datetime] = None,
previous_time_stamp: Optional[datetime.datetime] = None,
current_cpu_stats: Optional["ContainerCpuStatistics"] = None,
previous_cpu_stats: Optional["ContainerCpuStatistics"] = None,
memory_stats: Optional["ContainerMemoryStatistics"] = None,
name: Optional[str] = None,
id: Optional[str] = None,
eth0: Optional["ContainerNetworkInterfaceStatistics"] = None,
**kwargs
):
super(ContainerInfo, self).__init__(**kwargs)
self.current_time_stamp = current_time_stamp
self.previous_time_stamp = previous_time_stamp
self.current_cpu_stats = current_cpu_stats
self.previous_cpu_stats = previous_cpu_stats
self.memory_stats = memory_stats
self.name = name
self.id = id
self.eth0 = eth0
class ContainerMemoryStatistics(msrest.serialization.Model):
"""ContainerMemoryStatistics.
:param usage:
:type usage: long
:param max_usage:
:type max_usage: long
:param limit:
:type limit: long
"""
_attribute_map = {
'usage': {'key': 'usage', 'type': 'long'},
'max_usage': {'key': 'maxUsage', 'type': 'long'},
'limit': {'key': 'limit', 'type': 'long'},
}
def __init__(
self,
*,
usage: Optional[int] = None,
max_usage: Optional[int] = None,
limit: Optional[int] = None,
**kwargs
):
super(ContainerMemoryStatistics, self).__init__(**kwargs)
self.usage = usage
self.max_usage = max_usage
self.limit = limit
class ContainerNetworkInterfaceStatistics(msrest.serialization.Model):
"""ContainerNetworkInterfaceStatistics.
:param rx_bytes:
:type rx_bytes: long
:param rx_packets:
:type rx_packets: long
:param rx_errors:
:type rx_errors: long
:param rx_dropped:
:type rx_dropped: long
:param tx_bytes:
:type tx_bytes: long
:param tx_packets:
:type tx_packets: long
:param tx_errors:
:type tx_errors: long
:param tx_dropped:
:type tx_dropped: long
"""
_attribute_map = {
'rx_bytes': {'key': 'rxBytes', 'type': 'long'},
'rx_packets': {'key': 'rxPackets', 'type': 'long'},
'rx_errors': {'key': 'rxErrors', 'type': 'long'},
'rx_dropped': {'key': 'rxDropped', 'type': 'long'},
'tx_bytes': {'key': 'txBytes', 'type': 'long'},
'tx_packets': {'key': 'txPackets', 'type': 'long'},
'tx_errors': {'key': 'txErrors', 'type': 'long'},
'tx_dropped': {'key': 'txDropped', 'type': 'long'},
}
def __init__(
self,
*,
rx_bytes: Optional[int] = None,
rx_packets: Optional[int] = None,
rx_errors: Optional[int] = None,
rx_dropped: Optional[int] = None,
tx_bytes: Optional[int] = None,
tx_packets: Optional[int] = None,
tx_errors: Optional[int] = None,
tx_dropped: Optional[int] = None,
**kwargs
):
super(ContainerNetworkInterfaceStatistics, self).__init__(**kwargs)
self.rx_bytes = rx_bytes
self.rx_packets = rx_packets
self.rx_errors = rx_errors
self.rx_dropped = rx_dropped
self.tx_bytes = tx_bytes
self.tx_packets = tx_packets
self.tx_errors = tx_errors
self.tx_dropped = tx_dropped
class ContainerThrottlingData(msrest.serialization.Model):
"""ContainerThrottlingData.
:param periods:
:type periods: int
:param throttled_periods:
:type throttled_periods: int
:param throttled_time:
:type throttled_time: int
"""
_attribute_map = {
'periods': {'key': 'periods', 'type': 'int'},
'throttled_periods': {'key': 'throttledPeriods', 'type': 'int'},
'throttled_time': {'key': 'throttledTime', 'type': 'int'},
}
def __init__(
self,
*,
periods: Optional[int] = None,
throttled_periods: Optional[int] = None,
throttled_time: Optional[int] = None,
**kwargs
):
super(ContainerThrottlingData, self).__init__(**kwargs)
self.periods = periods
self.throttled_periods = throttled_periods
self.throttled_time = throttled_time
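# Illustrative sketch (not part of the generated model code): composing the
# container statistics models into a ContainerInfo. All counters, names, and ids
# below are made-up placeholder values.
def _example_container_info() -> ContainerInfo:
    cpu = ContainerCpuStatistics(
        cpu_usage=ContainerCpuUsage(
            total_usage=123456789,
            kernel_mode_usage=23456789,
            user_mode_usage=34567890,
        ),
        system_cpu_usage=987654321,
        online_cpu_count=2,
        throttling_data=ContainerThrottlingData(periods=10, throttled_periods=1, throttled_time=50),
    )
    memory = ContainerMemoryStatistics(
        usage=64 * 1024 * 1024,
        max_usage=96 * 1024 * 1024,
        limit=128 * 1024 * 1024,
    )
    return ContainerInfo(
        name="example-container",
        id="0123456789ab",
        current_cpu_stats=cpu,
        memory_stats=memory,
        eth0=ContainerNetworkInterfaceStatistics(rx_bytes=1024, tx_bytes=2048),
    )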
class ContinuousWebJob(ProxyOnlyResource):
"""Continuous Web Job Information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param status: Job status. Possible values include: "Initializing", "Starting", "Running",
"PendingRestart", "Stopped".
:type status: str or ~azure.mgmt.web.v2020_06_01.models.ContinuousWebJobStatus
:param detailed_status: Detailed status.
:type detailed_status: str
:param log_url: Log URL.
:type log_url: str
:param run_command: Run command.
:type run_command: str
:param url: Job URL.
:type url: str
:param extra_info_url: Extra Info URL.
:type extra_info_url: str
:param web_job_type: Job type. Possible values include: "Continuous", "Triggered".
:type web_job_type: str or ~azure.mgmt.web.v2020_06_01.models.WebJobType
:param error: Error information.
:type error: str
:param using_sdk: Using SDK?.
:type using_sdk: bool
:param settings: Job settings.
:type settings: dict[str, any]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'str'},
'detailed_status': {'key': 'properties.detailed_status', 'type': 'str'},
'log_url': {'key': 'properties.log_url', 'type': 'str'},
'run_command': {'key': 'properties.run_command', 'type': 'str'},
'url': {'key': 'properties.url', 'type': 'str'},
'extra_info_url': {'key': 'properties.extra_info_url', 'type': 'str'},
'web_job_type': {'key': 'properties.web_job_type', 'type': 'str'},
'error': {'key': 'properties.error', 'type': 'str'},
'using_sdk': {'key': 'properties.using_sdk', 'type': 'bool'},
'settings': {'key': 'properties.settings', 'type': '{object}'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
status: Optional[Union[str, "ContinuousWebJobStatus"]] = None,
detailed_status: Optional[str] = None,
log_url: Optional[str] = None,
run_command: Optional[str] = None,
url: Optional[str] = None,
extra_info_url: Optional[str] = None,
web_job_type: Optional[Union[str, "WebJobType"]] = None,
error: Optional[str] = None,
using_sdk: Optional[bool] = None,
settings: Optional[Dict[str, Any]] = None,
**kwargs
):
super(ContinuousWebJob, self).__init__(kind=kind, **kwargs)
self.status = status
self.detailed_status = detailed_status
self.log_url = log_url
self.run_command = run_command
self.url = url
self.extra_info_url = extra_info_url
self.web_job_type = web_job_type
self.error = error
self.using_sdk = using_sdk
self.settings = settings
class ContinuousWebJobCollection(msrest.serialization.Model):
"""Collection of Kudu continuous web job information elements.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.ContinuousWebJob]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[ContinuousWebJob]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["ContinuousWebJob"],
**kwargs
):
super(ContinuousWebJobCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class CookieExpiration(ProxyOnlyResource):
"""CookieExpiration.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param convention: Possible values include: "FixedTime", "IdentityProviderDerived".
:type convention: str or ~azure.mgmt.web.v2020_06_01.models.CookieExpirationConvention
:param time_to_expiration:
:type time_to_expiration: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'convention': {'key': 'properties.convention', 'type': 'str'},
'time_to_expiration': {'key': 'properties.timeToExpiration', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
convention: Optional[Union[str, "CookieExpirationConvention"]] = None,
time_to_expiration: Optional[str] = None,
**kwargs
):
super(CookieExpiration, self).__init__(kind=kind, **kwargs)
self.convention = convention
self.time_to_expiration = time_to_expiration
class CorsSettings(msrest.serialization.Model):
"""Cross-Origin Resource Sharing (CORS) settings for the app.
    :param allowed_origins: Gets or sets the list of origins that should be allowed to make
     cross-origin calls (for example: http://example.com:12345). Use "*" to allow all.
:type allowed_origins: list[str]
:param support_credentials: Gets or sets whether CORS requests with credentials are allowed.
See
https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS#Requests_with_credentials
for more details.
:type support_credentials: bool
"""
_attribute_map = {
'allowed_origins': {'key': 'allowedOrigins', 'type': '[str]'},
'support_credentials': {'key': 'supportCredentials', 'type': 'bool'},
}
def __init__(
self,
*,
allowed_origins: Optional[List[str]] = None,
support_credentials: Optional[bool] = None,
**kwargs
):
super(CorsSettings, self).__init__(**kwargs)
self.allowed_origins = allowed_origins
self.support_credentials = support_credentials
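# Illustrative sketch (not part of the generated model code): CORS settings that
# allow two placeholder origins and disallow credentialed requests.
def _example_cors_settings() -> CorsSettings:
    return CorsSettings(
        allowed_origins=["https://www.example.com", "http://example.com:12345"],
        support_credentials=False,
    )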
class CsmCopySlotEntity(msrest.serialization.Model):
"""Copy deployment slot parameters.
All required parameters must be populated in order to send to Azure.
:param target_slot: Required. Destination deployment slot during copy operation.
:type target_slot: str
:param site_config: Required. The site object which will be merged with the source slot site
to produce new destination slot site object.
     :code:`<code>null</code>` to just copy source slot content. Otherwise a
     :code:`<code>Site</code>` object with properties to override source slot site.
:type site_config: ~azure.mgmt.web.v2020_06_01.models.SiteConfig
"""
_validation = {
'target_slot': {'required': True},
'site_config': {'required': True},
}
_attribute_map = {
'target_slot': {'key': 'targetSlot', 'type': 'str'},
'site_config': {'key': 'siteConfig', 'type': 'SiteConfig'},
}
def __init__(
self,
*,
target_slot: str,
site_config: "SiteConfig",
**kwargs
):
super(CsmCopySlotEntity, self).__init__(**kwargs)
self.target_slot = target_slot
self.site_config = site_config
class CsmMoveResourceEnvelope(msrest.serialization.Model):
"""Object with a list of the resources that need to be moved and the resource group they should be moved to.
:param target_resource_group:
:type target_resource_group: str
:param resources:
:type resources: list[str]
"""
_validation = {
        'target_resource_group': {'max_length': 90, 'min_length': 1, 'pattern': r'^[-\w\._\(\)]+[^\.]$'},
}
_attribute_map = {
'target_resource_group': {'key': 'targetResourceGroup', 'type': 'str'},
'resources': {'key': 'resources', 'type': '[str]'},
}
def __init__(
self,
*,
target_resource_group: Optional[str] = None,
resources: Optional[List[str]] = None,
**kwargs
):
super(CsmMoveResourceEnvelope, self).__init__(**kwargs)
self.target_resource_group = target_resource_group
self.resources = resources
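# Illustrative sketch (not part of the generated model code): a move envelope that
# lists one resource to move. The resource group and site names are placeholders.
def _example_move_envelope() -> CsmMoveResourceEnvelope:
    return CsmMoveResourceEnvelope(
        target_resource_group="example-target-rg",
        resources=[
            "/subscriptions/00000000-0000-0000-0000-000000000000"
            "/resourceGroups/example-rg/providers/Microsoft.Web/sites/example-app",
        ],
    )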
class CsmOperationCollection(msrest.serialization.Model):
"""Collection of Azure resource manager operation metadata.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.CsmOperationDescription]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[CsmOperationDescription]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["CsmOperationDescription"],
**kwargs
):
super(CsmOperationCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class CsmOperationDescription(msrest.serialization.Model):
"""Description of an operation available for Microsoft.Web resource provider.
:param name:
:type name: str
:param display: Meta data about operation used for display in portal.
:type display: ~azure.mgmt.web.v2020_06_01.models.CsmOperationDisplay
:param origin:
:type origin: str
:param properties: Properties available for a Microsoft.Web resource provider operation.
:type properties: ~azure.mgmt.web.v2020_06_01.models.CsmOperationDescriptionProperties
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'CsmOperationDisplay'},
'origin': {'key': 'origin', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'CsmOperationDescriptionProperties'},
}
def __init__(
self,
*,
name: Optional[str] = None,
display: Optional["CsmOperationDisplay"] = None,
origin: Optional[str] = None,
properties: Optional["CsmOperationDescriptionProperties"] = None,
**kwargs
):
super(CsmOperationDescription, self).__init__(**kwargs)
self.name = name
self.display = display
self.origin = origin
self.properties = properties
class CsmOperationDescriptionProperties(msrest.serialization.Model):
"""Properties available for a Microsoft.Web resource provider operation.
:param service_specification: Resource metrics service provided by Microsoft.Insights resource
provider.
:type service_specification: ~azure.mgmt.web.v2020_06_01.models.ServiceSpecification
"""
_attribute_map = {
'service_specification': {'key': 'serviceSpecification', 'type': 'ServiceSpecification'},
}
def __init__(
self,
*,
service_specification: Optional["ServiceSpecification"] = None,
**kwargs
):
super(CsmOperationDescriptionProperties, self).__init__(**kwargs)
self.service_specification = service_specification
class CsmOperationDisplay(msrest.serialization.Model):
"""Meta data about operation used for display in portal.
:param provider:
:type provider: str
:param resource:
:type resource: str
:param operation:
:type operation: str
:param description:
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
*,
provider: Optional[str] = None,
resource: Optional[str] = None,
operation: Optional[str] = None,
description: Optional[str] = None,
**kwargs
):
super(CsmOperationDisplay, self).__init__(**kwargs)
self.provider = provider
self.resource = resource
self.operation = operation
self.description = description
class CsmPublishingCredentialsPoliciesCollection(ProxyOnlyResource):
"""Publishing Credentials Policies collection.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param ftp: Whether FTP is allowed.
:type ftp: ~azure.mgmt.web.v2020_06_01.models.CsmPublishingCredentialsPoliciesEntity
:param scm: Whether Scm Basic Auth is allowed.
:type scm: ~azure.mgmt.web.v2020_06_01.models.CsmPublishingCredentialsPoliciesEntity
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'ftp': {'key': 'properties.ftp', 'type': 'CsmPublishingCredentialsPoliciesEntity'},
'scm': {'key': 'properties.scm', 'type': 'CsmPublishingCredentialsPoliciesEntity'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
ftp: Optional["CsmPublishingCredentialsPoliciesEntity"] = None,
scm: Optional["CsmPublishingCredentialsPoliciesEntity"] = None,
**kwargs
):
super(CsmPublishingCredentialsPoliciesCollection, self).__init__(kind=kind, **kwargs)
self.ftp = ftp
self.scm = scm
class CsmPublishingCredentialsPoliciesEntity(ProxyOnlyResource):
"""Publishing Credentials Policies parameters.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param allow: :code:`<code>true</code>` to allow access to a publishing method; otherwise,
:code:`<code>false</code>`.
:type allow: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'allow': {'key': 'properties.allow', 'type': 'bool'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
allow: Optional[bool] = None,
**kwargs
):
super(CsmPublishingCredentialsPoliciesEntity, self).__init__(kind=kind, **kwargs)
self.allow = allow
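# Illustrative sketch (not part of the generated model code): a publishing
# credentials policy collection that disables FTP publishing while keeping SCM
# basic auth enabled. The chosen flags are only an example.
def _example_publishing_credentials_policies() -> CsmPublishingCredentialsPoliciesCollection:
    return CsmPublishingCredentialsPoliciesCollection(
        ftp=CsmPublishingCredentialsPoliciesEntity(allow=False),
        scm=CsmPublishingCredentialsPoliciesEntity(allow=True),
    )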
class CsmPublishingProfileOptions(msrest.serialization.Model):
"""Publishing options for requested profile.
:param format: Name of the format. Valid values are:
FileZilla3
WebDeploy -- default
Ftp. Possible values include: "FileZilla3", "WebDeploy", "Ftp".
:type format: str or ~azure.mgmt.web.v2020_06_01.models.PublishingProfileFormat
    :param include_disaster_recovery_endpoints: Include the DisasterRecovery endpoint if true.
:type include_disaster_recovery_endpoints: bool
"""
_attribute_map = {
'format': {'key': 'format', 'type': 'str'},
'include_disaster_recovery_endpoints': {'key': 'includeDisasterRecoveryEndpoints', 'type': 'bool'},
}
def __init__(
self,
*,
format: Optional[Union[str, "PublishingProfileFormat"]] = None,
include_disaster_recovery_endpoints: Optional[bool] = None,
**kwargs
):
super(CsmPublishingProfileOptions, self).__init__(**kwargs)
self.format = format
self.include_disaster_recovery_endpoints = include_disaster_recovery_endpoints
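# Illustrative sketch (not part of the generated model code): requesting a
# publishing profile in the WebDeploy format, one of the documented
# PublishingProfileFormat values.
def _example_publishing_profile_options() -> CsmPublishingProfileOptions:
    return CsmPublishingProfileOptions(
        format="WebDeploy",
        include_disaster_recovery_endpoints=False,
    )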
class CsmSlotEntity(msrest.serialization.Model):
"""Deployment slot parameters.
All required parameters must be populated in order to send to Azure.
:param target_slot: Required. Destination deployment slot during swap operation.
:type target_slot: str
:param preserve_vnet: Required. :code:`<code>true</code>` to preserve Virtual Network to the
slot during swap; otherwise, :code:`<code>false</code>`.
:type preserve_vnet: bool
"""
_validation = {
'target_slot': {'required': True},
'preserve_vnet': {'required': True},
}
_attribute_map = {
'target_slot': {'key': 'targetSlot', 'type': 'str'},
'preserve_vnet': {'key': 'preserveVnet', 'type': 'bool'},
}
def __init__(
self,
*,
target_slot: str,
preserve_vnet: bool,
**kwargs
):
super(CsmSlotEntity, self).__init__(**kwargs)
self.target_slot = target_slot
self.preserve_vnet = preserve_vnet
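# Illustrative sketch (not part of the generated model code): slot-swap parameters
# targeting a slot assumed to be named "staging" while preserving the VNet binding.
def _example_slot_swap() -> CsmSlotEntity:
    return CsmSlotEntity(target_slot="staging", preserve_vnet=True)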
class CsmUsageQuota(msrest.serialization.Model):
"""Usage of the quota resource.
:param unit: Units of measurement for the quota resource.
:type unit: str
:param next_reset_time: Next reset time for the resource counter.
:type next_reset_time: ~datetime.datetime
:param current_value: The current value of the resource counter.
:type current_value: long
:param limit: The resource limit.
:type limit: long
:param name: Quota name.
:type name: ~azure.mgmt.web.v2020_06_01.models.LocalizableString
"""
_attribute_map = {
'unit': {'key': 'unit', 'type': 'str'},
'next_reset_time': {'key': 'nextResetTime', 'type': 'iso-8601'},
'current_value': {'key': 'currentValue', 'type': 'long'},
'limit': {'key': 'limit', 'type': 'long'},
'name': {'key': 'name', 'type': 'LocalizableString'},
}
def __init__(
self,
*,
unit: Optional[str] = None,
next_reset_time: Optional[datetime.datetime] = None,
current_value: Optional[int] = None,
limit: Optional[int] = None,
name: Optional["LocalizableString"] = None,
**kwargs
):
super(CsmUsageQuota, self).__init__(**kwargs)
self.unit = unit
self.next_reset_time = next_reset_time
self.current_value = current_value
self.limit = limit
self.name = name
class CsmUsageQuotaCollection(msrest.serialization.Model):
"""Collection of CSM usage quotas.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.CsmUsageQuota]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[CsmUsageQuota]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["CsmUsageQuota"],
**kwargs
):
super(CsmUsageQuotaCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class CustomHostnameAnalysisResult(ProxyOnlyResource):
"""Custom domain analysis.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar is_hostname_already_verified: :code:`<code>true</code>` if hostname is already verified;
otherwise, :code:`<code>false</code>`.
:vartype is_hostname_already_verified: bool
:ivar custom_domain_verification_test: DNS verification test result. Possible values include:
"Passed", "Failed", "Skipped".
:vartype custom_domain_verification_test: str or
~azure.mgmt.web.v2020_06_01.models.DnsVerificationTestResult
:ivar custom_domain_verification_failure_info: Raw failure information if DNS verification
fails.
:vartype custom_domain_verification_failure_info:
~azure.mgmt.web.v2020_06_01.models.ErrorEntity
:ivar has_conflict_on_scale_unit: :code:`<code>true</code>` if there is a conflict on a scale
unit; otherwise, :code:`<code>false</code>`.
:vartype has_conflict_on_scale_unit: bool
:ivar has_conflict_across_subscription: :code:`<code>true</code>` if there is a conflict across
subscriptions; otherwise, :code:`<code>false</code>`.
:vartype has_conflict_across_subscription: bool
:ivar conflicting_app_resource_id: Name of the conflicting app on scale unit if it's within the
same subscription.
:vartype conflicting_app_resource_id: str
:param c_name_records: CName records controller can see for this hostname.
:type c_name_records: list[str]
:param txt_records: TXT records controller can see for this hostname.
:type txt_records: list[str]
:param a_records: A records controller can see for this hostname.
:type a_records: list[str]
:param alternate_c_name_records: Alternate CName records controller can see for this hostname.
:type alternate_c_name_records: list[str]
:param alternate_txt_records: Alternate TXT records controller can see for this hostname.
:type alternate_txt_records: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'is_hostname_already_verified': {'readonly': True},
'custom_domain_verification_test': {'readonly': True},
'custom_domain_verification_failure_info': {'readonly': True},
'has_conflict_on_scale_unit': {'readonly': True},
'has_conflict_across_subscription': {'readonly': True},
'conflicting_app_resource_id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'is_hostname_already_verified': {'key': 'properties.isHostnameAlreadyVerified', 'type': 'bool'},
'custom_domain_verification_test': {'key': 'properties.customDomainVerificationTest', 'type': 'str'},
'custom_domain_verification_failure_info': {'key': 'properties.customDomainVerificationFailureInfo', 'type': 'ErrorEntity'},
'has_conflict_on_scale_unit': {'key': 'properties.hasConflictOnScaleUnit', 'type': 'bool'},
'has_conflict_across_subscription': {'key': 'properties.hasConflictAcrossSubscription', 'type': 'bool'},
'conflicting_app_resource_id': {'key': 'properties.conflictingAppResourceId', 'type': 'str'},
'c_name_records': {'key': 'properties.cNameRecords', 'type': '[str]'},
'txt_records': {'key': 'properties.txtRecords', 'type': '[str]'},
'a_records': {'key': 'properties.aRecords', 'type': '[str]'},
'alternate_c_name_records': {'key': 'properties.alternateCNameRecords', 'type': '[str]'},
'alternate_txt_records': {'key': 'properties.alternateTxtRecords', 'type': '[str]'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
c_name_records: Optional[List[str]] = None,
txt_records: Optional[List[str]] = None,
a_records: Optional[List[str]] = None,
alternate_c_name_records: Optional[List[str]] = None,
alternate_txt_records: Optional[List[str]] = None,
**kwargs
):
super(CustomHostnameAnalysisResult, self).__init__(kind=kind, **kwargs)
self.is_hostname_already_verified = None
self.custom_domain_verification_test = None
self.custom_domain_verification_failure_info = None
self.has_conflict_on_scale_unit = None
self.has_conflict_across_subscription = None
self.conflicting_app_resource_id = None
self.c_name_records = c_name_records
self.txt_records = txt_records
self.a_records = a_records
self.alternate_c_name_records = alternate_c_name_records
self.alternate_txt_records = alternate_txt_records
class CustomOpenIdConnectProvider(ProxyOnlyResource):
"""CustomOpenIdConnectProvider.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param enabled:
:type enabled: bool
:param registration:
:type registration: ~azure.mgmt.web.v2020_06_01.models.OpenIdConnectRegistration
:param login:
:type login: ~azure.mgmt.web.v2020_06_01.models.OpenIdConnectLogin
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
'registration': {'key': 'properties.registration', 'type': 'OpenIdConnectRegistration'},
'login': {'key': 'properties.login', 'type': 'OpenIdConnectLogin'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
enabled: Optional[bool] = None,
registration: Optional["OpenIdConnectRegistration"] = None,
login: Optional["OpenIdConnectLogin"] = None,
**kwargs
):
super(CustomOpenIdConnectProvider, self).__init__(kind=kind, **kwargs)
self.enabled = enabled
self.registration = registration
self.login = login
class DatabaseBackupSetting(msrest.serialization.Model):
"""Database backup settings.
All required parameters must be populated in order to send to Azure.
:param database_type: Required. Database type (e.g. SqlAzure / MySql). Possible values include:
"SqlAzure", "MySql", "LocalMySql", "PostgreSql".
:type database_type: str or ~azure.mgmt.web.v2020_06_01.models.DatabaseType
:param name:
:type name: str
:param connection_string_name: Contains a connection string name that is linked to the
SiteConfig.ConnectionStrings.
This is used during restore with overwrite connection strings options.
:type connection_string_name: str
:param connection_string: Contains a connection string to a database which is being backed up
or restored. If the restore should happen to a new database, the database name inside is the
new one.
:type connection_string: str
"""
_validation = {
'database_type': {'required': True},
}
_attribute_map = {
'database_type': {'key': 'databaseType', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'connection_string_name': {'key': 'connectionStringName', 'type': 'str'},
'connection_string': {'key': 'connectionString', 'type': 'str'},
}
def __init__(
self,
*,
database_type: Union[str, "DatabaseType"],
name: Optional[str] = None,
connection_string_name: Optional[str] = None,
connection_string: Optional[str] = None,
**kwargs
):
super(DatabaseBackupSetting, self).__init__(**kwargs)
self.database_type = database_type
self.name = name
self.connection_string_name = connection_string_name
self.connection_string = connection_string
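# Illustrative sketch (not part of the generated model code): a database backup
# setting that points at a connection string assumed to be defined in
# SiteConfig.ConnectionStrings under the placeholder name "DefaultConnection".
def _example_database_backup_setting() -> DatabaseBackupSetting:
    return DatabaseBackupSetting(
        database_type="SqlAzure",  # required; one of the documented DatabaseType values
        name="appdb",
        connection_string_name="DefaultConnection",
    )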
class DataSource(msrest.serialization.Model):
"""Class representing data source used by the detectors.
:param instructions: Instructions if any for the data source.
:type instructions: list[str]
:param data_source_uri: Datasource Uri Links.
:type data_source_uri: list[~azure.mgmt.web.v2020_06_01.models.NameValuePair]
"""
_attribute_map = {
'instructions': {'key': 'instructions', 'type': '[str]'},
'data_source_uri': {'key': 'dataSourceUri', 'type': '[NameValuePair]'},
}
def __init__(
self,
*,
instructions: Optional[List[str]] = None,
data_source_uri: Optional[List["NameValuePair"]] = None,
**kwargs
):
super(DataSource, self).__init__(**kwargs)
self.instructions = instructions
self.data_source_uri = data_source_uri
class DataTableResponseColumn(msrest.serialization.Model):
"""Column definition.
:param column_name: Name of the column.
:type column_name: str
:param data_type: Data type which looks like 'String' or 'Int32'.
:type data_type: str
:param column_type: Column Type.
:type column_type: str
"""
_attribute_map = {
'column_name': {'key': 'columnName', 'type': 'str'},
'data_type': {'key': 'dataType', 'type': 'str'},
'column_type': {'key': 'columnType', 'type': 'str'},
}
def __init__(
self,
*,
column_name: Optional[str] = None,
data_type: Optional[str] = None,
column_type: Optional[str] = None,
**kwargs
):
super(DataTableResponseColumn, self).__init__(**kwargs)
self.column_name = column_name
self.data_type = data_type
self.column_type = column_type
class DataTableResponseObject(msrest.serialization.Model):
"""Data Table which defines columns and raw row values.
:param table_name: Name of the table.
:type table_name: str
:param columns: List of columns with data types.
:type columns: list[~azure.mgmt.web.v2020_06_01.models.DataTableResponseColumn]
:param rows: Raw row values.
:type rows: list[list[str]]
"""
_attribute_map = {
'table_name': {'key': 'tableName', 'type': 'str'},
'columns': {'key': 'columns', 'type': '[DataTableResponseColumn]'},
'rows': {'key': 'rows', 'type': '[[str]]'},
}
def __init__(
self,
*,
table_name: Optional[str] = None,
columns: Optional[List["DataTableResponseColumn"]] = None,
rows: Optional[List[List[str]]] = None,
**kwargs
):
super(DataTableResponseObject, self).__init__(**kwargs)
self.table_name = table_name
self.columns = columns
self.rows = rows
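# Illustrative sketch (not part of the generated model code): a small data table
# with two typed columns and two raw rows. Column names, types, and row values are
# placeholders; rows are lists of strings, matching the declared '[[str]]' type.
def _example_data_table() -> DataTableResponseObject:
    return DataTableResponseObject(
        table_name="requests",
        columns=[
            DataTableResponseColumn(column_name="timestamp", data_type="DateTime"),
            DataTableResponseColumn(column_name="statusCode", data_type="Int32"),
        ],
        rows=[
            ["2020-06-01T00:00:00Z", "200"],
            ["2020-06-01T00:01:00Z", "503"],
        ],
    )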
class DefaultErrorResponse(msrest.serialization.Model):
"""App Service error response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar error: Error model.
:vartype error: ~azure.mgmt.web.v2020_06_01.models.DefaultErrorResponseError
"""
_validation = {
'error': {'readonly': True},
}
_attribute_map = {
'error': {'key': 'error', 'type': 'DefaultErrorResponseError'},
}
def __init__(
self,
**kwargs
):
super(DefaultErrorResponse, self).__init__(**kwargs)
self.error = None
class DefaultErrorResponseError(msrest.serialization.Model):
"""Error model.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: Standardized string to programmatically identify the error.
:vartype code: str
:ivar message: Detailed error description and debugging information.
:vartype message: str
:ivar target: Detailed error description and debugging information.
:vartype target: str
:param details:
:type details: list[~azure.mgmt.web.v2020_06_01.models.DefaultErrorResponseErrorDetailsItem]
:ivar innererror: More information to debug error.
:vartype innererror: str
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'target': {'readonly': True},
'innererror': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[DefaultErrorResponseErrorDetailsItem]'},
'innererror': {'key': 'innererror', 'type': 'str'},
}
def __init__(
self,
*,
details: Optional[List["DefaultErrorResponseErrorDetailsItem"]] = None,
**kwargs
):
super(DefaultErrorResponseError, self).__init__(**kwargs)
self.code = None
self.message = None
self.target = None
self.details = details
self.innererror = None
class DefaultErrorResponseErrorDetailsItem(msrest.serialization.Model):
"""Detailed errors.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: Standardized string to programmatically identify the error.
:vartype code: str
:ivar message: Detailed error description and debugging information.
:vartype message: str
:ivar target: Detailed error description and debugging information.
:vartype target: str
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'target': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DefaultErrorResponseErrorDetailsItem, self).__init__(**kwargs)
self.code = None
self.message = None
self.target = None
class DeletedAppRestoreRequest(ProxyOnlyResource):
"""Details about restoring a deleted app.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param deleted_site_id: ARM resource ID of the deleted app. Example:
/subscriptions/{subId}/providers/Microsoft.Web/deletedSites/{deletedSiteId}.
:type deleted_site_id: str
:param recover_configuration: If true, deleted site configuration, in addition to content, will
be restored.
:type recover_configuration: bool
:param snapshot_time: Point in time to restore the deleted app from, formatted as a DateTime
string.
If unspecified, default value is the time that the app was deleted.
:type snapshot_time: str
:param use_dr_secondary: If true, the snapshot is retrieved from DRSecondary endpoint.
:type use_dr_secondary: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'deleted_site_id': {'key': 'properties.deletedSiteId', 'type': 'str'},
'recover_configuration': {'key': 'properties.recoverConfiguration', 'type': 'bool'},
'snapshot_time': {'key': 'properties.snapshotTime', 'type': 'str'},
'use_dr_secondary': {'key': 'properties.useDRSecondary', 'type': 'bool'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
deleted_site_id: Optional[str] = None,
recover_configuration: Optional[bool] = None,
snapshot_time: Optional[str] = None,
use_dr_secondary: Optional[bool] = None,
**kwargs
):
super(DeletedAppRestoreRequest, self).__init__(kind=kind, **kwargs)
self.deleted_site_id = deleted_site_id
self.recover_configuration = recover_configuration
self.snapshot_time = snapshot_time
self.use_dr_secondary = use_dr_secondary
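# Illustrative sketch (not part of the generated model code): a restore request for
# a deleted app. The subscription id and deleted-site id in the ARM resource ID are
# placeholders; snapshot_time is omitted so the deletion time is used by default.
def _example_deleted_app_restore() -> DeletedAppRestoreRequest:
    return DeletedAppRestoreRequest(
        deleted_site_id=(
            "/subscriptions/00000000-0000-0000-0000-000000000000"
            "/providers/Microsoft.Web/deletedSites/12345"
        ),
        recover_configuration=True,
    )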
class DeletedSite(ProxyOnlyResource):
"""A deleted app.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar deleted_site_id: Numeric id for the deleted site.
:vartype deleted_site_id: int
:ivar deleted_timestamp: Time in UTC when the app was deleted.
:vartype deleted_timestamp: str
:ivar subscription: Subscription containing the deleted site.
:vartype subscription: str
:ivar resource_group: ResourceGroup that contained the deleted site.
:vartype resource_group: str
:ivar deleted_site_name: Name of the deleted site.
:vartype deleted_site_name: str
:ivar slot: Slot of the deleted site.
:vartype slot: str
:ivar kind_properties_kind: Kind of site that was deleted.
:vartype kind_properties_kind: str
:ivar geo_region_name: Geo Region of the deleted site.
:vartype geo_region_name: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'deleted_site_id': {'readonly': True},
'deleted_timestamp': {'readonly': True},
'subscription': {'readonly': True},
'resource_group': {'readonly': True},
'deleted_site_name': {'readonly': True},
'slot': {'readonly': True},
'kind_properties_kind': {'readonly': True},
'geo_region_name': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'deleted_site_id': {'key': 'properties.deletedSiteId', 'type': 'int'},
'deleted_timestamp': {'key': 'properties.deletedTimestamp', 'type': 'str'},
'subscription': {'key': 'properties.subscription', 'type': 'str'},
'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'},
'deleted_site_name': {'key': 'properties.deletedSiteName', 'type': 'str'},
'slot': {'key': 'properties.slot', 'type': 'str'},
'kind_properties_kind': {'key': 'properties.kind', 'type': 'str'},
'geo_region_name': {'key': 'properties.geoRegionName', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(DeletedSite, self).__init__(kind=kind, **kwargs)
self.deleted_site_id = None
self.deleted_timestamp = None
self.subscription = None
self.resource_group = None
self.deleted_site_name = None
self.slot = None
self.kind_properties_kind = None
self.geo_region_name = None
class DeletedWebAppCollection(msrest.serialization.Model):
"""Collection of deleted apps.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.DeletedSite]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DeletedSite]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["DeletedSite"],
**kwargs
):
super(DeletedWebAppCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class Deployment(ProxyOnlyResource):
"""User credentials used for publishing activity.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param status: Deployment status.
:type status: int
:param message: Details about deployment status.
:type message: str
:param author: Who authored the deployment.
:type author: str
:param deployer: Who performed the deployment.
:type deployer: str
:param author_email: Author email.
:type author_email: str
:param start_time: Start time.
:type start_time: ~datetime.datetime
:param end_time: End time.
:type end_time: ~datetime.datetime
:param active: True if deployment is currently active, false if completed and null if not
started.
:type active: bool
:param details: Details on deployment.
:type details: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'int'},
'message': {'key': 'properties.message', 'type': 'str'},
'author': {'key': 'properties.author', 'type': 'str'},
'deployer': {'key': 'properties.deployer', 'type': 'str'},
'author_email': {'key': 'properties.author_email', 'type': 'str'},
'start_time': {'key': 'properties.start_time', 'type': 'iso-8601'},
'end_time': {'key': 'properties.end_time', 'type': 'iso-8601'},
'active': {'key': 'properties.active', 'type': 'bool'},
'details': {'key': 'properties.details', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
status: Optional[int] = None,
message: Optional[str] = None,
author: Optional[str] = None,
deployer: Optional[str] = None,
author_email: Optional[str] = None,
start_time: Optional[datetime.datetime] = None,
end_time: Optional[datetime.datetime] = None,
active: Optional[bool] = None,
details: Optional[str] = None,
**kwargs
):
super(Deployment, self).__init__(kind=kind, **kwargs)
self.status = status
self.message = message
self.author = author
self.deployer = deployer
self.author_email = author_email
self.start_time = start_time
self.end_time = end_time
self.active = active
self.details = details
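# Illustrative sketch (not part of the generated model code): a Deployment record
# as a client might construct it. The numeric status code and the author/deployer
# strings are placeholder values; timestamps are omitted for brevity.
def _example_deployment() -> Deployment:
    return Deployment(
        status=4,  # placeholder numeric status code
        message="Deployed from CI",
        author="jane.doe",
        deployer="example-pipeline",
        active=True,
    )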
class DeploymentCollection(msrest.serialization.Model):
"""Collection of app deployments.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.Deployment]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Deployment]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["Deployment"],
**kwargs
):
super(DeploymentCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class DeploymentLocations(msrest.serialization.Model):
"""List of available locations (regions or App Service Environments) for
deployment of App Service resources.
:param locations: Available regions.
:type locations: list[~azure.mgmt.web.v2020_06_01.models.GeoRegion]
:param hosting_environments: Available App Service Environments with full descriptions of the
environments.
:type hosting_environments: list[~azure.mgmt.web.v2020_06_01.models.AppServiceEnvironment]
:param hosting_environment_deployment_infos: Available App Service Environments with basic
information.
:type hosting_environment_deployment_infos:
list[~azure.mgmt.web.v2020_06_01.models.HostingEnvironmentDeploymentInfo]
"""
_attribute_map = {
'locations': {'key': 'locations', 'type': '[GeoRegion]'},
'hosting_environments': {'key': 'hostingEnvironments', 'type': '[AppServiceEnvironment]'},
'hosting_environment_deployment_infos': {'key': 'hostingEnvironmentDeploymentInfos', 'type': '[HostingEnvironmentDeploymentInfo]'},
}
def __init__(
self,
*,
locations: Optional[List["GeoRegion"]] = None,
hosting_environments: Optional[List["AppServiceEnvironment"]] = None,
hosting_environment_deployment_infos: Optional[List["HostingEnvironmentDeploymentInfo"]] = None,
**kwargs
):
super(DeploymentLocations, self).__init__(**kwargs)
self.locations = locations
self.hosting_environments = hosting_environments
self.hosting_environment_deployment_infos = hosting_environment_deployment_infos
class DetectorAbnormalTimePeriod(msrest.serialization.Model):
"""Class representing Abnormal Time Period detected.
:param start_time: Start time of the correlated event.
:type start_time: ~datetime.datetime
:param end_time: End time of the correlated event.
:type end_time: ~datetime.datetime
:param message: Message describing the event.
:type message: str
:param source: Represents the name of the Detector.
:type source: str
:param priority: Represents the rank of the Detector.
:type priority: float
:param meta_data: Downtime metadata.
:type meta_data: list[list[~azure.mgmt.web.v2020_06_01.models.NameValuePair]]
:param type: Represents the type of the Detector. Possible values include: "ServiceIncident",
"AppDeployment", "AppCrash", "RuntimeIssueDetected", "AseDeployment", "UserIssue",
"PlatformIssue", "Other".
:type type: str or ~azure.mgmt.web.v2020_06_01.models.IssueType
:param solutions: List of proposed solutions.
:type solutions: list[~azure.mgmt.web.v2020_06_01.models.Solution]
"""
_attribute_map = {
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'message': {'key': 'message', 'type': 'str'},
'source': {'key': 'source', 'type': 'str'},
'priority': {'key': 'priority', 'type': 'float'},
'meta_data': {'key': 'metaData', 'type': '[[NameValuePair]]'},
'type': {'key': 'type', 'type': 'str'},
'solutions': {'key': 'solutions', 'type': '[Solution]'},
}
def __init__(
self,
*,
start_time: Optional[datetime.datetime] = None,
end_time: Optional[datetime.datetime] = None,
message: Optional[str] = None,
source: Optional[str] = None,
priority: Optional[float] = None,
meta_data: Optional[List[List["NameValuePair"]]] = None,
type: Optional[Union[str, "IssueType"]] = None,
solutions: Optional[List["Solution"]] = None,
**kwargs
):
super(DetectorAbnormalTimePeriod, self).__init__(**kwargs)
self.start_time = start_time
self.end_time = end_time
self.message = message
self.source = source
self.priority = priority
self.meta_data = meta_data
self.type = type
self.solutions = solutions
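
# --- Illustrative sketch (not part of the generated model code) ---------------
# Shows how a DetectorAbnormalTimePeriod could be built by hand; every value
# below is a hypothetical placeholder, and "AppCrash" is one of the IssueType
# values listed in the docstring above.
def _example_abnormal_time_period() -> "DetectorAbnormalTimePeriod":
    import datetime as _dt  # local import keeps the sketch self-contained
    return DetectorAbnormalTimePeriod(
        start_time=_dt.datetime(2020, 6, 1, 12, 0, 0),
        end_time=_dt.datetime(2020, 6, 1, 12, 30, 0),
        message="CPU usage exceeded the expected range",
        source="cpuanalysis",
        priority=1.0,
        type="AppCrash",
    )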
class DetectorDefinition(ProxyOnlyResource):
"""Class representing detector definition.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar display_name: Display name of the detector.
:vartype display_name: str
:ivar description: Description of the detector.
:vartype description: str
:ivar rank: Detector Rank.
:vartype rank: float
:ivar is_enabled: Flag representing whether detector is enabled or not.
:vartype is_enabled: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'display_name': {'readonly': True},
'description': {'readonly': True},
'rank': {'readonly': True},
'is_enabled': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'display_name': {'key': 'properties.displayName', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'rank': {'key': 'properties.rank', 'type': 'float'},
'is_enabled': {'key': 'properties.isEnabled', 'type': 'bool'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(DetectorDefinition, self).__init__(kind=kind, **kwargs)
self.display_name = None
self.description = None
self.rank = None
self.is_enabled = None
class DetectorInfo(msrest.serialization.Model):
"""Definition of Detector.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar description: Short description of the detector and its purpose.
:vartype description: str
:ivar category: Support Category.
:vartype category: str
:ivar sub_category: Support Sub Category.
:vartype sub_category: str
:ivar support_topic_id: Support Topic Id.
:vartype support_topic_id: str
"""
_validation = {
'description': {'readonly': True},
'category': {'readonly': True},
'sub_category': {'readonly': True},
'support_topic_id': {'readonly': True},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'category': {'key': 'category', 'type': 'str'},
'sub_category': {'key': 'subCategory', 'type': 'str'},
'support_topic_id': {'key': 'supportTopicId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DetectorInfo, self).__init__(**kwargs)
self.description = None
self.category = None
self.sub_category = None
self.support_topic_id = None
class DetectorResponse(ProxyOnlyResource):
"""Class representing Response from Detector.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param metadata: metadata for the detector.
:type metadata: ~azure.mgmt.web.v2020_06_01.models.DetectorInfo
:param dataset: Data Set.
:type dataset: list[~azure.mgmt.web.v2020_06_01.models.DiagnosticData]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'metadata': {'key': 'properties.metadata', 'type': 'DetectorInfo'},
'dataset': {'key': 'properties.dataset', 'type': '[DiagnosticData]'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
metadata: Optional["DetectorInfo"] = None,
dataset: Optional[List["DiagnosticData"]] = None,
**kwargs
):
super(DetectorResponse, self).__init__(kind=kind, **kwargs)
self.metadata = metadata
self.dataset = dataset
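
# --- Illustrative sketch (not part of the generated model code) ---------------
# A DetectorResponse wrapping a single (empty) DiagnosticData entry, converted
# to its wire-format dict with the serialize() helper that msrest models
# provide; read-only fields such as id/name/type are populated by the service
# and therefore omitted here.
def _example_detector_response_payload() -> dict:
    response = DetectorResponse(kind="detector", dataset=[DiagnosticData()])
    return response.serialize()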
class DetectorResponseCollection(msrest.serialization.Model):
"""Collection of detector responses.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.DetectorResponse]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DetectorResponse]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["DetectorResponse"],
**kwargs
):
super(DetectorResponseCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class DiagnosticAnalysis(ProxyOnlyResource):
"""Class representing a diagnostic analysis done on an application.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param start_time: Start time of the period.
:type start_time: ~datetime.datetime
:param end_time: End time of the period.
:type end_time: ~datetime.datetime
:param abnormal_time_periods: List of time periods.
:type abnormal_time_periods: list[~azure.mgmt.web.v2020_06_01.models.AbnormalTimePeriod]
:param payload: Data by each detector.
:type payload: list[~azure.mgmt.web.v2020_06_01.models.AnalysisData]
    :param non_correlated_detectors: Data by each detector for detectors that did not correlate.
:type non_correlated_detectors: list[~azure.mgmt.web.v2020_06_01.models.DetectorDefinition]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},
'end_time': {'key': 'properties.endTime', 'type': 'iso-8601'},
'abnormal_time_periods': {'key': 'properties.abnormalTimePeriods', 'type': '[AbnormalTimePeriod]'},
'payload': {'key': 'properties.payload', 'type': '[AnalysisData]'},
'non_correlated_detectors': {'key': 'properties.nonCorrelatedDetectors', 'type': '[DetectorDefinition]'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
start_time: Optional[datetime.datetime] = None,
end_time: Optional[datetime.datetime] = None,
abnormal_time_periods: Optional[List["AbnormalTimePeriod"]] = None,
payload: Optional[List["AnalysisData"]] = None,
non_correlated_detectors: Optional[List["DetectorDefinition"]] = None,
**kwargs
):
super(DiagnosticAnalysis, self).__init__(kind=kind, **kwargs)
self.start_time = start_time
self.end_time = end_time
self.abnormal_time_periods = abnormal_time_periods
self.payload = payload
self.non_correlated_detectors = non_correlated_detectors
class DiagnosticAnalysisCollection(msrest.serialization.Model):
"""Collection of Diagnostic Analyses.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.AnalysisDefinition]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[AnalysisDefinition]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["AnalysisDefinition"],
**kwargs
):
super(DiagnosticAnalysisCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class DiagnosticCategory(ProxyOnlyResource):
"""Class representing detector definition.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar description: Description of the diagnostic category.
:vartype description: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'description': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(DiagnosticCategory, self).__init__(kind=kind, **kwargs)
self.description = None
class DiagnosticCategoryCollection(msrest.serialization.Model):
"""Collection of Diagnostic Categories.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.DiagnosticCategory]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DiagnosticCategory]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["DiagnosticCategory"],
**kwargs
):
super(DiagnosticCategoryCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
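
# --- Illustrative sketch (not part of the generated model code) ---------------
# The *Collection models all follow one pattern: `value` is required at
# construction time, while `next_link` is read-only and filled in by the
# service. A minimal, hypothetical instance:
def _example_diagnostic_category_collection() -> "DiagnosticCategoryCollection":
    return DiagnosticCategoryCollection(
        value=[DiagnosticCategory(kind="diagnosticCategory")]
    )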
class DiagnosticData(msrest.serialization.Model):
"""Set of data with rendering instructions.
:param table: Data in table form.
:type table: ~azure.mgmt.web.v2020_06_01.models.DataTableResponseObject
:param rendering_properties: Properties that describe how the table should be rendered.
:type rendering_properties: ~azure.mgmt.web.v2020_06_01.models.Rendering
"""
_attribute_map = {
'table': {'key': 'table', 'type': 'DataTableResponseObject'},
'rendering_properties': {'key': 'renderingProperties', 'type': 'Rendering'},
}
def __init__(
self,
*,
table: Optional["DataTableResponseObject"] = None,
rendering_properties: Optional["Rendering"] = None,
**kwargs
):
super(DiagnosticData, self).__init__(**kwargs)
self.table = table
self.rendering_properties = rendering_properties
class DiagnosticDetectorCollection(msrest.serialization.Model):
"""Collection of Diagnostic Detectors.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.DetectorDefinition]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DetectorDefinition]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["DetectorDefinition"],
**kwargs
):
super(DiagnosticDetectorCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class DiagnosticDetectorResponse(ProxyOnlyResource):
"""Class representing Response from Diagnostic Detectors.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param start_time: Start time of the period.
:type start_time: ~datetime.datetime
:param end_time: End time of the period.
:type end_time: ~datetime.datetime
:param issue_detected: Flag representing Issue was detected.
:type issue_detected: bool
:param detector_definition: Detector's definition.
:type detector_definition: ~azure.mgmt.web.v2020_06_01.models.DetectorDefinition
:param metrics: Metrics provided by the detector.
:type metrics: list[~azure.mgmt.web.v2020_06_01.models.DiagnosticMetricSet]
:param abnormal_time_periods: List of Correlated events found by the detector.
:type abnormal_time_periods:
list[~azure.mgmt.web.v2020_06_01.models.DetectorAbnormalTimePeriod]
:param data: Additional Data that detector wants to send.
:type data: list[list[~azure.mgmt.web.v2020_06_01.models.NameValuePair]]
:param response_meta_data: Meta Data.
:type response_meta_data: ~azure.mgmt.web.v2020_06_01.models.ResponseMetaData
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},
'end_time': {'key': 'properties.endTime', 'type': 'iso-8601'},
'issue_detected': {'key': 'properties.issueDetected', 'type': 'bool'},
'detector_definition': {'key': 'properties.detectorDefinition', 'type': 'DetectorDefinition'},
'metrics': {'key': 'properties.metrics', 'type': '[DiagnosticMetricSet]'},
'abnormal_time_periods': {'key': 'properties.abnormalTimePeriods', 'type': '[DetectorAbnormalTimePeriod]'},
'data': {'key': 'properties.data', 'type': '[[NameValuePair]]'},
'response_meta_data': {'key': 'properties.responseMetaData', 'type': 'ResponseMetaData'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
start_time: Optional[datetime.datetime] = None,
end_time: Optional[datetime.datetime] = None,
issue_detected: Optional[bool] = None,
detector_definition: Optional["DetectorDefinition"] = None,
metrics: Optional[List["DiagnosticMetricSet"]] = None,
abnormal_time_periods: Optional[List["DetectorAbnormalTimePeriod"]] = None,
data: Optional[List[List["NameValuePair"]]] = None,
response_meta_data: Optional["ResponseMetaData"] = None,
**kwargs
):
super(DiagnosticDetectorResponse, self).__init__(kind=kind, **kwargs)
self.start_time = start_time
self.end_time = end_time
self.issue_detected = issue_detected
self.detector_definition = detector_definition
self.metrics = metrics
self.abnormal_time_periods = abnormal_time_periods
self.data = data
self.response_meta_data = response_meta_data
class DiagnosticMetricSample(msrest.serialization.Model):
"""Class representing Diagnostic Metric.
:param timestamp: Time at which metric is measured.
:type timestamp: ~datetime.datetime
    :param role_instance: Role Instance. Null if this counter is not per-instance.
     This is returned and should be whichever instance name we desire to be returned,
     i.e. CPU and Memory return RDWORKERNAME (LargeDed..._IN_0),
     where RDWORKERNAME is the machine name below and the RoleInstance name is in parentheses.
:type role_instance: str
    :param total: Total value of the metric. If multiple measurements are made, this will have the
     sum of all of them.
:type total: float
:param maximum: Maximum of the metric sampled during the time period.
:type maximum: float
:param minimum: Minimum of the metric sampled during the time period.
:type minimum: float
:param is_aggregated: Whether the values are aggregates across all workers or not.
:type is_aggregated: bool
"""
_attribute_map = {
'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
'role_instance': {'key': 'roleInstance', 'type': 'str'},
'total': {'key': 'total', 'type': 'float'},
'maximum': {'key': 'maximum', 'type': 'float'},
'minimum': {'key': 'minimum', 'type': 'float'},
'is_aggregated': {'key': 'isAggregated', 'type': 'bool'},
}
def __init__(
self,
*,
timestamp: Optional[datetime.datetime] = None,
role_instance: Optional[str] = None,
total: Optional[float] = None,
maximum: Optional[float] = None,
minimum: Optional[float] = None,
is_aggregated: Optional[bool] = None,
**kwargs
):
super(DiagnosticMetricSample, self).__init__(**kwargs)
self.timestamp = timestamp
self.role_instance = role_instance
self.total = total
self.maximum = maximum
self.minimum = minimum
self.is_aggregated = is_aggregated
class DiagnosticMetricSet(msrest.serialization.Model):
"""Class representing Diagnostic Metric information.
:param name: Name of the metric.
:type name: str
:param unit: Metric's unit.
:type unit: str
:param start_time: Start time of the period.
:type start_time: ~datetime.datetime
:param end_time: End time of the period.
:type end_time: ~datetime.datetime
:param time_grain: Presented time grain. Supported grains at the moment are PT1M, PT1H, P1D.
:type time_grain: str
:param values: Collection of metric values for the selected period based on the
{Microsoft.Web.Hosting.Administration.DiagnosticMetricSet.TimeGrain}.
:type values: list[~azure.mgmt.web.v2020_06_01.models.DiagnosticMetricSample]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'unit': {'key': 'unit', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'time_grain': {'key': 'timeGrain', 'type': 'str'},
'values': {'key': 'values', 'type': '[DiagnosticMetricSample]'},
}
def __init__(
self,
*,
name: Optional[str] = None,
unit: Optional[str] = None,
start_time: Optional[datetime.datetime] = None,
end_time: Optional[datetime.datetime] = None,
time_grain: Optional[str] = None,
values: Optional[List["DiagnosticMetricSample"]] = None,
**kwargs
):
super(DiagnosticMetricSet, self).__init__(**kwargs)
self.name = name
self.unit = unit
self.start_time = start_time
self.end_time = end_time
self.time_grain = time_grain
self.values = values
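
# --- Illustrative sketch (not part of the generated model code) ---------------
# A DiagnosticMetricSet holding two hourly samples; the metric name, unit and
# numbers are hypothetical placeholders chosen to match the PT1H time grain
# mentioned in the docstring above.
def _example_metric_set() -> "DiagnosticMetricSet":
    import datetime as _dt
    start = _dt.datetime(2020, 6, 1, 0, 0, 0)
    samples = [
        DiagnosticMetricSample(timestamp=start, total=42.0, maximum=80.0, minimum=5.0, is_aggregated=True),
        DiagnosticMetricSample(timestamp=start + _dt.timedelta(hours=1), total=37.5, maximum=70.0, minimum=4.0, is_aggregated=True),
    ]
    return DiagnosticMetricSet(
        name="CpuPercent",
        unit="Percent",
        start_time=start,
        end_time=start + _dt.timedelta(hours=2),
        time_grain="PT1H",
        values=samples,
    )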
class Dimension(msrest.serialization.Model):
"""Dimension of a resource metric. For e.g. instance specific HTTP requests for a web app,
where instance name is dimension of the metric HTTP request.
:param name:
:type name: str
:param display_name:
:type display_name: str
:param internal_name:
:type internal_name: str
:param to_be_exported_for_shoebox:
:type to_be_exported_for_shoebox: bool
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'internal_name': {'key': 'internalName', 'type': 'str'},
'to_be_exported_for_shoebox': {'key': 'toBeExportedForShoebox', 'type': 'bool'},
}
def __init__(
self,
*,
name: Optional[str] = None,
display_name: Optional[str] = None,
internal_name: Optional[str] = None,
to_be_exported_for_shoebox: Optional[bool] = None,
**kwargs
):
super(Dimension, self).__init__(**kwargs)
self.name = name
self.display_name = display_name
self.internal_name = internal_name
self.to_be_exported_for_shoebox = to_be_exported_for_shoebox
class Domain(Resource):
"""Information about a domain.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:param location: Required. Resource Location.
:type location: str
:ivar type: Resource type.
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param contact_admin: Administrative contact.
:type contact_admin: ~azure.mgmt.web.v2020_06_01.models.Contact
:param contact_billing: Billing contact.
:type contact_billing: ~azure.mgmt.web.v2020_06_01.models.Contact
:param contact_registrant: Registrant contact.
:type contact_registrant: ~azure.mgmt.web.v2020_06_01.models.Contact
:param contact_tech: Technical contact.
:type contact_tech: ~azure.mgmt.web.v2020_06_01.models.Contact
:ivar registration_status: Domain registration status. Possible values include: "Active",
"Awaiting", "Cancelled", "Confiscated", "Disabled", "Excluded", "Expired", "Failed", "Held",
"Locked", "Parked", "Pending", "Reserved", "Reverted", "Suspended", "Transferred", "Unknown",
"Unlocked", "Unparked", "Updated", "JsonConverterFailed".
:vartype registration_status: str or ~azure.mgmt.web.v2020_06_01.models.DomainStatus
:ivar provisioning_state: Domain provisioning state. Possible values include: "Succeeded",
"Failed", "Canceled", "InProgress", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.web.v2020_06_01.models.ProvisioningState
:ivar name_servers: Name servers.
:vartype name_servers: list[str]
:param privacy: :code:`<code>true</code>` if domain privacy is enabled for this domain;
otherwise, :code:`<code>false</code>`.
:type privacy: bool
:ivar created_time: Domain creation timestamp.
:vartype created_time: ~datetime.datetime
:ivar expiration_time: Domain expiration timestamp.
:vartype expiration_time: ~datetime.datetime
:ivar last_renewed_time: Timestamp when the domain was renewed last time.
:vartype last_renewed_time: ~datetime.datetime
:param auto_renew: :code:`<code>true</code>` if the domain should be automatically renewed;
otherwise, :code:`<code>false</code>`.
:type auto_renew: bool
:ivar ready_for_dns_record_management: :code:`<code>true</code>` if Azure can assign this
domain to App Service apps; otherwise, :code:`<code>false</code>`. This value will be
:code:`<code>true</code>` if domain registration status is active and
it is hosted on name servers Azure has programmatic access to.
:vartype ready_for_dns_record_management: bool
:ivar managed_host_names: All hostnames derived from the domain and assigned to Azure
resources.
:vartype managed_host_names: list[~azure.mgmt.web.v2020_06_01.models.HostName]
:param consent: Legal agreement consent.
:type consent: ~azure.mgmt.web.v2020_06_01.models.DomainPurchaseConsent
:ivar domain_not_renewable_reasons: Reasons why domain is not renewable.
:vartype domain_not_renewable_reasons: list[str or
~azure.mgmt.web.v2020_06_01.models.DomainPropertiesDomainNotRenewableReasonsItem]
:param dns_type: Current DNS type. Possible values include: "AzureDns",
"DefaultDomainRegistrarDns".
:type dns_type: str or ~azure.mgmt.web.v2020_06_01.models.DnsType
:param dns_zone_id: Azure DNS Zone to use.
:type dns_zone_id: str
:param target_dns_type: Target DNS type (would be used for migration). Possible values include:
"AzureDns", "DefaultDomainRegistrarDns".
:type target_dns_type: str or ~azure.mgmt.web.v2020_06_01.models.DnsType
:param auth_code:
:type auth_code: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'location': {'required': True},
'type': {'readonly': True},
'registration_status': {'readonly': True},
'provisioning_state': {'readonly': True},
'name_servers': {'readonly': True},
'created_time': {'readonly': True},
'expiration_time': {'readonly': True},
'last_renewed_time': {'readonly': True},
'ready_for_dns_record_management': {'readonly': True},
'managed_host_names': {'readonly': True},
'domain_not_renewable_reasons': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'contact_admin': {'key': 'properties.contactAdmin', 'type': 'Contact'},
'contact_billing': {'key': 'properties.contactBilling', 'type': 'Contact'},
'contact_registrant': {'key': 'properties.contactRegistrant', 'type': 'Contact'},
'contact_tech': {'key': 'properties.contactTech', 'type': 'Contact'},
'registration_status': {'key': 'properties.registrationStatus', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name_servers': {'key': 'properties.nameServers', 'type': '[str]'},
'privacy': {'key': 'properties.privacy', 'type': 'bool'},
'created_time': {'key': 'properties.createdTime', 'type': 'iso-8601'},
'expiration_time': {'key': 'properties.expirationTime', 'type': 'iso-8601'},
'last_renewed_time': {'key': 'properties.lastRenewedTime', 'type': 'iso-8601'},
'auto_renew': {'key': 'properties.autoRenew', 'type': 'bool'},
'ready_for_dns_record_management': {'key': 'properties.readyForDnsRecordManagement', 'type': 'bool'},
'managed_host_names': {'key': 'properties.managedHostNames', 'type': '[HostName]'},
'consent': {'key': 'properties.consent', 'type': 'DomainPurchaseConsent'},
'domain_not_renewable_reasons': {'key': 'properties.domainNotRenewableReasons', 'type': '[str]'},
'dns_type': {'key': 'properties.dnsType', 'type': 'str'},
'dns_zone_id': {'key': 'properties.dnsZoneId', 'type': 'str'},
'target_dns_type': {'key': 'properties.targetDnsType', 'type': 'str'},
'auth_code': {'key': 'properties.authCode', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
kind: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
contact_admin: Optional["Contact"] = None,
contact_billing: Optional["Contact"] = None,
contact_registrant: Optional["Contact"] = None,
contact_tech: Optional["Contact"] = None,
privacy: Optional[bool] = None,
auto_renew: Optional[bool] = True,
consent: Optional["DomainPurchaseConsent"] = None,
dns_type: Optional[Union[str, "DnsType"]] = None,
dns_zone_id: Optional[str] = None,
target_dns_type: Optional[Union[str, "DnsType"]] = None,
auth_code: Optional[str] = None,
**kwargs
):
super(Domain, self).__init__(kind=kind, location=location, tags=tags, **kwargs)
self.contact_admin = contact_admin
self.contact_billing = contact_billing
self.contact_registrant = contact_registrant
self.contact_tech = contact_tech
self.registration_status = None
self.provisioning_state = None
self.name_servers = None
self.privacy = privacy
self.created_time = None
self.expiration_time = None
self.last_renewed_time = None
self.auto_renew = auto_renew
self.ready_for_dns_record_management = None
self.managed_host_names = None
self.consent = consent
self.domain_not_renewable_reasons = None
self.dns_type = dns_type
self.dns_zone_id = dns_zone_id
self.target_dns_type = target_dns_type
self.auth_code = auth_code
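
# --- Illustrative sketch (not part of the generated model code) ---------------
# Only `location` is required when constructing a Domain; a real purchase
# request would also carry the four Contact objects and a DomainPurchaseConsent.
# Every value below, including the DNS zone resource id, is a hypothetical
# placeholder.
def _example_domain_request() -> "Domain":
    return Domain(
        location="global",
        privacy=True,
        auto_renew=True,
        dns_type="AzureDns",
        dns_zone_id="/subscriptions/.../resourceGroups/rg/providers/Microsoft.Network/dnszones/contoso.example",
    )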
class DomainAvailabilityCheckResult(msrest.serialization.Model):
"""Domain availability check result.
:param name: Name of the domain.
:type name: str
:param available: :code:`<code>true</code>` if domain can be purchased using CreateDomain API;
otherwise, :code:`<code>false</code>`.
:type available: bool
    :param domain_type: Valid values are Regular (Azure will charge the full price of domain
     registration) and SoftDeleted (purchasing this domain will simply restore it, and the
     operation will not cost anything). Possible values include: "Regular", "SoftDeleted".
:type domain_type: str or ~azure.mgmt.web.v2020_06_01.models.DomainType
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'available': {'key': 'available', 'type': 'bool'},
'domain_type': {'key': 'domainType', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
available: Optional[bool] = None,
domain_type: Optional[Union[str, "DomainType"]] = None,
**kwargs
):
super(DomainAvailabilityCheckResult, self).__init__(**kwargs)
self.name = name
self.available = available
self.domain_type = domain_type
class DomainCollection(msrest.serialization.Model):
"""Collection of domains.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.Domain]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Domain]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["Domain"],
**kwargs
):
super(DomainCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class DomainControlCenterSsoRequest(msrest.serialization.Model):
"""Single sign-on request information for domain management.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar url: URL where the single sign-on request is to be made.
:vartype url: str
:ivar post_parameter_key: Post parameter key.
:vartype post_parameter_key: str
:ivar post_parameter_value: Post parameter value. Client should use
'application/x-www-form-urlencoded' encoding for this value.
:vartype post_parameter_value: str
"""
_validation = {
'url': {'readonly': True},
'post_parameter_key': {'readonly': True},
'post_parameter_value': {'readonly': True},
}
_attribute_map = {
'url': {'key': 'url', 'type': 'str'},
'post_parameter_key': {'key': 'postParameterKey', 'type': 'str'},
'post_parameter_value': {'key': 'postParameterValue', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DomainControlCenterSsoRequest, self).__init__(**kwargs)
self.url = None
self.post_parameter_key = None
self.post_parameter_value = None
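
# --- Illustrative sketch (not part of the generated model code) ---------------
# All fields of DomainControlCenterSsoRequest are read-only, so instances are
# normally produced from a service response rather than constructed directly.
# Assuming the from_dict() helper that msrest models provide, a hypothetical
# response body could be turned into a model like this:
def _example_sso_request_from_response(body: dict) -> "DomainControlCenterSsoRequest":
    # e.g. body = {"url": "https://...", "postParameterKey": "...", "postParameterValue": "..."}
    return DomainControlCenterSsoRequest.from_dict(body)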
class DomainOwnershipIdentifier(ProxyOnlyResource):
"""Domain ownership Identifier.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param ownership_id: Ownership Id.
:type ownership_id: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'ownership_id': {'key': 'properties.ownershipId', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
ownership_id: Optional[str] = None,
**kwargs
):
super(DomainOwnershipIdentifier, self).__init__(kind=kind, **kwargs)
self.ownership_id = ownership_id
class DomainOwnershipIdentifierCollection(msrest.serialization.Model):
"""Collection of domain ownership identifiers.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.DomainOwnershipIdentifier]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DomainOwnershipIdentifier]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["DomainOwnershipIdentifier"],
**kwargs
):
super(DomainOwnershipIdentifierCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class DomainPatchResource(ProxyOnlyResource):
"""ARM resource for a domain.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param contact_admin: Administrative contact.
:type contact_admin: ~azure.mgmt.web.v2020_06_01.models.Contact
:param contact_billing: Billing contact.
:type contact_billing: ~azure.mgmt.web.v2020_06_01.models.Contact
:param contact_registrant: Registrant contact.
:type contact_registrant: ~azure.mgmt.web.v2020_06_01.models.Contact
:param contact_tech: Technical contact.
:type contact_tech: ~azure.mgmt.web.v2020_06_01.models.Contact
:ivar registration_status: Domain registration status. Possible values include: "Active",
"Awaiting", "Cancelled", "Confiscated", "Disabled", "Excluded", "Expired", "Failed", "Held",
"Locked", "Parked", "Pending", "Reserved", "Reverted", "Suspended", "Transferred", "Unknown",
"Unlocked", "Unparked", "Updated", "JsonConverterFailed".
:vartype registration_status: str or ~azure.mgmt.web.v2020_06_01.models.DomainStatus
:ivar provisioning_state: Domain provisioning state. Possible values include: "Succeeded",
"Failed", "Canceled", "InProgress", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.web.v2020_06_01.models.ProvisioningState
:ivar name_servers: Name servers.
:vartype name_servers: list[str]
:param privacy: :code:`<code>true</code>` if domain privacy is enabled for this domain;
otherwise, :code:`<code>false</code>`.
:type privacy: bool
:ivar created_time: Domain creation timestamp.
:vartype created_time: ~datetime.datetime
:ivar expiration_time: Domain expiration timestamp.
:vartype expiration_time: ~datetime.datetime
:ivar last_renewed_time: Timestamp when the domain was renewed last time.
:vartype last_renewed_time: ~datetime.datetime
:param auto_renew: :code:`<code>true</code>` if the domain should be automatically renewed;
otherwise, :code:`<code>false</code>`.
:type auto_renew: bool
:ivar ready_for_dns_record_management: :code:`<code>true</code>` if Azure can assign this
domain to App Service apps; otherwise, :code:`<code>false</code>`. This value will be
:code:`<code>true</code>` if domain registration status is active and
it is hosted on name servers Azure has programmatic access to.
:vartype ready_for_dns_record_management: bool
:ivar managed_host_names: All hostnames derived from the domain and assigned to Azure
resources.
:vartype managed_host_names: list[~azure.mgmt.web.v2020_06_01.models.HostName]
:param consent: Legal agreement consent.
:type consent: ~azure.mgmt.web.v2020_06_01.models.DomainPurchaseConsent
:ivar domain_not_renewable_reasons: Reasons why domain is not renewable.
:vartype domain_not_renewable_reasons: list[str or
~azure.mgmt.web.v2020_06_01.models.DomainPatchResourcePropertiesDomainNotRenewableReasonsItem]
:param dns_type: Current DNS type. Possible values include: "AzureDns",
"DefaultDomainRegistrarDns".
:type dns_type: str or ~azure.mgmt.web.v2020_06_01.models.DnsType
:param dns_zone_id: Azure DNS Zone to use.
:type dns_zone_id: str
:param target_dns_type: Target DNS type (would be used for migration). Possible values include:
"AzureDns", "DefaultDomainRegistrarDns".
:type target_dns_type: str or ~azure.mgmt.web.v2020_06_01.models.DnsType
:param auth_code:
:type auth_code: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'registration_status': {'readonly': True},
'provisioning_state': {'readonly': True},
'name_servers': {'readonly': True},
'created_time': {'readonly': True},
'expiration_time': {'readonly': True},
'last_renewed_time': {'readonly': True},
'ready_for_dns_record_management': {'readonly': True},
'managed_host_names': {'readonly': True},
'domain_not_renewable_reasons': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'contact_admin': {'key': 'properties.contactAdmin', 'type': 'Contact'},
'contact_billing': {'key': 'properties.contactBilling', 'type': 'Contact'},
'contact_registrant': {'key': 'properties.contactRegistrant', 'type': 'Contact'},
'contact_tech': {'key': 'properties.contactTech', 'type': 'Contact'},
'registration_status': {'key': 'properties.registrationStatus', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name_servers': {'key': 'properties.nameServers', 'type': '[str]'},
'privacy': {'key': 'properties.privacy', 'type': 'bool'},
'created_time': {'key': 'properties.createdTime', 'type': 'iso-8601'},
'expiration_time': {'key': 'properties.expirationTime', 'type': 'iso-8601'},
'last_renewed_time': {'key': 'properties.lastRenewedTime', 'type': 'iso-8601'},
'auto_renew': {'key': 'properties.autoRenew', 'type': 'bool'},
'ready_for_dns_record_management': {'key': 'properties.readyForDnsRecordManagement', 'type': 'bool'},
'managed_host_names': {'key': 'properties.managedHostNames', 'type': '[HostName]'},
'consent': {'key': 'properties.consent', 'type': 'DomainPurchaseConsent'},
'domain_not_renewable_reasons': {'key': 'properties.domainNotRenewableReasons', 'type': '[str]'},
'dns_type': {'key': 'properties.dnsType', 'type': 'str'},
'dns_zone_id': {'key': 'properties.dnsZoneId', 'type': 'str'},
'target_dns_type': {'key': 'properties.targetDnsType', 'type': 'str'},
'auth_code': {'key': 'properties.authCode', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
contact_admin: Optional["Contact"] = None,
contact_billing: Optional["Contact"] = None,
contact_registrant: Optional["Contact"] = None,
contact_tech: Optional["Contact"] = None,
privacy: Optional[bool] = None,
auto_renew: Optional[bool] = True,
consent: Optional["DomainPurchaseConsent"] = None,
dns_type: Optional[Union[str, "DnsType"]] = None,
dns_zone_id: Optional[str] = None,
target_dns_type: Optional[Union[str, "DnsType"]] = None,
auth_code: Optional[str] = None,
**kwargs
):
super(DomainPatchResource, self).__init__(kind=kind, **kwargs)
self.contact_admin = contact_admin
self.contact_billing = contact_billing
self.contact_registrant = contact_registrant
self.contact_tech = contact_tech
self.registration_status = None
self.provisioning_state = None
self.name_servers = None
self.privacy = privacy
self.created_time = None
self.expiration_time = None
self.last_renewed_time = None
self.auto_renew = auto_renew
self.ready_for_dns_record_management = None
self.managed_host_names = None
self.consent = consent
self.domain_not_renewable_reasons = None
self.dns_type = dns_type
self.dns_zone_id = dns_zone_id
self.target_dns_type = target_dns_type
self.auth_code = auth_code
class DomainPurchaseConsent(msrest.serialization.Model):
"""Domain purchase consent object, representing acceptance of applicable legal agreements.
:param agreement_keys: List of applicable legal agreement keys. This list can be retrieved
using ListLegalAgreements API under :code:`<code>TopLevelDomain</code>` resource.
:type agreement_keys: list[str]
:param agreed_by: Client IP address.
:type agreed_by: str
:param agreed_at: Timestamp when the agreements were accepted.
:type agreed_at: ~datetime.datetime
"""
_attribute_map = {
'agreement_keys': {'key': 'agreementKeys', 'type': '[str]'},
'agreed_by': {'key': 'agreedBy', 'type': 'str'},
'agreed_at': {'key': 'agreedAt', 'type': 'iso-8601'},
}
def __init__(
self,
*,
agreement_keys: Optional[List[str]] = None,
agreed_by: Optional[str] = None,
agreed_at: Optional[datetime.datetime] = None,
**kwargs
):
super(DomainPurchaseConsent, self).__init__(**kwargs)
self.agreement_keys = agreement_keys
self.agreed_by = agreed_by
self.agreed_at = agreed_at
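
# --- Illustrative sketch (not part of the generated model code) ---------------
# Recording consent to the legal agreements for a domain purchase; the
# agreement key and client IP address below are hypothetical placeholders.
def _example_purchase_consent() -> "DomainPurchaseConsent":
    import datetime as _dt
    return DomainPurchaseConsent(
        agreement_keys=["DNRA"],
        agreed_by="203.0.113.10",
        agreed_at=_dt.datetime.utcnow(),
    )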
class DomainRecommendationSearchParameters(msrest.serialization.Model):
"""Domain recommendation search parameters.
:param keywords: Keywords to be used for generating domain recommendations.
:type keywords: str
:param max_domain_recommendations: Maximum number of recommendations.
:type max_domain_recommendations: int
"""
_attribute_map = {
'keywords': {'key': 'keywords', 'type': 'str'},
'max_domain_recommendations': {'key': 'maxDomainRecommendations', 'type': 'int'},
}
def __init__(
self,
*,
keywords: Optional[str] = None,
max_domain_recommendations: Optional[int] = None,
**kwargs
):
super(DomainRecommendationSearchParameters, self).__init__(**kwargs)
self.keywords = keywords
self.max_domain_recommendations = max_domain_recommendations
class EnabledConfig(msrest.serialization.Model):
"""Enabled configuration.
:param enabled: True if configuration is enabled, false if it is disabled and null if
configuration is not set.
:type enabled: bool
"""
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
}
def __init__(
self,
*,
enabled: Optional[bool] = None,
**kwargs
):
super(EnabledConfig, self).__init__(**kwargs)
self.enabled = enabled
class EndpointDependency(msrest.serialization.Model):
"""A domain name that a service is reached at, including details of the current connection status.
:param domain_name: The domain name of the dependency.
:type domain_name: str
:param endpoint_details: The IP Addresses and Ports used when connecting to DomainName.
:type endpoint_details: list[~azure.mgmt.web.v2020_06_01.models.EndpointDetail]
"""
_attribute_map = {
'domain_name': {'key': 'domainName', 'type': 'str'},
'endpoint_details': {'key': 'endpointDetails', 'type': '[EndpointDetail]'},
}
def __init__(
self,
*,
domain_name: Optional[str] = None,
endpoint_details: Optional[List["EndpointDetail"]] = None,
**kwargs
):
super(EndpointDependency, self).__init__(**kwargs)
self.domain_name = domain_name
self.endpoint_details = endpoint_details
class EndpointDetail(msrest.serialization.Model):
"""Current TCP connectivity information from the App Service Environment to a single endpoint.
    :param ip_address: An IP address that the domain name currently resolves to.
:type ip_address: str
:param port: The port an endpoint is connected to.
:type port: int
:param latency: The time in milliseconds it takes for a TCP connection to be created from the
App Service Environment to this IpAddress at this Port.
:type latency: float
:param is_accessible: Whether it is possible to create a TCP connection from the App Service
Environment to this IpAddress at this Port.
:type is_accessible: bool
"""
_attribute_map = {
'ip_address': {'key': 'ipAddress', 'type': 'str'},
'port': {'key': 'port', 'type': 'int'},
'latency': {'key': 'latency', 'type': 'float'},
'is_accessible': {'key': 'isAccessible', 'type': 'bool'},
}
def __init__(
self,
*,
ip_address: Optional[str] = None,
port: Optional[int] = None,
latency: Optional[float] = None,
is_accessible: Optional[bool] = None,
**kwargs
):
super(EndpointDetail, self).__init__(**kwargs)
self.ip_address = ip_address
self.port = port
self.latency = latency
self.is_accessible = is_accessible
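
# --- Illustrative sketch (not part of the generated model code) ---------------
# EndpointDependency and EndpointDetail nest naturally: one dependency (a
# domain name) carries the per-address, per-port connectivity results. The
# address, port and latency below are hypothetical placeholders.
def _example_endpoint_dependency() -> "EndpointDependency":
    detail = EndpointDetail(ip_address="192.0.2.10", port=443, latency=12.5, is_accessible=True)
    return EndpointDependency(domain_name="sql.example.net", endpoint_details=[detail])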
class ErrorEntity(msrest.serialization.Model):
"""Body of the error response returned from the API.
:param extended_code: Type of error.
:type extended_code: str
:param message_template: Message template.
:type message_template: str
:param parameters: Parameters for the template.
:type parameters: list[str]
:param inner_errors: Inner errors.
:type inner_errors: list[~azure.mgmt.web.v2020_06_01.models.ErrorEntity]
:param code: Basic error code.
:type code: str
:param message: Any details of the error.
:type message: str
"""
_attribute_map = {
'extended_code': {'key': 'extendedCode', 'type': 'str'},
'message_template': {'key': 'messageTemplate', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '[str]'},
'inner_errors': {'key': 'innerErrors', 'type': '[ErrorEntity]'},
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
extended_code: Optional[str] = None,
message_template: Optional[str] = None,
parameters: Optional[List[str]] = None,
inner_errors: Optional[List["ErrorEntity"]] = None,
code: Optional[str] = None,
message: Optional[str] = None,
**kwargs
):
super(ErrorEntity, self).__init__(**kwargs)
self.extended_code = extended_code
self.message_template = message_template
self.parameters = parameters
self.inner_errors = inner_errors
self.code = code
self.message = message
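
# --- Illustrative sketch (not part of the generated model code) ---------------
# ErrorEntity is self-referential through `inner_errors`, so nested failures
# can be represented as a tree; the codes and messages here are hypothetical.
def _example_error_tree() -> "ErrorEntity":
    inner = ErrorEntity(code="04005", message="Quota exceeded for the hosting plan.")
    return ErrorEntity(
        code="BadRequest",
        message="The site could not be created.",
        inner_errors=[inner],
    )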
class Experiments(msrest.serialization.Model):
"""Routing rules in production experiments.
:param ramp_up_rules: List of ramp-up rules.
:type ramp_up_rules: list[~azure.mgmt.web.v2020_06_01.models.RampUpRule]
"""
_attribute_map = {
'ramp_up_rules': {'key': 'rampUpRules', 'type': '[RampUpRule]'},
}
def __init__(
self,
*,
ramp_up_rules: Optional[List["RampUpRule"]] = None,
**kwargs
):
super(Experiments, self).__init__(**kwargs)
self.ramp_up_rules = ramp_up_rules
class Facebook(ProxyOnlyResource):
"""Facebook.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param enabled:
:type enabled: bool
:param registration:
:type registration: ~azure.mgmt.web.v2020_06_01.models.AppRegistration
:param graph_api_version:
:type graph_api_version: str
:param login:
:type login: ~azure.mgmt.web.v2020_06_01.models.LoginScopes
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
'registration': {'key': 'properties.registration', 'type': 'AppRegistration'},
'graph_api_version': {'key': 'properties.graphApiVersion', 'type': 'str'},
'login': {'key': 'properties.login', 'type': 'LoginScopes'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
enabled: Optional[bool] = None,
registration: Optional["AppRegistration"] = None,
graph_api_version: Optional[str] = None,
login: Optional["LoginScopes"] = None,
**kwargs
):
super(Facebook, self).__init__(kind=kind, **kwargs)
self.enabled = enabled
self.registration = registration
self.graph_api_version = graph_api_version
self.login = login
class FileSystemApplicationLogsConfig(msrest.serialization.Model):
"""Application logs to file system configuration.
:param level: Log level. Possible values include: "Off", "Verbose", "Information", "Warning",
"Error".
:type level: str or ~azure.mgmt.web.v2020_06_01.models.LogLevel
"""
_attribute_map = {
'level': {'key': 'level', 'type': 'str'},
}
def __init__(
self,
*,
level: Optional[Union[str, "LogLevel"]] = None,
**kwargs
):
super(FileSystemApplicationLogsConfig, self).__init__(**kwargs)
self.level = level
class FileSystemHttpLogsConfig(msrest.serialization.Model):
"""Http logs to file system configuration.
    :param retention_in_mb: Maximum size in megabytes that HTTP log files can use.
     When this limit is reached, old log files are removed to make space for new ones.
     Value can range between 25 and 100.
:type retention_in_mb: int
:param retention_in_days: Retention in days.
Remove files older than X days.
0 or lower means no retention.
:type retention_in_days: int
:param enabled: True if configuration is enabled, false if it is disabled and null if
configuration is not set.
:type enabled: bool
"""
_validation = {
'retention_in_mb': {'maximum': 100, 'minimum': 25},
}
_attribute_map = {
'retention_in_mb': {'key': 'retentionInMb', 'type': 'int'},
'retention_in_days': {'key': 'retentionInDays', 'type': 'int'},
'enabled': {'key': 'enabled', 'type': 'bool'},
}
def __init__(
self,
*,
retention_in_mb: Optional[int] = None,
retention_in_days: Optional[int] = None,
enabled: Optional[bool] = None,
**kwargs
):
super(FileSystemHttpLogsConfig, self).__init__(**kwargs)
self.retention_in_mb = retention_in_mb
self.retention_in_days = retention_in_days
self.enabled = enabled
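
# --- Illustrative sketch (not part of the generated model code) ---------------
# The _validation table above constrains retention_in_mb to the 25-100 range;
# the values below are hypothetical but deliberately chosen inside that range.
def _example_http_logs_config() -> "FileSystemHttpLogsConfig":
    return FileSystemHttpLogsConfig(retention_in_mb=35, retention_in_days=7, enabled=True)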
class FileSystemTokenStore(ProxyOnlyResource):
"""FileSystemTokenStore.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param directory:
:type directory: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'directory': {'key': 'properties.directory', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
directory: Optional[str] = None,
**kwargs
):
super(FileSystemTokenStore, self).__init__(kind=kind, **kwargs)
self.directory = directory
class ForwardProxy(ProxyOnlyResource):
"""ForwardProxy.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param convention: Possible values include: "NoProxy", "Standard", "Custom".
:type convention: str or ~azure.mgmt.web.v2020_06_01.models.ForwardProxyConvention
:param custom_host_header_name:
:type custom_host_header_name: str
:param custom_proto_header_name:
:type custom_proto_header_name: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'convention': {'key': 'properties.convention', 'type': 'str'},
'custom_host_header_name': {'key': 'properties.customHostHeaderName', 'type': 'str'},
'custom_proto_header_name': {'key': 'properties.customProtoHeaderName', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
convention: Optional[Union[str, "ForwardProxyConvention"]] = None,
custom_host_header_name: Optional[str] = None,
custom_proto_header_name: Optional[str] = None,
**kwargs
):
super(ForwardProxy, self).__init__(kind=kind, **kwargs)
self.convention = convention
self.custom_host_header_name = custom_host_header_name
self.custom_proto_header_name = custom_proto_header_name
class FunctionEnvelope(ProxyOnlyResource):
"""Function information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param function_app_id: Function App ID.
:type function_app_id: str
:param script_root_path_href: Script root path URI.
:type script_root_path_href: str
:param script_href: Script URI.
:type script_href: str
:param config_href: Config URI.
:type config_href: str
:param test_data_href: Test data URI.
:type test_data_href: str
:param secrets_file_href: Secrets file URI.
:type secrets_file_href: str
:param href: Function URI.
:type href: str
:param config: Config information.
:type config: any
:param files: File list.
:type files: dict[str, str]
:param test_data: Test data used when testing via the Azure Portal.
:type test_data: str
:param invoke_url_template: The invocation URL.
:type invoke_url_template: str
:param language: The function language.
:type language: str
:param is_disabled: Gets or sets a value indicating whether the function is disabled.
:type is_disabled: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'function_app_id': {'key': 'properties.function_app_id', 'type': 'str'},
'script_root_path_href': {'key': 'properties.script_root_path_href', 'type': 'str'},
'script_href': {'key': 'properties.script_href', 'type': 'str'},
'config_href': {'key': 'properties.config_href', 'type': 'str'},
'test_data_href': {'key': 'properties.test_data_href', 'type': 'str'},
'secrets_file_href': {'key': 'properties.secrets_file_href', 'type': 'str'},
'href': {'key': 'properties.href', 'type': 'str'},
'config': {'key': 'properties.config', 'type': 'object'},
'files': {'key': 'properties.files', 'type': '{str}'},
'test_data': {'key': 'properties.test_data', 'type': 'str'},
'invoke_url_template': {'key': 'properties.invoke_url_template', 'type': 'str'},
'language': {'key': 'properties.language', 'type': 'str'},
'is_disabled': {'key': 'properties.isDisabled', 'type': 'bool'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
function_app_id: Optional[str] = None,
script_root_path_href: Optional[str] = None,
script_href: Optional[str] = None,
config_href: Optional[str] = None,
test_data_href: Optional[str] = None,
secrets_file_href: Optional[str] = None,
href: Optional[str] = None,
config: Optional[Any] = None,
files: Optional[Dict[str, str]] = None,
test_data: Optional[str] = None,
invoke_url_template: Optional[str] = None,
language: Optional[str] = None,
is_disabled: Optional[bool] = None,
**kwargs
):
super(FunctionEnvelope, self).__init__(kind=kind, **kwargs)
self.function_app_id = function_app_id
self.script_root_path_href = script_root_path_href
self.script_href = script_href
self.config_href = config_href
self.test_data_href = test_data_href
self.secrets_file_href = secrets_file_href
self.href = href
self.config = config
self.files = files
self.test_data = test_data
self.invoke_url_template = invoke_url_template
self.language = language
self.is_disabled = is_disabled
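
# --- Illustrative sketch (not part of the generated model code) ---------------
# FunctionEnvelope mixes URIs, a free-form `config` payload and a name->content
# `files` map; every value below is a hypothetical placeholder.
def _example_function_envelope() -> "FunctionEnvelope":
    return FunctionEnvelope(
        function_app_id="/subscriptions/.../sites/myfuncapp",
        href="https://myfuncapp.azurewebsites.net/admin/functions/HttpTrigger1",
        config={"bindings": [{"type": "httpTrigger", "direction": "in", "name": "req"}]},
        files={"run.csx": "// function body"},
        language="csharp",
        is_disabled=False,
    )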
class FunctionEnvelopeCollection(msrest.serialization.Model):
"""Collection of Kudu function information elements.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.FunctionEnvelope]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[FunctionEnvelope]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["FunctionEnvelope"],
**kwargs
):
super(FunctionEnvelopeCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class FunctionSecrets(msrest.serialization.Model):
"""Function secrets.
:param key: Secret key.
:type key: str
:param trigger_url: Trigger URL.
:type trigger_url: str
"""
_attribute_map = {
'key': {'key': 'key', 'type': 'str'},
'trigger_url': {'key': 'trigger_url', 'type': 'str'},
}
def __init__(
self,
*,
key: Optional[str] = None,
trigger_url: Optional[str] = None,
**kwargs
):
super(FunctionSecrets, self).__init__(**kwargs)
self.key = key
self.trigger_url = trigger_url
class GeoRegion(ProxyOnlyResource):
"""Geographical region.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar description: Region description.
:vartype description: str
:ivar display_name: Display name for region.
:vartype display_name: str
:ivar org_domain: Display name for region.
:vartype org_domain: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'description': {'readonly': True},
'display_name': {'readonly': True},
'org_domain': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'display_name': {'key': 'properties.displayName', 'type': 'str'},
'org_domain': {'key': 'properties.orgDomain', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(GeoRegion, self).__init__(kind=kind, **kwargs)
self.description = None
self.display_name = None
self.org_domain = None
class GeoRegionCollection(msrest.serialization.Model):
"""Collection of geographical regions.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.GeoRegion]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[GeoRegion]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["GeoRegion"],
**kwargs
):
super(GeoRegionCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class GitHub(ProxyOnlyResource):
"""GitHub.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param enabled:
:type enabled: bool
:param registration:
:type registration: ~azure.mgmt.web.v2020_06_01.models.ClientRegistration
:param login:
:type login: ~azure.mgmt.web.v2020_06_01.models.LoginScopes
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
'registration': {'key': 'properties.registration', 'type': 'ClientRegistration'},
'login': {'key': 'properties.login', 'type': 'LoginScopes'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
enabled: Optional[bool] = None,
registration: Optional["ClientRegistration"] = None,
login: Optional["LoginScopes"] = None,
**kwargs
):
super(GitHub, self).__init__(kind=kind, **kwargs)
self.enabled = enabled
self.registration = registration
self.login = login
class GlobalCsmSkuDescription(msrest.serialization.Model):
"""A Global SKU Description.
:param name: Name of the resource SKU.
:type name: str
:param tier: Service Tier of the resource SKU.
:type tier: str
:param size: Size specifier of the resource SKU.
:type size: str
:param family: Family code of the resource SKU.
:type family: str
:param capacity: Min, max, and default scale values of the SKU.
:type capacity: ~azure.mgmt.web.v2020_06_01.models.SkuCapacity
:param locations: Locations of the SKU.
:type locations: list[str]
:param capabilities: Capabilities of the SKU, e.g., whether Traffic Manager is enabled.
:type capabilities: list[~azure.mgmt.web.v2020_06_01.models.Capability]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
'size': {'key': 'size', 'type': 'str'},
'family': {'key': 'family', 'type': 'str'},
'capacity': {'key': 'capacity', 'type': 'SkuCapacity'},
'locations': {'key': 'locations', 'type': '[str]'},
'capabilities': {'key': 'capabilities', 'type': '[Capability]'},
}
def __init__(
self,
*,
name: Optional[str] = None,
tier: Optional[str] = None,
size: Optional[str] = None,
family: Optional[str] = None,
capacity: Optional["SkuCapacity"] = None,
locations: Optional[List[str]] = None,
capabilities: Optional[List["Capability"]] = None,
**kwargs
):
super(GlobalCsmSkuDescription, self).__init__(**kwargs)
self.name = name
self.tier = tier
self.size = size
self.family = family
self.capacity = capacity
self.locations = locations
self.capabilities = capabilities
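# Illustrative sketch (not part of the generated models): building a
# GlobalCsmSkuDescription from plain values. The SKU name/tier/size/locations
# below are hypothetical placeholders; capacity and capabilities are omitted
# because they require other model types defined elsewhere in this module.
def _example_global_sku_description():
    """Return a sample GlobalCsmSkuDescription for a hypothetical Standard S1 SKU."""
    return GlobalCsmSkuDescription(
        name="S1",                          # assumed SKU name
        tier="Standard",                    # assumed service tier
        size="S1",
        family="S",
        locations=["West US", "East US"],   # assumed locations
    )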
class GlobalValidation(ProxyOnlyResource):
"""GlobalValidation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param require_authentication:
:type require_authentication: bool
:param unauthenticated_client_action: Possible values include: "RedirectToLoginPage",
"AllowAnonymous", "Return401", "Return403".
:type unauthenticated_client_action: str or
~azure.mgmt.web.v2020_06_01.models.UnauthenticatedClientActionV2
:param redirect_to_provider:
:type redirect_to_provider: str
:param excluded_paths:
:type excluded_paths: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'require_authentication': {'key': 'properties.requireAuthentication', 'type': 'bool'},
'unauthenticated_client_action': {'key': 'properties.unauthenticatedClientAction', 'type': 'str'},
'redirect_to_provider': {'key': 'properties.redirectToProvider', 'type': 'str'},
'excluded_paths': {'key': 'properties.excludedPaths', 'type': '[str]'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
require_authentication: Optional[bool] = None,
unauthenticated_client_action: Optional[Union[str, "UnauthenticatedClientActionV2"]] = None,
redirect_to_provider: Optional[str] = None,
excluded_paths: Optional[List[str]] = None,
**kwargs
):
super(GlobalValidation, self).__init__(kind=kind, **kwargs)
self.require_authentication = require_authentication
self.unauthenticated_client_action = unauthenticated_client_action
self.redirect_to_provider = redirect_to_provider
self.excluded_paths = excluded_paths
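# Illustrative sketch (not part of the generated models): a GlobalValidation
# payload that requires authentication and redirects anonymous requests. The
# provider name and excluded paths are hypothetical examples; the action value
# is one of the documented possibilities.
def _example_global_validation():
    """Return a sample GlobalValidation that redirects unauthenticated callers."""
    return GlobalValidation(
        require_authentication=True,
        unauthenticated_client_action="RedirectToLoginPage",
        redirect_to_provider="github",          # assumed provider name
        excluded_paths=["/health", "/public"],  # assumed anonymous paths
    )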
class Google(ProxyOnlyResource):
"""Google.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param enabled:
:type enabled: bool
:param registration:
:type registration: ~azure.mgmt.web.v2020_06_01.models.ClientRegistration
:param login:
:type login: ~azure.mgmt.web.v2020_06_01.models.LoginScopes
:param validation:
:type validation: ~azure.mgmt.web.v2020_06_01.models.AllowedAudiencesValidation
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
'registration': {'key': 'properties.registration', 'type': 'ClientRegistration'},
'login': {'key': 'properties.login', 'type': 'LoginScopes'},
'validation': {'key': 'properties.validation', 'type': 'AllowedAudiencesValidation'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
enabled: Optional[bool] = None,
registration: Optional["ClientRegistration"] = None,
login: Optional["LoginScopes"] = None,
validation: Optional["AllowedAudiencesValidation"] = None,
**kwargs
):
super(Google, self).__init__(kind=kind, **kwargs)
self.enabled = enabled
self.registration = registration
self.login = login
self.validation = validation
class HandlerMapping(msrest.serialization.Model):
"""The IIS handler mappings used to define which handler processes HTTP requests with certain extension.
For example, it is used to configure php-cgi.exe process to handle all HTTP requests with *.php extension.
:param extension: Requests with this extension will be handled using the specified FastCGI
application.
:type extension: str
:param script_processor: The absolute path to the FastCGI application.
:type script_processor: str
:param arguments: Command-line arguments to be passed to the script processor.
:type arguments: str
"""
_attribute_map = {
'extension': {'key': 'extension', 'type': 'str'},
'script_processor': {'key': 'scriptProcessor', 'type': 'str'},
'arguments': {'key': 'arguments', 'type': 'str'},
}
def __init__(
self,
*,
extension: Optional[str] = None,
script_processor: Optional[str] = None,
arguments: Optional[str] = None,
**kwargs
):
super(HandlerMapping, self).__init__(**kwargs)
self.extension = extension
self.script_processor = script_processor
self.arguments = arguments
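# Illustrative sketch (not part of the generated models): the php-cgi.exe case
# mentioned in the HandlerMapping docstring, expressed as a constructor call.
# The script processor path and arguments are hypothetical placeholders.
def _example_php_handler_mapping():
    """Return a HandlerMapping that routes *.php requests to php-cgi.exe."""
    return HandlerMapping(
        extension="*.php",
        script_processor=r"D:\Program Files\PHP\php-cgi.exe",  # assumed install path
        arguments="-d open_basedir=",                          # assumed arguments
    )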
class HostingEnvironmentDeploymentInfo(msrest.serialization.Model):
"""Information needed to create resources on an App Service Environment.
:param name: Name of the App Service Environment.
:type name: str
:param location: Location of the App Service Environment.
:type location: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
location: Optional[str] = None,
**kwargs
):
super(HostingEnvironmentDeploymentInfo, self).__init__(**kwargs)
self.name = name
self.location = location
class HostingEnvironmentDiagnostics(msrest.serialization.Model):
"""Diagnostics for an App Service Environment.
:param name: Name/identifier of the diagnostics.
:type name: str
:param diagnostics_output: Diagnostics output.
:type diagnostics_output: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'diagnostics_output': {'key': 'diagnosticsOutput', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
diagnostics_output: Optional[str] = None,
**kwargs
):
super(HostingEnvironmentDiagnostics, self).__init__(**kwargs)
self.name = name
self.diagnostics_output = diagnostics_output
class HostingEnvironmentProfile(msrest.serialization.Model):
"""Specification for an App Service Environment to use for this resource.
Variables are only populated by the server, and will be ignored when sending a request.
:param id: Resource ID of the App Service Environment.
:type id: str
:ivar name: Name of the App Service Environment.
:vartype name: str
:ivar type: Resource type of the App Service Environment.
:vartype type: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
**kwargs
):
super(HostingEnvironmentProfile, self).__init__(**kwargs)
self.id = id
self.name = None
self.type = None
class HostKeys(msrest.serialization.Model):
"""Functions host level keys.
:param master_key: Secret key.
:type master_key: str
:param function_keys: Host level function keys.
:type function_keys: dict[str, str]
:param system_keys: System keys.
:type system_keys: dict[str, str]
"""
_attribute_map = {
'master_key': {'key': 'masterKey', 'type': 'str'},
'function_keys': {'key': 'functionKeys', 'type': '{str}'},
'system_keys': {'key': 'systemKeys', 'type': '{str}'},
}
def __init__(
self,
*,
master_key: Optional[str] = None,
function_keys: Optional[Dict[str, str]] = None,
system_keys: Optional[Dict[str, str]] = None,
**kwargs
):
super(HostKeys, self).__init__(**kwargs)
self.master_key = master_key
self.function_keys = function_keys
self.system_keys = system_keys
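# Illustrative sketch (not part of the generated models): shaping a HostKeys
# payload. All key names and values below are hypothetical placeholders; real
# keys are returned by the service rather than authored by hand.
def _example_host_keys():
    """Return a sample HostKeys object with placeholder secrets."""
    return HostKeys(
        master_key="<master-key-placeholder>",
        function_keys={"default": "<function-key-placeholder>"},
        system_keys={"durabletask_extension": "<system-key-placeholder>"},
    )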
class HostName(msrest.serialization.Model):
"""Details of a hostname derived from a domain.
:param name: Name of the hostname.
:type name: str
:param site_names: List of apps the hostname is assigned to. This list will have more than one
app only if the hostname is pointing to a Traffic Manager.
:type site_names: list[str]
:param azure_resource_name: Name of the Azure resource the hostname is assigned to. If it is
assigned to a Traffic Manager, this will be the Traffic Manager name; otherwise it will be
the app name.
:type azure_resource_name: str
:param azure_resource_type: Type of the Azure resource the hostname is assigned to. Possible
values include: "Website", "TrafficManager".
:type azure_resource_type: str or ~azure.mgmt.web.v2020_06_01.models.AzureResourceType
:param custom_host_name_dns_record_type: Type of the DNS record. Possible values include:
"CName", "A".
:type custom_host_name_dns_record_type: str or
~azure.mgmt.web.v2020_06_01.models.CustomHostNameDnsRecordType
:param host_name_type: Type of the hostname. Possible values include: "Verified", "Managed".
:type host_name_type: str or ~azure.mgmt.web.v2020_06_01.models.HostNameType
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'site_names': {'key': 'siteNames', 'type': '[str]'},
'azure_resource_name': {'key': 'azureResourceName', 'type': 'str'},
'azure_resource_type': {'key': 'azureResourceType', 'type': 'str'},
'custom_host_name_dns_record_type': {'key': 'customHostNameDnsRecordType', 'type': 'str'},
'host_name_type': {'key': 'hostNameType', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
site_names: Optional[List[str]] = None,
azure_resource_name: Optional[str] = None,
azure_resource_type: Optional[Union[str, "AzureResourceType"]] = None,
custom_host_name_dns_record_type: Optional[Union[str, "CustomHostNameDnsRecordType"]] = None,
host_name_type: Optional[Union[str, "HostNameType"]] = None,
**kwargs
):
super(HostName, self).__init__(**kwargs)
self.name = name
self.site_names = site_names
self.azure_resource_name = azure_resource_name
self.azure_resource_type = azure_resource_type
self.custom_host_name_dns_record_type = custom_host_name_dns_record_type
self.host_name_type = host_name_type
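# Illustrative sketch (not part of the generated models): a HostName pointed at
# a Traffic Manager, so site_names can list several apps and azure_resource_name
# is the Traffic Manager name, as described in the docstring above. All values
# are hypothetical.
def _example_traffic_manager_host_name():
    """Return a sample HostName assigned to a Traffic Manager profile."""
    return HostName(
        name="www.contoso.com",
        site_names=["contoso-east", "contoso-west"],
        azure_resource_name="contoso-tm",            # assumed Traffic Manager name
        azure_resource_type="TrafficManager",
        custom_host_name_dns_record_type="CName",
        host_name_type="Verified",
    )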
class HostNameBinding(ProxyOnlyResource):
"""A hostname binding object.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param site_name: App Service app name.
:type site_name: str
:param domain_id: Fully qualified ARM domain resource URI.
:type domain_id: str
:param azure_resource_name: Azure resource name.
:type azure_resource_name: str
:param azure_resource_type: Azure resource type. Possible values include: "Website",
"TrafficManager".
:type azure_resource_type: str or ~azure.mgmt.web.v2020_06_01.models.AzureResourceType
:param custom_host_name_dns_record_type: Custom DNS record type. Possible values include:
"CName", "A".
:type custom_host_name_dns_record_type: str or
~azure.mgmt.web.v2020_06_01.models.CustomHostNameDnsRecordType
:param host_name_type: Hostname type. Possible values include: "Verified", "Managed".
:type host_name_type: str or ~azure.mgmt.web.v2020_06_01.models.HostNameType
:param ssl_state: SSL type. Possible values include: "Disabled", "SniEnabled",
"IpBasedEnabled".
:type ssl_state: str or ~azure.mgmt.web.v2020_06_01.models.SslState
:param thumbprint: SSL certificate thumbprint.
:type thumbprint: str
:ivar virtual_ip: Virtual IP address assigned to the hostname if IP based SSL is enabled.
:vartype virtual_ip: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'virtual_ip': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'site_name': {'key': 'properties.siteName', 'type': 'str'},
'domain_id': {'key': 'properties.domainId', 'type': 'str'},
'azure_resource_name': {'key': 'properties.azureResourceName', 'type': 'str'},
'azure_resource_type': {'key': 'properties.azureResourceType', 'type': 'str'},
'custom_host_name_dns_record_type': {'key': 'properties.customHostNameDnsRecordType', 'type': 'str'},
'host_name_type': {'key': 'properties.hostNameType', 'type': 'str'},
'ssl_state': {'key': 'properties.sslState', 'type': 'str'},
'thumbprint': {'key': 'properties.thumbprint', 'type': 'str'},
'virtual_ip': {'key': 'properties.virtualIP', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
site_name: Optional[str] = None,
domain_id: Optional[str] = None,
azure_resource_name: Optional[str] = None,
azure_resource_type: Optional[Union[str, "AzureResourceType"]] = None,
custom_host_name_dns_record_type: Optional[Union[str, "CustomHostNameDnsRecordType"]] = None,
host_name_type: Optional[Union[str, "HostNameType"]] = None,
ssl_state: Optional[Union[str, "SslState"]] = None,
thumbprint: Optional[str] = None,
**kwargs
):
super(HostNameBinding, self).__init__(kind=kind, **kwargs)
self.site_name = site_name
self.domain_id = domain_id
self.azure_resource_name = azure_resource_name
self.azure_resource_type = azure_resource_type
self.custom_host_name_dns_record_type = custom_host_name_dns_record_type
self.host_name_type = host_name_type
self.ssl_state = ssl_state
self.thumbprint = thumbprint
self.virtual_ip = None
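# Illustrative sketch (not part of the generated models): a HostNameBinding
# request body that enables SNI-based SSL for a custom domain. The site name and
# thumbprint are hypothetical placeholders; virtual_ip is read-only and is set
# by the service.
def _example_host_name_binding():
    """Return a sample HostNameBinding with SNI-based SSL enabled."""
    return HostNameBinding(
        site_name="contoso-app",                     # assumed app name
        azure_resource_type="Website",
        custom_host_name_dns_record_type="CName",
        host_name_type="Verified",
        ssl_state="SniEnabled",
        thumbprint="0000000000000000000000000000000000000000",  # placeholder thumbprint
    )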
class HostNameBindingCollection(msrest.serialization.Model):
"""Collection of hostname bindings.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.HostNameBinding]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[HostNameBinding]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["HostNameBinding"],
**kwargs
):
super(HostNameBindingCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class HostNameSslState(msrest.serialization.Model):
"""SSL-enabled hostname.
:param name: Hostname.
:type name: str
:param ssl_state: SSL type. Possible values include: "Disabled", "SniEnabled",
"IpBasedEnabled".
:type ssl_state: str or ~azure.mgmt.web.v2020_06_01.models.SslState
:param virtual_ip: Virtual IP address assigned to the hostname if IP based SSL is enabled.
:type virtual_ip: str
:param thumbprint: SSL certificate thumbprint.
:type thumbprint: str
:param to_update: Set to :code:`<code>true</code>` to update existing hostname.
:type to_update: bool
:param host_type: Indicates whether the hostname is a standard or repository hostname. Possible
values include: "Standard", "Repository".
:type host_type: str or ~azure.mgmt.web.v2020_06_01.models.HostType
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'ssl_state': {'key': 'sslState', 'type': 'str'},
'virtual_ip': {'key': 'virtualIP', 'type': 'str'},
'thumbprint': {'key': 'thumbprint', 'type': 'str'},
'to_update': {'key': 'toUpdate', 'type': 'bool'},
'host_type': {'key': 'hostType', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
ssl_state: Optional[Union[str, "SslState"]] = None,
virtual_ip: Optional[str] = None,
thumbprint: Optional[str] = None,
to_update: Optional[bool] = None,
host_type: Optional[Union[str, "HostType"]] = None,
**kwargs
):
super(HostNameSslState, self).__init__(**kwargs)
self.name = name
self.ssl_state = ssl_state
self.virtual_ip = virtual_ip
self.thumbprint = thumbprint
self.to_update = to_update
self.host_type = host_type
class HttpLogsConfig(msrest.serialization.Model):
"""Http logs configuration.
:param file_system: Http logs to file system configuration.
:type file_system: ~azure.mgmt.web.v2020_06_01.models.FileSystemHttpLogsConfig
:param azure_blob_storage: Http logs to azure blob storage configuration.
:type azure_blob_storage: ~azure.mgmt.web.v2020_06_01.models.AzureBlobStorageHttpLogsConfig
"""
_attribute_map = {
'file_system': {'key': 'fileSystem', 'type': 'FileSystemHttpLogsConfig'},
'azure_blob_storage': {'key': 'azureBlobStorage', 'type': 'AzureBlobStorageHttpLogsConfig'},
}
def __init__(
self,
*,
file_system: Optional["FileSystemHttpLogsConfig"] = None,
azure_blob_storage: Optional["AzureBlobStorageHttpLogsConfig"] = None,
**kwargs
):
super(HttpLogsConfig, self).__init__(**kwargs)
self.file_system = file_system
self.azure_blob_storage = azure_blob_storage
class HttpSettings(ProxyOnlyResource):
"""HttpSettings.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param require_https:
:type require_https: bool
:param routes:
:type routes: ~azure.mgmt.web.v2020_06_01.models.HttpSettingsRoutes
:param forward_proxy:
:type forward_proxy: ~azure.mgmt.web.v2020_06_01.models.ForwardProxy
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'require_https': {'key': 'properties.requireHttps', 'type': 'bool'},
'routes': {'key': 'properties.routes', 'type': 'HttpSettingsRoutes'},
'forward_proxy': {'key': 'properties.forwardProxy', 'type': 'ForwardProxy'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
require_https: Optional[bool] = None,
routes: Optional["HttpSettingsRoutes"] = None,
forward_proxy: Optional["ForwardProxy"] = None,
**kwargs
):
super(HttpSettings, self).__init__(kind=kind, **kwargs)
self.require_https = require_https
self.routes = routes
self.forward_proxy = forward_proxy
class HttpSettingsRoutes(ProxyOnlyResource):
"""HttpSettingsRoutes.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param api_prefix:
:type api_prefix: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'api_prefix': {'key': 'properties.apiPrefix', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
api_prefix: Optional[str] = None,
**kwargs
):
super(HttpSettingsRoutes, self).__init__(kind=kind, **kwargs)
self.api_prefix = api_prefix
class HybridConnection(ProxyOnlyResource):
"""Hybrid Connection contract. This is used to configure a Hybrid Connection.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param service_bus_namespace: The name of the Service Bus namespace.
:type service_bus_namespace: str
:param relay_name: The name of the Service Bus relay.
:type relay_name: str
:param relay_arm_uri: The ARM URI to the Service Bus relay.
:type relay_arm_uri: str
:param hostname: The hostname of the endpoint.
:type hostname: str
:param port: The port of the endpoint.
:type port: int
:param send_key_name: The name of the Service Bus key which has Send permissions. This is used
to authenticate to Service Bus.
:type send_key_name: str
:param send_key_value: The value of the Service Bus key. This is used to authenticate to
Service Bus. In ARM this key will not normally be returned; use the POST /listKeys API
instead.
:type send_key_value: str
:param service_bus_suffix: The suffix for the service bus endpoint. By default this is
.servicebus.windows.net.
:type service_bus_suffix: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'service_bus_namespace': {'key': 'properties.serviceBusNamespace', 'type': 'str'},
'relay_name': {'key': 'properties.relayName', 'type': 'str'},
'relay_arm_uri': {'key': 'properties.relayArmUri', 'type': 'str'},
'hostname': {'key': 'properties.hostname', 'type': 'str'},
'port': {'key': 'properties.port', 'type': 'int'},
'send_key_name': {'key': 'properties.sendKeyName', 'type': 'str'},
'send_key_value': {'key': 'properties.sendKeyValue', 'type': 'str'},
'service_bus_suffix': {'key': 'properties.serviceBusSuffix', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
service_bus_namespace: Optional[str] = None,
relay_name: Optional[str] = None,
relay_arm_uri: Optional[str] = None,
hostname: Optional[str] = None,
port: Optional[int] = None,
send_key_name: Optional[str] = None,
send_key_value: Optional[str] = None,
service_bus_suffix: Optional[str] = None,
**kwargs
):
super(HybridConnection, self).__init__(kind=kind, **kwargs)
self.service_bus_namespace = service_bus_namespace
self.relay_name = relay_name
self.relay_arm_uri = relay_arm_uri
self.hostname = hostname
self.port = port
self.send_key_name = send_key_name
self.send_key_value = send_key_value
self.service_bus_suffix = service_bus_suffix
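# Illustrative sketch (not part of the generated models): a HybridConnection
# pointing at a Service Bus relay. The namespace, relay, endpoint, and key values
# are hypothetical; as the docstring notes, send_key_value is normally obtained
# via the POST /listKeys API rather than hard-coded.
def _example_hybrid_connection():
    """Return a sample HybridConnection for an on-premises SQL endpoint."""
    return HybridConnection(
        service_bus_namespace="contoso-relay-ns",   # assumed namespace
        relay_name="onprem-sql",                    # assumed relay name
        hostname="sql01.corp.contoso.com",          # assumed on-premises host
        port=1433,
        send_key_name="defaultSender",
        send_key_value="<send-key-placeholder>",
    )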
class HybridConnectionCollection(msrest.serialization.Model):
"""Collection of hostname bindings.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.HybridConnection]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[HybridConnection]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["HybridConnection"],
**kwargs
):
super(HybridConnectionCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class HybridConnectionKey(ProxyOnlyResource):
"""Hybrid Connection key contract. This has the send key name and value for a Hybrid Connection.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar send_key_name: The name of the send key.
:vartype send_key_name: str
:ivar send_key_value: The value of the send key.
:vartype send_key_value: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'send_key_name': {'readonly': True},
'send_key_value': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'send_key_name': {'key': 'properties.sendKeyName', 'type': 'str'},
'send_key_value': {'key': 'properties.sendKeyValue', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(HybridConnectionKey, self).__init__(kind=kind, **kwargs)
self.send_key_name = None
self.send_key_value = None
class HybridConnectionLimits(ProxyOnlyResource):
"""Hybrid Connection limits contract. This is used to return the plan limits of Hybrid Connections.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar current: The current number of Hybrid Connections.
:vartype current: int
:ivar maximum: The maximum number of Hybrid Connections allowed.
:vartype maximum: int
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'current': {'readonly': True},
'maximum': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'current': {'key': 'properties.current', 'type': 'int'},
'maximum': {'key': 'properties.maximum', 'type': 'int'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(HybridConnectionLimits, self).__init__(kind=kind, **kwargs)
self.current = None
self.maximum = None
class Identifier(ProxyOnlyResource):
"""A domain specific resource identifier.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param value: String representation of the identity.
:type value: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'value': {'key': 'properties.id', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
value: Optional[str] = None,
**kwargs
):
super(Identifier, self).__init__(kind=kind, **kwargs)
self.value = value
class IdentifierCollection(msrest.serialization.Model):
"""Collection of identifiers.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.Identifier]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Identifier]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["Identifier"],
**kwargs
):
super(IdentifierCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class IdentityProviders(ProxyOnlyResource):
"""IdentityProviders.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param azure_active_directory:
:type azure_active_directory: ~azure.mgmt.web.v2020_06_01.models.AzureActiveDirectory
:param facebook:
:type facebook: ~azure.mgmt.web.v2020_06_01.models.Facebook
:param git_hub:
:type git_hub: ~azure.mgmt.web.v2020_06_01.models.GitHub
:param google:
:type google: ~azure.mgmt.web.v2020_06_01.models.Google
:param twitter:
:type twitter: ~azure.mgmt.web.v2020_06_01.models.Twitter
:param custom_open_id_connect_providers: Dictionary of :code:`<CustomOpenIdConnectProvider>`.
:type custom_open_id_connect_providers: dict[str,
~azure.mgmt.web.v2020_06_01.models.CustomOpenIdConnectProvider]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'azure_active_directory': {'key': 'properties.azureActiveDirectory', 'type': 'AzureActiveDirectory'},
'facebook': {'key': 'properties.facebook', 'type': 'Facebook'},
'git_hub': {'key': 'properties.gitHub', 'type': 'GitHub'},
'google': {'key': 'properties.google', 'type': 'Google'},
'twitter': {'key': 'properties.twitter', 'type': 'Twitter'},
'custom_open_id_connect_providers': {'key': 'properties.customOpenIdConnectProviders', 'type': '{CustomOpenIdConnectProvider}'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
azure_active_directory: Optional["AzureActiveDirectory"] = None,
facebook: Optional["Facebook"] = None,
git_hub: Optional["GitHub"] = None,
google: Optional["Google"] = None,
twitter: Optional["Twitter"] = None,
custom_open_id_connect_providers: Optional[Dict[str, "CustomOpenIdConnectProvider"]] = None,
**kwargs
):
super(IdentityProviders, self).__init__(kind=kind, **kwargs)
self.azure_active_directory = azure_active_directory
self.facebook = facebook
self.git_hub = git_hub
self.google = google
self.twitter = twitter
self.custom_open_id_connect_providers = custom_open_id_connect_providers
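# Illustrative sketch (not part of the generated models): an IdentityProviders
# payload that turns on the GitHub and Google providers defined earlier in this
# module. Registration details (client ids, secrets, login scopes) are omitted
# here and would be required in a real configuration.
def _example_identity_providers():
    """Return a sample IdentityProviders enabling GitHub and Google sign-in."""
    return IdentityProviders(
        git_hub=GitHub(enabled=True),
        google=Google(enabled=True),
    )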
class InboundEnvironmentEndpoint(msrest.serialization.Model):
"""The IP Addresses and Ports that require inbound network access to and within the subnet of the App Service Environment.
:param description: Short text describing the purpose of the network traffic.
:type description: str
:param endpoints: The IP addresses, in CIDR notation, that network traffic will originate from.
:type endpoints: list[str]
:param ports: The ports on which network traffic will arrive at the App Service Environment.
:type ports: list[str]
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'endpoints': {'key': 'endpoints', 'type': '[str]'},
'ports': {'key': 'ports', 'type': '[str]'},
}
def __init__(
self,
*,
description: Optional[str] = None,
endpoints: Optional[List[str]] = None,
ports: Optional[List[str]] = None,
**kwargs
):
super(InboundEnvironmentEndpoint, self).__init__(**kwargs)
self.description = description
self.endpoints = endpoints
self.ports = ports
class InboundEnvironmentEndpointCollection(msrest.serialization.Model):
"""Collection of Inbound Environment Endpoints.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.InboundEnvironmentEndpoint]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[InboundEnvironmentEndpoint]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["InboundEnvironmentEndpoint"],
**kwargs
):
super(InboundEnvironmentEndpointCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class IpSecurityRestriction(msrest.serialization.Model):
"""IP security restriction on an app.
:param ip_address: IP address the security restriction is valid for.
It can be either a pure IPv4 address (in which case the SubnetMask property is required) or
CIDR notation such as ipv4/mask (leading-bit match). For CIDR,
the SubnetMask property must not be specified.
:type ip_address: str
:param subnet_mask: Subnet mask for the range of IP addresses the restriction is valid for.
:type subnet_mask: str
:param vnet_subnet_resource_id: Virtual network resource id.
:type vnet_subnet_resource_id: str
:param vnet_traffic_tag: (internal) Vnet traffic tag.
:type vnet_traffic_tag: int
:param subnet_traffic_tag: (internal) Subnet traffic tag.
:type subnet_traffic_tag: int
:param action: Allow or Deny access for this IP range.
:type action: str
:param tag: Defines what this IP filter will be used for. This is to support IP filtering on
proxies. Possible values include: "Default", "XffProxy", "ServiceTag".
:type tag: str or ~azure.mgmt.web.v2020_06_01.models.IpFilterTag
:param priority: Priority of IP restriction rule.
:type priority: int
:param name: IP restriction rule name.
:type name: str
:param description: IP restriction rule description.
:type description: str
:param headers: IP restriction rule headers.
X-Forwarded-Host
(https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-Host#Examples).
The matching logic is:
* If the property is null or empty (default), all hosts(or lack of) are allowed.
* A value is compared using ordinal-ignore-case (excluding port number).
* Subdomain wildcards are permitted but don't match the root domain. For example,
*.contoso.com matches the subdomain foo.contoso.com
but not the root domain contoso.com or multi-level foo.bar.contoso.com
* Unicode host names are allowed but are converted to Punycode for matching.
X-Forwarded-For
(https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For#Examples).
The matching logic is:
* If the property is null or empty (default), any forwarded-for chains (or lack of) are
allowed.
* If any address (excluding port number) in the chain (comma separated) matches the CIDR
defined by the property.
X-Azure-FDID and X-FD-HealthProbe.
The matching logic is exact match.
:type headers: dict[str, list[str]]
"""
_attribute_map = {
'ip_address': {'key': 'ipAddress', 'type': 'str'},
'subnet_mask': {'key': 'subnetMask', 'type': 'str'},
'vnet_subnet_resource_id': {'key': 'vnetSubnetResourceId', 'type': 'str'},
'vnet_traffic_tag': {'key': 'vnetTrafficTag', 'type': 'int'},
'subnet_traffic_tag': {'key': 'subnetTrafficTag', 'type': 'int'},
'action': {'key': 'action', 'type': 'str'},
'tag': {'key': 'tag', 'type': 'str'},
'priority': {'key': 'priority', 'type': 'int'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'headers': {'key': 'headers', 'type': '{[str]}'},
}
def __init__(
self,
*,
ip_address: Optional[str] = None,
subnet_mask: Optional[str] = None,
vnet_subnet_resource_id: Optional[str] = None,
vnet_traffic_tag: Optional[int] = None,
subnet_traffic_tag: Optional[int] = None,
action: Optional[str] = None,
tag: Optional[Union[str, "IpFilterTag"]] = None,
priority: Optional[int] = None,
name: Optional[str] = None,
description: Optional[str] = None,
headers: Optional[Dict[str, List[str]]] = None,
**kwargs
):
super(IpSecurityRestriction, self).__init__(**kwargs)
self.ip_address = ip_address
self.subnet_mask = subnet_mask
self.vnet_subnet_resource_id = vnet_subnet_resource_id
self.vnet_traffic_tag = vnet_traffic_tag
self.subnet_traffic_tag = subnet_traffic_tag
self.action = action
self.tag = tag
self.priority = priority
self.name = name
self.description = description
self.headers = headers
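# Illustrative sketch (not part of the generated models): an Allow rule written
# in CIDR form (so subnet_mask is left unset, per the docstring) that also pins
# the X-Forwarded-Host header. The address range, priority, and host name are
# hypothetical.
def _example_ip_security_restriction():
    """Return a sample Allow rule for a corporate address range."""
    return IpSecurityRestriction(
        ip_address="203.0.113.0/24",          # CIDR notation; no subnet_mask
        action="Allow",
        priority=100,
        name="corp-network",
        description="Allow traffic from the corporate range only.",
        headers={"X-Forwarded-Host": ["www.contoso.com"]},
    )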
class JwtClaimChecks(ProxyOnlyResource):
"""JwtClaimChecks.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param allowed_groups:
:type allowed_groups: list[str]
:param allowed_client_applications:
:type allowed_client_applications: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'allowed_groups': {'key': 'properties.allowedGroups', 'type': '[str]'},
'allowed_client_applications': {'key': 'properties.allowedClientApplications', 'type': '[str]'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
allowed_groups: Optional[List[str]] = None,
allowed_client_applications: Optional[List[str]] = None,
**kwargs
):
super(JwtClaimChecks, self).__init__(kind=kind, **kwargs)
self.allowed_groups = allowed_groups
self.allowed_client_applications = allowed_client_applications
class KeyInfo(msrest.serialization.Model):
"""Function key info.
:param name: Key name.
:type name: str
:param value: Key value.
:type value: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
value: Optional[str] = None,
**kwargs
):
super(KeyInfo, self).__init__(**kwargs)
self.name = name
self.value = value
class LocalizableString(msrest.serialization.Model):
"""Localizable string object containing the name and a localized value.
:param value: Non-localized name.
:type value: str
:param localized_value: Localized name.
:type localized_value: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'localized_value': {'key': 'localizedValue', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[str] = None,
localized_value: Optional[str] = None,
**kwargs
):
super(LocalizableString, self).__init__(**kwargs)
self.value = value
self.localized_value = localized_value
class Login(ProxyOnlyResource):
"""Login.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param routes:
:type routes: ~azure.mgmt.web.v2020_06_01.models.LoginRoutes
:param token_store:
:type token_store: ~azure.mgmt.web.v2020_06_01.models.TokenStore
:param preserve_url_fragments_for_logins:
:type preserve_url_fragments_for_logins: bool
:param allowed_external_redirect_urls:
:type allowed_external_redirect_urls: list[str]
:param cookie_expiration:
:type cookie_expiration: ~azure.mgmt.web.v2020_06_01.models.CookieExpiration
:param nonce:
:type nonce: ~azure.mgmt.web.v2020_06_01.models.Nonce
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'routes': {'key': 'properties.routes', 'type': 'LoginRoutes'},
'token_store': {'key': 'properties.tokenStore', 'type': 'TokenStore'},
'preserve_url_fragments_for_logins': {'key': 'properties.preserveUrlFragmentsForLogins', 'type': 'bool'},
'allowed_external_redirect_urls': {'key': 'properties.allowedExternalRedirectUrls', 'type': '[str]'},
'cookie_expiration': {'key': 'properties.cookieExpiration', 'type': 'CookieExpiration'},
'nonce': {'key': 'properties.nonce', 'type': 'Nonce'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
routes: Optional["LoginRoutes"] = None,
token_store: Optional["TokenStore"] = None,
preserve_url_fragments_for_logins: Optional[bool] = None,
allowed_external_redirect_urls: Optional[List[str]] = None,
cookie_expiration: Optional["CookieExpiration"] = None,
nonce: Optional["Nonce"] = None,
**kwargs
):
super(Login, self).__init__(kind=kind, **kwargs)
self.routes = routes
self.token_store = token_store
self.preserve_url_fragments_for_logins = preserve_url_fragments_for_logins
self.allowed_external_redirect_urls = allowed_external_redirect_urls
self.cookie_expiration = cookie_expiration
self.nonce = nonce
class LoginRoutes(ProxyOnlyResource):
"""LoginRoutes.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param logout_endpoint:
:type logout_endpoint: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'logout_endpoint': {'key': 'properties.logoutEndpoint', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
logout_endpoint: Optional[str] = None,
**kwargs
):
super(LoginRoutes, self).__init__(kind=kind, **kwargs)
self.logout_endpoint = logout_endpoint
class LoginScopes(ProxyOnlyResource):
"""LoginScopes.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param scopes:
:type scopes: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'scopes': {'key': 'properties.scopes', 'type': '[str]'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
scopes: Optional[List[str]] = None,
**kwargs
):
super(LoginScopes, self).__init__(kind=kind, **kwargs)
self.scopes = scopes
class LogSpecification(msrest.serialization.Model):
"""Log Definition of a single resource metric.
:param name:
:type name: str
:param display_name:
:type display_name: str
:param blob_duration:
:type blob_duration: str
:param log_filter_pattern:
:type log_filter_pattern: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'blob_duration': {'key': 'blobDuration', 'type': 'str'},
'log_filter_pattern': {'key': 'logFilterPattern', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
display_name: Optional[str] = None,
blob_duration: Optional[str] = None,
log_filter_pattern: Optional[str] = None,
**kwargs
):
super(LogSpecification, self).__init__(**kwargs)
self.name = name
self.display_name = display_name
self.blob_duration = blob_duration
self.log_filter_pattern = log_filter_pattern
class ManagedServiceIdentity(msrest.serialization.Model):
"""Managed service identity.
Variables are only populated by the server, and will be ignored when sending a request.
:param type: Type of managed service identity. Possible values include: "SystemAssigned",
"UserAssigned", "SystemAssigned, UserAssigned", "None".
:type type: str or ~azure.mgmt.web.v2020_06_01.models.ManagedServiceIdentityType
:ivar tenant_id: Tenant of managed service identity.
:vartype tenant_id: str
:ivar principal_id: Principal Id of managed service identity.
:vartype principal_id: str
:param user_assigned_identities: The list of user assigned identities associated with the
resource. The user identity dictionary key references will be ARM resource ids in the form:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
:type user_assigned_identities: dict[str,
~azure.mgmt.web.v2020_06_01.models.Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties]
"""
_validation = {
'tenant_id': {'readonly': True},
'principal_id': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'principal_id': {'key': 'principalId', 'type': 'str'},
'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties}'},
}
def __init__(
self,
*,
type: Optional[Union[str, "ManagedServiceIdentityType"]] = None,
user_assigned_identities: Optional[Dict[str, "Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties"]] = None,
**kwargs
):
super(ManagedServiceIdentity, self).__init__(**kwargs)
self.type = type
self.tenant_id = None
self.principal_id = None
self.user_assigned_identities = user_assigned_identities
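# Illustrative sketch (not part of the generated models): requesting a
# system-assigned identity. tenant_id and principal_id are read-only and are
# filled in by the service; user-assigned identities are omitted because they
# require an additional model type keyed by ARM resource id.
def _example_system_assigned_identity():
    """Return a sample ManagedServiceIdentity requesting a system-assigned identity."""
    return ManagedServiceIdentity(type="SystemAssigned")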
class MetricAvailability(msrest.serialization.Model):
"""Retention policy of a resource metric.
:param time_grain:
:type time_grain: str
:param blob_duration:
:type blob_duration: str
"""
_attribute_map = {
'time_grain': {'key': 'timeGrain', 'type': 'str'},
'blob_duration': {'key': 'blobDuration', 'type': 'str'},
}
def __init__(
self,
*,
time_grain: Optional[str] = None,
blob_duration: Optional[str] = None,
**kwargs
):
super(MetricAvailability, self).__init__(**kwargs)
self.time_grain = time_grain
self.blob_duration = blob_duration
class MetricSpecification(msrest.serialization.Model):
"""Definition of a single resource metric.
:param name:
:type name: str
:param display_name:
:type display_name: str
:param display_description:
:type display_description: str
:param unit:
:type unit: str
:param aggregation_type:
:type aggregation_type: str
:param supports_instance_level_aggregation:
:type supports_instance_level_aggregation: bool
:param enable_regional_mdm_account:
:type enable_regional_mdm_account: bool
:param source_mdm_account:
:type source_mdm_account: str
:param source_mdm_namespace:
:type source_mdm_namespace: str
:param metric_filter_pattern:
:type metric_filter_pattern: str
:param fill_gap_with_zero:
:type fill_gap_with_zero: bool
:param is_internal:
:type is_internal: bool
:param dimensions:
:type dimensions: list[~azure.mgmt.web.v2020_06_01.models.Dimension]
:param category:
:type category: str
:param availabilities:
:type availabilities: list[~azure.mgmt.web.v2020_06_01.models.MetricAvailability]
:param supported_time_grain_types:
:type supported_time_grain_types: list[str]
:param supported_aggregation_types:
:type supported_aggregation_types: list[str]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'display_description': {'key': 'displayDescription', 'type': 'str'},
'unit': {'key': 'unit', 'type': 'str'},
'aggregation_type': {'key': 'aggregationType', 'type': 'str'},
'supports_instance_level_aggregation': {'key': 'supportsInstanceLevelAggregation', 'type': 'bool'},
'enable_regional_mdm_account': {'key': 'enableRegionalMdmAccount', 'type': 'bool'},
'source_mdm_account': {'key': 'sourceMdmAccount', 'type': 'str'},
'source_mdm_namespace': {'key': 'sourceMdmNamespace', 'type': 'str'},
'metric_filter_pattern': {'key': 'metricFilterPattern', 'type': 'str'},
'fill_gap_with_zero': {'key': 'fillGapWithZero', 'type': 'bool'},
'is_internal': {'key': 'isInternal', 'type': 'bool'},
'dimensions': {'key': 'dimensions', 'type': '[Dimension]'},
'category': {'key': 'category', 'type': 'str'},
'availabilities': {'key': 'availabilities', 'type': '[MetricAvailability]'},
'supported_time_grain_types': {'key': 'supportedTimeGrainTypes', 'type': '[str]'},
'supported_aggregation_types': {'key': 'supportedAggregationTypes', 'type': '[str]'},
}
def __init__(
self,
*,
name: Optional[str] = None,
display_name: Optional[str] = None,
display_description: Optional[str] = None,
unit: Optional[str] = None,
aggregation_type: Optional[str] = None,
supports_instance_level_aggregation: Optional[bool] = None,
enable_regional_mdm_account: Optional[bool] = None,
source_mdm_account: Optional[str] = None,
source_mdm_namespace: Optional[str] = None,
metric_filter_pattern: Optional[str] = None,
fill_gap_with_zero: Optional[bool] = None,
is_internal: Optional[bool] = None,
dimensions: Optional[List["Dimension"]] = None,
category: Optional[str] = None,
availabilities: Optional[List["MetricAvailability"]] = None,
supported_time_grain_types: Optional[List[str]] = None,
supported_aggregation_types: Optional[List[str]] = None,
**kwargs
):
super(MetricSpecification, self).__init__(**kwargs)
self.name = name
self.display_name = display_name
self.display_description = display_description
self.unit = unit
self.aggregation_type = aggregation_type
self.supports_instance_level_aggregation = supports_instance_level_aggregation
self.enable_regional_mdm_account = enable_regional_mdm_account
self.source_mdm_account = source_mdm_account
self.source_mdm_namespace = source_mdm_namespace
self.metric_filter_pattern = metric_filter_pattern
self.fill_gap_with_zero = fill_gap_with_zero
self.is_internal = is_internal
self.dimensions = dimensions
self.category = category
self.availabilities = availabilities
self.supported_time_grain_types = supported_time_grain_types
self.supported_aggregation_types = supported_aggregation_types
class MigrateMySqlRequest(ProxyOnlyResource):
"""MySQL migration request.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param connection_string: Connection string to the remote MySQL database.
:type connection_string: str
:param migration_type: The type of migration operation to be done. Possible values include:
"LocalToRemote", "RemoteToLocal".
:type migration_type: str or ~azure.mgmt.web.v2020_06_01.models.MySqlMigrationType
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'connection_string': {'key': 'properties.connectionString', 'type': 'str'},
'migration_type': {'key': 'properties.migrationType', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
connection_string: Optional[str] = None,
migration_type: Optional[Union[str, "MySqlMigrationType"]] = None,
**kwargs
):
super(MigrateMySqlRequest, self).__init__(kind=kind, **kwargs)
self.connection_string = connection_string
self.migration_type = migration_type
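# Illustrative sketch (not part of the generated models): a local-to-remote
# MySQL migration request. The connection string is a placeholder; a real one
# would point at the target MySQL server.
def _example_migrate_my_sql_request():
    """Return a sample MigrateMySqlRequest for a LocalToRemote migration."""
    return MigrateMySqlRequest(
        connection_string="Server=contoso-mysql;Database=appdb;Uid=admin;Pwd=<placeholder>;",
        migration_type="LocalToRemote",
    )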
class MigrateMySqlStatus(ProxyOnlyResource):
"""MySQL migration status.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar migration_operation_status: Status of the migration task. Possible values include:
"InProgress", "Failed", "Succeeded", "TimedOut", "Created".
:vartype migration_operation_status: str or ~azure.mgmt.web.v2020_06_01.models.OperationStatus
:ivar operation_id: Operation ID for the migration task.
:vartype operation_id: str
:ivar local_my_sql_enabled: True if the web app has in-app MySQL enabled.
:vartype local_my_sql_enabled: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'migration_operation_status': {'readonly': True},
'operation_id': {'readonly': True},
'local_my_sql_enabled': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'migration_operation_status': {'key': 'properties.migrationOperationStatus', 'type': 'str'},
'operation_id': {'key': 'properties.operationId', 'type': 'str'},
'local_my_sql_enabled': {'key': 'properties.localMySqlEnabled', 'type': 'bool'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(MigrateMySqlStatus, self).__init__(kind=kind, **kwargs)
self.migration_operation_status = None
self.operation_id = None
self.local_my_sql_enabled = None
class MSDeploy(ProxyOnlyResource):
"""MSDeploy ARM PUT information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param package_uri: Package URI.
:type package_uri: str
:param connection_string: SQL Connection String.
:type connection_string: str
:param db_type: Database Type.
:type db_type: str
:param set_parameters_xml_file_uri: URI of MSDeploy Parameters file. Must not be set if
SetParameters is used.
:type set_parameters_xml_file_uri: str
:param set_parameters: MSDeploy Parameters. Must not be set if SetParametersXmlFileUri is used.
:type set_parameters: dict[str, str]
:param skip_app_data: Controls whether the MSDeploy operation skips the App_Data directory.
If set to :code:`<code>true</code>`, the existing App_Data directory on the destination
will not be deleted, and any App_Data directory in the source will be ignored.
Setting is :code:`<code>false</code>` by default.
:type skip_app_data: bool
:param app_offline: Sets the AppOffline rule while the MSDeploy operation executes.
Setting is :code:`<code>false</code>` by default.
:type app_offline: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'package_uri': {'key': 'properties.packageUri', 'type': 'str'},
'connection_string': {'key': 'properties.connectionString', 'type': 'str'},
'db_type': {'key': 'properties.dbType', 'type': 'str'},
'set_parameters_xml_file_uri': {'key': 'properties.setParametersXmlFileUri', 'type': 'str'},
'set_parameters': {'key': 'properties.setParameters', 'type': '{str}'},
'skip_app_data': {'key': 'properties.skipAppData', 'type': 'bool'},
'app_offline': {'key': 'properties.appOffline', 'type': 'bool'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
package_uri: Optional[str] = None,
connection_string: Optional[str] = None,
db_type: Optional[str] = None,
set_parameters_xml_file_uri: Optional[str] = None,
set_parameters: Optional[Dict[str, str]] = None,
skip_app_data: Optional[bool] = None,
app_offline: Optional[bool] = None,
**kwargs
):
super(MSDeploy, self).__init__(kind=kind, **kwargs)
self.package_uri = package_uri
self.connection_string = connection_string
self.db_type = db_type
self.set_parameters_xml_file_uri = set_parameters_xml_file_uri
self.set_parameters = set_parameters
self.skip_app_data = skip_app_data
self.app_offline = app_offline
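# Illustrative sketch (not part of the generated models): an MSDeploy payload
# that deploys from a package URI and supplies inline SetParameters. As the
# docstring notes, set_parameters and set_parameters_xml_file_uri are mutually
# exclusive, so only one is provided. The URI and parameter names are hypothetical.
def _example_msdeploy():
    """Return a sample MSDeploy request using inline SetParameters."""
    return MSDeploy(
        package_uri="https://contoso.blob.core.windows.net/packages/site.zip",  # assumed package location
        set_parameters={"IIS Web Application Name": "contoso-app"},              # assumed parameter
        skip_app_data=True,    # preserve the existing App_Data directory on the destination
        app_offline=True,      # take the app offline while the deployment runs
    )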
class MSDeployLog(ProxyOnlyResource):
"""MSDeploy log.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar entries: List of log entry messages.
:vartype entries: list[~azure.mgmt.web.v2020_06_01.models.MSDeployLogEntry]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'entries': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'entries': {'key': 'properties.entries', 'type': '[MSDeployLogEntry]'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(MSDeployLog, self).__init__(kind=kind, **kwargs)
self.entries = None
class MSDeployLogEntry(msrest.serialization.Model):
"""MSDeploy log entry.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar time: Timestamp of log entry.
:vartype time: ~datetime.datetime
:ivar type: Log entry type. Possible values include: "Message", "Warning", "Error".
:vartype type: str or ~azure.mgmt.web.v2020_06_01.models.MSDeployLogEntryType
:ivar message: Log entry message.
:vartype message: str
"""
_validation = {
'time': {'readonly': True},
'type': {'readonly': True},
'message': {'readonly': True},
}
_attribute_map = {
'time': {'key': 'time', 'type': 'iso-8601'},
'type': {'key': 'type', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(MSDeployLogEntry, self).__init__(**kwargs)
self.time = None
self.type = None
self.message = None
class MSDeployStatus(ProxyOnlyResource):
"""MSDeploy ARM response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar deployer: Username of deployer.
:vartype deployer: str
:ivar provisioning_state: Provisioning state. Possible values include: "accepted", "running",
"succeeded", "failed", "canceled".
:vartype provisioning_state: str or
~azure.mgmt.web.v2020_06_01.models.MSDeployProvisioningState
:ivar start_time: Start time of deploy operation.
:vartype start_time: ~datetime.datetime
:ivar end_time: End time of deploy operation.
:vartype end_time: ~datetime.datetime
:ivar complete: Whether the deployment operation has completed.
:vartype complete: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'deployer': {'readonly': True},
'provisioning_state': {'readonly': True},
'start_time': {'readonly': True},
'end_time': {'readonly': True},
'complete': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'deployer': {'key': 'properties.deployer', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},
'end_time': {'key': 'properties.endTime', 'type': 'iso-8601'},
'complete': {'key': 'properties.complete', 'type': 'bool'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(MSDeployStatus, self).__init__(kind=kind, **kwargs)
self.deployer = None
self.provisioning_state = None
self.start_time = None
self.end_time = None
self.complete = None
class NameIdentifier(msrest.serialization.Model):
"""Identifies an object.
:param name: Name of the object.
:type name: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
**kwargs
):
super(NameIdentifier, self).__init__(**kwargs)
self.name = name
class NameIdentifierCollection(msrest.serialization.Model):
"""Collection of domain name identifiers.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.NameIdentifier]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[NameIdentifier]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["NameIdentifier"],
**kwargs
):
super(NameIdentifierCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class NameValuePair(msrest.serialization.Model):
"""Name value pair.
:param name: Pair name.
:type name: str
:param value: Pair value.
:type value: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
value: Optional[str] = None,
**kwargs
):
super(NameValuePair, self).__init__(**kwargs)
self.name = name
self.value = value
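# Illustrative sketch: NameValuePair is the generic key/value shape used by
# several site properties (for example app settings). A minimal construction,
# with a placeholder setting name and value:
#
#   setting = NameValuePair(name="WEBSITE_TIME_ZONE", value="UTC")
#   setting.as_dict()   # -> {'name': 'WEBSITE_TIME_ZONE', 'value': 'UTC'}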
class NetworkAccessControlEntry(msrest.serialization.Model):
"""Network access control entry.
:param action: Action object. Possible values include: "Permit", "Deny".
:type action: str or ~azure.mgmt.web.v2020_06_01.models.AccessControlEntryAction
:param description: Description of network access control entry.
:type description: str
:param order: Order of precedence.
:type order: int
:param remote_subnet: Remote subnet.
:type remote_subnet: str
"""
_attribute_map = {
'action': {'key': 'action', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'order': {'key': 'order', 'type': 'int'},
'remote_subnet': {'key': 'remoteSubnet', 'type': 'str'},
}
def __init__(
self,
*,
action: Optional[Union[str, "AccessControlEntryAction"]] = None,
description: Optional[str] = None,
order: Optional[int] = None,
remote_subnet: Optional[str] = None,
**kwargs
):
super(NetworkAccessControlEntry, self).__init__(**kwargs)
self.action = action
self.description = description
self.order = order
self.remote_subnet = remote_subnet
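# Illustrative sketch: access control entries carry an 'order' of precedence
# (the exact evaluation semantics come from the service). The subnets below are
# example values only.
#
#   allow_corp = NetworkAccessControlEntry(
#       action="Permit", description="Corporate range", order=1,
#       remote_subnet="10.0.0.0/16",
#   )
#   deny_rest = NetworkAccessControlEntry(action="Deny", order=2, remote_subnet="0.0.0.0/0")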
class NetworkFeatures(ProxyOnlyResource):
"""Full view of network features for an app (presently VNET integration and Hybrid Connections).
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar virtual_network_name: The Virtual Network name.
:vartype virtual_network_name: str
:ivar virtual_network_connection: The Virtual Network summary view.
:vartype virtual_network_connection: ~azure.mgmt.web.v2020_06_01.models.VnetInfo
:ivar hybrid_connections: The Hybrid Connections summary view.
:vartype hybrid_connections:
list[~azure.mgmt.web.v2020_06_01.models.RelayServiceConnectionEntity]
:ivar hybrid_connections_v2: The Hybrid Connection V2 (Service Bus) view.
:vartype hybrid_connections_v2: list[~azure.mgmt.web.v2020_06_01.models.HybridConnection]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'virtual_network_name': {'readonly': True},
'virtual_network_connection': {'readonly': True},
'hybrid_connections': {'readonly': True},
'hybrid_connections_v2': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'virtual_network_name': {'key': 'properties.virtualNetworkName', 'type': 'str'},
'virtual_network_connection': {'key': 'properties.virtualNetworkConnection', 'type': 'VnetInfo'},
'hybrid_connections': {'key': 'properties.hybridConnections', 'type': '[RelayServiceConnectionEntity]'},
'hybrid_connections_v2': {'key': 'properties.hybridConnectionsV2', 'type': '[HybridConnection]'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(NetworkFeatures, self).__init__(kind=kind, **kwargs)
self.virtual_network_name = None
self.virtual_network_connection = None
self.hybrid_connections = None
self.hybrid_connections_v2 = None
class NetworkTrace(msrest.serialization.Model):
"""Network trace.
:param path: Local file path for the captured network trace file.
:type path: str
:param status: Current status of the network trace operation, same as Operation.Status
(InProgress/Succeeded/Failed).
:type status: str
:param message: Detailed message of a network trace operation, e.g. error message in case of
failure.
:type message: str
"""
_attribute_map = {
'path': {'key': 'path', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
path: Optional[str] = None,
status: Optional[str] = None,
message: Optional[str] = None,
**kwargs
):
super(NetworkTrace, self).__init__(**kwargs)
self.path = path
self.status = status
self.message = message
class Nonce(ProxyOnlyResource):
"""Nonce.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param validate_nonce:
:type validate_nonce: bool
:param nonce_expiration_interval:
:type nonce_expiration_interval: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'validate_nonce': {'key': 'properties.validateNonce', 'type': 'bool'},
'nonce_expiration_interval': {'key': 'properties.nonceExpirationInterval', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
validate_nonce: Optional[bool] = None,
nonce_expiration_interval: Optional[str] = None,
**kwargs
):
super(Nonce, self).__init__(kind=kind, **kwargs)
self.validate_nonce = validate_nonce
self.nonce_expiration_interval = nonce_expiration_interval
class OpenIdConnectClientCredential(ProxyOnlyResource):
"""OpenIdConnectClientCredential.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar method: Default value: "ClientSecretPost".
:vartype method: str
:param client_secret_setting_name:
:type client_secret_setting_name: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'method': {'constant': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'method': {'key': 'properties.method', 'type': 'str'},
'client_secret_setting_name': {'key': 'properties.clientSecretSettingName', 'type': 'str'},
}
method = "ClientSecretPost"
def __init__(
self,
*,
kind: Optional[str] = None,
client_secret_setting_name: Optional[str] = None,
**kwargs
):
super(OpenIdConnectClientCredential, self).__init__(kind=kind, **kwargs)
self.client_secret_setting_name = client_secret_setting_name
class OpenIdConnectConfig(ProxyOnlyResource):
"""OpenIdConnectConfig.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param authorization_endpoint:
:type authorization_endpoint: str
:param token_endpoint:
:type token_endpoint: str
:param issuer:
:type issuer: str
:param certification_uri:
:type certification_uri: str
:param well_known_open_id_configuration:
:type well_known_open_id_configuration: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'authorization_endpoint': {'key': 'properties.authorizationEndpoint', 'type': 'str'},
'token_endpoint': {'key': 'properties.tokenEndpoint', 'type': 'str'},
'issuer': {'key': 'properties.issuer', 'type': 'str'},
'certification_uri': {'key': 'properties.certificationUri', 'type': 'str'},
'well_known_open_id_configuration': {'key': 'properties.wellKnownOpenIdConfiguration', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
authorization_endpoint: Optional[str] = None,
token_endpoint: Optional[str] = None,
issuer: Optional[str] = None,
certification_uri: Optional[str] = None,
well_known_open_id_configuration: Optional[str] = None,
**kwargs
):
super(OpenIdConnectConfig, self).__init__(kind=kind, **kwargs)
self.authorization_endpoint = authorization_endpoint
self.token_endpoint = token_endpoint
self.issuer = issuer
self.certification_uri = certification_uri
self.well_known_open_id_configuration = well_known_open_id_configuration
class OpenIdConnectLogin(ProxyOnlyResource):
"""OpenIdConnectLogin.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param name_claim_type:
:type name_claim_type: str
:param scopes:
:type scopes: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'name_claim_type': {'key': 'properties.nameClaimType', 'type': 'str'},
'scopes': {'key': 'properties.scopes', 'type': '[str]'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
name_claim_type: Optional[str] = None,
scopes: Optional[List[str]] = None,
**kwargs
):
super(OpenIdConnectLogin, self).__init__(kind=kind, **kwargs)
self.name_claim_type = name_claim_type
self.scopes = scopes
class OpenIdConnectRegistration(ProxyOnlyResource):
"""OpenIdConnectRegistration.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param client_id:
:type client_id: str
:param client_credential:
:type client_credential: ~azure.mgmt.web.v2020_06_01.models.OpenIdConnectClientCredential
:param open_id_connect_configuration:
:type open_id_connect_configuration: ~azure.mgmt.web.v2020_06_01.models.OpenIdConnectConfig
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'client_id': {'key': 'properties.clientId', 'type': 'str'},
'client_credential': {'key': 'properties.clientCredential', 'type': 'OpenIdConnectClientCredential'},
'open_id_connect_configuration': {'key': 'properties.openIdConnectConfiguration', 'type': 'OpenIdConnectConfig'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
client_id: Optional[str] = None,
client_credential: Optional["OpenIdConnectClientCredential"] = None,
open_id_connect_configuration: Optional["OpenIdConnectConfig"] = None,
**kwargs
):
super(OpenIdConnectRegistration, self).__init__(kind=kind, **kwargs)
self.client_id = client_id
self.client_credential = client_credential
self.open_id_connect_configuration = open_id_connect_configuration
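# Illustrative sketch: wiring an OpenID Connect registration together. The
# endpoint URL, client ID and app-setting name are hypothetical placeholders;
# the client secret itself is referenced indirectly via client_secret_setting_name.
#
#   registration = OpenIdConnectRegistration(
#       client_id="00000000-0000-0000-0000-000000000000",
#       client_credential=OpenIdConnectClientCredential(
#           client_secret_setting_name="MY_OIDC_CLIENT_SECRET",
#       ),
#       open_id_connect_configuration=OpenIdConnectConfig(
#           well_known_open_id_configuration="https://login.example.invalid/.well-known/openid-configuration",
#       ),
#   )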
class Operation(msrest.serialization.Model):
"""An operation on a resource.
:param id: Operation ID.
:type id: str
:param name: Operation name.
:type name: str
:param status: The current status of the operation. Possible values include: "InProgress",
"Failed", "Succeeded", "TimedOut", "Created".
:type status: str or ~azure.mgmt.web.v2020_06_01.models.OperationStatus
    :param errors: Any errors associated with the operation.
:type errors: list[~azure.mgmt.web.v2020_06_01.models.ErrorEntity]
:param created_time: Time when operation has started.
:type created_time: ~datetime.datetime
:param modified_time: Time when operation has been updated.
:type modified_time: ~datetime.datetime
:param expiration_time: Time when operation will expire.
:type expiration_time: ~datetime.datetime
:param geo_master_operation_id: Applicable only for stamp operation ids.
:type geo_master_operation_id: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'errors': {'key': 'errors', 'type': '[ErrorEntity]'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'modified_time': {'key': 'modifiedTime', 'type': 'iso-8601'},
'expiration_time': {'key': 'expirationTime', 'type': 'iso-8601'},
'geo_master_operation_id': {'key': 'geoMasterOperationId', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
name: Optional[str] = None,
status: Optional[Union[str, "OperationStatus"]] = None,
errors: Optional[List["ErrorEntity"]] = None,
created_time: Optional[datetime.datetime] = None,
modified_time: Optional[datetime.datetime] = None,
expiration_time: Optional[datetime.datetime] = None,
geo_master_operation_id: Optional[str] = None,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.id = id
self.name = name
self.status = status
self.errors = errors
self.created_time = created_time
self.modified_time = modified_time
self.expiration_time = expiration_time
self.geo_master_operation_id = geo_master_operation_id
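# Illustrative sketch: inspecting a long-running Operation returned by the
# service (values here would come from a hypothetical deserialized response).
#
#   def operation_failed(op: "Operation") -> bool:
#       """Return True when the operation failed, printing its errors."""
#       if op.status == "Failed":
#           for err in op.errors or []:
#               print(err.code, err.message)   # ErrorEntity fields, assumed shape
#           return True
#       return False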
class OutboundEnvironmentEndpoint(msrest.serialization.Model):
"""Endpoints accessed for a common purpose that the App Service Environment requires outbound network access to.
:param category: The type of service accessed by the App Service Environment, e.g., Azure
Storage, Azure SQL Database, and Azure Active Directory.
:type category: str
:param endpoints: The endpoints that the App Service Environment reaches the service at.
:type endpoints: list[~azure.mgmt.web.v2020_06_01.models.EndpointDependency]
"""
_attribute_map = {
'category': {'key': 'category', 'type': 'str'},
'endpoints': {'key': 'endpoints', 'type': '[EndpointDependency]'},
}
def __init__(
self,
*,
category: Optional[str] = None,
endpoints: Optional[List["EndpointDependency"]] = None,
**kwargs
):
super(OutboundEnvironmentEndpoint, self).__init__(**kwargs)
self.category = category
self.endpoints = endpoints
class OutboundEnvironmentEndpointCollection(msrest.serialization.Model):
"""Collection of Outbound Environment Endpoints.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.OutboundEnvironmentEndpoint]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[OutboundEnvironmentEndpoint]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["OutboundEnvironmentEndpoint"],
**kwargs
):
super(OutboundEnvironmentEndpointCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class PerfMonCounterCollection(msrest.serialization.Model):
"""Collection of performance monitor counters.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.PerfMonResponse]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[PerfMonResponse]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["PerfMonResponse"],
**kwargs
):
super(PerfMonCounterCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class PerfMonResponse(msrest.serialization.Model):
"""Performance monitor API response.
:param code: The response code.
:type code: str
:param message: The message.
:type message: str
:param data: The performance monitor counters.
:type data: ~azure.mgmt.web.v2020_06_01.models.PerfMonSet
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'data': {'key': 'data', 'type': 'PerfMonSet'},
}
def __init__(
self,
*,
code: Optional[str] = None,
message: Optional[str] = None,
data: Optional["PerfMonSet"] = None,
**kwargs
):
super(PerfMonResponse, self).__init__(**kwargs)
self.code = code
self.message = message
self.data = data
class PerfMonSample(msrest.serialization.Model):
"""Performance monitor sample in a set.
:param time: Point in time for which counter was measured.
:type time: ~datetime.datetime
:param instance_name: Name of the server on which the measurement is made.
:type instance_name: str
:param value: Value of counter at a certain time.
:type value: float
"""
_attribute_map = {
'time': {'key': 'time', 'type': 'iso-8601'},
'instance_name': {'key': 'instanceName', 'type': 'str'},
'value': {'key': 'value', 'type': 'float'},
}
def __init__(
self,
*,
time: Optional[datetime.datetime] = None,
instance_name: Optional[str] = None,
value: Optional[float] = None,
**kwargs
):
super(PerfMonSample, self).__init__(**kwargs)
self.time = time
self.instance_name = instance_name
self.value = value
class PerfMonSet(msrest.serialization.Model):
"""Metric information.
:param name: Unique key name of the counter.
:type name: str
:param start_time: Start time of the period.
:type start_time: ~datetime.datetime
:param end_time: End time of the period.
:type end_time: ~datetime.datetime
:param time_grain: Presented time grain.
:type time_grain: str
    :param values: Collection of performance monitor samples captured during this period.
:type values: list[~azure.mgmt.web.v2020_06_01.models.PerfMonSample]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'time_grain': {'key': 'timeGrain', 'type': 'str'},
'values': {'key': 'values', 'type': '[PerfMonSample]'},
}
def __init__(
self,
*,
name: Optional[str] = None,
start_time: Optional[datetime.datetime] = None,
end_time: Optional[datetime.datetime] = None,
time_grain: Optional[str] = None,
values: Optional[List["PerfMonSample"]] = None,
**kwargs
):
super(PerfMonSet, self).__init__(**kwargs)
self.name = name
self.start_time = start_time
self.end_time = end_time
self.time_grain = time_grain
self.values = values
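# Illustrative sketch of how the three performance-monitor models nest
# (PerfMonResponse -> PerfMonSet -> PerfMonSample) when reading a response:
#
#   def print_counter(resp: "PerfMonResponse") -> None:
#       """Walk a deserialized PerfMonResponse and print each sample."""
#       if resp.data is None:
#           return
#       for sample in resp.data.values or []:
#           print(resp.data.name, sample.time, sample.instance_name, sample.value)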
class PremierAddOn(Resource):
"""Premier add-on.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:param location: Required. Resource Location.
:type location: str
:ivar type: Resource type.
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param sku: Premier add on SKU.
:type sku: str
:param product: Premier add on Product.
:type product: str
:param vendor: Premier add on Vendor.
:type vendor: str
:param marketplace_publisher: Premier add on Marketplace publisher.
:type marketplace_publisher: str
:param marketplace_offer: Premier add on Marketplace offer.
:type marketplace_offer: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'location': {'required': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'properties.sku', 'type': 'str'},
'product': {'key': 'properties.product', 'type': 'str'},
'vendor': {'key': 'properties.vendor', 'type': 'str'},
'marketplace_publisher': {'key': 'properties.marketplacePublisher', 'type': 'str'},
'marketplace_offer': {'key': 'properties.marketplaceOffer', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
kind: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
sku: Optional[str] = None,
product: Optional[str] = None,
vendor: Optional[str] = None,
marketplace_publisher: Optional[str] = None,
marketplace_offer: Optional[str] = None,
**kwargs
):
super(PremierAddOn, self).__init__(kind=kind, location=location, tags=tags, **kwargs)
self.sku = sku
self.product = product
self.vendor = vendor
self.marketplace_publisher = marketplace_publisher
self.marketplace_offer = marketplace_offer
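# Illustrative sketch: PremierAddOn is a tracked (location-bearing) resource, so
# 'location' is required at construction time. The SKU, product and vendor
# values are hypothetical placeholders.
#
#   addon = PremierAddOn(
#       location="westeurope",
#       sku="free",
#       product="SampleProduct",
#       vendor="SampleVendor",
#       tags={"env": "dev"},
#   )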
class PremierAddOnOffer(ProxyOnlyResource):
"""Premier add-on offer.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param sku: Premier add on SKU.
:type sku: str
:param product: Premier add on offer Product.
:type product: str
:param vendor: Premier add on offer Vendor.
:type vendor: str
:param promo_code_required: :code:`<code>true</code>` if promotion code is required; otherwise,
:code:`<code>false</code>`.
:type promo_code_required: bool
:param quota: Premier add on offer Quota.
:type quota: int
:param web_hosting_plan_restrictions: App Service plans this offer is restricted to. Possible
values include: "None", "Free", "Shared", "Basic", "Standard", "Premium".
:type web_hosting_plan_restrictions: str or
~azure.mgmt.web.v2020_06_01.models.AppServicePlanRestrictions
:param privacy_policy_url: Privacy policy URL.
:type privacy_policy_url: str
:param legal_terms_url: Legal terms URL.
:type legal_terms_url: str
:param marketplace_publisher: Marketplace publisher.
:type marketplace_publisher: str
:param marketplace_offer: Marketplace offer.
:type marketplace_offer: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'sku': {'key': 'properties.sku', 'type': 'str'},
'product': {'key': 'properties.product', 'type': 'str'},
'vendor': {'key': 'properties.vendor', 'type': 'str'},
'promo_code_required': {'key': 'properties.promoCodeRequired', 'type': 'bool'},
'quota': {'key': 'properties.quota', 'type': 'int'},
'web_hosting_plan_restrictions': {'key': 'properties.webHostingPlanRestrictions', 'type': 'str'},
'privacy_policy_url': {'key': 'properties.privacyPolicyUrl', 'type': 'str'},
'legal_terms_url': {'key': 'properties.legalTermsUrl', 'type': 'str'},
'marketplace_publisher': {'key': 'properties.marketplacePublisher', 'type': 'str'},
'marketplace_offer': {'key': 'properties.marketplaceOffer', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
sku: Optional[str] = None,
product: Optional[str] = None,
vendor: Optional[str] = None,
promo_code_required: Optional[bool] = None,
quota: Optional[int] = None,
web_hosting_plan_restrictions: Optional[Union[str, "AppServicePlanRestrictions"]] = None,
privacy_policy_url: Optional[str] = None,
legal_terms_url: Optional[str] = None,
marketplace_publisher: Optional[str] = None,
marketplace_offer: Optional[str] = None,
**kwargs
):
super(PremierAddOnOffer, self).__init__(kind=kind, **kwargs)
self.sku = sku
self.product = product
self.vendor = vendor
self.promo_code_required = promo_code_required
self.quota = quota
self.web_hosting_plan_restrictions = web_hosting_plan_restrictions
self.privacy_policy_url = privacy_policy_url
self.legal_terms_url = legal_terms_url
self.marketplace_publisher = marketplace_publisher
self.marketplace_offer = marketplace_offer
class PremierAddOnOfferCollection(msrest.serialization.Model):
"""Collection of premier add-on offers.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.PremierAddOnOffer]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[PremierAddOnOffer]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["PremierAddOnOffer"],
**kwargs
):
super(PremierAddOnOfferCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class PremierAddOnPatchResource(ProxyOnlyResource):
"""ARM resource for a PremierAddOn.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param sku: Premier add on SKU.
:type sku: str
:param product: Premier add on Product.
:type product: str
:param vendor: Premier add on Vendor.
:type vendor: str
:param marketplace_publisher: Premier add on Marketplace publisher.
:type marketplace_publisher: str
:param marketplace_offer: Premier add on Marketplace offer.
:type marketplace_offer: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'sku': {'key': 'properties.sku', 'type': 'str'},
'product': {'key': 'properties.product', 'type': 'str'},
'vendor': {'key': 'properties.vendor', 'type': 'str'},
'marketplace_publisher': {'key': 'properties.marketplacePublisher', 'type': 'str'},
'marketplace_offer': {'key': 'properties.marketplaceOffer', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
sku: Optional[str] = None,
product: Optional[str] = None,
vendor: Optional[str] = None,
marketplace_publisher: Optional[str] = None,
marketplace_offer: Optional[str] = None,
**kwargs
):
super(PremierAddOnPatchResource, self).__init__(kind=kind, **kwargs)
self.sku = sku
self.product = product
self.vendor = vendor
self.marketplace_publisher = marketplace_publisher
self.marketplace_offer = marketplace_offer
class PrivateAccess(ProxyOnlyResource):
"""Description of the parameters of Private Access for a Web Site.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param enabled: Whether private access is enabled or not.
:type enabled: bool
:param virtual_networks: The Virtual Networks (and subnets) allowed to access the site
privately.
:type virtual_networks: list[~azure.mgmt.web.v2020_06_01.models.PrivateAccessVirtualNetwork]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
'virtual_networks': {'key': 'properties.virtualNetworks', 'type': '[PrivateAccessVirtualNetwork]'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
enabled: Optional[bool] = None,
virtual_networks: Optional[List["PrivateAccessVirtualNetwork"]] = None,
**kwargs
):
super(PrivateAccess, self).__init__(kind=kind, **kwargs)
self.enabled = enabled
self.virtual_networks = virtual_networks
class PrivateAccessSubnet(msrest.serialization.Model):
"""Description of a Virtual Network subnet that is useable for private site access.
:param name: The name of the subnet.
:type name: str
:param key: The key (ID) of the subnet.
:type key: int
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'key': {'key': 'key', 'type': 'int'},
}
def __init__(
self,
*,
name: Optional[str] = None,
key: Optional[int] = None,
**kwargs
):
super(PrivateAccessSubnet, self).__init__(**kwargs)
self.name = name
self.key = key
class PrivateAccessVirtualNetwork(msrest.serialization.Model):
"""Description of a Virtual Network that is useable for private site access.
:param name: The name of the Virtual Network.
:type name: str
:param key: The key (ID) of the Virtual Network.
:type key: int
    :param resource_id: The ARM URI of the Virtual Network.
:type resource_id: str
    :param subnets: A list of subnets on this Virtual Network to which access is allowed. An empty
     array (but not null) is interpreted to mean that all subnets within this Virtual Network are
     allowed.
:type subnets: list[~azure.mgmt.web.v2020_06_01.models.PrivateAccessSubnet]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'key': {'key': 'key', 'type': 'int'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'subnets': {'key': 'subnets', 'type': '[PrivateAccessSubnet]'},
}
def __init__(
self,
*,
name: Optional[str] = None,
key: Optional[int] = None,
resource_id: Optional[str] = None,
subnets: Optional[List["PrivateAccessSubnet"]] = None,
**kwargs
):
super(PrivateAccessVirtualNetwork, self).__init__(**kwargs)
self.name = name
self.key = key
self.resource_id = resource_id
self.subnets = subnets
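# Illustrative sketch: composing PrivateAccess from virtual networks and subnets.
# Per the docstring above, an empty (non-null) subnet list means all subnets of
# that Virtual Network are allowed. Names, keys and the resource ID are examples.
#
#   vnet = PrivateAccessVirtualNetwork(
#       name="vnet-example",
#       key=1,
#       resource_id="/subscriptions/.../virtualNetworks/vnet-example",
#       subnets=[PrivateAccessSubnet(name="web", key=1)],
#   )
#   private_access = PrivateAccess(enabled=True, virtual_networks=[vnet])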
class PrivateEndpointConnectionResource(ProxyOnlyResource):
"""Private Endpoint Connection ARM resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar provisioning_state:
:vartype provisioning_state: str
:param private_endpoint: PrivateEndpoint of a remote private endpoint connection.
:type private_endpoint: ~azure.mgmt.web.v2020_06_01.models.ArmIdWrapper
:param private_link_service_connection_state: The state of a private link connection.
:type private_link_service_connection_state:
~azure.mgmt.web.v2020_06_01.models.PrivateLinkConnectionState
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'private_endpoint': {'key': 'properties.privateEndpoint', 'type': 'ArmIdWrapper'},
'private_link_service_connection_state': {'key': 'properties.privateLinkServiceConnectionState', 'type': 'PrivateLinkConnectionState'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
private_endpoint: Optional["ArmIdWrapper"] = None,
private_link_service_connection_state: Optional["PrivateLinkConnectionState"] = None,
**kwargs
):
super(PrivateEndpointConnectionResource, self).__init__(kind=kind, **kwargs)
self.provisioning_state = None
self.private_endpoint = private_endpoint
self.private_link_service_connection_state = private_link_service_connection_state
class PrivateLinkConnectionApprovalRequestResource(ProxyOnlyResource):
"""Private Endpoint Connection Approval ARM resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param private_link_service_connection_state: The state of a private link connection.
:type private_link_service_connection_state:
~azure.mgmt.web.v2020_06_01.models.PrivateLinkConnectionState
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'private_link_service_connection_state': {'key': 'properties.privateLinkServiceConnectionState', 'type': 'PrivateLinkConnectionState'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
private_link_service_connection_state: Optional["PrivateLinkConnectionState"] = None,
**kwargs
):
super(PrivateLinkConnectionApprovalRequestResource, self).__init__(kind=kind, **kwargs)
self.private_link_service_connection_state = private_link_service_connection_state
class PrivateLinkConnectionState(msrest.serialization.Model):
"""The state of a private link connection.
:param status: Status of a private link connection.
:type status: str
:param description: Description of a private link connection.
:type description: str
:param actions_required: ActionsRequired for a private link connection.
:type actions_required: str
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'actions_required': {'key': 'actionsRequired', 'type': 'str'},
}
def __init__(
self,
*,
status: Optional[str] = None,
description: Optional[str] = None,
actions_required: Optional[str] = None,
**kwargs
):
super(PrivateLinkConnectionState, self).__init__(**kwargs)
self.status = status
self.description = description
self.actions_required = actions_required
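# Illustrative sketch: approving (or rejecting) a private endpoint connection is
# expressed by sending a PrivateLinkConnectionApprovalRequestResource whose state
# carries the desired status. The status string and description are examples.
#
#   approval = PrivateLinkConnectionApprovalRequestResource(
#       private_link_service_connection_state=PrivateLinkConnectionState(
#           status="Approved",
#           description="Approved by network admin",
#       ),
#   )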
class PrivateLinkResource(msrest.serialization.Model):
"""A private link resource.
All required parameters must be populated in order to send to Azure.
:param id: Required.
:type id: str
:param name: Required. Name of a private link resource.
:type name: str
:param type: Required.
:type type: str
:param properties: Required. Properties of a private link resource.
:type properties: ~azure.mgmt.web.v2020_06_01.models.PrivateLinkResourceProperties
"""
_validation = {
'id': {'required': True},
'name': {'required': True},
'type': {'required': True},
'properties': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'PrivateLinkResourceProperties'},
}
def __init__(
self,
*,
id: str,
name: str,
type: str,
properties: "PrivateLinkResourceProperties",
**kwargs
):
super(PrivateLinkResource, self).__init__(**kwargs)
self.id = id
self.name = name
self.type = type
self.properties = properties
class PrivateLinkResourceProperties(msrest.serialization.Model):
"""Properties of a private link resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar group_id: GroupId of a private link resource.
:vartype group_id: str
:ivar required_members: RequiredMembers of a private link resource.
:vartype required_members: list[str]
:ivar required_zone_names: RequiredZoneNames of a private link resource.
:vartype required_zone_names: list[str]
"""
_validation = {
'group_id': {'readonly': True},
'required_members': {'readonly': True},
'required_zone_names': {'readonly': True},
}
_attribute_map = {
'group_id': {'key': 'groupId', 'type': 'str'},
'required_members': {'key': 'requiredMembers', 'type': '[str]'},
'required_zone_names': {'key': 'requiredZoneNames', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(PrivateLinkResourceProperties, self).__init__(**kwargs)
self.group_id = None
self.required_members = None
self.required_zone_names = None
class PrivateLinkResourcesWrapper(msrest.serialization.Model):
"""Wrapper for a collection of private link resources.
All required parameters must be populated in order to send to Azure.
:param value: Required.
:type value: list[~azure.mgmt.web.v2020_06_01.models.PrivateLinkResource]
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[PrivateLinkResource]'},
}
def __init__(
self,
*,
value: List["PrivateLinkResource"],
**kwargs
):
super(PrivateLinkResourcesWrapper, self).__init__(**kwargs)
self.value = value
class ProcessInfo(ProxyOnlyResource):
"""Process Information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar identifier: ARM Identifier for deployment.
:vartype identifier: int
:param deployment_name: Deployment name.
:type deployment_name: str
:param href: HRef URI.
:type href: str
:param minidump: Minidump URI.
:type minidump: str
    :param is_profile_running: Is a profiler running?
    :type is_profile_running: bool
    :param is_iis_profile_running: Is the IIS profiler running?
:type is_iis_profile_running: bool
:param iis_profile_timeout_in_seconds: IIS Profile timeout (seconds).
:type iis_profile_timeout_in_seconds: float
:param parent: Parent process.
:type parent: str
:param children: Child process list.
:type children: list[str]
:param threads: Thread list.
:type threads: list[~azure.mgmt.web.v2020_06_01.models.ProcessThreadInfo]
:param open_file_handles: List of open files.
:type open_file_handles: list[str]
:param modules: List of modules.
:type modules: list[~azure.mgmt.web.v2020_06_01.models.ProcessModuleInfo]
:param file_name: File name of this process.
:type file_name: str
:param command_line: Command line.
:type command_line: str
:param user_name: User name.
:type user_name: str
:param handle_count: Handle count.
:type handle_count: int
:param module_count: Module count.
:type module_count: int
:param thread_count: Thread count.
:type thread_count: int
:param start_time: Start time.
:type start_time: ~datetime.datetime
:param total_cpu_time: Total CPU time.
:type total_cpu_time: str
:param user_cpu_time: User CPU time.
:type user_cpu_time: str
:param privileged_cpu_time: Privileged CPU time.
:type privileged_cpu_time: str
:param working_set: Working set.
:type working_set: long
:param peak_working_set: Peak working set.
:type peak_working_set: long
:param private_memory: Private memory size.
:type private_memory: long
:param virtual_memory: Virtual memory size.
:type virtual_memory: long
:param peak_virtual_memory: Peak virtual memory usage.
:type peak_virtual_memory: long
:param paged_system_memory: Paged system memory.
:type paged_system_memory: long
:param non_paged_system_memory: Non-paged system memory.
:type non_paged_system_memory: long
:param paged_memory: Paged memory.
:type paged_memory: long
:param peak_paged_memory: Peak paged memory.
:type peak_paged_memory: long
:param time_stamp: Time stamp.
:type time_stamp: ~datetime.datetime
:param environment_variables: List of environment variables.
:type environment_variables: dict[str, str]
    :param is_scm_site: Is this the SCM site?
    :type is_scm_site: bool
    :param is_webjob: Is this a WebJob?
:type is_webjob: bool
:param description: Description of process.
:type description: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'identifier': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'identifier': {'key': 'properties.identifier', 'type': 'int'},
'deployment_name': {'key': 'properties.deployment_name', 'type': 'str'},
'href': {'key': 'properties.href', 'type': 'str'},
'minidump': {'key': 'properties.minidump', 'type': 'str'},
'is_profile_running': {'key': 'properties.is_profile_running', 'type': 'bool'},
'is_iis_profile_running': {'key': 'properties.is_iis_profile_running', 'type': 'bool'},
'iis_profile_timeout_in_seconds': {'key': 'properties.iis_profile_timeout_in_seconds', 'type': 'float'},
'parent': {'key': 'properties.parent', 'type': 'str'},
'children': {'key': 'properties.children', 'type': '[str]'},
'threads': {'key': 'properties.threads', 'type': '[ProcessThreadInfo]'},
'open_file_handles': {'key': 'properties.open_file_handles', 'type': '[str]'},
'modules': {'key': 'properties.modules', 'type': '[ProcessModuleInfo]'},
'file_name': {'key': 'properties.file_name', 'type': 'str'},
'command_line': {'key': 'properties.command_line', 'type': 'str'},
'user_name': {'key': 'properties.user_name', 'type': 'str'},
'handle_count': {'key': 'properties.handle_count', 'type': 'int'},
'module_count': {'key': 'properties.module_count', 'type': 'int'},
'thread_count': {'key': 'properties.thread_count', 'type': 'int'},
'start_time': {'key': 'properties.start_time', 'type': 'iso-8601'},
'total_cpu_time': {'key': 'properties.total_cpu_time', 'type': 'str'},
'user_cpu_time': {'key': 'properties.user_cpu_time', 'type': 'str'},
'privileged_cpu_time': {'key': 'properties.privileged_cpu_time', 'type': 'str'},
'working_set': {'key': 'properties.working_set', 'type': 'long'},
'peak_working_set': {'key': 'properties.peak_working_set', 'type': 'long'},
'private_memory': {'key': 'properties.private_memory', 'type': 'long'},
'virtual_memory': {'key': 'properties.virtual_memory', 'type': 'long'},
'peak_virtual_memory': {'key': 'properties.peak_virtual_memory', 'type': 'long'},
'paged_system_memory': {'key': 'properties.paged_system_memory', 'type': 'long'},
'non_paged_system_memory': {'key': 'properties.non_paged_system_memory', 'type': 'long'},
'paged_memory': {'key': 'properties.paged_memory', 'type': 'long'},
'peak_paged_memory': {'key': 'properties.peak_paged_memory', 'type': 'long'},
'time_stamp': {'key': 'properties.time_stamp', 'type': 'iso-8601'},
'environment_variables': {'key': 'properties.environment_variables', 'type': '{str}'},
'is_scm_site': {'key': 'properties.is_scm_site', 'type': 'bool'},
'is_webjob': {'key': 'properties.is_webjob', 'type': 'bool'},
'description': {'key': 'properties.description', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
deployment_name: Optional[str] = None,
href: Optional[str] = None,
minidump: Optional[str] = None,
is_profile_running: Optional[bool] = None,
is_iis_profile_running: Optional[bool] = None,
iis_profile_timeout_in_seconds: Optional[float] = None,
parent: Optional[str] = None,
children: Optional[List[str]] = None,
threads: Optional[List["ProcessThreadInfo"]] = None,
open_file_handles: Optional[List[str]] = None,
modules: Optional[List["ProcessModuleInfo"]] = None,
file_name: Optional[str] = None,
command_line: Optional[str] = None,
user_name: Optional[str] = None,
handle_count: Optional[int] = None,
module_count: Optional[int] = None,
thread_count: Optional[int] = None,
start_time: Optional[datetime.datetime] = None,
total_cpu_time: Optional[str] = None,
user_cpu_time: Optional[str] = None,
privileged_cpu_time: Optional[str] = None,
working_set: Optional[int] = None,
peak_working_set: Optional[int] = None,
private_memory: Optional[int] = None,
virtual_memory: Optional[int] = None,
peak_virtual_memory: Optional[int] = None,
paged_system_memory: Optional[int] = None,
non_paged_system_memory: Optional[int] = None,
paged_memory: Optional[int] = None,
peak_paged_memory: Optional[int] = None,
time_stamp: Optional[datetime.datetime] = None,
environment_variables: Optional[Dict[str, str]] = None,
is_scm_site: Optional[bool] = None,
is_webjob: Optional[bool] = None,
description: Optional[str] = None,
**kwargs
):
super(ProcessInfo, self).__init__(kind=kind, **kwargs)
self.identifier = None
self.deployment_name = deployment_name
self.href = href
self.minidump = minidump
self.is_profile_running = is_profile_running
self.is_iis_profile_running = is_iis_profile_running
self.iis_profile_timeout_in_seconds = iis_profile_timeout_in_seconds
self.parent = parent
self.children = children
self.threads = threads
self.open_file_handles = open_file_handles
self.modules = modules
self.file_name = file_name
self.command_line = command_line
self.user_name = user_name
self.handle_count = handle_count
self.module_count = module_count
self.thread_count = thread_count
self.start_time = start_time
self.total_cpu_time = total_cpu_time
self.user_cpu_time = user_cpu_time
self.privileged_cpu_time = privileged_cpu_time
self.working_set = working_set
self.peak_working_set = peak_working_set
self.private_memory = private_memory
self.virtual_memory = virtual_memory
self.peak_virtual_memory = peak_virtual_memory
self.paged_system_memory = paged_system_memory
self.non_paged_system_memory = non_paged_system_memory
self.paged_memory = paged_memory
self.peak_paged_memory = peak_paged_memory
self.time_stamp = time_stamp
self.environment_variables = environment_variables
self.is_scm_site = is_scm_site
self.is_webjob = is_webjob
self.description = description
class ProcessInfoCollection(msrest.serialization.Model):
"""Collection of Kudu process information elements.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.ProcessInfo]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[ProcessInfo]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["ProcessInfo"],
**kwargs
):
super(ProcessInfoCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class ProcessModuleInfo(ProxyOnlyResource):
"""Process Module Information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param base_address: Base address. Used as module identifier in ARM resource URI.
:type base_address: str
:param file_name: File name.
:type file_name: str
:param href: HRef URI.
:type href: str
:param file_path: File path.
:type file_path: str
:param module_memory_size: Module memory size.
:type module_memory_size: int
:param file_version: File version.
:type file_version: str
:param file_description: File description.
:type file_description: str
:param product: Product name.
:type product: str
:param product_version: Product version.
:type product_version: str
    :param is_debug: Is the module a debug build?
:type is_debug: bool
:param language: Module language (locale).
:type language: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'base_address': {'key': 'properties.base_address', 'type': 'str'},
'file_name': {'key': 'properties.file_name', 'type': 'str'},
'href': {'key': 'properties.href', 'type': 'str'},
'file_path': {'key': 'properties.file_path', 'type': 'str'},
'module_memory_size': {'key': 'properties.module_memory_size', 'type': 'int'},
'file_version': {'key': 'properties.file_version', 'type': 'str'},
'file_description': {'key': 'properties.file_description', 'type': 'str'},
'product': {'key': 'properties.product', 'type': 'str'},
'product_version': {'key': 'properties.product_version', 'type': 'str'},
'is_debug': {'key': 'properties.is_debug', 'type': 'bool'},
'language': {'key': 'properties.language', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
base_address: Optional[str] = None,
file_name: Optional[str] = None,
href: Optional[str] = None,
file_path: Optional[str] = None,
module_memory_size: Optional[int] = None,
file_version: Optional[str] = None,
file_description: Optional[str] = None,
product: Optional[str] = None,
product_version: Optional[str] = None,
is_debug: Optional[bool] = None,
language: Optional[str] = None,
**kwargs
):
super(ProcessModuleInfo, self).__init__(kind=kind, **kwargs)
self.base_address = base_address
self.file_name = file_name
self.href = href
self.file_path = file_path
self.module_memory_size = module_memory_size
self.file_version = file_version
self.file_description = file_description
self.product = product
self.product_version = product_version
self.is_debug = is_debug
self.language = language
class ProcessModuleInfoCollection(msrest.serialization.Model):
"""Collection of Kudu thread information elements.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.ProcessModuleInfo]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[ProcessModuleInfo]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["ProcessModuleInfo"],
**kwargs
):
super(ProcessModuleInfoCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class ProcessThreadInfo(ProxyOnlyResource):
"""Process Thread Information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
    :ivar identifier: Thread identifier.
:vartype identifier: int
:param href: HRef URI.
:type href: str
:param process: Process URI.
:type process: str
:param start_address: Start address.
:type start_address: str
:param current_priority: Current thread priority.
:type current_priority: int
:param priority_level: Thread priority level.
:type priority_level: str
:param base_priority: Base priority.
:type base_priority: int
:param start_time: Start time.
:type start_time: ~datetime.datetime
:param total_processor_time: Total processor time.
:type total_processor_time: str
:param user_processor_time: User processor time.
:type user_processor_time: str
:param state: Thread state.
:type state: str
:param wait_reason: Wait reason.
:type wait_reason: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'identifier': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'identifier': {'key': 'properties.identifier', 'type': 'int'},
'href': {'key': 'properties.href', 'type': 'str'},
'process': {'key': 'properties.process', 'type': 'str'},
'start_address': {'key': 'properties.start_address', 'type': 'str'},
'current_priority': {'key': 'properties.current_priority', 'type': 'int'},
'priority_level': {'key': 'properties.priority_level', 'type': 'str'},
'base_priority': {'key': 'properties.base_priority', 'type': 'int'},
'start_time': {'key': 'properties.start_time', 'type': 'iso-8601'},
'total_processor_time': {'key': 'properties.total_processor_time', 'type': 'str'},
'user_processor_time': {'key': 'properties.user_processor_time', 'type': 'str'},
'state': {'key': 'properties.state', 'type': 'str'},
'wait_reason': {'key': 'properties.wait_reason', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
href: Optional[str] = None,
process: Optional[str] = None,
start_address: Optional[str] = None,
current_priority: Optional[int] = None,
priority_level: Optional[str] = None,
base_priority: Optional[int] = None,
start_time: Optional[datetime.datetime] = None,
total_processor_time: Optional[str] = None,
user_processor_time: Optional[str] = None,
state: Optional[str] = None,
wait_reason: Optional[str] = None,
**kwargs
):
super(ProcessThreadInfo, self).__init__(kind=kind, **kwargs)
self.identifier = None
self.href = href
self.process = process
self.start_address = start_address
self.current_priority = current_priority
self.priority_level = priority_level
self.base_priority = base_priority
self.start_time = start_time
self.total_processor_time = total_processor_time
self.user_processor_time = user_processor_time
self.state = state
self.wait_reason = wait_reason
class ProcessThreadInfoCollection(msrest.serialization.Model):
"""Collection of Kudu thread information elements.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.ProcessThreadInfo]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[ProcessThreadInfo]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["ProcessThreadInfo"],
**kwargs
):
super(ProcessThreadInfoCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class PublicCertificate(ProxyOnlyResource):
"""Public certificate object.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param blob: Public Certificate byte array.
:type blob: bytearray
:param public_certificate_location: Public Certificate Location. Possible values include:
"CurrentUserMy", "LocalMachineMy", "Unknown".
:type public_certificate_location: str or
~azure.mgmt.web.v2020_06_01.models.PublicCertificateLocation
:ivar thumbprint: Certificate Thumbprint.
:vartype thumbprint: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'thumbprint': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'blob': {'key': 'properties.blob', 'type': 'bytearray'},
'public_certificate_location': {'key': 'properties.publicCertificateLocation', 'type': 'str'},
'thumbprint': {'key': 'properties.thumbprint', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
blob: Optional[bytearray] = None,
public_certificate_location: Optional[Union[str, "PublicCertificateLocation"]] = None,
**kwargs
):
super(PublicCertificate, self).__init__(kind=kind, **kwargs)
self.blob = blob
self.public_certificate_location = public_certificate_location
self.thumbprint = None
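
# Illustrative sketch, not part of the generated model definitions: one way a
# PublicCertificate payload might be built from a local certificate file. The
# helper name and the idea of reading a .cer file from disk are assumptions for
# this example only.
def _example_public_certificate(cer_path: str) -> "PublicCertificate":
    # Read the raw certificate bytes; the model expects a bytearray blob.
    with open(cer_path, "rb") as cert_file:
        blob = bytearray(cert_file.read())
    return PublicCertificate(
        blob=blob,
        public_certificate_location="CurrentUserMy",  # one of the documented location values
    )
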
class PublicCertificateCollection(msrest.serialization.Model):
"""Collection of public certificates.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.PublicCertificate]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[PublicCertificate]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["PublicCertificate"],
**kwargs
):
super(PublicCertificateCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class PushSettings(ProxyOnlyResource):
"""Push settings for the App.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param is_push_enabled: Gets or sets a flag indicating whether the Push endpoint is enabled.
:type is_push_enabled: bool
:param tag_whitelist_json: Gets or sets a JSON string containing a list of tags that are
whitelisted for use by the push registration endpoint.
:type tag_whitelist_json: str
:param tags_requiring_auth: Gets or sets a JSON string containing a list of tags that require
user authentication to be used in the push registration endpoint.
Tags can consist of alphanumeric characters and the following:
'_', '@', '#', '.', ':', '-'.
Validation should be performed at the PushRequestHandler.
:type tags_requiring_auth: str
:param dynamic_tags_json: Gets or sets a JSON string containing a list of dynamic tags that
will be evaluated from user claims in the push registration endpoint.
:type dynamic_tags_json: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'is_push_enabled': {'key': 'properties.isPushEnabled', 'type': 'bool'},
'tag_whitelist_json': {'key': 'properties.tagWhitelistJson', 'type': 'str'},
'tags_requiring_auth': {'key': 'properties.tagsRequiringAuth', 'type': 'str'},
'dynamic_tags_json': {'key': 'properties.dynamicTagsJson', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
is_push_enabled: Optional[bool] = None,
tag_whitelist_json: Optional[str] = None,
tags_requiring_auth: Optional[str] = None,
dynamic_tags_json: Optional[str] = None,
**kwargs
):
super(PushSettings, self).__init__(kind=kind, **kwargs)
self.is_push_enabled = is_push_enabled
self.tag_whitelist_json = tag_whitelist_json
self.tags_requiring_auth = tags_requiring_auth
self.dynamic_tags_json = dynamic_tags_json
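
# Illustrative sketch, not part of the generated model definitions: how the
# JSON-string tag settings on PushSettings might be populated. The tag names
# are assumptions chosen for the example.
def _example_push_settings() -> "PushSettings":
    return PushSettings(
        is_push_enabled=True,
        # Note these are JSON strings, not Python lists.
        tag_whitelist_json='["news", "sports"]',
        tags_requiring_auth='["user:updates"]',
    )
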
class RampUpRule(msrest.serialization.Model):
"""Routing rules for ramp up testing. This rule allows to redirect static traffic % to a slot or to gradually change routing % based on performance.
:param action_host_name: Hostname of a slot to which the traffic will be redirected if decided
to. E.g. myapp-stage.azurewebsites.net.
:type action_host_name: str
:param reroute_percentage: Percentage of the traffic which will be redirected to
:code:`<code>ActionHostName</code>`.
:type reroute_percentage: float
:param change_step: In an auto ramp-up scenario, this is the step added to or removed from
:code:`<code>ReroutePercentage</code>` until it reaches
:code:`<code>MinReroutePercentage</code>` or
:code:`<code>MaxReroutePercentage</code>`. Site metrics are checked every N minutes, as
specified in :code:`<code>ChangeIntervalInMinutes</code>`. A custom decision algorithm
can be provided in the TiPCallback site extension, whose URL can be specified in
:code:`<code>ChangeDecisionCallbackUrl</code>`.
:type change_step: float
:param change_interval_in_minutes: Specifies interval in minutes to reevaluate
ReroutePercentage.
:type change_interval_in_minutes: int
:param min_reroute_percentage: Specifies lower boundary above which ReroutePercentage will
stay.
:type min_reroute_percentage: float
:param max_reroute_percentage: Specifies upper boundary below which ReroutePercentage will
stay.
:type max_reroute_percentage: float
:param change_decision_callback_url: A custom decision algorithm can be provided in the
TiPCallback site extension, whose URL can be specified here. See the TiPCallback site extension
for the scaffold and contracts.
https://www.siteextensions.net/packages/TiPCallback/.
:type change_decision_callback_url: str
:param name: Name of the routing rule. The recommended name is the name of the slot that
will receive the traffic in the experiment.
:type name: str
"""
_attribute_map = {
'action_host_name': {'key': 'actionHostName', 'type': 'str'},
'reroute_percentage': {'key': 'reroutePercentage', 'type': 'float'},
'change_step': {'key': 'changeStep', 'type': 'float'},
'change_interval_in_minutes': {'key': 'changeIntervalInMinutes', 'type': 'int'},
'min_reroute_percentage': {'key': 'minReroutePercentage', 'type': 'float'},
'max_reroute_percentage': {'key': 'maxReroutePercentage', 'type': 'float'},
'change_decision_callback_url': {'key': 'changeDecisionCallbackUrl', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
action_host_name: Optional[str] = None,
reroute_percentage: Optional[float] = None,
change_step: Optional[float] = None,
change_interval_in_minutes: Optional[int] = None,
min_reroute_percentage: Optional[float] = None,
max_reroute_percentage: Optional[float] = None,
change_decision_callback_url: Optional[str] = None,
name: Optional[str] = None,
**kwargs
):
super(RampUpRule, self).__init__(**kwargs)
self.action_host_name = action_host_name
self.reroute_percentage = reroute_percentage
self.change_step = change_step
self.change_interval_in_minutes = change_interval_in_minutes
self.min_reroute_percentage = min_reroute_percentage
self.max_reroute_percentage = max_reroute_percentage
self.change_decision_callback_url = change_decision_callback_url
self.name = name
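
# Illustrative sketch, not part of the generated model definitions: a ramp-up
# rule for a gradual slot-traffic experiment. The hostname, percentages, and
# interval are assumptions chosen to show how the fields relate.
def _example_ramp_up_rule() -> "RampUpRule":
    return RampUpRule(
        action_host_name="myapp-stage.azurewebsites.net",  # slot that receives rerouted traffic
        reroute_percentage=10.0,        # start by sending 10% of traffic to the slot
        change_step=5.0,                # step added to ReroutePercentage at each evaluation
        change_interval_in_minutes=10,  # re-evaluate site metrics every 10 minutes
        min_reroute_percentage=5.0,
        max_reroute_percentage=50.0,
        name="myapp-stage",             # by convention, named after the target slot
    )
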
class Recommendation(ProxyOnlyResource):
"""Represents a recommendation result generated by the recommendation engine.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param creation_time: Timestamp when this instance was created.
:type creation_time: ~datetime.datetime
:param recommendation_id: A GUID value that each recommendation object is associated with.
:type recommendation_id: str
:param resource_id: Full ARM resource ID string that this recommendation object is associated
with.
:type resource_id: str
:param resource_scope: Name of a resource type this recommendation applies to, e.g. Subscription,
ServerFarm, Site. Possible values include: "ServerFarm", "Subscription", "WebSite".
:type resource_scope: str or ~azure.mgmt.web.v2020_06_01.models.ResourceScopeType
:param rule_name: Unique name of the rule.
:type rule_name: str
:param display_name: UI friendly name of the rule (may not be unique).
:type display_name: str
:param message: Recommendation text.
:type message: str
:param level: Level indicating how critical this recommendation can impact. Possible values
include: "Critical", "Warning", "Information", "NonUrgentSuggestion".
:type level: str or ~azure.mgmt.web.v2020_06_01.models.NotificationLevel
:param channels: List of channels that this recommendation can apply. Possible values include:
"Notification", "Api", "Email", "Webhook", "All".
:type channels: str or ~azure.mgmt.web.v2020_06_01.models.Channels
:ivar category_tags: The list of category tags that this recommendation belongs to.
:vartype category_tags: list[str]
:param action_name: Name of action recommended by this object.
:type action_name: str
:param enabled: True if this recommendation is still valid (i.e. "actionable"). False if it is
invalid.
:type enabled: int
:param states: The list of states of this recommendation. If it's null then it should be
considered "Active".
:type states: list[str]
:param start_time: The beginning time in UTC of a range that the recommendation refers to.
:type start_time: ~datetime.datetime
:param end_time: The end time in UTC of a range that the recommendation refers to.
:type end_time: ~datetime.datetime
:param next_notification_time: When this recommendation will next be notified, in UTC. Null
means that it will never be notified again.
:type next_notification_time: ~datetime.datetime
:param notification_expiration_time: Date and time in UTC when this notification expires.
:type notification_expiration_time: ~datetime.datetime
:param notified_time: Last timestamp in UTC this instance was actually notified. Null means
that this recommendation hasn't been notified yet.
:type notified_time: ~datetime.datetime
:param score: A metric value measured by the rule.
:type score: float
:param is_dynamic: True if this is associated with a dynamically added rule.
:type is_dynamic: bool
:param extension_name: Extension name of the portal, if it exists.
:type extension_name: str
:param blade_name: Deep link to a blade on the portal.
:type blade_name: str
:param forward_link: Forward link to an external document associated with the rule.
:type forward_link: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'category_tags': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'creation_time': {'key': 'properties.creationTime', 'type': 'iso-8601'},
'recommendation_id': {'key': 'properties.recommendationId', 'type': 'str'},
'resource_id': {'key': 'properties.resourceId', 'type': 'str'},
'resource_scope': {'key': 'properties.resourceScope', 'type': 'str'},
'rule_name': {'key': 'properties.ruleName', 'type': 'str'},
'display_name': {'key': 'properties.displayName', 'type': 'str'},
'message': {'key': 'properties.message', 'type': 'str'},
'level': {'key': 'properties.level', 'type': 'str'},
'channels': {'key': 'properties.channels', 'type': 'str'},
'category_tags': {'key': 'properties.categoryTags', 'type': '[str]'},
'action_name': {'key': 'properties.actionName', 'type': 'str'},
'enabled': {'key': 'properties.enabled', 'type': 'int'},
'states': {'key': 'properties.states', 'type': '[str]'},
'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},
'end_time': {'key': 'properties.endTime', 'type': 'iso-8601'},
'next_notification_time': {'key': 'properties.nextNotificationTime', 'type': 'iso-8601'},
'notification_expiration_time': {'key': 'properties.notificationExpirationTime', 'type': 'iso-8601'},
'notified_time': {'key': 'properties.notifiedTime', 'type': 'iso-8601'},
'score': {'key': 'properties.score', 'type': 'float'},
'is_dynamic': {'key': 'properties.isDynamic', 'type': 'bool'},
'extension_name': {'key': 'properties.extensionName', 'type': 'str'},
'blade_name': {'key': 'properties.bladeName', 'type': 'str'},
'forward_link': {'key': 'properties.forwardLink', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
creation_time: Optional[datetime.datetime] = None,
recommendation_id: Optional[str] = None,
resource_id: Optional[str] = None,
resource_scope: Optional[Union[str, "ResourceScopeType"]] = None,
rule_name: Optional[str] = None,
display_name: Optional[str] = None,
message: Optional[str] = None,
level: Optional[Union[str, "NotificationLevel"]] = None,
channels: Optional[Union[str, "Channels"]] = None,
action_name: Optional[str] = None,
enabled: Optional[int] = None,
states: Optional[List[str]] = None,
start_time: Optional[datetime.datetime] = None,
end_time: Optional[datetime.datetime] = None,
next_notification_time: Optional[datetime.datetime] = None,
notification_expiration_time: Optional[datetime.datetime] = None,
notified_time: Optional[datetime.datetime] = None,
score: Optional[float] = None,
is_dynamic: Optional[bool] = None,
extension_name: Optional[str] = None,
blade_name: Optional[str] = None,
forward_link: Optional[str] = None,
**kwargs
):
super(Recommendation, self).__init__(kind=kind, **kwargs)
self.creation_time = creation_time
self.recommendation_id = recommendation_id
self.resource_id = resource_id
self.resource_scope = resource_scope
self.rule_name = rule_name
self.display_name = display_name
self.message = message
self.level = level
self.channels = channels
self.category_tags = None
self.action_name = action_name
self.enabled = enabled
self.states = states
self.start_time = start_time
self.end_time = end_time
self.next_notification_time = next_notification_time
self.notification_expiration_time = notification_expiration_time
self.notified_time = notified_time
self.score = score
self.is_dynamic = is_dynamic
self.extension_name = extension_name
self.blade_name = blade_name
self.forward_link = forward_link
class RecommendationCollection(msrest.serialization.Model):
"""Collection of recommendations.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.Recommendation]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Recommendation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["Recommendation"],
**kwargs
):
super(RecommendationCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class RecommendationRule(ProxyOnlyResource):
"""Represents a recommendation rule that the recommendation engine can perform.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param recommendation_name: Unique name of the rule.
:type recommendation_name: str
:param display_name: UI friendly name of the rule.
:type display_name: str
:param message: Localized name of the rule (Good for UI).
:type message: str
:param recommendation_id: Recommendation ID of an associated recommendation object tied to the
rule, if one exists.
If such an object doesn't exist, it is set to null.
:type recommendation_id: str
:param description: Localized detailed description of the rule.
:type description: str
:param action_name: Name of action that is recommended by this rule in string.
:type action_name: str
:param level: Level of impact indicating how critical this rule is. Possible values include:
"Critical", "Warning", "Information", "NonUrgentSuggestion".
:type level: str or ~azure.mgmt.web.v2020_06_01.models.NotificationLevel
:param channels: List of available channels that this rule applies. Possible values include:
"Notification", "Api", "Email", "Webhook", "All".
:type channels: str or ~azure.mgmt.web.v2020_06_01.models.Channels
:ivar category_tags: The list of category tags that this recommendation rule belongs to.
:vartype category_tags: list[str]
:param is_dynamic: True if this is associated with a dynamically added rule.
:type is_dynamic: bool
:param extension_name: Extension name of the portal, if it exists. Applicable to dynamic rules only.
:type extension_name: str
:param blade_name: Deep link to a blade on the portal. Applicable to dynamic rule only.
:type blade_name: str
:param forward_link: Forward link to an external document associated with the rule. Applicable
to dynamic rule only.
:type forward_link: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'category_tags': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'recommendation_name': {'key': 'properties.recommendationName', 'type': 'str'},
'display_name': {'key': 'properties.displayName', 'type': 'str'},
'message': {'key': 'properties.message', 'type': 'str'},
'recommendation_id': {'key': 'properties.recommendationId', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'action_name': {'key': 'properties.actionName', 'type': 'str'},
'level': {'key': 'properties.level', 'type': 'str'},
'channels': {'key': 'properties.channels', 'type': 'str'},
'category_tags': {'key': 'properties.categoryTags', 'type': '[str]'},
'is_dynamic': {'key': 'properties.isDynamic', 'type': 'bool'},
'extension_name': {'key': 'properties.extensionName', 'type': 'str'},
'blade_name': {'key': 'properties.bladeName', 'type': 'str'},
'forward_link': {'key': 'properties.forwardLink', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
recommendation_name: Optional[str] = None,
display_name: Optional[str] = None,
message: Optional[str] = None,
recommendation_id: Optional[str] = None,
description: Optional[str] = None,
action_name: Optional[str] = None,
level: Optional[Union[str, "NotificationLevel"]] = None,
channels: Optional[Union[str, "Channels"]] = None,
is_dynamic: Optional[bool] = None,
extension_name: Optional[str] = None,
blade_name: Optional[str] = None,
forward_link: Optional[str] = None,
**kwargs
):
super(RecommendationRule, self).__init__(kind=kind, **kwargs)
self.recommendation_name = recommendation_name
self.display_name = display_name
self.message = message
self.recommendation_id = recommendation_id
self.description = description
self.action_name = action_name
self.level = level
self.channels = channels
self.category_tags = None
self.is_dynamic = is_dynamic
self.extension_name = extension_name
self.blade_name = blade_name
self.forward_link = forward_link
class ReissueCertificateOrderRequest(ProxyOnlyResource):
"""Class representing certificate reissue request.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param key_size: Certificate Key Size.
:type key_size: int
:param delay_existing_revoke_in_hours: Delay in hours to revoke existing certificate after the
new certificate is issued.
:type delay_existing_revoke_in_hours: int
:param csr: Csr to be used for re-key operation.
:type csr: str
:param is_private_key_external: Whether to change the ASC type (from managed private key to
external private key, or vice versa).
:type is_private_key_external: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'key_size': {'key': 'properties.keySize', 'type': 'int'},
'delay_existing_revoke_in_hours': {'key': 'properties.delayExistingRevokeInHours', 'type': 'int'},
'csr': {'key': 'properties.csr', 'type': 'str'},
'is_private_key_external': {'key': 'properties.isPrivateKeyExternal', 'type': 'bool'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
key_size: Optional[int] = None,
delay_existing_revoke_in_hours: Optional[int] = None,
csr: Optional[str] = None,
is_private_key_external: Optional[bool] = None,
**kwargs
):
super(ReissueCertificateOrderRequest, self).__init__(kind=kind, **kwargs)
self.key_size = key_size
self.delay_existing_revoke_in_hours = delay_existing_revoke_in_hours
self.csr = csr
self.is_private_key_external = is_private_key_external
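
# Illustrative sketch, not part of the generated model definitions: a reissue
# request that keeps the old certificate valid for a day after the new one is
# issued. The key size and delay are assumptions for the example.
def _example_reissue_request() -> "ReissueCertificateOrderRequest":
    return ReissueCertificateOrderRequest(
        key_size=2048,                      # RSA key size
        delay_existing_revoke_in_hours=24,  # revoke the existing certificate after 24 hours
        is_private_key_external=False,      # keep a managed private key
    )
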
class RelayServiceConnectionEntity(ProxyOnlyResource):
"""Hybrid Connection for an App Service app.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param entity_name:
:type entity_name: str
:param entity_connection_string:
:type entity_connection_string: str
:param resource_type:
:type resource_type: str
:param resource_connection_string:
:type resource_connection_string: str
:param hostname:
:type hostname: str
:param port:
:type port: int
:param biztalk_uri:
:type biztalk_uri: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'entity_name': {'key': 'properties.entityName', 'type': 'str'},
'entity_connection_string': {'key': 'properties.entityConnectionString', 'type': 'str'},
'resource_type': {'key': 'properties.resourceType', 'type': 'str'},
'resource_connection_string': {'key': 'properties.resourceConnectionString', 'type': 'str'},
'hostname': {'key': 'properties.hostname', 'type': 'str'},
'port': {'key': 'properties.port', 'type': 'int'},
'biztalk_uri': {'key': 'properties.biztalkUri', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
entity_name: Optional[str] = None,
entity_connection_string: Optional[str] = None,
resource_type: Optional[str] = None,
resource_connection_string: Optional[str] = None,
hostname: Optional[str] = None,
port: Optional[int] = None,
biztalk_uri: Optional[str] = None,
**kwargs
):
super(RelayServiceConnectionEntity, self).__init__(kind=kind, **kwargs)
self.entity_name = entity_name
self.entity_connection_string = entity_connection_string
self.resource_type = resource_type
self.resource_connection_string = resource_connection_string
self.hostname = hostname
self.port = port
self.biztalk_uri = biztalk_uri
class Rendering(msrest.serialization.Model):
"""Instructions for rendering the data.
:param type: Rendering Type. Possible values include: "NoGraph", "Table", "TimeSeries",
"TimeSeriesPerInstance".
:type type: str or ~azure.mgmt.web.v2020_06_01.models.RenderingType
:param title: Title of data.
:type title: str
:param description: Description of the data that will help it be interpreted.
:type description: str
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'title': {'key': 'title', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
*,
type: Optional[Union[str, "RenderingType"]] = None,
title: Optional[str] = None,
description: Optional[str] = None,
**kwargs
):
super(Rendering, self).__init__(**kwargs)
self.type = type
self.title = title
self.description = description
class RenewCertificateOrderRequest(ProxyOnlyResource):
"""Class representing certificate renew request.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param key_size: Certificate Key Size.
:type key_size: int
:param csr: Csr to be used for re-key operation.
:type csr: str
:param is_private_key_external: Whether to change the ASC type (from managed private key to
external private key, or vice versa).
:type is_private_key_external: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'key_size': {'key': 'properties.keySize', 'type': 'int'},
'csr': {'key': 'properties.csr', 'type': 'str'},
'is_private_key_external': {'key': 'properties.isPrivateKeyExternal', 'type': 'bool'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
key_size: Optional[int] = None,
csr: Optional[str] = None,
is_private_key_external: Optional[bool] = None,
**kwargs
):
super(RenewCertificateOrderRequest, self).__init__(kind=kind, **kwargs)
self.key_size = key_size
self.csr = csr
self.is_private_key_external = is_private_key_external
class RequestsBasedTrigger(msrest.serialization.Model):
"""Trigger based on total requests.
:param count: Request Count.
:type count: int
:param time_interval: Time interval.
:type time_interval: str
"""
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
'time_interval': {'key': 'timeInterval', 'type': 'str'},
}
def __init__(
self,
*,
count: Optional[int] = None,
time_interval: Optional[str] = None,
**kwargs
):
super(RequestsBasedTrigger, self).__init__(**kwargs)
self.count = count
self.time_interval = time_interval
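
# Illustrative sketch, not part of the generated model definitions: a trigger
# that fires after 1000 requests within a time window. The "hh:mm:ss" string
# format for time_interval is an assumption for the example.
def _example_requests_trigger() -> "RequestsBasedTrigger":
    return RequestsBasedTrigger(count=1000, time_interval="00:05:00")
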
class ResourceCollection(msrest.serialization.Model):
"""Collection of resources.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[str]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[str]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List[str],
**kwargs
):
super(ResourceCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class ResourceHealthMetadata(ProxyOnlyResource):
"""Used for getting ResourceHealthCheck settings.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param category: The category that the resource matches in the RHC Policy File.
:type category: str
:param signal_availability: Whether a health signal exists for the resource.
:type signal_availability: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'category': {'key': 'properties.category', 'type': 'str'},
'signal_availability': {'key': 'properties.signalAvailability', 'type': 'bool'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
category: Optional[str] = None,
signal_availability: Optional[bool] = None,
**kwargs
):
super(ResourceHealthMetadata, self).__init__(kind=kind, **kwargs)
self.category = category
self.signal_availability = signal_availability
class ResourceHealthMetadataCollection(msrest.serialization.Model):
"""Collection of resource health metadata.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.ResourceHealthMetadata]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[ResourceHealthMetadata]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["ResourceHealthMetadata"],
**kwargs
):
super(ResourceHealthMetadataCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class ResourceMetricAvailability(msrest.serialization.Model):
"""Metrics availability and retention.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar time_grain: Time grain.
:vartype time_grain: str
:ivar retention: Retention period for the current time grain.
:vartype retention: str
"""
_validation = {
'time_grain': {'readonly': True},
'retention': {'readonly': True},
}
_attribute_map = {
'time_grain': {'key': 'timeGrain', 'type': 'str'},
'retention': {'key': 'retention', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ResourceMetricAvailability, self).__init__(**kwargs)
self.time_grain = None
self.retention = None
class ResourceMetricDefinition(ProxyOnlyResource):
"""Metadata for the metrics.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar unit: Unit of the metric.
:vartype unit: str
:ivar primary_aggregation_type: Primary aggregation type.
:vartype primary_aggregation_type: str
:ivar metric_availabilities: List of time grains supported for the metric together with
retention period.
:vartype metric_availabilities:
list[~azure.mgmt.web.v2020_06_01.models.ResourceMetricAvailability]
:ivar resource_uri: Resource URI.
:vartype resource_uri: str
:ivar properties: Resource metric definition properties.
:vartype properties: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'unit': {'readonly': True},
'primary_aggregation_type': {'readonly': True},
'metric_availabilities': {'readonly': True},
'resource_uri': {'readonly': True},
'properties': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'unit': {'key': 'properties.unit', 'type': 'str'},
'primary_aggregation_type': {'key': 'properties.primaryAggregationType', 'type': 'str'},
'metric_availabilities': {'key': 'properties.metricAvailabilities', 'type': '[ResourceMetricAvailability]'},
'resource_uri': {'key': 'properties.resourceUri', 'type': 'str'},
'properties': {'key': 'properties.properties', 'type': '{str}'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(ResourceMetricDefinition, self).__init__(kind=kind, **kwargs)
self.unit = None
self.primary_aggregation_type = None
self.metric_availabilities = None
self.resource_uri = None
self.properties = None
class ResourceMetricDefinitionCollection(msrest.serialization.Model):
"""Collection of metric definitions.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.ResourceMetricDefinition]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[ResourceMetricDefinition]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["ResourceMetricDefinition"],
**kwargs
):
super(ResourceMetricDefinitionCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class ResourceNameAvailability(msrest.serialization.Model):
"""Information regarding availability of a resource name.
:param name_available: :code:`<code>true</code>` indicates name is valid and available.
:code:`<code>false</code>` indicates the name is invalid, unavailable, or both.
:type name_available: bool
:param reason: :code:`<code>Invalid</code>` indicates the name provided does not match Azure
App Service naming requirements. :code:`<code>AlreadyExists</code>` indicates that the name is
already in use and is therefore unavailable. Possible values include: "Invalid",
"AlreadyExists".
:type reason: str or ~azure.mgmt.web.v2020_06_01.models.InAvailabilityReasonType
:param message: If reason == Invalid, provides the user with the reason why the given name is
invalid and states the resource naming requirements so that the user can select a valid name.
If reason == AlreadyExists, explains that the resource name is already in use and directs the
user to select a different name.
:type message: str
"""
_attribute_map = {
'name_available': {'key': 'nameAvailable', 'type': 'bool'},
'reason': {'key': 'reason', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
name_available: Optional[bool] = None,
reason: Optional[Union[str, "InAvailabilityReasonType"]] = None,
message: Optional[str] = None,
**kwargs
):
super(ResourceNameAvailability, self).__init__(**kwargs)
self.name_available = name_available
self.reason = reason
self.message = message
class ResourceNameAvailabilityRequest(msrest.serialization.Model):
"""Resource name availability request content.
All required parameters must be populated in order to send to Azure.
:param name: Required. Resource name to verify.
:type name: str
:param type: Required. Resource type used for verification. Possible values include: "Site",
"Slot", "HostingEnvironment", "PublishingUser", "Microsoft.Web/sites",
"Microsoft.Web/sites/slots", "Microsoft.Web/hostingEnvironments",
"Microsoft.Web/publishingUsers".
:type type: str or ~azure.mgmt.web.v2020_06_01.models.CheckNameResourceTypes
:param is_fqdn: Is fully qualified domain name.
:type is_fqdn: bool
"""
_validation = {
'name': {'required': True},
'type': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'is_fqdn': {'key': 'isFqdn', 'type': 'bool'},
}
def __init__(
self,
*,
name: str,
type: Union[str, "CheckNameResourceTypes"],
is_fqdn: Optional[bool] = None,
**kwargs
):
super(ResourceNameAvailabilityRequest, self).__init__(**kwargs)
self.name = name
self.type = type
self.is_fqdn = is_fqdn
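
# Illustrative sketch, not part of the generated model definitions: building a
# name-availability check request and interpreting the corresponding
# ResourceNameAvailability response. The candidate name is an assumption.
def _example_name_check_request() -> "ResourceNameAvailabilityRequest":
    return ResourceNameAvailabilityRequest(
        name="my-unique-webapp-name",
        type="Microsoft.Web/sites",  # one of the documented CheckNameResourceTypes values
    )


def _describe_name_check(result: "ResourceNameAvailability") -> str:
    # Summarizes the service response for logging or display.
    if result.name_available:
        return "name is available"
    return "name unavailable ({}): {}".format(result.reason, result.message)
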
class ResponseMetaData(msrest.serialization.Model):
"""ResponseMetaData.
:param data_source: Source of the Data.
:type data_source: ~azure.mgmt.web.v2020_06_01.models.DataSource
"""
_attribute_map = {
'data_source': {'key': 'dataSource', 'type': 'DataSource'},
}
def __init__(
self,
*,
data_source: Optional["DataSource"] = None,
**kwargs
):
super(ResponseMetaData, self).__init__(**kwargs)
self.data_source = data_source
class RestoreRequest(ProxyOnlyResource):
"""Description of a restore request.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param storage_account_url: SAS URL to the container.
:type storage_account_url: str
:param blob_name: Name of a blob which contains the backup.
:type blob_name: str
:param overwrite: :code:`<code>true</code>` if the restore operation can overwrite target app;
otherwise, :code:`<code>false</code>`. :code:`<code>true</code>` is needed if trying to restore
over an existing app.
:type overwrite: bool
:param site_name: Name of an app.
:type site_name: str
:param databases: Collection of databases which should be restored. This list has to match the
list of databases included in the backup.
:type databases: list[~azure.mgmt.web.v2020_06_01.models.DatabaseBackupSetting]
:param ignore_conflicting_host_names: Changes the logic used when restoring an app with custom
domains. :code:`<code>true</code>` to remove custom domains automatically. If
:code:`<code>false</code>`, custom domains are added to the app's object when it is being
restored, but that might fail due to conflicts during the operation.
:type ignore_conflicting_host_names: bool
:param ignore_databases: Ignore the databases and only restore the site content.
:type ignore_databases: bool
:param app_service_plan: Specify app service plan that will own restored site.
:type app_service_plan: str
:param operation_type: Operation type. Possible values include: "Default", "Clone",
"Relocation", "Snapshot", "CloudFS". Default value: "Default".
:type operation_type: str or ~azure.mgmt.web.v2020_06_01.models.BackupRestoreOperationType
:param adjust_connection_strings: :code:`<code>true</code>` if SiteConfig.ConnectionStrings
should be set in new app; otherwise, :code:`<code>false</code>`.
:type adjust_connection_strings: bool
:param hosting_environment: App Service Environment name, if needed (only when restoring an app
to an App Service Environment).
:type hosting_environment: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'storage_account_url': {'key': 'properties.storageAccountUrl', 'type': 'str'},
'blob_name': {'key': 'properties.blobName', 'type': 'str'},
'overwrite': {'key': 'properties.overwrite', 'type': 'bool'},
'site_name': {'key': 'properties.siteName', 'type': 'str'},
'databases': {'key': 'properties.databases', 'type': '[DatabaseBackupSetting]'},
'ignore_conflicting_host_names': {'key': 'properties.ignoreConflictingHostNames', 'type': 'bool'},
'ignore_databases': {'key': 'properties.ignoreDatabases', 'type': 'bool'},
'app_service_plan': {'key': 'properties.appServicePlan', 'type': 'str'},
'operation_type': {'key': 'properties.operationType', 'type': 'str'},
'adjust_connection_strings': {'key': 'properties.adjustConnectionStrings', 'type': 'bool'},
'hosting_environment': {'key': 'properties.hostingEnvironment', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
storage_account_url: Optional[str] = None,
blob_name: Optional[str] = None,
overwrite: Optional[bool] = None,
site_name: Optional[str] = None,
databases: Optional[List["DatabaseBackupSetting"]] = None,
ignore_conflicting_host_names: Optional[bool] = False,
ignore_databases: Optional[bool] = False,
app_service_plan: Optional[str] = None,
operation_type: Optional[Union[str, "BackupRestoreOperationType"]] = "Default",
adjust_connection_strings: Optional[bool] = None,
hosting_environment: Optional[str] = None,
**kwargs
):
super(RestoreRequest, self).__init__(kind=kind, **kwargs)
self.storage_account_url = storage_account_url
self.blob_name = blob_name
self.overwrite = overwrite
self.site_name = site_name
self.databases = databases
self.ignore_conflicting_host_names = ignore_conflicting_host_names
self.ignore_databases = ignore_databases
self.app_service_plan = app_service_plan
self.operation_type = operation_type
self.adjust_connection_strings = adjust_connection_strings
self.hosting_environment = hosting_environment
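
# Illustrative sketch, not part of the generated model definitions: a restore
# request that overwrites an existing app from a backup blob and skips database
# restore. The SAS URL is caller-supplied and the blob name is an assumption.
def _example_restore_request(sas_url: str) -> "RestoreRequest":
    return RestoreRequest(
        storage_account_url=sas_url,            # SAS URL to the backup container
        blob_name="myapp-backup-20200601.zip",  # name of the backup blob (assumed)
        overwrite=True,                         # required when restoring over an existing app
        ignore_databases=True,                  # restore site content only
        operation_type="Default",
    )
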
class ServiceSpecification(msrest.serialization.Model):
"""Resource metrics service provided by Microsoft.Insights resource provider.
:param metric_specifications:
:type metric_specifications: list[~azure.mgmt.web.v2020_06_01.models.MetricSpecification]
:param log_specifications:
:type log_specifications: list[~azure.mgmt.web.v2020_06_01.models.LogSpecification]
"""
_attribute_map = {
'metric_specifications': {'key': 'metricSpecifications', 'type': '[MetricSpecification]'},
'log_specifications': {'key': 'logSpecifications', 'type': '[LogSpecification]'},
}
def __init__(
self,
*,
metric_specifications: Optional[List["MetricSpecification"]] = None,
log_specifications: Optional[List["LogSpecification"]] = None,
**kwargs
):
super(ServiceSpecification, self).__init__(**kwargs)
self.metric_specifications = metric_specifications
self.log_specifications = log_specifications
class Site(Resource):
"""A web app, a mobile app backend, or an API app.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:param location: Required. Resource Location.
:type location: str
:ivar type: Resource type.
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param identity: Managed service identity.
:type identity: ~azure.mgmt.web.v2020_06_01.models.ManagedServiceIdentity
:ivar state: Current state of the app.
:vartype state: str
:ivar host_names: Hostnames associated with the app.
:vartype host_names: list[str]
:ivar repository_site_name: Name of the repository site.
:vartype repository_site_name: str
:ivar usage_state: State indicating whether the app has exceeded its quota usage. Read-only.
Possible values include: "Normal", "Exceeded".
:vartype usage_state: str or ~azure.mgmt.web.v2020_06_01.models.UsageState
:param enabled: :code:`<code>true</code>` if the app is enabled; otherwise,
:code:`<code>false</code>`. Setting this value to false disables the app (takes the app
offline).
:type enabled: bool
:ivar enabled_host_names: Enabled hostnames for the app. Hostnames need to be assigned (see
HostNames) AND enabled. Otherwise, the app is not served on those hostnames.
:vartype enabled_host_names: list[str]
:ivar availability_state: Management information availability state for the app. Possible
values include: "Normal", "Limited", "DisasterRecoveryMode".
:vartype availability_state: str or ~azure.mgmt.web.v2020_06_01.models.SiteAvailabilityState
:param host_name_ssl_states: Hostname SSL states are used to manage the SSL bindings for app's
hostnames.
:type host_name_ssl_states: list[~azure.mgmt.web.v2020_06_01.models.HostNameSslState]
:param server_farm_id: Resource ID of the associated App Service plan, formatted as:
"/subscriptions/{subscriptionID}/resourceGroups/{groupName}/providers/Microsoft.Web/serverfarms/{appServicePlanName}".
:type server_farm_id: str
:param reserved: :code:`<code>true</code>` if reserved; otherwise, :code:`<code>false</code>`.
:type reserved: bool
:param is_xenon: Obsolete: Hyper-V sandbox.
:type is_xenon: bool
:param hyper_v: Hyper-V sandbox.
:type hyper_v: bool
:ivar last_modified_time_utc: Last time the app was modified, in UTC. Read-only.
:vartype last_modified_time_utc: ~datetime.datetime
:param site_config: Configuration of the app.
:type site_config: ~azure.mgmt.web.v2020_06_01.models.SiteConfig
:ivar traffic_manager_host_names: Azure Traffic Manager hostnames associated with the app.
Read-only.
:vartype traffic_manager_host_names: list[str]
:param scm_site_also_stopped: :code:`<code>true</code>` to stop SCM (KUDU) site when the app is
stopped; otherwise, :code:`<code>false</code>`. The default is :code:`<code>false</code>`.
:type scm_site_also_stopped: bool
:ivar target_swap_slot: Specifies which deployment slot this app will swap into. Read-only.
:vartype target_swap_slot: str
:param hosting_environment_profile: App Service Environment to use for the app.
:type hosting_environment_profile: ~azure.mgmt.web.v2020_06_01.models.HostingEnvironmentProfile
:param client_affinity_enabled: :code:`<code>true</code>` to enable client affinity;
:code:`<code>false</code>` to stop sending session affinity cookies, which route client
requests in the same session to the same instance. Default is :code:`<code>true</code>`.
:type client_affinity_enabled: bool
:param client_cert_enabled: :code:`<code>true</code>` to enable client certificate
authentication (TLS mutual authentication); otherwise, :code:`<code>false</code>`. Default is
:code:`<code>false</code>`.
:type client_cert_enabled: bool
:param client_cert_mode: This composes with ClientCertEnabled setting.
* ClientCertEnabled: false means ClientCert is ignored.
* ClientCertEnabled: true and ClientCertMode: Required means ClientCert is required.
* ClientCertEnabled: true and ClientCertMode: Optional means ClientCert is optional or
accepted. Possible values include: "Required", "Optional".
:type client_cert_mode: str or ~azure.mgmt.web.v2020_06_01.models.ClientCertMode
:param client_cert_exclusion_paths: client certificate authentication comma-separated exclusion
paths.
:type client_cert_exclusion_paths: str
:param host_names_disabled: :code:`<code>true</code>` to disable the public hostnames of the
app; otherwise, :code:`<code>false</code>`.
If :code:`<code>true</code>`, the app is only accessible via API management process.
:type host_names_disabled: bool
:param custom_domain_verification_id: Unique identifier that verifies the custom domains
assigned to the app. Customer will add this id to a txt record for verification.
:type custom_domain_verification_id: str
:ivar outbound_ip_addresses: List of IP addresses that the app uses for outbound connections
(e.g. database access). Includes VIPs from tenants that site can be hosted with current
settings. Read-only.
:vartype outbound_ip_addresses: str
:ivar possible_outbound_ip_addresses: List of IP addresses that the app uses for outbound
connections (e.g. database access). Includes VIPs from all tenants except dataComponent.
Read-only.
:vartype possible_outbound_ip_addresses: str
:param container_size: Size of the function container.
:type container_size: int
:param daily_memory_time_quota: Maximum allowed daily memory-time quota (applicable on dynamic
apps only).
:type daily_memory_time_quota: int
:ivar suspended_till: Time until which the app is suspended if the memory-time quota is exceeded.
:vartype suspended_till: ~datetime.datetime
:ivar max_number_of_workers: Maximum number of workers.
This only applies to Functions container.
:vartype max_number_of_workers: int
:param cloning_info: If specified during app creation, the app is cloned from a source app.
:type cloning_info: ~azure.mgmt.web.v2020_06_01.models.CloningInfo
:ivar resource_group: Name of the resource group the app belongs to. Read-only.
:vartype resource_group: str
:ivar is_default_container: :code:`<code>true</code>` if the app is a default container;
otherwise, :code:`<code>false</code>`.
:vartype is_default_container: bool
:ivar default_host_name: Default hostname of the app. Read-only.
:vartype default_host_name: str
:ivar slot_swap_status: Status of the last deployment slot swap operation.
:vartype slot_swap_status: ~azure.mgmt.web.v2020_06_01.models.SlotSwapStatus
:param https_only: HttpsOnly: configures a web site to accept only HTTPS requests and issue a
redirect for HTTP requests.
:type https_only: bool
:param redundancy_mode: Site redundancy mode. Possible values include: "None", "Manual",
"Failover", "ActiveActive", "GeoRedundant".
:type redundancy_mode: str or ~azure.mgmt.web.v2020_06_01.models.RedundancyMode
:ivar in_progress_operation_id: Specifies an operation id if this site has a pending operation.
:vartype in_progress_operation_id: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'location': {'required': True},
'type': {'readonly': True},
'state': {'readonly': True},
'host_names': {'readonly': True},
'repository_site_name': {'readonly': True},
'usage_state': {'readonly': True},
'enabled_host_names': {'readonly': True},
'availability_state': {'readonly': True},
'last_modified_time_utc': {'readonly': True},
'traffic_manager_host_names': {'readonly': True},
'target_swap_slot': {'readonly': True},
'outbound_ip_addresses': {'readonly': True},
'possible_outbound_ip_addresses': {'readonly': True},
'suspended_till': {'readonly': True},
'max_number_of_workers': {'readonly': True},
'resource_group': {'readonly': True},
'is_default_container': {'readonly': True},
'default_host_name': {'readonly': True},
'slot_swap_status': {'readonly': True},
'in_progress_operation_id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'identity': {'key': 'identity', 'type': 'ManagedServiceIdentity'},
'state': {'key': 'properties.state', 'type': 'str'},
'host_names': {'key': 'properties.hostNames', 'type': '[str]'},
'repository_site_name': {'key': 'properties.repositorySiteName', 'type': 'str'},
'usage_state': {'key': 'properties.usageState', 'type': 'str'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
'enabled_host_names': {'key': 'properties.enabledHostNames', 'type': '[str]'},
'availability_state': {'key': 'properties.availabilityState', 'type': 'str'},
'host_name_ssl_states': {'key': 'properties.hostNameSslStates', 'type': '[HostNameSslState]'},
'server_farm_id': {'key': 'properties.serverFarmId', 'type': 'str'},
'reserved': {'key': 'properties.reserved', 'type': 'bool'},
'is_xenon': {'key': 'properties.isXenon', 'type': 'bool'},
'hyper_v': {'key': 'properties.hyperV', 'type': 'bool'},
'last_modified_time_utc': {'key': 'properties.lastModifiedTimeUtc', 'type': 'iso-8601'},
'site_config': {'key': 'properties.siteConfig', 'type': 'SiteConfig'},
'traffic_manager_host_names': {'key': 'properties.trafficManagerHostNames', 'type': '[str]'},
'scm_site_also_stopped': {'key': 'properties.scmSiteAlsoStopped', 'type': 'bool'},
'target_swap_slot': {'key': 'properties.targetSwapSlot', 'type': 'str'},
'hosting_environment_profile': {'key': 'properties.hostingEnvironmentProfile', 'type': 'HostingEnvironmentProfile'},
'client_affinity_enabled': {'key': 'properties.clientAffinityEnabled', 'type': 'bool'},
'client_cert_enabled': {'key': 'properties.clientCertEnabled', 'type': 'bool'},
'client_cert_mode': {'key': 'properties.clientCertMode', 'type': 'str'},
'client_cert_exclusion_paths': {'key': 'properties.clientCertExclusionPaths', 'type': 'str'},
'host_names_disabled': {'key': 'properties.hostNamesDisabled', 'type': 'bool'},
'custom_domain_verification_id': {'key': 'properties.customDomainVerificationId', 'type': 'str'},
'outbound_ip_addresses': {'key': 'properties.outboundIpAddresses', 'type': 'str'},
'possible_outbound_ip_addresses': {'key': 'properties.possibleOutboundIpAddresses', 'type': 'str'},
'container_size': {'key': 'properties.containerSize', 'type': 'int'},
'daily_memory_time_quota': {'key': 'properties.dailyMemoryTimeQuota', 'type': 'int'},
'suspended_till': {'key': 'properties.suspendedTill', 'type': 'iso-8601'},
'max_number_of_workers': {'key': 'properties.maxNumberOfWorkers', 'type': 'int'},
'cloning_info': {'key': 'properties.cloningInfo', 'type': 'CloningInfo'},
'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'},
'is_default_container': {'key': 'properties.isDefaultContainer', 'type': 'bool'},
'default_host_name': {'key': 'properties.defaultHostName', 'type': 'str'},
'slot_swap_status': {'key': 'properties.slotSwapStatus', 'type': 'SlotSwapStatus'},
'https_only': {'key': 'properties.httpsOnly', 'type': 'bool'},
'redundancy_mode': {'key': 'properties.redundancyMode', 'type': 'str'},
'in_progress_operation_id': {'key': 'properties.inProgressOperationId', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
kind: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
identity: Optional["ManagedServiceIdentity"] = None,
enabled: Optional[bool] = None,
host_name_ssl_states: Optional[List["HostNameSslState"]] = None,
server_farm_id: Optional[str] = None,
reserved: Optional[bool] = False,
is_xenon: Optional[bool] = False,
hyper_v: Optional[bool] = False,
site_config: Optional["SiteConfig"] = None,
scm_site_also_stopped: Optional[bool] = False,
hosting_environment_profile: Optional["HostingEnvironmentProfile"] = None,
client_affinity_enabled: Optional[bool] = None,
client_cert_enabled: Optional[bool] = None,
client_cert_mode: Optional[Union[str, "ClientCertMode"]] = None,
client_cert_exclusion_paths: Optional[str] = None,
host_names_disabled: Optional[bool] = None,
custom_domain_verification_id: Optional[str] = None,
container_size: Optional[int] = None,
daily_memory_time_quota: Optional[int] = None,
cloning_info: Optional["CloningInfo"] = None,
https_only: Optional[bool] = None,
redundancy_mode: Optional[Union[str, "RedundancyMode"]] = None,
**kwargs
):
super(Site, self).__init__(kind=kind, location=location, tags=tags, **kwargs)
self.identity = identity
self.state = None
self.host_names = None
self.repository_site_name = None
self.usage_state = None
self.enabled = enabled
self.enabled_host_names = None
self.availability_state = None
self.host_name_ssl_states = host_name_ssl_states
self.server_farm_id = server_farm_id
self.reserved = reserved
self.is_xenon = is_xenon
self.hyper_v = hyper_v
self.last_modified_time_utc = None
self.site_config = site_config
self.traffic_manager_host_names = None
self.scm_site_also_stopped = scm_site_also_stopped
self.target_swap_slot = None
self.hosting_environment_profile = hosting_environment_profile
self.client_affinity_enabled = client_affinity_enabled
self.client_cert_enabled = client_cert_enabled
self.client_cert_mode = client_cert_mode
self.client_cert_exclusion_paths = client_cert_exclusion_paths
self.host_names_disabled = host_names_disabled
self.custom_domain_verification_id = custom_domain_verification_id
self.outbound_ip_addresses = None
self.possible_outbound_ip_addresses = None
self.container_size = container_size
self.daily_memory_time_quota = daily_memory_time_quota
self.suspended_till = None
self.max_number_of_workers = None
self.cloning_info = cloning_info
self.resource_group = None
self.is_default_container = None
self.default_host_name = None
self.slot_swap_status = None
self.https_only = https_only
self.redundancy_mode = redundancy_mode
self.in_progress_operation_id = None
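
# Illustrative sketch, not part of the generated model definitions: a minimal
# Site showing how ClientCertEnabled and ClientCertMode compose, per the
# docstring above. The location, plan ID placeholders, and settings are assumptions.
def _example_site() -> "Site":
    return Site(
        location="West Europe",  # required field
        server_farm_id=(
            "/subscriptions/{subscriptionID}/resourceGroups/{groupName}"
            "/providers/Microsoft.Web/serverfarms/{appServicePlanName}"
        ),
        https_only=True,              # redirect HTTP requests to HTTPS
        client_cert_enabled=True,     # client certificates are honored...
        client_cert_mode="Optional",  # ...but optional rather than required
    )
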
class SiteAuthSettings(ProxyOnlyResource):
"""Configuration settings for the Azure App Service Authentication / Authorization feature.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param enabled: :code:`<code>true</code>` if the Authentication / Authorization feature is
enabled for the current app; otherwise, :code:`<code>false</code>`.
:type enabled: bool
:param runtime_version: The RuntimeVersion of the Authentication / Authorization feature in use
for the current app.
The value of this setting can control the behavior of certain features in the Authentication /
Authorization module.
:type runtime_version: str
:param unauthenticated_client_action: The action to take when an unauthenticated client
attempts to access the app. Possible values include: "RedirectToLoginPage", "AllowAnonymous".
:type unauthenticated_client_action: str or
~azure.mgmt.web.v2020_06_01.models.UnauthenticatedClientAction
:param token_store_enabled: :code:`<code>true</code>` to durably store platform-specific
security tokens that are obtained during login flows; otherwise, :code:`<code>false</code>`.
The default is :code:`<code>false</code>`.
:type token_store_enabled: bool
:param allowed_external_redirect_urls: External URLs that can be redirected to as part of
logging in or logging out of the app. Note that the query string part of the URL is ignored.
This is an advanced setting typically only needed by Windows Store application backends.
Note that URLs within the current domain are always implicitly allowed.
:type allowed_external_redirect_urls: list[str]
:param default_provider: The default authentication provider to use when multiple providers are
configured.
This setting is only needed if multiple providers are configured and the unauthenticated
client
action is set to "RedirectToLoginPage". Possible values include: "AzureActiveDirectory",
"Facebook", "Google", "MicrosoftAccount", "Twitter", "Github".
:type default_provider: str or ~azure.mgmt.web.v2020_06_01.models.BuiltInAuthenticationProvider
:param token_refresh_extension_hours: The number of hours after session token expiration that a
session token can be used to call the token refresh API. The default is 72 hours.
:type token_refresh_extension_hours: float
:param client_id: The Client ID of this relying party application, known as the client_id.
This setting is required for enabling OpenID Connect authentication with Azure Active Directory
or other 3rd party OpenID Connect providers.
More information on OpenID Connect: http://openid.net/specs/openid-connect-core-1_0.html.
:type client_id: str
:param client_secret: The Client Secret of this relying party application (in Azure Active
Directory, this is also referred to as the Key).
This setting is optional. If no client secret is configured, the OpenID Connect implicit auth
flow is used to authenticate end users.
Otherwise, the OpenID Connect Authorization Code Flow is used to authenticate end users.
More information on OpenID Connect: http://openid.net/specs/openid-connect-core-1_0.html.
:type client_secret: str
:param client_secret_setting_name: The app setting name that contains the client secret of the
relying party application.
:type client_secret_setting_name: str
:param client_secret_certificate_thumbprint: An alternative to the client secret, that is the
thumbprint of a certificate used for signing purposes. This property acts as
a replacement for the Client Secret. It is also optional.
:type client_secret_certificate_thumbprint: str
:param issuer: The OpenID Connect Issuer URI that represents the entity which issues access
tokens for this application.
When using Azure Active Directory, this value is the URI of the directory tenant, e.g.
https://sts.windows.net/{tenant-guid}/.
This URI is a case-sensitive identifier for the token issuer.
More information on OpenID Connect Discovery:
http://openid.net/specs/openid-connect-discovery-1_0.html.
:type issuer: str
:param validate_issuer: Gets a value indicating whether the issuer should be a valid HTTPS url
and be validated as such.
:type validate_issuer: bool
:param allowed_audiences: Allowed audience values to consider when validating JWTs issued by
Azure Active Directory. Note that the :code:`<code>ClientID</code>` value is always considered
an allowed audience, regardless of this setting.
:type allowed_audiences: list[str]
:param additional_login_params: Login parameters to send to the OpenID Connect authorization
endpoint when
a user logs in. Each parameter must be in the form "key=value".
:type additional_login_params: list[str]
:param aad_claims_authorization: Gets a JSON string containing the Azure AD Acl settings.
:type aad_claims_authorization: str
:param google_client_id: The OpenID Connect Client ID for the Google web application.
This setting is required for enabling Google Sign-In.
Google Sign-In documentation: https://developers.google.com/identity/sign-in/web/.
:type google_client_id: str
:param google_client_secret: The client secret associated with the Google web application.
This setting is required for enabling Google Sign-In.
Google Sign-In documentation: https://developers.google.com/identity/sign-in/web/.
:type google_client_secret: str
:param google_client_secret_setting_name: The app setting name that contains the client secret
associated with
the Google web application.
:type google_client_secret_setting_name: str
:param google_o_auth_scopes: The OAuth 2.0 scopes that will be requested as part of Google
Sign-In authentication.
This setting is optional. If not specified, "openid", "profile", and "email" are used as
default scopes.
Google Sign-In documentation: https://developers.google.com/identity/sign-in/web/.
:type google_o_auth_scopes: list[str]
:param facebook_app_id: The App ID of the Facebook app used for login.
This setting is required for enabling Facebook Login.
Facebook Login documentation: https://developers.facebook.com/docs/facebook-login.
:type facebook_app_id: str
:param facebook_app_secret: The App Secret of the Facebook app used for Facebook Login.
This setting is required for enabling Facebook Login.
Facebook Login documentation: https://developers.facebook.com/docs/facebook-login.
:type facebook_app_secret: str
:param facebook_app_secret_setting_name: The app setting name that contains the app secret used
for Facebook Login.
:type facebook_app_secret_setting_name: str
:param facebook_o_auth_scopes: The OAuth 2.0 scopes that will be requested as part of Facebook
Login authentication.
This setting is optional.
Facebook Login documentation: https://developers.facebook.com/docs/facebook-login.
:type facebook_o_auth_scopes: list[str]
:param git_hub_client_id: The Client Id of the GitHub app used for login.
This setting is required for enabling GitHub Login.
:type git_hub_client_id: str
:param git_hub_client_secret: The Client Secret of the GitHub app used for Github Login.
This setting is required for enabling GitHub Login.
:type git_hub_client_secret: str
:param git_hub_client_secret_setting_name: The app setting name that contains the client secret
of the Github
app used for GitHub Login.
:type git_hub_client_secret_setting_name: str
:param git_hub_o_auth_scopes: The OAuth 2.0 scopes that will be requested as part of GitHub
Login authentication.
This setting is optional.
:type git_hub_o_auth_scopes: list[str]
:param twitter_consumer_key: The OAuth 1.0a consumer key of the Twitter application used for
sign-in.
This setting is required for enabling Twitter Sign-In.
Twitter Sign-In documentation: https://dev.twitter.com/web/sign-in.
:type twitter_consumer_key: str
:param twitter_consumer_secret: The OAuth 1.0a consumer secret of the Twitter application used
for sign-in.
This setting is required for enabling Twitter Sign-In.
Twitter Sign-In documentation: https://dev.twitter.com/web/sign-in.
:type twitter_consumer_secret: str
:param twitter_consumer_secret_setting_name: The app setting name that contains the OAuth 1.0a
consumer secret of the Twitter
application used for sign-in.
:type twitter_consumer_secret_setting_name: str
:param microsoft_account_client_id: The OAuth 2.0 client ID that was created for the app used
for authentication.
This setting is required for enabling Microsoft Account authentication.
Microsoft Account OAuth documentation: https://dev.onedrive.com/auth/msa_oauth.htm.
:type microsoft_account_client_id: str
:param microsoft_account_client_secret: The OAuth 2.0 client secret that was created for the
app used for authentication.
This setting is required for enabling Microsoft Account authentication.
Microsoft Account OAuth documentation: https://dev.onedrive.com/auth/msa_oauth.htm.
:type microsoft_account_client_secret: str
:param microsoft_account_client_secret_setting_name: The app setting name containing the OAuth
2.0 client secret that was created for the
app used for authentication.
:type microsoft_account_client_secret_setting_name: str
:param microsoft_account_o_auth_scopes: The OAuth 2.0 scopes that will be requested as part of
Microsoft Account authentication.
This setting is optional. If not specified, "wl.basic" is used as the default scope.
Microsoft Account Scopes and permissions documentation:
https://msdn.microsoft.com/en-us/library/dn631845.aspx.
:type microsoft_account_o_auth_scopes: list[str]
:param is_auth_from_file: "true" if the auth config settings should be read from a file,
"false" otherwise.
:type is_auth_from_file: str
:param auth_file_path: The path of the config file containing auth settings.
If the path is relative, the base will be the site's root directory.
:type auth_file_path: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
'runtime_version': {'key': 'properties.runtimeVersion', 'type': 'str'},
'unauthenticated_client_action': {'key': 'properties.unauthenticatedClientAction', 'type': 'str'},
'token_store_enabled': {'key': 'properties.tokenStoreEnabled', 'type': 'bool'},
'allowed_external_redirect_urls': {'key': 'properties.allowedExternalRedirectUrls', 'type': '[str]'},
'default_provider': {'key': 'properties.defaultProvider', 'type': 'str'},
'token_refresh_extension_hours': {'key': 'properties.tokenRefreshExtensionHours', 'type': 'float'},
'client_id': {'key': 'properties.clientId', 'type': 'str'},
'client_secret': {'key': 'properties.clientSecret', 'type': 'str'},
'client_secret_setting_name': {'key': 'properties.clientSecretSettingName', 'type': 'str'},
'client_secret_certificate_thumbprint': {'key': 'properties.clientSecretCertificateThumbprint', 'type': 'str'},
'issuer': {'key': 'properties.issuer', 'type': 'str'},
'validate_issuer': {'key': 'properties.validateIssuer', 'type': 'bool'},
'allowed_audiences': {'key': 'properties.allowedAudiences', 'type': '[str]'},
'additional_login_params': {'key': 'properties.additionalLoginParams', 'type': '[str]'},
'aad_claims_authorization': {'key': 'properties.aadClaimsAuthorization', 'type': 'str'},
'google_client_id': {'key': 'properties.googleClientId', 'type': 'str'},
'google_client_secret': {'key': 'properties.googleClientSecret', 'type': 'str'},
'google_client_secret_setting_name': {'key': 'properties.googleClientSecretSettingName', 'type': 'str'},
'google_o_auth_scopes': {'key': 'properties.googleOAuthScopes', 'type': '[str]'},
'facebook_app_id': {'key': 'properties.facebookAppId', 'type': 'str'},
'facebook_app_secret': {'key': 'properties.facebookAppSecret', 'type': 'str'},
'facebook_app_secret_setting_name': {'key': 'properties.facebookAppSecretSettingName', 'type': 'str'},
'facebook_o_auth_scopes': {'key': 'properties.facebookOAuthScopes', 'type': '[str]'},
'git_hub_client_id': {'key': 'properties.gitHubClientId', 'type': 'str'},
'git_hub_client_secret': {'key': 'properties.gitHubClientSecret', 'type': 'str'},
'git_hub_client_secret_setting_name': {'key': 'properties.gitHubClientSecretSettingName', 'type': 'str'},
'git_hub_o_auth_scopes': {'key': 'properties.gitHubOAuthScopes', 'type': '[str]'},
'twitter_consumer_key': {'key': 'properties.twitterConsumerKey', 'type': 'str'},
'twitter_consumer_secret': {'key': 'properties.twitterConsumerSecret', 'type': 'str'},
'twitter_consumer_secret_setting_name': {'key': 'properties.twitterConsumerSecretSettingName', 'type': 'str'},
'microsoft_account_client_id': {'key': 'properties.microsoftAccountClientId', 'type': 'str'},
'microsoft_account_client_secret': {'key': 'properties.microsoftAccountClientSecret', 'type': 'str'},
'microsoft_account_client_secret_setting_name': {'key': 'properties.microsoftAccountClientSecretSettingName', 'type': 'str'},
'microsoft_account_o_auth_scopes': {'key': 'properties.microsoftAccountOAuthScopes', 'type': '[str]'},
'is_auth_from_file': {'key': 'properties.isAuthFromFile', 'type': 'str'},
'auth_file_path': {'key': 'properties.authFilePath', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
enabled: Optional[bool] = None,
runtime_version: Optional[str] = None,
unauthenticated_client_action: Optional[Union[str, "UnauthenticatedClientAction"]] = None,
token_store_enabled: Optional[bool] = None,
allowed_external_redirect_urls: Optional[List[str]] = None,
default_provider: Optional[Union[str, "BuiltInAuthenticationProvider"]] = None,
token_refresh_extension_hours: Optional[float] = None,
client_id: Optional[str] = None,
client_secret: Optional[str] = None,
client_secret_setting_name: Optional[str] = None,
client_secret_certificate_thumbprint: Optional[str] = None,
issuer: Optional[str] = None,
validate_issuer: Optional[bool] = None,
allowed_audiences: Optional[List[str]] = None,
additional_login_params: Optional[List[str]] = None,
aad_claims_authorization: Optional[str] = None,
google_client_id: Optional[str] = None,
google_client_secret: Optional[str] = None,
google_client_secret_setting_name: Optional[str] = None,
google_o_auth_scopes: Optional[List[str]] = None,
facebook_app_id: Optional[str] = None,
facebook_app_secret: Optional[str] = None,
facebook_app_secret_setting_name: Optional[str] = None,
facebook_o_auth_scopes: Optional[List[str]] = None,
git_hub_client_id: Optional[str] = None,
git_hub_client_secret: Optional[str] = None,
git_hub_client_secret_setting_name: Optional[str] = None,
git_hub_o_auth_scopes: Optional[List[str]] = None,
twitter_consumer_key: Optional[str] = None,
twitter_consumer_secret: Optional[str] = None,
twitter_consumer_secret_setting_name: Optional[str] = None,
microsoft_account_client_id: Optional[str] = None,
microsoft_account_client_secret: Optional[str] = None,
microsoft_account_client_secret_setting_name: Optional[str] = None,
microsoft_account_o_auth_scopes: Optional[List[str]] = None,
is_auth_from_file: Optional[str] = None,
auth_file_path: Optional[str] = None,
**kwargs
):
super(SiteAuthSettings, self).__init__(kind=kind, **kwargs)
self.enabled = enabled
self.runtime_version = runtime_version
self.unauthenticated_client_action = unauthenticated_client_action
self.token_store_enabled = token_store_enabled
self.allowed_external_redirect_urls = allowed_external_redirect_urls
self.default_provider = default_provider
self.token_refresh_extension_hours = token_refresh_extension_hours
self.client_id = client_id
self.client_secret = client_secret
self.client_secret_setting_name = client_secret_setting_name
self.client_secret_certificate_thumbprint = client_secret_certificate_thumbprint
self.issuer = issuer
self.validate_issuer = validate_issuer
self.allowed_audiences = allowed_audiences
self.additional_login_params = additional_login_params
self.aad_claims_authorization = aad_claims_authorization
self.google_client_id = google_client_id
self.google_client_secret = google_client_secret
self.google_client_secret_setting_name = google_client_secret_setting_name
self.google_o_auth_scopes = google_o_auth_scopes
self.facebook_app_id = facebook_app_id
self.facebook_app_secret = facebook_app_secret
self.facebook_app_secret_setting_name = facebook_app_secret_setting_name
self.facebook_o_auth_scopes = facebook_o_auth_scopes
self.git_hub_client_id = git_hub_client_id
self.git_hub_client_secret = git_hub_client_secret
self.git_hub_client_secret_setting_name = git_hub_client_secret_setting_name
self.git_hub_o_auth_scopes = git_hub_o_auth_scopes
self.twitter_consumer_key = twitter_consumer_key
self.twitter_consumer_secret = twitter_consumer_secret
self.twitter_consumer_secret_setting_name = twitter_consumer_secret_setting_name
self.microsoft_account_client_id = microsoft_account_client_id
self.microsoft_account_client_secret = microsoft_account_client_secret
self.microsoft_account_client_secret_setting_name = microsoft_account_client_secret_setting_name
self.microsoft_account_o_auth_scopes = microsoft_account_o_auth_scopes
self.is_auth_from_file = is_auth_from_file
self.auth_file_path = auth_file_path
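
# Minimal usage sketch: a SiteAuthSettings payload that turns on Azure Active
# Directory login with the client secret read from an app setting. The client
# ID, app setting name and issuer tenant below are hypothetical placeholders.
def _example_build_site_auth_settings() -> "SiteAuthSettings":
    return SiteAuthSettings(
        enabled=True,
        unauthenticated_client_action="RedirectToLoginPage",
        default_provider="AzureActiveDirectory",
        client_id="<aad-application-client-id>",
        client_secret_setting_name="AAD_CLIENT_SECRET",
        issuer="https://sts.windows.net/<tenant-guid>/",
        token_store_enabled=True,
    )
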
class SiteAuthSettingsV2(ProxyOnlyResource):
"""SiteAuthSettingsV2.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param platform:
:type platform: ~azure.mgmt.web.v2020_06_01.models.AuthPlatform
:param global_validation:
:type global_validation: ~azure.mgmt.web.v2020_06_01.models.GlobalValidation
:param identity_providers:
:type identity_providers: ~azure.mgmt.web.v2020_06_01.models.IdentityProviders
:param login:
:type login: ~azure.mgmt.web.v2020_06_01.models.Login
:param http_settings:
:type http_settings: ~azure.mgmt.web.v2020_06_01.models.HttpSettings
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'platform': {'key': 'properties.platform', 'type': 'AuthPlatform'},
'global_validation': {'key': 'properties.globalValidation', 'type': 'GlobalValidation'},
'identity_providers': {'key': 'properties.identityProviders', 'type': 'IdentityProviders'},
'login': {'key': 'properties.login', 'type': 'Login'},
'http_settings': {'key': 'properties.httpSettings', 'type': 'HttpSettings'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
platform: Optional["AuthPlatform"] = None,
global_validation: Optional["GlobalValidation"] = None,
identity_providers: Optional["IdentityProviders"] = None,
login: Optional["Login"] = None,
http_settings: Optional["HttpSettings"] = None,
**kwargs
):
super(SiteAuthSettingsV2, self).__init__(kind=kind, **kwargs)
self.platform = platform
self.global_validation = global_validation
self.identity_providers = identity_providers
self.login = login
self.http_settings = http_settings
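
# Minimal usage sketch: composing pre-built sub-models into the V2 auth settings
# envelope. Construction of AuthPlatform, IdentityProviders and Login themselves
# is assumed to happen elsewhere; only the composition is shown here.
def _example_build_site_auth_settings_v2(
    platform: "AuthPlatform",
    identity_providers: "IdentityProviders",
    login: "Login",
) -> "SiteAuthSettingsV2":
    return SiteAuthSettingsV2(
        platform=platform,
        identity_providers=identity_providers,
        login=login,
    )
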
class SiteCloneability(msrest.serialization.Model):
"""Represents whether or not an app is cloneable.
:param result: Result of the cloneability check. Possible values include: "Cloneable", "PartiallyCloneable",
"NotCloneable".
:type result: str or ~azure.mgmt.web.v2020_06_01.models.CloneAbilityResult
:param blocking_features: List of features enabled on app that prevent cloning.
:type blocking_features: list[~azure.mgmt.web.v2020_06_01.models.SiteCloneabilityCriterion]
:param unsupported_features: List of features enabled on app that are non-blocking but cannot
be cloned. The app can still be cloned
but the features in this list will not be set up on the cloned app.
:type unsupported_features: list[~azure.mgmt.web.v2020_06_01.models.SiteCloneabilityCriterion]
:param blocking_characteristics: List of blocking application characteristics.
:type blocking_characteristics:
list[~azure.mgmt.web.v2020_06_01.models.SiteCloneabilityCriterion]
"""
_attribute_map = {
'result': {'key': 'result', 'type': 'str'},
'blocking_features': {'key': 'blockingFeatures', 'type': '[SiteCloneabilityCriterion]'},
'unsupported_features': {'key': 'unsupportedFeatures', 'type': '[SiteCloneabilityCriterion]'},
'blocking_characteristics': {'key': 'blockingCharacteristics', 'type': '[SiteCloneabilityCriterion]'},
}
def __init__(
self,
*,
result: Optional[Union[str, "CloneAbilityResult"]] = None,
blocking_features: Optional[List["SiteCloneabilityCriterion"]] = None,
unsupported_features: Optional[List["SiteCloneabilityCriterion"]] = None,
blocking_characteristics: Optional[List["SiteCloneabilityCriterion"]] = None,
**kwargs
):
super(SiteCloneability, self).__init__(**kwargs)
self.result = result
self.blocking_features = blocking_features
self.unsupported_features = unsupported_features
self.blocking_characteristics = blocking_characteristics
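
# Minimal usage sketch: summarising a cloneability check result returned by the
# service. Only attributes defined on SiteCloneability are read.
def _example_summarize_cloneability(info: "SiteCloneability") -> str:
    blockers = [c.name for c in (info.blocking_features or [])]
    return "result=%s; blocking=%s" % (info.result, ", ".join(blockers) or "none")
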
class SiteCloneabilityCriterion(msrest.serialization.Model):
"""An app cloneability criterion.
:param name: Name of criterion.
:type name: str
:param description: Description of criterion.
:type description: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
description: Optional[str] = None,
**kwargs
):
super(SiteCloneabilityCriterion, self).__init__(**kwargs)
self.name = name
self.description = description
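
# Minimal usage sketch: formatting a single cloneability criterion as a report line.
def _example_format_criterion(criterion: "SiteCloneabilityCriterion") -> str:
    return "%s: %s" % (criterion.name, criterion.description or "")
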
class SiteConfig(msrest.serialization.Model):
"""Configuration of an App Service app.
Variables are only populated by the server, and will be ignored when sending a request.
:param number_of_workers: Number of workers.
:type number_of_workers: int
:param default_documents: Default documents.
:type default_documents: list[str]
:param net_framework_version: .NET Framework version.
:type net_framework_version: str
:param php_version: Version of PHP.
:type php_version: str
:param python_version: Version of Python.
:type python_version: str
:param node_version: Version of Node.js.
:type node_version: str
:param power_shell_version: Version of PowerShell.
:type power_shell_version: str
:param linux_fx_version: Linux App Framework and version.
:type linux_fx_version: str
:param windows_fx_version: Xenon App Framework and version.
:type windows_fx_version: str
:param request_tracing_enabled: :code:`<code>true</code>` if request tracing is enabled;
otherwise, :code:`<code>false</code>`.
:type request_tracing_enabled: bool
:param request_tracing_expiration_time: Request tracing expiration time.
:type request_tracing_expiration_time: ~datetime.datetime
:param remote_debugging_enabled: :code:`<code>true</code>` if remote debugging is enabled;
otherwise, :code:`<code>false</code>`.
:type remote_debugging_enabled: bool
:param remote_debugging_version: Remote debugging version.
:type remote_debugging_version: str
:param http_logging_enabled: :code:`<code>true</code>` if HTTP logging is enabled; otherwise,
:code:`<code>false</code>`.
:type http_logging_enabled: bool
:param acr_use_managed_identity_creds: Flag to use Managed Identity Creds for ACR pull.
:type acr_use_managed_identity_creds: bool
:param acr_user_managed_identity_id: If using user managed identity, the user managed identity
ClientId.
:type acr_user_managed_identity_id: str
:param logs_directory_size_limit: HTTP logs directory size limit.
:type logs_directory_size_limit: int
:param detailed_error_logging_enabled: :code:`<code>true</code>` if detailed error logging is
enabled; otherwise, :code:`<code>false</code>`.
:type detailed_error_logging_enabled: bool
:param publishing_username: Publishing user name.
:type publishing_username: str
:param app_settings: Application settings.
:type app_settings: list[~azure.mgmt.web.v2020_06_01.models.NameValuePair]
:param connection_strings: Connection strings.
:type connection_strings: list[~azure.mgmt.web.v2020_06_01.models.ConnStringInfo]
:ivar machine_key: Site MachineKey.
:vartype machine_key: ~azure.mgmt.web.v2020_06_01.models.SiteMachineKey
:param handler_mappings: Handler mappings.
:type handler_mappings: list[~azure.mgmt.web.v2020_06_01.models.HandlerMapping]
:param document_root: Document root.
:type document_root: str
:param scm_type: SCM type. Possible values include: "None", "Dropbox", "Tfs", "LocalGit",
"GitHub", "CodePlexGit", "CodePlexHg", "BitbucketGit", "BitbucketHg", "ExternalGit",
"ExternalHg", "OneDrive", "VSO", "VSTSRM".
:type scm_type: str or ~azure.mgmt.web.v2020_06_01.models.ScmType
:param use32_bit_worker_process: :code:`<code>true</code>` to use 32-bit worker process;
otherwise, :code:`<code>false</code>`.
:type use32_bit_worker_process: bool
:param web_sockets_enabled: :code:`<code>true</code>` if WebSocket is enabled; otherwise,
:code:`<code>false</code>`.
:type web_sockets_enabled: bool
:param always_on: :code:`<code>true</code>` if Always On is enabled; otherwise,
:code:`<code>false</code>`.
:type always_on: bool
:param java_version: Java version.
:type java_version: str
:param java_container: Java container.
:type java_container: str
:param java_container_version: Java container version.
:type java_container_version: str
:param app_command_line: App command line to launch.
:type app_command_line: str
:param managed_pipeline_mode: Managed pipeline mode. Possible values include: "Integrated",
"Classic".
:type managed_pipeline_mode: str or ~azure.mgmt.web.v2020_06_01.models.ManagedPipelineMode
:param virtual_applications: Virtual applications.
:type virtual_applications: list[~azure.mgmt.web.v2020_06_01.models.VirtualApplication]
:param load_balancing: Site load balancing. Possible values include: "WeightedRoundRobin",
"LeastRequests", "LeastResponseTime", "WeightedTotalTraffic", "RequestHash".
:type load_balancing: str or ~azure.mgmt.web.v2020_06_01.models.SiteLoadBalancing
:param experiments: This is a workaround for polymorphic types.
:type experiments: ~azure.mgmt.web.v2020_06_01.models.Experiments
:param limits: Site limits.
:type limits: ~azure.mgmt.web.v2020_06_01.models.SiteLimits
:param auto_heal_enabled: :code:`<code>true</code>` if Auto Heal is enabled; otherwise,
:code:`<code>false</code>`.
:type auto_heal_enabled: bool
:param auto_heal_rules: Auto Heal rules.
:type auto_heal_rules: ~azure.mgmt.web.v2020_06_01.models.AutoHealRules
:param tracing_options: Tracing options.
:type tracing_options: str
:param vnet_name: Virtual Network name.
:type vnet_name: str
:param vnet_route_all_enabled: Virtual Network Route All enabled. This causes all outbound
traffic to have Virtual Network Security Groups and User Defined Routes applied.
:type vnet_route_all_enabled: bool
:param vnet_private_ports_count: The number of private ports assigned to this app. These will
be assigned dynamically at runtime.
:type vnet_private_ports_count: int
:param cors: Cross-Origin Resource Sharing (CORS) settings.
:type cors: ~azure.mgmt.web.v2020_06_01.models.CorsSettings
:param push: Push endpoint settings.
:type push: ~azure.mgmt.web.v2020_06_01.models.PushSettings
:param api_definition: Information about the formal API definition for the app.
:type api_definition: ~azure.mgmt.web.v2020_06_01.models.ApiDefinitionInfo
:param api_management_config: Azure API management settings linked to the app.
:type api_management_config: ~azure.mgmt.web.v2020_06_01.models.ApiManagementConfig
:param auto_swap_slot_name: Auto-swap slot name.
:type auto_swap_slot_name: str
:param local_my_sql_enabled: :code:`<code>true</code>` to enable local MySQL; otherwise,
:code:`<code>false</code>`.
:type local_my_sql_enabled: bool
:param managed_service_identity_id: Managed Service Identity Id.
:type managed_service_identity_id: int
:param x_managed_service_identity_id: Explicit Managed Service Identity Id.
:type x_managed_service_identity_id: int
:param ip_security_restrictions: IP security restrictions for main.
:type ip_security_restrictions: list[~azure.mgmt.web.v2020_06_01.models.IpSecurityRestriction]
:param scm_ip_security_restrictions: IP security restrictions for scm.
:type scm_ip_security_restrictions:
list[~azure.mgmt.web.v2020_06_01.models.IpSecurityRestriction]
:param scm_ip_security_restrictions_use_main: IP security restrictions for scm to use main.
:type scm_ip_security_restrictions_use_main: bool
:param http20_enabled: Http20Enabled: configures a web site to allow clients to connect over
HTTP/2.0.
:type http20_enabled: bool
:param min_tls_version: MinTlsVersion: configures the minimum version of TLS required for SSL
requests. Possible values include: "1.0", "1.1", "1.2".
:type min_tls_version: str or ~azure.mgmt.web.v2020_06_01.models.SupportedTlsVersions
:param scm_min_tls_version: ScmMinTlsVersion: configures the minimum version of TLS required
for SSL requests for SCM site. Possible values include: "1.0", "1.1", "1.2".
:type scm_min_tls_version: str or ~azure.mgmt.web.v2020_06_01.models.SupportedTlsVersions
:param ftps_state: State of FTP / FTPS service. Possible values include: "AllAllowed",
"FtpsOnly", "Disabled".
:type ftps_state: str or ~azure.mgmt.web.v2020_06_01.models.FtpsState
:param pre_warmed_instance_count: Number of preWarmed instances.
This setting only applies to the Consumption and Elastic Plans.
:type pre_warmed_instance_count: int
:param health_check_path: Health check path.
:type health_check_path: str
"""
_validation = {
'machine_key': {'readonly': True},
'pre_warmed_instance_count': {'maximum': 10, 'minimum': 0},
}
_attribute_map = {
'number_of_workers': {'key': 'numberOfWorkers', 'type': 'int'},
'default_documents': {'key': 'defaultDocuments', 'type': '[str]'},
'net_framework_version': {'key': 'netFrameworkVersion', 'type': 'str'},
'php_version': {'key': 'phpVersion', 'type': 'str'},
'python_version': {'key': 'pythonVersion', 'type': 'str'},
'node_version': {'key': 'nodeVersion', 'type': 'str'},
'power_shell_version': {'key': 'powerShellVersion', 'type': 'str'},
'linux_fx_version': {'key': 'linuxFxVersion', 'type': 'str'},
'windows_fx_version': {'key': 'windowsFxVersion', 'type': 'str'},
'request_tracing_enabled': {'key': 'requestTracingEnabled', 'type': 'bool'},
'request_tracing_expiration_time': {'key': 'requestTracingExpirationTime', 'type': 'iso-8601'},
'remote_debugging_enabled': {'key': 'remoteDebuggingEnabled', 'type': 'bool'},
'remote_debugging_version': {'key': 'remoteDebuggingVersion', 'type': 'str'},
'http_logging_enabled': {'key': 'httpLoggingEnabled', 'type': 'bool'},
'acr_use_managed_identity_creds': {'key': 'acrUseManagedIdentityCreds', 'type': 'bool'},
'acr_user_managed_identity_id': {'key': 'acrUserManagedIdentityID', 'type': 'str'},
'logs_directory_size_limit': {'key': 'logsDirectorySizeLimit', 'type': 'int'},
'detailed_error_logging_enabled': {'key': 'detailedErrorLoggingEnabled', 'type': 'bool'},
'publishing_username': {'key': 'publishingUsername', 'type': 'str'},
'app_settings': {'key': 'appSettings', 'type': '[NameValuePair]'},
'connection_strings': {'key': 'connectionStrings', 'type': '[ConnStringInfo]'},
'machine_key': {'key': 'machineKey', 'type': 'SiteMachineKey'},
'handler_mappings': {'key': 'handlerMappings', 'type': '[HandlerMapping]'},
'document_root': {'key': 'documentRoot', 'type': 'str'},
'scm_type': {'key': 'scmType', 'type': 'str'},
'use32_bit_worker_process': {'key': 'use32BitWorkerProcess', 'type': 'bool'},
'web_sockets_enabled': {'key': 'webSocketsEnabled', 'type': 'bool'},
'always_on': {'key': 'alwaysOn', 'type': 'bool'},
'java_version': {'key': 'javaVersion', 'type': 'str'},
'java_container': {'key': 'javaContainer', 'type': 'str'},
'java_container_version': {'key': 'javaContainerVersion', 'type': 'str'},
'app_command_line': {'key': 'appCommandLine', 'type': 'str'},
'managed_pipeline_mode': {'key': 'managedPipelineMode', 'type': 'str'},
'virtual_applications': {'key': 'virtualApplications', 'type': '[VirtualApplication]'},
'load_balancing': {'key': 'loadBalancing', 'type': 'str'},
'experiments': {'key': 'experiments', 'type': 'Experiments'},
'limits': {'key': 'limits', 'type': 'SiteLimits'},
'auto_heal_enabled': {'key': 'autoHealEnabled', 'type': 'bool'},
'auto_heal_rules': {'key': 'autoHealRules', 'type': 'AutoHealRules'},
'tracing_options': {'key': 'tracingOptions', 'type': 'str'},
'vnet_name': {'key': 'vnetName', 'type': 'str'},
'vnet_route_all_enabled': {'key': 'vnetRouteAllEnabled', 'type': 'bool'},
'vnet_private_ports_count': {'key': 'vnetPrivatePortsCount', 'type': 'int'},
'cors': {'key': 'cors', 'type': 'CorsSettings'},
'push': {'key': 'push', 'type': 'PushSettings'},
'api_definition': {'key': 'apiDefinition', 'type': 'ApiDefinitionInfo'},
'api_management_config': {'key': 'apiManagementConfig', 'type': 'ApiManagementConfig'},
'auto_swap_slot_name': {'key': 'autoSwapSlotName', 'type': 'str'},
'local_my_sql_enabled': {'key': 'localMySqlEnabled', 'type': 'bool'},
'managed_service_identity_id': {'key': 'managedServiceIdentityId', 'type': 'int'},
'x_managed_service_identity_id': {'key': 'xManagedServiceIdentityId', 'type': 'int'},
'ip_security_restrictions': {'key': 'ipSecurityRestrictions', 'type': '[IpSecurityRestriction]'},
'scm_ip_security_restrictions': {'key': 'scmIpSecurityRestrictions', 'type': '[IpSecurityRestriction]'},
'scm_ip_security_restrictions_use_main': {'key': 'scmIpSecurityRestrictionsUseMain', 'type': 'bool'},
'http20_enabled': {'key': 'http20Enabled', 'type': 'bool'},
'min_tls_version': {'key': 'minTlsVersion', 'type': 'str'},
'scm_min_tls_version': {'key': 'scmMinTlsVersion', 'type': 'str'},
'ftps_state': {'key': 'ftpsState', 'type': 'str'},
'pre_warmed_instance_count': {'key': 'preWarmedInstanceCount', 'type': 'int'},
'health_check_path': {'key': 'healthCheckPath', 'type': 'str'},
}
def __init__(
self,
*,
number_of_workers: Optional[int] = None,
default_documents: Optional[List[str]] = None,
net_framework_version: Optional[str] = "v4.6",
php_version: Optional[str] = None,
python_version: Optional[str] = None,
node_version: Optional[str] = None,
power_shell_version: Optional[str] = None,
linux_fx_version: Optional[str] = None,
windows_fx_version: Optional[str] = None,
request_tracing_enabled: Optional[bool] = None,
request_tracing_expiration_time: Optional[datetime.datetime] = None,
remote_debugging_enabled: Optional[bool] = None,
remote_debugging_version: Optional[str] = None,
http_logging_enabled: Optional[bool] = None,
acr_use_managed_identity_creds: Optional[bool] = None,
acr_user_managed_identity_id: Optional[str] = None,
logs_directory_size_limit: Optional[int] = None,
detailed_error_logging_enabled: Optional[bool] = None,
publishing_username: Optional[str] = None,
app_settings: Optional[List["NameValuePair"]] = None,
connection_strings: Optional[List["ConnStringInfo"]] = None,
handler_mappings: Optional[List["HandlerMapping"]] = None,
document_root: Optional[str] = None,
scm_type: Optional[Union[str, "ScmType"]] = None,
use32_bit_worker_process: Optional[bool] = None,
web_sockets_enabled: Optional[bool] = None,
always_on: Optional[bool] = None,
java_version: Optional[str] = None,
java_container: Optional[str] = None,
java_container_version: Optional[str] = None,
app_command_line: Optional[str] = None,
managed_pipeline_mode: Optional[Union[str, "ManagedPipelineMode"]] = None,
virtual_applications: Optional[List["VirtualApplication"]] = None,
load_balancing: Optional[Union[str, "SiteLoadBalancing"]] = None,
experiments: Optional["Experiments"] = None,
limits: Optional["SiteLimits"] = None,
auto_heal_enabled: Optional[bool] = None,
auto_heal_rules: Optional["AutoHealRules"] = None,
tracing_options: Optional[str] = None,
vnet_name: Optional[str] = None,
vnet_route_all_enabled: Optional[bool] = None,
vnet_private_ports_count: Optional[int] = None,
cors: Optional["CorsSettings"] = None,
push: Optional["PushSettings"] = None,
api_definition: Optional["ApiDefinitionInfo"] = None,
api_management_config: Optional["ApiManagementConfig"] = None,
auto_swap_slot_name: Optional[str] = None,
local_my_sql_enabled: Optional[bool] = False,
managed_service_identity_id: Optional[int] = None,
x_managed_service_identity_id: Optional[int] = None,
ip_security_restrictions: Optional[List["IpSecurityRestriction"]] = None,
scm_ip_security_restrictions: Optional[List["IpSecurityRestriction"]] = None,
scm_ip_security_restrictions_use_main: Optional[bool] = None,
http20_enabled: Optional[bool] = True,
min_tls_version: Optional[Union[str, "SupportedTlsVersions"]] = None,
scm_min_tls_version: Optional[Union[str, "SupportedTlsVersions"]] = None,
ftps_state: Optional[Union[str, "FtpsState"]] = None,
pre_warmed_instance_count: Optional[int] = None,
health_check_path: Optional[str] = None,
**kwargs
):
super(SiteConfig, self).__init__(**kwargs)
self.number_of_workers = number_of_workers
self.default_documents = default_documents
self.net_framework_version = net_framework_version
self.php_version = php_version
self.python_version = python_version
self.node_version = node_version
self.power_shell_version = power_shell_version
self.linux_fx_version = linux_fx_version
self.windows_fx_version = windows_fx_version
self.request_tracing_enabled = request_tracing_enabled
self.request_tracing_expiration_time = request_tracing_expiration_time
self.remote_debugging_enabled = remote_debugging_enabled
self.remote_debugging_version = remote_debugging_version
self.http_logging_enabled = http_logging_enabled
self.acr_use_managed_identity_creds = acr_use_managed_identity_creds
self.acr_user_managed_identity_id = acr_user_managed_identity_id
self.logs_directory_size_limit = logs_directory_size_limit
self.detailed_error_logging_enabled = detailed_error_logging_enabled
self.publishing_username = publishing_username
self.app_settings = app_settings
self.connection_strings = connection_strings
self.machine_key = None
self.handler_mappings = handler_mappings
self.document_root = document_root
self.scm_type = scm_type
self.use32_bit_worker_process = use32_bit_worker_process
self.web_sockets_enabled = web_sockets_enabled
self.always_on = always_on
self.java_version = java_version
self.java_container = java_container
self.java_container_version = java_container_version
self.app_command_line = app_command_line
self.managed_pipeline_mode = managed_pipeline_mode
self.virtual_applications = virtual_applications
self.load_balancing = load_balancing
self.experiments = experiments
self.limits = limits
self.auto_heal_enabled = auto_heal_enabled
self.auto_heal_rules = auto_heal_rules
self.tracing_options = tracing_options
self.vnet_name = vnet_name
self.vnet_route_all_enabled = vnet_route_all_enabled
self.vnet_private_ports_count = vnet_private_ports_count
self.cors = cors
self.push = push
self.api_definition = api_definition
self.api_management_config = api_management_config
self.auto_swap_slot_name = auto_swap_slot_name
self.local_my_sql_enabled = local_my_sql_enabled
self.managed_service_identity_id = managed_service_identity_id
self.x_managed_service_identity_id = x_managed_service_identity_id
self.ip_security_restrictions = ip_security_restrictions
self.scm_ip_security_restrictions = scm_ip_security_restrictions
self.scm_ip_security_restrictions_use_main = scm_ip_security_restrictions_use_main
self.http20_enabled = http20_enabled
self.min_tls_version = min_tls_version
self.scm_min_tls_version = scm_min_tls_version
self.ftps_state = ftps_state
self.pre_warmed_instance_count = pre_warmed_instance_count
self.health_check_path = health_check_path
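
# Minimal usage sketch: a hardened SiteConfig for use inside a Site payload.
# The health-check path is a hypothetical application route.
def _example_build_hardened_site_config() -> "SiteConfig":
    return SiteConfig(
        always_on=True,
        http20_enabled=True,
        min_tls_version="1.2",
        scm_min_tls_version="1.2",
        ftps_state="FtpsOnly",
        health_check_path="/healthz",
    )
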
class SiteConfigResource(ProxyOnlyResource):
"""Web app configuration ARM resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param number_of_workers: Number of workers.
:type number_of_workers: int
:param default_documents: Default documents.
:type default_documents: list[str]
:param net_framework_version: .NET Framework version.
:type net_framework_version: str
:param php_version: Version of PHP.
:type php_version: str
:param python_version: Version of Python.
:type python_version: str
:param node_version: Version of Node.js.
:type node_version: str
:param power_shell_version: Version of PowerShell.
:type power_shell_version: str
:param linux_fx_version: Linux App Framework and version.
:type linux_fx_version: str
:param windows_fx_version: Xenon App Framework and version.
:type windows_fx_version: str
:param request_tracing_enabled: :code:`<code>true</code>` if request tracing is enabled;
otherwise, :code:`<code>false</code>`.
:type request_tracing_enabled: bool
:param request_tracing_expiration_time: Request tracing expiration time.
:type request_tracing_expiration_time: ~datetime.datetime
:param remote_debugging_enabled: :code:`<code>true</code>` if remote debugging is enabled;
otherwise, :code:`<code>false</code>`.
:type remote_debugging_enabled: bool
:param remote_debugging_version: Remote debugging version.
:type remote_debugging_version: str
:param http_logging_enabled: :code:`<code>true</code>` if HTTP logging is enabled; otherwise,
:code:`<code>false</code>`.
:type http_logging_enabled: bool
:param acr_use_managed_identity_creds: Flag to use Managed Identity Creds for ACR pull.
:type acr_use_managed_identity_creds: bool
:param acr_user_managed_identity_id: If using user managed identity, the user managed identity
ClientId.
:type acr_user_managed_identity_id: str
:param logs_directory_size_limit: HTTP logs directory size limit.
:type logs_directory_size_limit: int
:param detailed_error_logging_enabled: :code:`<code>true</code>` if detailed error logging is
enabled; otherwise, :code:`<code>false</code>`.
:type detailed_error_logging_enabled: bool
:param publishing_username: Publishing user name.
:type publishing_username: str
:param app_settings: Application settings.
:type app_settings: list[~azure.mgmt.web.v2020_06_01.models.NameValuePair]
:param connection_strings: Connection strings.
:type connection_strings: list[~azure.mgmt.web.v2020_06_01.models.ConnStringInfo]
:ivar machine_key: Site MachineKey.
:vartype machine_key: ~azure.mgmt.web.v2020_06_01.models.SiteMachineKey
:param handler_mappings: Handler mappings.
:type handler_mappings: list[~azure.mgmt.web.v2020_06_01.models.HandlerMapping]
:param document_root: Document root.
:type document_root: str
:param scm_type: SCM type. Possible values include: "None", "Dropbox", "Tfs", "LocalGit",
"GitHub", "CodePlexGit", "CodePlexHg", "BitbucketGit", "BitbucketHg", "ExternalGit",
"ExternalHg", "OneDrive", "VSO", "VSTSRM".
:type scm_type: str or ~azure.mgmt.web.v2020_06_01.models.ScmType
:param use32_bit_worker_process: :code:`<code>true</code>` to use 32-bit worker process;
otherwise, :code:`<code>false</code>`.
:type use32_bit_worker_process: bool
:param web_sockets_enabled: :code:`<code>true</code>` if WebSocket is enabled; otherwise,
:code:`<code>false</code>`.
:type web_sockets_enabled: bool
:param always_on: :code:`<code>true</code>` if Always On is enabled; otherwise,
:code:`<code>false</code>`.
:type always_on: bool
:param java_version: Java version.
:type java_version: str
:param java_container: Java container.
:type java_container: str
:param java_container_version: Java container version.
:type java_container_version: str
:param app_command_line: App command line to launch.
:type app_command_line: str
:param managed_pipeline_mode: Managed pipeline mode. Possible values include: "Integrated",
"Classic".
:type managed_pipeline_mode: str or ~azure.mgmt.web.v2020_06_01.models.ManagedPipelineMode
:param virtual_applications: Virtual applications.
:type virtual_applications: list[~azure.mgmt.web.v2020_06_01.models.VirtualApplication]
:param load_balancing: Site load balancing. Possible values include: "WeightedRoundRobin",
"LeastRequests", "LeastResponseTime", "WeightedTotalTraffic", "RequestHash".
:type load_balancing: str or ~azure.mgmt.web.v2020_06_01.models.SiteLoadBalancing
:param experiments: This is a workaround for polymorphic types.
:type experiments: ~azure.mgmt.web.v2020_06_01.models.Experiments
:param limits: Site limits.
:type limits: ~azure.mgmt.web.v2020_06_01.models.SiteLimits
:param auto_heal_enabled: :code:`<code>true</code>` if Auto Heal is enabled; otherwise,
:code:`<code>false</code>`.
:type auto_heal_enabled: bool
:param auto_heal_rules: Auto Heal rules.
:type auto_heal_rules: ~azure.mgmt.web.v2020_06_01.models.AutoHealRules
:param tracing_options: Tracing options.
:type tracing_options: str
:param vnet_name: Virtual Network name.
:type vnet_name: str
:param vnet_route_all_enabled: Virtual Network Route All enabled. This causes all outbound
traffic to have Virtual Network Security Groups and User Defined Routes applied.
:type vnet_route_all_enabled: bool
:param vnet_private_ports_count: The number of private ports assigned to this app. These will
be assigned dynamically at runtime.
:type vnet_private_ports_count: int
:param cors: Cross-Origin Resource Sharing (CORS) settings.
:type cors: ~azure.mgmt.web.v2020_06_01.models.CorsSettings
:param push: Push endpoint settings.
:type push: ~azure.mgmt.web.v2020_06_01.models.PushSettings
:param api_definition: Information about the formal API definition for the app.
:type api_definition: ~azure.mgmt.web.v2020_06_01.models.ApiDefinitionInfo
:param api_management_config: Azure API management settings linked to the app.
:type api_management_config: ~azure.mgmt.web.v2020_06_01.models.ApiManagementConfig
:param auto_swap_slot_name: Auto-swap slot name.
:type auto_swap_slot_name: str
:param local_my_sql_enabled: :code:`<code>true</code>` to enable local MySQL; otherwise,
:code:`<code>false</code>`.
:type local_my_sql_enabled: bool
:param managed_service_identity_id: Managed Service Identity Id.
:type managed_service_identity_id: int
:param x_managed_service_identity_id: Explicit Managed Service Identity Id.
:type x_managed_service_identity_id: int
:param ip_security_restrictions: IP security restrictions for main.
:type ip_security_restrictions: list[~azure.mgmt.web.v2020_06_01.models.IpSecurityRestriction]
:param scm_ip_security_restrictions: IP security restrictions for scm.
:type scm_ip_security_restrictions:
list[~azure.mgmt.web.v2020_06_01.models.IpSecurityRestriction]
:param scm_ip_security_restrictions_use_main: IP security restrictions for scm to use main.
:type scm_ip_security_restrictions_use_main: bool
:param http20_enabled: Http20Enabled: configures a web site to allow clients to connect over
HTTP/2.0.
:type http20_enabled: bool
:param min_tls_version: MinTlsVersion: configures the minimum version of TLS required for SSL
requests. Possible values include: "1.0", "1.1", "1.2".
:type min_tls_version: str or ~azure.mgmt.web.v2020_06_01.models.SupportedTlsVersions
:param scm_min_tls_version: ScmMinTlsVersion: configures the minimum version of TLS required
for SSL requests for SCM site. Possible values include: "1.0", "1.1", "1.2".
:type scm_min_tls_version: str or ~azure.mgmt.web.v2020_06_01.models.SupportedTlsVersions
:param ftps_state: State of FTP / FTPS service. Possible values include: "AllAllowed",
"FtpsOnly", "Disabled".
:type ftps_state: str or ~azure.mgmt.web.v2020_06_01.models.FtpsState
:param pre_warmed_instance_count: Number of preWarmed instances.
This setting only applies to the Consumption and Elastic Plans.
:type pre_warmed_instance_count: int
:param health_check_path: Health check path.
:type health_check_path: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'machine_key': {'readonly': True},
'pre_warmed_instance_count': {'maximum': 10, 'minimum': 0},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'number_of_workers': {'key': 'properties.numberOfWorkers', 'type': 'int'},
'default_documents': {'key': 'properties.defaultDocuments', 'type': '[str]'},
'net_framework_version': {'key': 'properties.netFrameworkVersion', 'type': 'str'},
'php_version': {'key': 'properties.phpVersion', 'type': 'str'},
'python_version': {'key': 'properties.pythonVersion', 'type': 'str'},
'node_version': {'key': 'properties.nodeVersion', 'type': 'str'},
'power_shell_version': {'key': 'properties.powerShellVersion', 'type': 'str'},
'linux_fx_version': {'key': 'properties.linuxFxVersion', 'type': 'str'},
'windows_fx_version': {'key': 'properties.windowsFxVersion', 'type': 'str'},
'request_tracing_enabled': {'key': 'properties.requestTracingEnabled', 'type': 'bool'},
'request_tracing_expiration_time': {'key': 'properties.requestTracingExpirationTime', 'type': 'iso-8601'},
'remote_debugging_enabled': {'key': 'properties.remoteDebuggingEnabled', 'type': 'bool'},
'remote_debugging_version': {'key': 'properties.remoteDebuggingVersion', 'type': 'str'},
'http_logging_enabled': {'key': 'properties.httpLoggingEnabled', 'type': 'bool'},
'acr_use_managed_identity_creds': {'key': 'properties.acrUseManagedIdentityCreds', 'type': 'bool'},
'acr_user_managed_identity_id': {'key': 'properties.acrUserManagedIdentityID', 'type': 'str'},
'logs_directory_size_limit': {'key': 'properties.logsDirectorySizeLimit', 'type': 'int'},
'detailed_error_logging_enabled': {'key': 'properties.detailedErrorLoggingEnabled', 'type': 'bool'},
'publishing_username': {'key': 'properties.publishingUsername', 'type': 'str'},
'app_settings': {'key': 'properties.appSettings', 'type': '[NameValuePair]'},
'connection_strings': {'key': 'properties.connectionStrings', 'type': '[ConnStringInfo]'},
'machine_key': {'key': 'properties.machineKey', 'type': 'SiteMachineKey'},
'handler_mappings': {'key': 'properties.handlerMappings', 'type': '[HandlerMapping]'},
'document_root': {'key': 'properties.documentRoot', 'type': 'str'},
'scm_type': {'key': 'properties.scmType', 'type': 'str'},
'use32_bit_worker_process': {'key': 'properties.use32BitWorkerProcess', 'type': 'bool'},
'web_sockets_enabled': {'key': 'properties.webSocketsEnabled', 'type': 'bool'},
'always_on': {'key': 'properties.alwaysOn', 'type': 'bool'},
'java_version': {'key': 'properties.javaVersion', 'type': 'str'},
'java_container': {'key': 'properties.javaContainer', 'type': 'str'},
'java_container_version': {'key': 'properties.javaContainerVersion', 'type': 'str'},
'app_command_line': {'key': 'properties.appCommandLine', 'type': 'str'},
'managed_pipeline_mode': {'key': 'properties.managedPipelineMode', 'type': 'str'},
'virtual_applications': {'key': 'properties.virtualApplications', 'type': '[VirtualApplication]'},
'load_balancing': {'key': 'properties.loadBalancing', 'type': 'str'},
'experiments': {'key': 'properties.experiments', 'type': 'Experiments'},
'limits': {'key': 'properties.limits', 'type': 'SiteLimits'},
'auto_heal_enabled': {'key': 'properties.autoHealEnabled', 'type': 'bool'},
'auto_heal_rules': {'key': 'properties.autoHealRules', 'type': 'AutoHealRules'},
'tracing_options': {'key': 'properties.tracingOptions', 'type': 'str'},
'vnet_name': {'key': 'properties.vnetName', 'type': 'str'},
'vnet_route_all_enabled': {'key': 'properties.vnetRouteAllEnabled', 'type': 'bool'},
'vnet_private_ports_count': {'key': 'properties.vnetPrivatePortsCount', 'type': 'int'},
'cors': {'key': 'properties.cors', 'type': 'CorsSettings'},
'push': {'key': 'properties.push', 'type': 'PushSettings'},
'api_definition': {'key': 'properties.apiDefinition', 'type': 'ApiDefinitionInfo'},
'api_management_config': {'key': 'properties.apiManagementConfig', 'type': 'ApiManagementConfig'},
'auto_swap_slot_name': {'key': 'properties.autoSwapSlotName', 'type': 'str'},
'local_my_sql_enabled': {'key': 'properties.localMySqlEnabled', 'type': 'bool'},
'managed_service_identity_id': {'key': 'properties.managedServiceIdentityId', 'type': 'int'},
'x_managed_service_identity_id': {'key': 'properties.xManagedServiceIdentityId', 'type': 'int'},
'ip_security_restrictions': {'key': 'properties.ipSecurityRestrictions', 'type': '[IpSecurityRestriction]'},
'scm_ip_security_restrictions': {'key': 'properties.scmIpSecurityRestrictions', 'type': '[IpSecurityRestriction]'},
'scm_ip_security_restrictions_use_main': {'key': 'properties.scmIpSecurityRestrictionsUseMain', 'type': 'bool'},
'http20_enabled': {'key': 'properties.http20Enabled', 'type': 'bool'},
'min_tls_version': {'key': 'properties.minTlsVersion', 'type': 'str'},
'scm_min_tls_version': {'key': 'properties.scmMinTlsVersion', 'type': 'str'},
'ftps_state': {'key': 'properties.ftpsState', 'type': 'str'},
'pre_warmed_instance_count': {'key': 'properties.preWarmedInstanceCount', 'type': 'int'},
'health_check_path': {'key': 'properties.healthCheckPath', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
number_of_workers: Optional[int] = None,
default_documents: Optional[List[str]] = None,
net_framework_version: Optional[str] = "v4.6",
php_version: Optional[str] = None,
python_version: Optional[str] = None,
node_version: Optional[str] = None,
power_shell_version: Optional[str] = None,
linux_fx_version: Optional[str] = None,
windows_fx_version: Optional[str] = None,
request_tracing_enabled: Optional[bool] = None,
request_tracing_expiration_time: Optional[datetime.datetime] = None,
remote_debugging_enabled: Optional[bool] = None,
remote_debugging_version: Optional[str] = None,
http_logging_enabled: Optional[bool] = None,
acr_use_managed_identity_creds: Optional[bool] = None,
acr_user_managed_identity_id: Optional[str] = None,
logs_directory_size_limit: Optional[int] = None,
detailed_error_logging_enabled: Optional[bool] = None,
publishing_username: Optional[str] = None,
app_settings: Optional[List["NameValuePair"]] = None,
connection_strings: Optional[List["ConnStringInfo"]] = None,
handler_mappings: Optional[List["HandlerMapping"]] = None,
document_root: Optional[str] = None,
scm_type: Optional[Union[str, "ScmType"]] = None,
use32_bit_worker_process: Optional[bool] = None,
web_sockets_enabled: Optional[bool] = None,
always_on: Optional[bool] = None,
java_version: Optional[str] = None,
java_container: Optional[str] = None,
java_container_version: Optional[str] = None,
app_command_line: Optional[str] = None,
managed_pipeline_mode: Optional[Union[str, "ManagedPipelineMode"]] = None,
virtual_applications: Optional[List["VirtualApplication"]] = None,
load_balancing: Optional[Union[str, "SiteLoadBalancing"]] = None,
experiments: Optional["Experiments"] = None,
limits: Optional["SiteLimits"] = None,
auto_heal_enabled: Optional[bool] = None,
auto_heal_rules: Optional["AutoHealRules"] = None,
tracing_options: Optional[str] = None,
vnet_name: Optional[str] = None,
vnet_route_all_enabled: Optional[bool] = None,
vnet_private_ports_count: Optional[int] = None,
cors: Optional["CorsSettings"] = None,
push: Optional["PushSettings"] = None,
api_definition: Optional["ApiDefinitionInfo"] = None,
api_management_config: Optional["ApiManagementConfig"] = None,
auto_swap_slot_name: Optional[str] = None,
local_my_sql_enabled: Optional[bool] = False,
managed_service_identity_id: Optional[int] = None,
x_managed_service_identity_id: Optional[int] = None,
ip_security_restrictions: Optional[List["IpSecurityRestriction"]] = None,
scm_ip_security_restrictions: Optional[List["IpSecurityRestriction"]] = None,
scm_ip_security_restrictions_use_main: Optional[bool] = None,
http20_enabled: Optional[bool] = True,
min_tls_version: Optional[Union[str, "SupportedTlsVersions"]] = None,
scm_min_tls_version: Optional[Union[str, "SupportedTlsVersions"]] = None,
ftps_state: Optional[Union[str, "FtpsState"]] = None,
pre_warmed_instance_count: Optional[int] = None,
health_check_path: Optional[str] = None,
**kwargs
):
super(SiteConfigResource, self).__init__(kind=kind, **kwargs)
self.number_of_workers = number_of_workers
self.default_documents = default_documents
self.net_framework_version = net_framework_version
self.php_version = php_version
self.python_version = python_version
self.node_version = node_version
self.power_shell_version = power_shell_version
self.linux_fx_version = linux_fx_version
self.windows_fx_version = windows_fx_version
self.request_tracing_enabled = request_tracing_enabled
self.request_tracing_expiration_time = request_tracing_expiration_time
self.remote_debugging_enabled = remote_debugging_enabled
self.remote_debugging_version = remote_debugging_version
self.http_logging_enabled = http_logging_enabled
self.acr_use_managed_identity_creds = acr_use_managed_identity_creds
self.acr_user_managed_identity_id = acr_user_managed_identity_id
self.logs_directory_size_limit = logs_directory_size_limit
self.detailed_error_logging_enabled = detailed_error_logging_enabled
self.publishing_username = publishing_username
self.app_settings = app_settings
self.connection_strings = connection_strings
self.machine_key = None
self.handler_mappings = handler_mappings
self.document_root = document_root
self.scm_type = scm_type
self.use32_bit_worker_process = use32_bit_worker_process
self.web_sockets_enabled = web_sockets_enabled
self.always_on = always_on
self.java_version = java_version
self.java_container = java_container
self.java_container_version = java_container_version
self.app_command_line = app_command_line
self.managed_pipeline_mode = managed_pipeline_mode
self.virtual_applications = virtual_applications
self.load_balancing = load_balancing
self.experiments = experiments
self.limits = limits
self.auto_heal_enabled = auto_heal_enabled
self.auto_heal_rules = auto_heal_rules
self.tracing_options = tracing_options
self.vnet_name = vnet_name
self.vnet_route_all_enabled = vnet_route_all_enabled
self.vnet_private_ports_count = vnet_private_ports_count
self.cors = cors
self.push = push
self.api_definition = api_definition
self.api_management_config = api_management_config
self.auto_swap_slot_name = auto_swap_slot_name
self.local_my_sql_enabled = local_my_sql_enabled
self.managed_service_identity_id = managed_service_identity_id
self.x_managed_service_identity_id = x_managed_service_identity_id
self.ip_security_restrictions = ip_security_restrictions
self.scm_ip_security_restrictions = scm_ip_security_restrictions
self.scm_ip_security_restrictions_use_main = scm_ip_security_restrictions_use_main
self.http20_enabled = http20_enabled
self.min_tls_version = min_tls_version
self.scm_min_tls_version = scm_min_tls_version
self.ftps_state = ftps_state
self.pre_warmed_instance_count = pre_warmed_instance_count
self.health_check_path = health_check_path
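
# Minimal usage sketch: a partial configuration update expressed as the ARM
# proxy resource; only the fields to change are populated.
def _example_build_config_update() -> "SiteConfigResource":
    return SiteConfigResource(
        number_of_workers=2,
        web_sockets_enabled=True,
        ftps_state="Disabled",
    )
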
class SiteConfigResourceCollection(msrest.serialization.Model):
"""Collection of site configurations.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.SiteConfigResource]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SiteConfigResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["SiteConfigResource"],
**kwargs
):
super(SiteConfigResourceCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class SiteConfigurationSnapshotInfo(ProxyOnlyResource):
"""A snapshot of a web app configuration.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar time: The time the snapshot was taken.
:vartype time: ~datetime.datetime
:ivar snapshot_id: The id of the snapshot.
:vartype snapshot_id: int
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'time': {'readonly': True},
'snapshot_id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'time': {'key': 'properties.time', 'type': 'iso-8601'},
'snapshot_id': {'key': 'properties.snapshotId', 'type': 'int'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(SiteConfigurationSnapshotInfo, self).__init__(kind=kind, **kwargs)
self.time = None
self.snapshot_id = None
class SiteConfigurationSnapshotInfoCollection(msrest.serialization.Model):
"""Collection of metadata for the app configuration snapshots that can be restored.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.SiteConfigurationSnapshotInfo]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SiteConfigurationSnapshotInfo]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["SiteConfigurationSnapshotInfo"],
**kwargs
):
super(SiteConfigurationSnapshotInfoCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class SiteExtensionInfo(ProxyOnlyResource):
"""Site Extension Information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param extension_id: Site extension ID.
:type extension_id: str
:param title:
:type title: str
:param extension_type: Site extension type. Possible values include: "Gallery", "WebRoot".
:type extension_type: str or ~azure.mgmt.web.v2020_06_01.models.SiteExtensionType
:param summary: Summary description.
:type summary: str
:param description: Detailed description.
:type description: str
:param version: Version information.
:type version: str
:param extension_url: Extension URL.
:type extension_url: str
:param project_url: Project URL.
:type project_url: str
:param icon_url: Icon URL.
:type icon_url: str
:param license_url: License URL.
:type license_url: str
:param feed_url: Feed URL.
:type feed_url: str
:param authors: List of authors.
:type authors: list[str]
:param installer_command_line_params: Installer command line parameters.
:type installer_command_line_params: str
:param published_date_time: Published timestamp.
:type published_date_time: ~datetime.datetime
:param download_count: Count of downloads.
:type download_count: int
:param local_is_latest_version: :code:`<code>true</code>` if the local version is the latest
version; :code:`<code>false</code>` otherwise.
:type local_is_latest_version: bool
:param local_path: Local path.
:type local_path: str
:param installed_date_time: Installed timestamp.
:type installed_date_time: ~datetime.datetime
:param provisioning_state: Provisioning state.
:type provisioning_state: str
:param comment: Site Extension comment.
:type comment: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'extension_id': {'key': 'properties.extension_id', 'type': 'str'},
'title': {'key': 'properties.title', 'type': 'str'},
'extension_type': {'key': 'properties.extension_type', 'type': 'str'},
'summary': {'key': 'properties.summary', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'version': {'key': 'properties.version', 'type': 'str'},
'extension_url': {'key': 'properties.extension_url', 'type': 'str'},
'project_url': {'key': 'properties.project_url', 'type': 'str'},
'icon_url': {'key': 'properties.icon_url', 'type': 'str'},
'license_url': {'key': 'properties.license_url', 'type': 'str'},
'feed_url': {'key': 'properties.feed_url', 'type': 'str'},
'authors': {'key': 'properties.authors', 'type': '[str]'},
'installer_command_line_params': {'key': 'properties.installer_command_line_params', 'type': 'str'},
'published_date_time': {'key': 'properties.published_date_time', 'type': 'iso-8601'},
'download_count': {'key': 'properties.download_count', 'type': 'int'},
'local_is_latest_version': {'key': 'properties.local_is_latest_version', 'type': 'bool'},
'local_path': {'key': 'properties.local_path', 'type': 'str'},
'installed_date_time': {'key': 'properties.installed_date_time', 'type': 'iso-8601'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'comment': {'key': 'properties.comment', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
extension_id: Optional[str] = None,
title: Optional[str] = None,
extension_type: Optional[Union[str, "SiteExtensionType"]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
version: Optional[str] = None,
extension_url: Optional[str] = None,
project_url: Optional[str] = None,
icon_url: Optional[str] = None,
license_url: Optional[str] = None,
feed_url: Optional[str] = None,
authors: Optional[List[str]] = None,
installer_command_line_params: Optional[str] = None,
published_date_time: Optional[datetime.datetime] = None,
download_count: Optional[int] = None,
local_is_latest_version: Optional[bool] = None,
local_path: Optional[str] = None,
installed_date_time: Optional[datetime.datetime] = None,
provisioning_state: Optional[str] = None,
comment: Optional[str] = None,
**kwargs
):
super(SiteExtensionInfo, self).__init__(kind=kind, **kwargs)
self.extension_id = extension_id
self.title = title
self.extension_type = extension_type
self.summary = summary
self.description = description
self.version = version
self.extension_url = extension_url
self.project_url = project_url
self.icon_url = icon_url
self.license_url = license_url
self.feed_url = feed_url
self.authors = authors
self.installer_command_line_params = installer_command_line_params
self.published_date_time = published_date_time
self.download_count = download_count
self.local_is_latest_version = local_is_latest_version
self.local_path = local_path
self.installed_date_time = installed_date_time
self.provisioning_state = provisioning_state
self.comment = comment
class SiteExtensionInfoCollection(msrest.serialization.Model):
"""Collection of Kudu site extension information elements.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.SiteExtensionInfo]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SiteExtensionInfo]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["SiteExtensionInfo"],
**kwargs
):
super(SiteExtensionInfoCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class SiteLimits(msrest.serialization.Model):
"""Metric limits set on an app.
:param max_percentage_cpu: Maximum allowed CPU usage percentage.
:type max_percentage_cpu: float
:param max_memory_in_mb: Maximum allowed memory usage in MB.
:type max_memory_in_mb: long
:param max_disk_size_in_mb: Maximum allowed disk size usage in MB.
:type max_disk_size_in_mb: long
"""
_attribute_map = {
'max_percentage_cpu': {'key': 'maxPercentageCpu', 'type': 'float'},
'max_memory_in_mb': {'key': 'maxMemoryInMb', 'type': 'long'},
'max_disk_size_in_mb': {'key': 'maxDiskSizeInMb', 'type': 'long'},
}
def __init__(
self,
*,
max_percentage_cpu: Optional[float] = None,
max_memory_in_mb: Optional[int] = None,
max_disk_size_in_mb: Optional[int] = None,
**kwargs
):
super(SiteLimits, self).__init__(**kwargs)
self.max_percentage_cpu = max_percentage_cpu
self.max_memory_in_mb = max_memory_in_mb
self.max_disk_size_in_mb = max_disk_size_in_mb
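# Illustrative sketch (values are placeholders): capping an app at 80% CPU, 1 GB of
# memory, and 2 GB of disk via SiteLimits, which can then be passed as the `limits`
# argument of a site configuration.
#
#     limits = SiteLimits(
#         max_percentage_cpu=80.0,
#         max_memory_in_mb=1024,
#         max_disk_size_in_mb=2048,
#     )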
class SiteLogsConfig(ProxyOnlyResource):
"""Configuration of App Service site logs.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param application_logs: Application logs configuration.
:type application_logs: ~azure.mgmt.web.v2020_06_01.models.ApplicationLogsConfig
:param http_logs: HTTP logs configuration.
:type http_logs: ~azure.mgmt.web.v2020_06_01.models.HttpLogsConfig
:param failed_requests_tracing: Failed requests tracing configuration.
:type failed_requests_tracing: ~azure.mgmt.web.v2020_06_01.models.EnabledConfig
:param detailed_error_messages: Detailed error messages configuration.
:type detailed_error_messages: ~azure.mgmt.web.v2020_06_01.models.EnabledConfig
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'application_logs': {'key': 'properties.applicationLogs', 'type': 'ApplicationLogsConfig'},
'http_logs': {'key': 'properties.httpLogs', 'type': 'HttpLogsConfig'},
'failed_requests_tracing': {'key': 'properties.failedRequestsTracing', 'type': 'EnabledConfig'},
'detailed_error_messages': {'key': 'properties.detailedErrorMessages', 'type': 'EnabledConfig'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
application_logs: Optional["ApplicationLogsConfig"] = None,
http_logs: Optional["HttpLogsConfig"] = None,
failed_requests_tracing: Optional["EnabledConfig"] = None,
detailed_error_messages: Optional["EnabledConfig"] = None,
**kwargs
):
super(SiteLogsConfig, self).__init__(kind=kind, **kwargs)
self.application_logs = application_logs
self.http_logs = http_logs
self.failed_requests_tracing = failed_requests_tracing
self.detailed_error_messages = detailed_error_messages
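# Illustrative sketch: enabling detailed error messages and failed-request tracing.
# Assumes EnabledConfig (defined elsewhere in this module) accepts an `enabled`
# keyword argument; the values are placeholders.
#
#     logs_config = SiteLogsConfig(
#         detailed_error_messages=EnabledConfig(enabled=True),
#         failed_requests_tracing=EnabledConfig(enabled=True),
#     )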
class SiteMachineKey(msrest.serialization.Model):
"""MachineKey of an app.
:param validation: MachineKey validation.
:type validation: str
:param validation_key: Validation key.
:type validation_key: str
:param decryption: Algorithm used for decryption.
:type decryption: str
:param decryption_key: Decryption key.
:type decryption_key: str
"""
_attribute_map = {
'validation': {'key': 'validation', 'type': 'str'},
'validation_key': {'key': 'validationKey', 'type': 'str'},
'decryption': {'key': 'decryption', 'type': 'str'},
'decryption_key': {'key': 'decryptionKey', 'type': 'str'},
}
def __init__(
self,
*,
validation: Optional[str] = None,
validation_key: Optional[str] = None,
decryption: Optional[str] = None,
decryption_key: Optional[str] = None,
**kwargs
):
super(SiteMachineKey, self).__init__(**kwargs)
self.validation = validation
self.validation_key = validation_key
self.decryption = decryption
self.decryption_key = decryption_key
class SitePatchResource(ProxyOnlyResource):
"""ARM resource for a site.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param identity: Managed service identity.
:type identity: ~azure.mgmt.web.v2020_06_01.models.ManagedServiceIdentity
:ivar state: Current state of the app.
:vartype state: str
:ivar host_names: Hostnames associated with the app.
:vartype host_names: list[str]
:ivar repository_site_name: Name of the repository site.
:vartype repository_site_name: str
:ivar usage_state: State indicating whether the app has exceeded its quota usage. Read-only.
Possible values include: "Normal", "Exceeded".
:vartype usage_state: str or ~azure.mgmt.web.v2020_06_01.models.UsageState
:param enabled: :code:`<code>true</code>` if the app is enabled; otherwise,
:code:`<code>false</code>`. Setting this value to false disables the app (takes the app
offline).
:type enabled: bool
:ivar enabled_host_names: Enabled hostnames for the app. Hostnames must be assigned (see
HostNames) AND enabled; otherwise, the app is not served on those hostnames.
:vartype enabled_host_names: list[str]
:ivar availability_state: Management information availability state for the app. Possible
values include: "Normal", "Limited", "DisasterRecoveryMode".
:vartype availability_state: str or ~azure.mgmt.web.v2020_06_01.models.SiteAvailabilityState
:param host_name_ssl_states: Hostname SSL states are used to manage the SSL bindings for app's
hostnames.
:type host_name_ssl_states: list[~azure.mgmt.web.v2020_06_01.models.HostNameSslState]
:param server_farm_id: Resource ID of the associated App Service plan, formatted as:
"/subscriptions/{subscriptionID}/resourceGroups/{groupName}/providers/Microsoft.Web/serverfarms/{appServicePlanName}".
:type server_farm_id: str
:param reserved: :code:`<code>true</code>` if reserved; otherwise, :code:`<code>false</code>`.
:type reserved: bool
:param is_xenon: Obsolete: Hyper-V sandbox.
:type is_xenon: bool
:param hyper_v: Hyper-V sandbox.
:type hyper_v: bool
:ivar last_modified_time_utc: Last time the app was modified, in UTC. Read-only.
:vartype last_modified_time_utc: ~datetime.datetime
:param site_config: Configuration of the app.
:type site_config: ~azure.mgmt.web.v2020_06_01.models.SiteConfig
:ivar traffic_manager_host_names: Azure Traffic Manager hostnames associated with the app.
Read-only.
:vartype traffic_manager_host_names: list[str]
:param scm_site_also_stopped: :code:`<code>true</code>` to stop SCM (KUDU) site when the app is
stopped; otherwise, :code:`<code>false</code>`. The default is :code:`<code>false</code>`.
:type scm_site_also_stopped: bool
:ivar target_swap_slot: Specifies which deployment slot this app will swap into. Read-only.
:vartype target_swap_slot: str
:param hosting_environment_profile: App Service Environment to use for the app.
:type hosting_environment_profile: ~azure.mgmt.web.v2020_06_01.models.HostingEnvironmentProfile
:param client_affinity_enabled: :code:`<code>true</code>` to enable client affinity;
:code:`<code>false</code>` to stop sending session affinity cookies, which route client
requests in the same session to the same instance. Default is :code:`<code>true</code>`.
:type client_affinity_enabled: bool
:param client_cert_enabled: :code:`<code>true</code>` to enable client certificate
authentication (TLS mutual authentication); otherwise, :code:`<code>false</code>`. Default is
:code:`<code>false</code>`.
:type client_cert_enabled: bool
:param client_cert_mode: This composes with the ClientCertEnabled setting.
* ClientCertEnabled: false means ClientCert is ignored.
* ClientCertEnabled: true and ClientCertMode: Required means ClientCert is required.
* ClientCertEnabled: true and ClientCertMode: Optional means ClientCert is optional or
accepted. Possible values include: "Required", "Optional".
:type client_cert_mode: str or ~azure.mgmt.web.v2020_06_01.models.ClientCertMode
:param client_cert_exclusion_paths: Comma-separated list of paths to exclude from client
certificate authentication.
:type client_cert_exclusion_paths: str
:param host_names_disabled: :code:`<code>true</code>` to disable the public hostnames of the
app; otherwise, :code:`<code>false</code>`.
If :code:`<code>true</code>`, the app is only accessible via the API Management process.
:type host_names_disabled: bool
:param custom_domain_verification_id: Unique identifier that verifies the custom domains
assigned to the app. Customers add this ID to a TXT record for verification.
:type custom_domain_verification_id: str
:ivar outbound_ip_addresses: List of IP addresses that the app uses for outbound connections
(e.g. database access). Includes VIPs from tenants that the site can be hosted with, given the
current settings. Read-only.
:vartype outbound_ip_addresses: str
:ivar possible_outbound_ip_addresses: List of IP addresses that the app uses for outbound
connections (e.g. database access). Includes VIPs from all tenants except dataComponent.
Read-only.
:vartype possible_outbound_ip_addresses: str
:param container_size: Size of the function container.
:type container_size: int
:param daily_memory_time_quota: Maximum allowed daily memory-time quota (applicable to dynamic
apps only).
:type daily_memory_time_quota: int
:ivar suspended_till: Time until which the app is suspended when the daily memory-time quota is exceeded.
:vartype suspended_till: ~datetime.datetime
:ivar max_number_of_workers: Maximum number of workers.
This only applies to the Functions container.
:vartype max_number_of_workers: int
:param cloning_info: If specified during app creation, the app is cloned from a source app.
:type cloning_info: ~azure.mgmt.web.v2020_06_01.models.CloningInfo
:ivar resource_group: Name of the resource group the app belongs to. Read-only.
:vartype resource_group: str
:ivar is_default_container: :code:`<code>true</code>` if the app is a default container;
otherwise, :code:`<code>false</code>`.
:vartype is_default_container: bool
:ivar default_host_name: Default hostname of the app. Read-only.
:vartype default_host_name: str
:ivar slot_swap_status: Status of the last deployment slot swap operation.
:vartype slot_swap_status: ~azure.mgmt.web.v2020_06_01.models.SlotSwapStatus
:param https_only: HttpsOnly: configures a web site to accept only HTTPS requests. Issues a
redirect for HTTP requests.
:type https_only: bool
:param redundancy_mode: Site redundancy mode. Possible values include: "None", "Manual",
"Failover", "ActiveActive", "GeoRedundant".
:type redundancy_mode: str or ~azure.mgmt.web.v2020_06_01.models.RedundancyMode
:ivar in_progress_operation_id: Specifies an operation id if this site has a pending operation.
:vartype in_progress_operation_id: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'state': {'readonly': True},
'host_names': {'readonly': True},
'repository_site_name': {'readonly': True},
'usage_state': {'readonly': True},
'enabled_host_names': {'readonly': True},
'availability_state': {'readonly': True},
'last_modified_time_utc': {'readonly': True},
'traffic_manager_host_names': {'readonly': True},
'target_swap_slot': {'readonly': True},
'outbound_ip_addresses': {'readonly': True},
'possible_outbound_ip_addresses': {'readonly': True},
'suspended_till': {'readonly': True},
'max_number_of_workers': {'readonly': True},
'resource_group': {'readonly': True},
'is_default_container': {'readonly': True},
'default_host_name': {'readonly': True},
'slot_swap_status': {'readonly': True},
'in_progress_operation_id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ManagedServiceIdentity'},
'state': {'key': 'properties.state', 'type': 'str'},
'host_names': {'key': 'properties.hostNames', 'type': '[str]'},
'repository_site_name': {'key': 'properties.repositorySiteName', 'type': 'str'},
'usage_state': {'key': 'properties.usageState', 'type': 'str'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
'enabled_host_names': {'key': 'properties.enabledHostNames', 'type': '[str]'},
'availability_state': {'key': 'properties.availabilityState', 'type': 'str'},
'host_name_ssl_states': {'key': 'properties.hostNameSslStates', 'type': '[HostNameSslState]'},
'server_farm_id': {'key': 'properties.serverFarmId', 'type': 'str'},
'reserved': {'key': 'properties.reserved', 'type': 'bool'},
'is_xenon': {'key': 'properties.isXenon', 'type': 'bool'},
'hyper_v': {'key': 'properties.hyperV', 'type': 'bool'},
'last_modified_time_utc': {'key': 'properties.lastModifiedTimeUtc', 'type': 'iso-8601'},
'site_config': {'key': 'properties.siteConfig', 'type': 'SiteConfig'},
'traffic_manager_host_names': {'key': 'properties.trafficManagerHostNames', 'type': '[str]'},
'scm_site_also_stopped': {'key': 'properties.scmSiteAlsoStopped', 'type': 'bool'},
'target_swap_slot': {'key': 'properties.targetSwapSlot', 'type': 'str'},
'hosting_environment_profile': {'key': 'properties.hostingEnvironmentProfile', 'type': 'HostingEnvironmentProfile'},
'client_affinity_enabled': {'key': 'properties.clientAffinityEnabled', 'type': 'bool'},
'client_cert_enabled': {'key': 'properties.clientCertEnabled', 'type': 'bool'},
'client_cert_mode': {'key': 'properties.clientCertMode', 'type': 'str'},
'client_cert_exclusion_paths': {'key': 'properties.clientCertExclusionPaths', 'type': 'str'},
'host_names_disabled': {'key': 'properties.hostNamesDisabled', 'type': 'bool'},
'custom_domain_verification_id': {'key': 'properties.customDomainVerificationId', 'type': 'str'},
'outbound_ip_addresses': {'key': 'properties.outboundIpAddresses', 'type': 'str'},
'possible_outbound_ip_addresses': {'key': 'properties.possibleOutboundIpAddresses', 'type': 'str'},
'container_size': {'key': 'properties.containerSize', 'type': 'int'},
'daily_memory_time_quota': {'key': 'properties.dailyMemoryTimeQuota', 'type': 'int'},
'suspended_till': {'key': 'properties.suspendedTill', 'type': 'iso-8601'},
'max_number_of_workers': {'key': 'properties.maxNumberOfWorkers', 'type': 'int'},
'cloning_info': {'key': 'properties.cloningInfo', 'type': 'CloningInfo'},
'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'},
'is_default_container': {'key': 'properties.isDefaultContainer', 'type': 'bool'},
'default_host_name': {'key': 'properties.defaultHostName', 'type': 'str'},
'slot_swap_status': {'key': 'properties.slotSwapStatus', 'type': 'SlotSwapStatus'},
'https_only': {'key': 'properties.httpsOnly', 'type': 'bool'},
'redundancy_mode': {'key': 'properties.redundancyMode', 'type': 'str'},
'in_progress_operation_id': {'key': 'properties.inProgressOperationId', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
identity: Optional["ManagedServiceIdentity"] = None,
enabled: Optional[bool] = None,
host_name_ssl_states: Optional[List["HostNameSslState"]] = None,
server_farm_id: Optional[str] = None,
reserved: Optional[bool] = False,
is_xenon: Optional[bool] = False,
hyper_v: Optional[bool] = False,
site_config: Optional["SiteConfig"] = None,
scm_site_also_stopped: Optional[bool] = False,
hosting_environment_profile: Optional["HostingEnvironmentProfile"] = None,
client_affinity_enabled: Optional[bool] = None,
client_cert_enabled: Optional[bool] = None,
client_cert_mode: Optional[Union[str, "ClientCertMode"]] = None,
client_cert_exclusion_paths: Optional[str] = None,
host_names_disabled: Optional[bool] = None,
custom_domain_verification_id: Optional[str] = None,
container_size: Optional[int] = None,
daily_memory_time_quota: Optional[int] = None,
cloning_info: Optional["CloningInfo"] = None,
https_only: Optional[bool] = None,
redundancy_mode: Optional[Union[str, "RedundancyMode"]] = None,
**kwargs
):
super(SitePatchResource, self).__init__(kind=kind, **kwargs)
self.identity = identity
self.state = None
self.host_names = None
self.repository_site_name = None
self.usage_state = None
self.enabled = enabled
self.enabled_host_names = None
self.availability_state = None
self.host_name_ssl_states = host_name_ssl_states
self.server_farm_id = server_farm_id
self.reserved = reserved
self.is_xenon = is_xenon
self.hyper_v = hyper_v
self.last_modified_time_utc = None
self.site_config = site_config
self.traffic_manager_host_names = None
self.scm_site_also_stopped = scm_site_also_stopped
self.target_swap_slot = None
self.hosting_environment_profile = hosting_environment_profile
self.client_affinity_enabled = client_affinity_enabled
self.client_cert_enabled = client_cert_enabled
self.client_cert_mode = client_cert_mode
self.client_cert_exclusion_paths = client_cert_exclusion_paths
self.host_names_disabled = host_names_disabled
self.custom_domain_verification_id = custom_domain_verification_id
self.outbound_ip_addresses = None
self.possible_outbound_ip_addresses = None
self.container_size = container_size
self.daily_memory_time_quota = daily_memory_time_quota
self.suspended_till = None
self.max_number_of_workers = None
self.cloning_info = cloning_info
self.resource_group = None
self.is_default_container = None
self.default_host_name = None
self.slot_swap_status = None
self.https_only = https_only
self.redundancy_mode = redundancy_mode
self.in_progress_operation_id = None
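# Illustrative sketch: a minimal SitePatchResource body that enforces HTTPS and
# requires client certificates. "Required" is one of the ClientCertMode values listed
# in the docstring above; the other values are placeholders.
#
#     patch = SitePatchResource(
#         https_only=True,
#         client_cert_enabled=True,
#         client_cert_mode="Required",
#     )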
class SitePhpErrorLogFlag(ProxyOnlyResource):
"""Used for getting PHP error logging flag.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param local_log_errors: Local log_errors setting.
:type local_log_errors: str
:param master_log_errors: Master log_errors setting.
:type master_log_errors: str
:param local_log_errors_max_length: Local log_errors_max_len setting.
:type local_log_errors_max_length: str
:param master_log_errors_max_length: Master log_errors_max_len setting.
:type master_log_errors_max_length: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'local_log_errors': {'key': 'properties.localLogErrors', 'type': 'str'},
'master_log_errors': {'key': 'properties.masterLogErrors', 'type': 'str'},
'local_log_errors_max_length': {'key': 'properties.localLogErrorsMaxLength', 'type': 'str'},
'master_log_errors_max_length': {'key': 'properties.masterLogErrorsMaxLength', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
local_log_errors: Optional[str] = None,
master_log_errors: Optional[str] = None,
local_log_errors_max_length: Optional[str] = None,
master_log_errors_max_length: Optional[str] = None,
**kwargs
):
super(SitePhpErrorLogFlag, self).__init__(kind=kind, **kwargs)
self.local_log_errors = local_log_errors
self.master_log_errors = master_log_errors
self.local_log_errors_max_length = local_log_errors_max_length
self.master_log_errors_max_length = master_log_errors_max_length
class SiteSeal(msrest.serialization.Model):
"""Site seal.
All required parameters must be populated in order to send to Azure.
:param html: Required. HTML snippet.
:type html: str
"""
_validation = {
'html': {'required': True},
}
_attribute_map = {
'html': {'key': 'html', 'type': 'str'},
}
def __init__(
self,
*,
html: str,
**kwargs
):
super(SiteSeal, self).__init__(**kwargs)
self.html = html
class SiteSealRequest(msrest.serialization.Model):
"""Site seal request.
:param light_theme: If :code:`<code>true</code>` use the light color theme for site seal;
otherwise, use the default color theme.
:type light_theme: bool
:param locale: Locale of site seal.
:type locale: str
"""
_attribute_map = {
'light_theme': {'key': 'lightTheme', 'type': 'bool'},
'locale': {'key': 'locale', 'type': 'str'},
}
def __init__(
self,
*,
light_theme: Optional[bool] = None,
locale: Optional[str] = None,
**kwargs
):
super(SiteSealRequest, self).__init__(**kwargs)
self.light_theme = light_theme
self.locale = locale
class SiteSourceControl(ProxyOnlyResource):
"""Source control configuration for an app.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param repo_url: Repository or source control URL.
:type repo_url: str
:param branch: Name of branch to use for deployment.
:type branch: str
:param is_manual_integration: :code:`<code>true</code>` to limit to manual integration;
:code:`<code>false</code>` to enable continuous integration (which configures webhooks into
online repos like GitHub).
:type is_manual_integration: bool
:param is_git_hub_action: :code:`<code>true</code>` if this is deployed via GitHub action.
:type is_git_hub_action: bool
:param deployment_rollback_enabled: :code:`<code>true</code>` to enable deployment rollback;
otherwise, :code:`<code>false</code>`.
:type deployment_rollback_enabled: bool
:param is_mercurial: :code:`<code>true</code>` for a Mercurial repository;
:code:`<code>false</code>` for a Git repository.
:type is_mercurial: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'repo_url': {'key': 'properties.repoUrl', 'type': 'str'},
'branch': {'key': 'properties.branch', 'type': 'str'},
'is_manual_integration': {'key': 'properties.isManualIntegration', 'type': 'bool'},
'is_git_hub_action': {'key': 'properties.isGitHubAction', 'type': 'bool'},
'deployment_rollback_enabled': {'key': 'properties.deploymentRollbackEnabled', 'type': 'bool'},
'is_mercurial': {'key': 'properties.isMercurial', 'type': 'bool'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
repo_url: Optional[str] = None,
branch: Optional[str] = None,
is_manual_integration: Optional[bool] = None,
is_git_hub_action: Optional[bool] = None,
deployment_rollback_enabled: Optional[bool] = None,
is_mercurial: Optional[bool] = None,
**kwargs
):
super(SiteSourceControl, self).__init__(kind=kind, **kwargs)
self.repo_url = repo_url
self.branch = branch
self.is_manual_integration = is_manual_integration
self.is_git_hub_action = is_git_hub_action
self.deployment_rollback_enabled = deployment_rollback_enabled
self.is_mercurial = is_mercurial
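# Illustrative sketch: wiring an app to a Git repository with continuous integration
# (is_manual_integration=False). The repository URL and branch are placeholders.
#
#     source_control = SiteSourceControl(
#         repo_url="https://github.com/<org>/<repo>",
#         branch="main",
#         is_manual_integration=False,
#         deployment_rollback_enabled=False,
#     )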
class SkuCapacity(msrest.serialization.Model):
"""Description of the App Service plan scale options.
:param minimum: Minimum number of workers for this App Service plan SKU.
:type minimum: int
:param maximum: Maximum number of workers for this App Service plan SKU.
:type maximum: int
:param default: Default number of workers for this App Service plan SKU.
:type default: int
:param scale_type: Available scale configurations for an App Service plan.
:type scale_type: str
"""
_attribute_map = {
'minimum': {'key': 'minimum', 'type': 'int'},
'maximum': {'key': 'maximum', 'type': 'int'},
'default': {'key': 'default', 'type': 'int'},
'scale_type': {'key': 'scaleType', 'type': 'str'},
}
def __init__(
self,
*,
minimum: Optional[int] = None,
maximum: Optional[int] = None,
default: Optional[int] = None,
scale_type: Optional[str] = None,
**kwargs
):
super(SkuCapacity, self).__init__(**kwargs)
self.minimum = minimum
self.maximum = maximum
self.default = default
self.scale_type = scale_type
class SkuDescription(msrest.serialization.Model):
"""Description of a SKU for a scalable resource.
:param name: Name of the resource SKU.
:type name: str
:param tier: Service tier of the resource SKU.
:type tier: str
:param size: Size specifier of the resource SKU.
:type size: str
:param family: Family code of the resource SKU.
:type family: str
:param capacity: Current number of instances assigned to the resource.
:type capacity: int
:param sku_capacity: Min, max, and default scale values of the SKU.
:type sku_capacity: ~azure.mgmt.web.v2020_06_01.models.SkuCapacity
:param locations: Locations of the SKU.
:type locations: list[str]
:param capabilities: Capabilities of the SKU, e.g., whether Traffic Manager is enabled.
:type capabilities: list[~azure.mgmt.web.v2020_06_01.models.Capability]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
'size': {'key': 'size', 'type': 'str'},
'family': {'key': 'family', 'type': 'str'},
'capacity': {'key': 'capacity', 'type': 'int'},
'sku_capacity': {'key': 'skuCapacity', 'type': 'SkuCapacity'},
'locations': {'key': 'locations', 'type': '[str]'},
'capabilities': {'key': 'capabilities', 'type': '[Capability]'},
}
def __init__(
self,
*,
name: Optional[str] = None,
tier: Optional[str] = None,
size: Optional[str] = None,
family: Optional[str] = None,
capacity: Optional[int] = None,
sku_capacity: Optional["SkuCapacity"] = None,
locations: Optional[List[str]] = None,
capabilities: Optional[List["Capability"]] = None,
**kwargs
):
super(SkuDescription, self).__init__(**kwargs)
self.name = name
self.tier = tier
self.size = size
self.family = family
self.capacity = capacity
self.sku_capacity = sku_capacity
self.locations = locations
self.capabilities = capabilities
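# Illustrative sketch: describing a SKU together with its scale range via SkuCapacity.
# All string values below (name, tier, scale_type) are placeholders; consult the App
# Service SKU documentation for real values.
#
#     sku = SkuDescription(
#         name="P1v2",
#         tier="PremiumV2",
#         capacity=1,
#         sku_capacity=SkuCapacity(minimum=1, maximum=10, default=1, scale_type="Manual"),
#     )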
class SkuInfo(msrest.serialization.Model):
"""SKU discovery information.
:param resource_type: Resource type that this SKU applies to.
:type resource_type: str
:param sku: Name and tier of the SKU.
:type sku: ~azure.mgmt.web.v2020_06_01.models.SkuDescription
:param capacity: Min, max, and default scale values of the SKU.
:type capacity: ~azure.mgmt.web.v2020_06_01.models.SkuCapacity
"""
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'SkuDescription'},
'capacity': {'key': 'capacity', 'type': 'SkuCapacity'},
}
def __init__(
self,
*,
resource_type: Optional[str] = None,
sku: Optional["SkuDescription"] = None,
capacity: Optional["SkuCapacity"] = None,
**kwargs
):
super(SkuInfo, self).__init__(**kwargs)
self.resource_type = resource_type
self.sku = sku
self.capacity = capacity
class SkuInfoCollection(msrest.serialization.Model):
"""Collection of SKU information.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.SkuInfo]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SkuInfo]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["SkuInfo"],
**kwargs
):
super(SkuInfoCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class SkuInfos(msrest.serialization.Model):
"""Collection of SKU information.
:param resource_type: Resource type that this SKU applies to.
:type resource_type: str
:param skus: List of SKUs the subscription is able to use.
:type skus: list[~azure.mgmt.web.v2020_06_01.models.GlobalCsmSkuDescription]
"""
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'skus': {'key': 'skus', 'type': '[GlobalCsmSkuDescription]'},
}
def __init__(
self,
*,
resource_type: Optional[str] = None,
skus: Optional[List["GlobalCsmSkuDescription"]] = None,
**kwargs
):
super(SkuInfos, self).__init__(**kwargs)
self.resource_type = resource_type
self.skus = skus
class SlotConfigNamesResource(ProxyOnlyResource):
"""Slot Config names azure resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param connection_string_names: List of connection string names.
:type connection_string_names: list[str]
:param app_setting_names: List of application settings names.
:type app_setting_names: list[str]
:param azure_storage_config_names: List of external Azure storage account identifiers.
:type azure_storage_config_names: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'connection_string_names': {'key': 'properties.connectionStringNames', 'type': '[str]'},
'app_setting_names': {'key': 'properties.appSettingNames', 'type': '[str]'},
'azure_storage_config_names': {'key': 'properties.azureStorageConfigNames', 'type': '[str]'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
connection_string_names: Optional[List[str]] = None,
app_setting_names: Optional[List[str]] = None,
azure_storage_config_names: Optional[List[str]] = None,
**kwargs
):
super(SlotConfigNamesResource, self).__init__(kind=kind, **kwargs)
self.connection_string_names = connection_string_names
self.app_setting_names = app_setting_names
self.azure_storage_config_names = azure_storage_config_names
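# Illustrative sketch: marking settings as "sticky" to a slot so they do not move
# during a swap. The setting names are placeholders for this example.
#
#     sticky_settings = SlotConfigNamesResource(
#         app_setting_names=["ENVIRONMENT_NAME"],
#         connection_string_names=["PrimaryDatabase"],
#     )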
class SlotDifference(ProxyOnlyResource):
"""A setting difference between two deployment slots of an app.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar level: Level of the difference: Information, Warning or Error.
:vartype level: str
:ivar setting_type: The type of the setting: General, AppSetting or ConnectionString.
:vartype setting_type: str
:ivar diff_rule: Rule that describes how to process the setting difference during a slot swap.
:vartype diff_rule: str
:ivar setting_name: Name of the setting.
:vartype setting_name: str
:ivar value_in_current_slot: Value of the setting in the current slot.
:vartype value_in_current_slot: str
:ivar value_in_target_slot: Value of the setting in the target slot.
:vartype value_in_target_slot: str
:ivar description: Description of the setting difference.
:vartype description: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'level': {'readonly': True},
'setting_type': {'readonly': True},
'diff_rule': {'readonly': True},
'setting_name': {'readonly': True},
'value_in_current_slot': {'readonly': True},
'value_in_target_slot': {'readonly': True},
'description': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'level': {'key': 'properties.level', 'type': 'str'},
'setting_type': {'key': 'properties.settingType', 'type': 'str'},
'diff_rule': {'key': 'properties.diffRule', 'type': 'str'},
'setting_name': {'key': 'properties.settingName', 'type': 'str'},
'value_in_current_slot': {'key': 'properties.valueInCurrentSlot', 'type': 'str'},
'value_in_target_slot': {'key': 'properties.valueInTargetSlot', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(SlotDifference, self).__init__(kind=kind, **kwargs)
self.level = None
self.setting_type = None
self.diff_rule = None
self.setting_name = None
self.value_in_current_slot = None
self.value_in_target_slot = None
self.description = None
class SlotDifferenceCollection(msrest.serialization.Model):
"""Collection of slot differences.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.SlotDifference]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SlotDifference]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["SlotDifference"],
**kwargs
):
super(SlotDifferenceCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class SlotSwapStatus(msrest.serialization.Model):
"""The status of the last successful slot swap operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar timestamp_utc: The time the last successful slot swap completed.
:vartype timestamp_utc: ~datetime.datetime
:ivar source_slot_name: The source slot of the last swap operation.
:vartype source_slot_name: str
:ivar destination_slot_name: The destination slot of the last swap operation.
:vartype destination_slot_name: str
"""
_validation = {
'timestamp_utc': {'readonly': True},
'source_slot_name': {'readonly': True},
'destination_slot_name': {'readonly': True},
}
_attribute_map = {
'timestamp_utc': {'key': 'timestampUtc', 'type': 'iso-8601'},
'source_slot_name': {'key': 'sourceSlotName', 'type': 'str'},
'destination_slot_name': {'key': 'destinationSlotName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SlotSwapStatus, self).__init__(**kwargs)
self.timestamp_utc = None
self.source_slot_name = None
self.destination_slot_name = None
class SlowRequestsBasedTrigger(msrest.serialization.Model):
"""Trigger based on request execution time.
:param time_taken: Time taken.
:type time_taken: str
:param count: Request Count.
:type count: int
:param time_interval: Time interval.
:type time_interval: str
"""
_attribute_map = {
'time_taken': {'key': 'timeTaken', 'type': 'str'},
'count': {'key': 'count', 'type': 'int'},
'time_interval': {'key': 'timeInterval', 'type': 'str'},
}
def __init__(
self,
*,
time_taken: Optional[str] = None,
count: Optional[int] = None,
time_interval: Optional[str] = None,
**kwargs
):
super(SlowRequestsBasedTrigger, self).__init__(**kwargs)
self.time_taken = time_taken
self.count = count
self.time_interval = time_interval
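# Illustrative sketch: an auto-heal trigger that fires when 10 requests each take
# longer than 30 seconds within a 5-minute window. The duration strings are assumed
# to use an hh:mm:ss format; treat them as placeholders.
#
#     slow_requests = SlowRequestsBasedTrigger(
#         time_taken="00:00:30",
#         count=10,
#         time_interval="00:05:00",
#     )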
class Snapshot(ProxyOnlyResource):
"""A snapshot of an app.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar time: The time the snapshot was taken.
:vartype time: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'time': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'time': {'key': 'properties.time', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(Snapshot, self).__init__(kind=kind, **kwargs)
self.time = None
class SnapshotCollection(msrest.serialization.Model):
"""Collection of snapshots which can be used to revert an app to a previous time.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.Snapshot]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Snapshot]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["Snapshot"],
**kwargs
):
super(SnapshotCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class SnapshotRecoverySource(msrest.serialization.Model):
"""Specifies the web app that snapshot contents will be retrieved from.
:param location: Geographical location of the source web app, e.g. SouthEastAsia,
SouthCentralUS.
:type location: str
:param id: ARM resource ID of the source app.
/subscriptions/{subId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}
for production slots and
/subscriptions/{subId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/slots/{slotName}
for other slots.
:type id: str
"""
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
*,
location: Optional[str] = None,
id: Optional[str] = None,
**kwargs
):
super(SnapshotRecoverySource, self).__init__(**kwargs)
self.location = location
self.id = id
class SnapshotRestoreRequest(ProxyOnlyResource):
"""Details about app recovery operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param snapshot_time: Point in time to which the app should be restored, formatted as a
DateTime string.
:type snapshot_time: str
:param recovery_source: Optional. Specifies the web app that snapshot contents will be
retrieved from.
If empty, the targeted web app will be used as the source.
:type recovery_source: ~azure.mgmt.web.v2020_06_01.models.SnapshotRecoverySource
:param overwrite: If :code:`<code>true</code>` the restore operation can overwrite source app;
otherwise, :code:`<code>false</code>`.
:type overwrite: bool
:param recover_configuration: If true, site configuration, in addition to content, will be
reverted.
:type recover_configuration: bool
:param ignore_conflicting_host_names: If true, custom hostname conflicts will be ignored when
recovering to a target web app.
This setting is only necessary when RecoverConfiguration is enabled.
:type ignore_conflicting_host_names: bool
:param use_dr_secondary: If true, the snapshot is retrieved from DRSecondary endpoint.
:type use_dr_secondary: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'snapshot_time': {'key': 'properties.snapshotTime', 'type': 'str'},
'recovery_source': {'key': 'properties.recoverySource', 'type': 'SnapshotRecoverySource'},
'overwrite': {'key': 'properties.overwrite', 'type': 'bool'},
'recover_configuration': {'key': 'properties.recoverConfiguration', 'type': 'bool'},
'ignore_conflicting_host_names': {'key': 'properties.ignoreConflictingHostNames', 'type': 'bool'},
'use_dr_secondary': {'key': 'properties.useDRSecondary', 'type': 'bool'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
snapshot_time: Optional[str] = None,
recovery_source: Optional["SnapshotRecoverySource"] = None,
overwrite: Optional[bool] = None,
recover_configuration: Optional[bool] = None,
ignore_conflicting_host_names: Optional[bool] = None,
use_dr_secondary: Optional[bool] = None,
**kwargs
):
super(SnapshotRestoreRequest, self).__init__(kind=kind, **kwargs)
self.snapshot_time = snapshot_time
self.recovery_source = recovery_source
self.overwrite = overwrite
self.recover_configuration = recover_configuration
self.ignore_conflicting_host_names = ignore_conflicting_host_names
self.use_dr_secondary = use_dr_secondary
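# Illustrative sketch: restoring content (but not configuration) from a snapshot of
# another app. The snapshot time and the ARM resource ID are placeholders.
#
#     restore_request = SnapshotRestoreRequest(
#         snapshot_time="2020-06-01T00:00:00Z",
#         overwrite=False,
#         recover_configuration=False,
#         recovery_source=SnapshotRecoverySource(
#             location="WestUS",
#             id="/subscriptions/<subId>/resourceGroups/<rg>/providers/Microsoft.Web/sites/<siteName>",
#         ),
#     )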
class Solution(msrest.serialization.Model):
"""Class Representing Solution for problems detected.
:param id: Solution Id.
:type id: float
:param display_name: Display Name of the solution.
:type display_name: str
:param order: Order of the solution.
:type order: float
:param description: Description of the solution.
:type description: str
:param type: Type of Solution. Possible values include: "QuickSolution", "DeepInvestigation",
"BestPractices".
:type type: str or ~azure.mgmt.web.v2020_06_01.models.SolutionType
:param data: Solution Data.
:type data: list[list[~azure.mgmt.web.v2020_06_01.models.NameValuePair]]
:param metadata: Solution Metadata.
:type metadata: list[list[~azure.mgmt.web.v2020_06_01.models.NameValuePair]]
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'float'},
'display_name': {'key': 'displayName', 'type': 'str'},
'order': {'key': 'order', 'type': 'float'},
'description': {'key': 'description', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'data': {'key': 'data', 'type': '[[NameValuePair]]'},
'metadata': {'key': 'metadata', 'type': '[[NameValuePair]]'},
}
def __init__(
self,
*,
id: Optional[float] = None,
display_name: Optional[str] = None,
order: Optional[float] = None,
description: Optional[str] = None,
type: Optional[Union[str, "SolutionType"]] = None,
data: Optional[List[List["NameValuePair"]]] = None,
metadata: Optional[List[List["NameValuePair"]]] = None,
**kwargs
):
super(Solution, self).__init__(**kwargs)
self.id = id
self.display_name = display_name
self.order = order
self.description = description
self.type = type
self.data = data
self.metadata = metadata
class SourceControl(ProxyOnlyResource):
"""The source control OAuth token.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param token: OAuth access token.
:type token: str
:param token_secret: OAuth access token secret.
:type token_secret: str
:param refresh_token: OAuth refresh token.
:type refresh_token: str
:param expiration_time: OAuth token expiration.
:type expiration_time: ~datetime.datetime
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'token': {'key': 'properties.token', 'type': 'str'},
'token_secret': {'key': 'properties.tokenSecret', 'type': 'str'},
'refresh_token': {'key': 'properties.refreshToken', 'type': 'str'},
'expiration_time': {'key': 'properties.expirationTime', 'type': 'iso-8601'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
token: Optional[str] = None,
token_secret: Optional[str] = None,
refresh_token: Optional[str] = None,
expiration_time: Optional[datetime.datetime] = None,
**kwargs
):
super(SourceControl, self).__init__(kind=kind, **kwargs)
self.token = token
self.token_secret = token_secret
self.refresh_token = refresh_token
self.expiration_time = expiration_time
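# Illustrative sketch: updating the stored OAuth token for a source control provider.
# The token values and expiration time are placeholders.
#
#     source_control_token = SourceControl(
#         token="<oauth-access-token>",
#         refresh_token="<oauth-refresh-token>",
#         expiration_time=datetime.datetime(2020, 12, 31, tzinfo=datetime.timezone.utc),
#     )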
class SourceControlCollection(msrest.serialization.Model):
"""Collection of source controls.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.SourceControl]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SourceControl]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["SourceControl"],
**kwargs
):
super(SourceControlCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class StackMajorVersion(msrest.serialization.Model):
"""Application stack major version.
:param display_version: Application stack major version (display only).
:type display_version: str
:param runtime_version: Application stack major version (runtime only).
:type runtime_version: str
:param is_default: :code:`<code>true</code>` if this is the default major version; otherwise,
:code:`<code>false</code>`.
:type is_default: bool
:param minor_versions: Minor versions associated with the major version.
:type minor_versions: list[~azure.mgmt.web.v2020_06_01.models.StackMinorVersion]
:param application_insights: :code:`<code>true</code>` if this supports Application Insights;
otherwise, :code:`<code>false</code>`.
:type application_insights: bool
:param is_preview: :code:`<code>true</code>` if this stack is in preview; otherwise,
:code:`<code>false</code>`.
:type is_preview: bool
:param is_deprecated: :code:`<code>true</code>` if this stack has been deprecated; otherwise,
:code:`<code>false</code>`.
:type is_deprecated: bool
:param is_hidden: :code:`<code>true</code>` if this stack should be hidden for new customers on
the portal; otherwise, :code:`<code>false</code>`.
:type is_hidden: bool
"""
_attribute_map = {
'display_version': {'key': 'displayVersion', 'type': 'str'},
'runtime_version': {'key': 'runtimeVersion', 'type': 'str'},
'is_default': {'key': 'isDefault', 'type': 'bool'},
'minor_versions': {'key': 'minorVersions', 'type': '[StackMinorVersion]'},
'application_insights': {'key': 'applicationInsights', 'type': 'bool'},
'is_preview': {'key': 'isPreview', 'type': 'bool'},
'is_deprecated': {'key': 'isDeprecated', 'type': 'bool'},
'is_hidden': {'key': 'isHidden', 'type': 'bool'},
}
def __init__(
self,
*,
display_version: Optional[str] = None,
runtime_version: Optional[str] = None,
is_default: Optional[bool] = None,
minor_versions: Optional[List["StackMinorVersion"]] = None,
application_insights: Optional[bool] = None,
is_preview: Optional[bool] = None,
is_deprecated: Optional[bool] = None,
is_hidden: Optional[bool] = None,
**kwargs
):
super(StackMajorVersion, self).__init__(**kwargs)
self.display_version = display_version
self.runtime_version = runtime_version
self.is_default = is_default
self.minor_versions = minor_versions
self.application_insights = application_insights
self.is_preview = is_preview
self.is_deprecated = is_deprecated
self.is_hidden = is_hidden
class StackMinorVersion(msrest.serialization.Model):
"""Application stack minor version.
:param display_version: Application stack minor version (display only).
:type display_version: str
:param runtime_version: Application stack minor version (runtime only).
:type runtime_version: str
:param is_default: :code:`<code>true</code>` if this is the default minor version; otherwise,
:code:`<code>false</code>`.
:type is_default: bool
:param is_remote_debugging_enabled: :code:`<code>true</code>` if this supports Remote
Debugging, otherwise :code:`<code>false</code>`.
:type is_remote_debugging_enabled: bool
"""
_attribute_map = {
'display_version': {'key': 'displayVersion', 'type': 'str'},
'runtime_version': {'key': 'runtimeVersion', 'type': 'str'},
'is_default': {'key': 'isDefault', 'type': 'bool'},
'is_remote_debugging_enabled': {'key': 'isRemoteDebuggingEnabled', 'type': 'bool'},
}
def __init__(
self,
*,
display_version: Optional[str] = None,
runtime_version: Optional[str] = None,
is_default: Optional[bool] = None,
is_remote_debugging_enabled: Optional[bool] = None,
**kwargs
):
super(StackMinorVersion, self).__init__(**kwargs)
self.display_version = display_version
self.runtime_version = runtime_version
self.is_default = is_default
self.is_remote_debugging_enabled = is_remote_debugging_enabled
class StampCapacity(msrest.serialization.Model):
"""Stamp capacity information.
:param name: Name of the stamp.
:type name: str
:param available_capacity: Available capacity (number of machines, bytes of storage, etc.).
:type available_capacity: long
:param total_capacity: Total capacity (number of machines, bytes of storage, etc.).
:type total_capacity: long
:param unit: Name of the unit.
:type unit: str
:param compute_mode: Shared/dedicated workers. Possible values include: "Shared", "Dedicated",
"Dynamic".
:type compute_mode: str or ~azure.mgmt.web.v2020_06_01.models.ComputeModeOptions
:param worker_size: Size of the machines. Possible values include: "Small", "Medium", "Large",
"D1", "D2", "D3", "NestedSmall", "Default".
:type worker_size: str or ~azure.mgmt.web.v2020_06_01.models.WorkerSizeOptions
:param worker_size_id: Size ID of machines:
0 - Small
1 - Medium
2 - Large.
:type worker_size_id: int
:param exclude_from_capacity_allocation: If :code:`<code>true</code>`, it includes basic apps.
Basic apps are not used for capacity allocation.
:type exclude_from_capacity_allocation: bool
:param is_applicable_for_all_compute_modes: :code:`<code>true</code>` if capacity is applicable
for all apps; otherwise, :code:`<code>false</code>`.
:type is_applicable_for_all_compute_modes: bool
:param site_mode: Shared or Dedicated.
:type site_mode: str
    :param is_linux: Whether this is a Linux stamp capacity.
:type is_linux: bool
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'available_capacity': {'key': 'availableCapacity', 'type': 'long'},
'total_capacity': {'key': 'totalCapacity', 'type': 'long'},
'unit': {'key': 'unit', 'type': 'str'},
'compute_mode': {'key': 'computeMode', 'type': 'str'},
'worker_size': {'key': 'workerSize', 'type': 'str'},
'worker_size_id': {'key': 'workerSizeId', 'type': 'int'},
'exclude_from_capacity_allocation': {'key': 'excludeFromCapacityAllocation', 'type': 'bool'},
'is_applicable_for_all_compute_modes': {'key': 'isApplicableForAllComputeModes', 'type': 'bool'},
'site_mode': {'key': 'siteMode', 'type': 'str'},
'is_linux': {'key': 'isLinux', 'type': 'bool'},
}
def __init__(
self,
*,
name: Optional[str] = None,
available_capacity: Optional[int] = None,
total_capacity: Optional[int] = None,
unit: Optional[str] = None,
compute_mode: Optional[Union[str, "ComputeModeOptions"]] = None,
worker_size: Optional[Union[str, "WorkerSizeOptions"]] = None,
worker_size_id: Optional[int] = None,
exclude_from_capacity_allocation: Optional[bool] = None,
is_applicable_for_all_compute_modes: Optional[bool] = None,
site_mode: Optional[str] = None,
is_linux: Optional[bool] = None,
**kwargs
):
super(StampCapacity, self).__init__(**kwargs)
self.name = name
self.available_capacity = available_capacity
self.total_capacity = total_capacity
self.unit = unit
self.compute_mode = compute_mode
self.worker_size = worker_size
self.worker_size_id = worker_size_id
self.exclude_from_capacity_allocation = exclude_from_capacity_allocation
self.is_applicable_for_all_compute_modes = is_applicable_for_all_compute_modes
self.site_mode = site_mode
self.is_linux = is_linux
class StampCapacityCollection(msrest.serialization.Model):
"""Collection of stamp capacities.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.StampCapacity]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[StampCapacity]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["StampCapacity"],
**kwargs
):
super(StampCapacityCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class StaticSiteARMResource(Resource):
"""Static Site ARM resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:param location: Required. Resource Location.
:type location: str
:ivar type: Resource type.
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param sku: Description of a SKU for a scalable resource.
:type sku: ~azure.mgmt.web.v2020_06_01.models.SkuDescription
:ivar default_hostname: The default autogenerated hostname for the static site.
:vartype default_hostname: str
:param repository_url: URL for the repository of the static site.
:type repository_url: str
:param branch: The target branch in the repository.
:type branch: str
:ivar custom_domains: The custom domains associated with this static site.
:vartype custom_domains: list[str]
    :param repository_token: A user's GitHub repository token. This is used to set up the GitHub
     Actions workflow file and API secrets.
:type repository_token: str
:param build_properties: Build properties to configure on the repository.
:type build_properties: ~azure.mgmt.web.v2020_06_01.models.StaticSiteBuildProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'location': {'required': True},
'type': {'readonly': True},
'default_hostname': {'readonly': True},
'custom_domains': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'SkuDescription'},
'default_hostname': {'key': 'properties.defaultHostname', 'type': 'str'},
'repository_url': {'key': 'properties.repositoryUrl', 'type': 'str'},
'branch': {'key': 'properties.branch', 'type': 'str'},
'custom_domains': {'key': 'properties.customDomains', 'type': '[str]'},
'repository_token': {'key': 'properties.repositoryToken', 'type': 'str'},
'build_properties': {'key': 'properties.buildProperties', 'type': 'StaticSiteBuildProperties'},
}
def __init__(
self,
*,
location: str,
kind: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
sku: Optional["SkuDescription"] = None,
repository_url: Optional[str] = None,
branch: Optional[str] = None,
repository_token: Optional[str] = None,
build_properties: Optional["StaticSiteBuildProperties"] = None,
**kwargs
):
super(StaticSiteARMResource, self).__init__(kind=kind, location=location, tags=tags, **kwargs)
self.sku = sku
self.default_hostname = None
self.repository_url = repository_url
self.branch = branch
self.custom_domains = None
self.repository_token = repository_token
self.build_properties = build_properties
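# Illustrative sketch, not part of the generated model code: how a caller might build a
# StaticSiteARMResource payload locally and serialize it with the standard
# msrest.serialization.Model.serialize() helper. The function name and every literal value
# below are hypothetical examples, not values defined by this SDK.
def _example_static_site_payload():
    site = StaticSiteARMResource(
        location="westus2",  # required per _validation
        repository_url="https://github.com/contoso/my-static-site",
        branch="main",
        repository_token="<personal-access-token>",  # placeholder; never hard-code a real token
    )
    # serialize() maps the snake_case attributes back to the wire shape declared in
    # _attribute_map (e.g. repository_url becomes repositoryUrl nested under "properties");
    # with the default keep_readonly=False, read-only values such as default_hostname and
    # custom_domains should not be emitted.
    return site.serialize()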
class StaticSiteBuildARMResource(ProxyOnlyResource):
"""Static Site Build ARM resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar build_id: An identifier for the static site build.
:vartype build_id: str
:ivar source_branch: The source branch.
:vartype source_branch: str
:ivar pull_request_title: The title of a pull request that a static site build is related to.
:vartype pull_request_title: str
:ivar hostname: The hostname for a static site build.
:vartype hostname: str
:ivar created_time_utc: When this build was created.
:vartype created_time_utc: ~datetime.datetime
:ivar last_updated_on: When this build was updated.
:vartype last_updated_on: ~datetime.datetime
:ivar status: The status of the static site build. Possible values include:
"WaitingForDeployment", "Uploading", "Deploying", "Ready", "Failed", "Deleting", "Detached".
:vartype status: str or ~azure.mgmt.web.v2020_06_01.models.BuildStatus
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'build_id': {'readonly': True},
'source_branch': {'readonly': True},
'pull_request_title': {'readonly': True},
'hostname': {'readonly': True},
'created_time_utc': {'readonly': True},
'last_updated_on': {'readonly': True},
'status': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'build_id': {'key': 'properties.buildId', 'type': 'str'},
'source_branch': {'key': 'properties.sourceBranch', 'type': 'str'},
'pull_request_title': {'key': 'properties.pullRequestTitle', 'type': 'str'},
'hostname': {'key': 'properties.hostname', 'type': 'str'},
'created_time_utc': {'key': 'properties.createdTimeUtc', 'type': 'iso-8601'},
'last_updated_on': {'key': 'properties.lastUpdatedOn', 'type': 'iso-8601'},
'status': {'key': 'properties.status', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(StaticSiteBuildARMResource, self).__init__(kind=kind, **kwargs)
self.build_id = None
self.source_branch = None
self.pull_request_title = None
self.hostname = None
self.created_time_utc = None
self.last_updated_on = None
self.status = None
class StaticSiteBuildCollection(msrest.serialization.Model):
"""Collection of static site builds.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.StaticSiteBuildARMResource]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[StaticSiteBuildARMResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["StaticSiteBuildARMResource"],
**kwargs
):
super(StaticSiteBuildCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class StaticSiteBuildProperties(msrest.serialization.Model):
"""Build properties for the static site.
:param app_location: The path to the app code within the repository.
:type app_location: str
:param api_location: The path to the api code within the repository.
:type api_location: str
:param app_artifact_location: The path of the app artifacts after building.
:type app_artifact_location: str
"""
_attribute_map = {
'app_location': {'key': 'appLocation', 'type': 'str'},
'api_location': {'key': 'apiLocation', 'type': 'str'},
'app_artifact_location': {'key': 'appArtifactLocation', 'type': 'str'},
}
def __init__(
self,
*,
app_location: Optional[str] = None,
api_location: Optional[str] = None,
app_artifact_location: Optional[str] = None,
**kwargs
):
super(StaticSiteBuildProperties, self).__init__(**kwargs)
self.app_location = app_location
self.api_location = api_location
self.app_artifact_location = app_artifact_location
class StaticSiteCollection(msrest.serialization.Model):
"""Collection of static sites.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.StaticSiteARMResource]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[StaticSiteARMResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["StaticSiteARMResource"],
**kwargs
):
super(StaticSiteCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class StaticSiteCustomDomainOverviewARMResource(ProxyOnlyResource):
"""Static Site Custom Domain Overview ARM resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar domain_name: The domain name for the static site custom domain.
:vartype domain_name: str
:ivar created_on: The date and time on which the custom domain was created for the static site.
:vartype created_on: ~datetime.datetime
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'domain_name': {'readonly': True},
'created_on': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'domain_name': {'key': 'properties.domainName', 'type': 'str'},
'created_on': {'key': 'properties.createdOn', 'type': 'iso-8601'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(StaticSiteCustomDomainOverviewARMResource, self).__init__(kind=kind, **kwargs)
self.domain_name = None
self.created_on = None
class StaticSiteCustomDomainOverviewCollection(msrest.serialization.Model):
"""Collection of static site custom domains.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.StaticSiteCustomDomainOverviewARMResource]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[StaticSiteCustomDomainOverviewARMResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["StaticSiteCustomDomainOverviewARMResource"],
**kwargs
):
super(StaticSiteCustomDomainOverviewCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class StaticSiteFunctionOverviewARMResource(ProxyOnlyResource):
"""Static Site Function Overview ARM resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar function_name: The name for the function.
:vartype function_name: str
:ivar trigger_type: The trigger type of the function. Possible values include: "HttpTrigger",
"Unknown".
:vartype trigger_type: str or ~azure.mgmt.web.v2020_06_01.models.TriggerTypes
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'function_name': {'readonly': True},
'trigger_type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'function_name': {'key': 'properties.functionName', 'type': 'str'},
'trigger_type': {'key': 'properties.triggerType', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(StaticSiteFunctionOverviewARMResource, self).__init__(kind=kind, **kwargs)
self.function_name = None
self.trigger_type = None
class StaticSiteFunctionOverviewCollection(msrest.serialization.Model):
"""Collection of static site functions.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.StaticSiteFunctionOverviewARMResource]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[StaticSiteFunctionOverviewARMResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["StaticSiteFunctionOverviewARMResource"],
**kwargs
):
super(StaticSiteFunctionOverviewCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class StaticSitePatchResource(ProxyOnlyResource):
"""ARM resource for a static site when patching.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar default_hostname: The default autogenerated hostname for the static site.
:vartype default_hostname: str
:param repository_url: URL for the repository of the static site.
:type repository_url: str
:param branch: The target branch in the repository.
:type branch: str
:ivar custom_domains: The custom domains associated with this static site.
:vartype custom_domains: list[str]
    :param repository_token: A user's GitHub repository token. This is used to set up the GitHub
     Actions workflow file and API secrets.
:type repository_token: str
:param build_properties: Build properties to configure on the repository.
:type build_properties: ~azure.mgmt.web.v2020_06_01.models.StaticSiteBuildProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'default_hostname': {'readonly': True},
'custom_domains': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'default_hostname': {'key': 'properties.defaultHostname', 'type': 'str'},
'repository_url': {'key': 'properties.repositoryUrl', 'type': 'str'},
'branch': {'key': 'properties.branch', 'type': 'str'},
'custom_domains': {'key': 'properties.customDomains', 'type': '[str]'},
'repository_token': {'key': 'properties.repositoryToken', 'type': 'str'},
'build_properties': {'key': 'properties.buildProperties', 'type': 'StaticSiteBuildProperties'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
repository_url: Optional[str] = None,
branch: Optional[str] = None,
repository_token: Optional[str] = None,
build_properties: Optional["StaticSiteBuildProperties"] = None,
**kwargs
):
super(StaticSitePatchResource, self).__init__(kind=kind, **kwargs)
self.default_hostname = None
self.repository_url = repository_url
self.branch = branch
self.custom_domains = None
self.repository_token = repository_token
self.build_properties = build_properties
class StaticSiteResetPropertiesARMResource(ProxyOnlyResource):
"""Static Site Reset Properties ARM resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param repository_token: The token which proves admin privileges to the repository.
:type repository_token: str
:param should_update_repository: Determines whether the repository should be updated with the
new properties.
:type should_update_repository: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'repository_token': {'key': 'properties.repositoryToken', 'type': 'str'},
'should_update_repository': {'key': 'properties.shouldUpdateRepository', 'type': 'bool'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
repository_token: Optional[str] = None,
should_update_repository: Optional[bool] = None,
**kwargs
):
super(StaticSiteResetPropertiesARMResource, self).__init__(kind=kind, **kwargs)
self.repository_token = repository_token
self.should_update_repository = should_update_repository
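# Illustrative sketch, not generated code: the request body a caller might build when rotating a
# static site's API key. The function name and values are hypothetical placeholders.
def _example_reset_properties_body():
    reset = StaticSiteResetPropertiesARMResource(
        repository_token="<new-repository-token>",  # proves admin privileges to the repository
        should_update_repository=True,              # also refresh the workflow file / secrets
    )
    return reset.serialize()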
class StaticSitesWorkflowPreview(ProxyOnlyResource):
"""Preview for the Static Site Workflow to be generated.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar path: The path for the workflow file to be generated.
:vartype path: str
:ivar contents: The contents for the workflow file to be generated.
:vartype contents: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'path': {'readonly': True},
'contents': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'path': {'key': 'properties.path', 'type': 'str'},
'contents': {'key': 'properties.contents', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(StaticSitesWorkflowPreview, self).__init__(kind=kind, **kwargs)
self.path = None
self.contents = None
class StaticSitesWorkflowPreviewRequest(ProxyOnlyResource):
"""Request entity for previewing the Static Site workflow.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param repository_url: URL for the repository of the static site.
:type repository_url: str
:param branch: The target branch in the repository.
:type branch: str
:param build_properties: Build properties to configure on the repository.
:type build_properties: ~azure.mgmt.web.v2020_06_01.models.StaticSiteBuildProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'repository_url': {'key': 'properties.repositoryUrl', 'type': 'str'},
'branch': {'key': 'properties.branch', 'type': 'str'},
'build_properties': {'key': 'properties.buildProperties', 'type': 'StaticSiteBuildProperties'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
repository_url: Optional[str] = None,
branch: Optional[str] = None,
build_properties: Optional["StaticSiteBuildProperties"] = None,
**kwargs
):
super(StaticSitesWorkflowPreviewRequest, self).__init__(kind=kind, **kwargs)
self.repository_url = repository_url
self.branch = branch
self.build_properties = build_properties
class StaticSiteUserARMResource(ProxyOnlyResource):
"""Static Site User ARM resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar provider: The identity provider for the static site user.
:vartype provider: str
:ivar user_id: The user id for the static site user.
:vartype user_id: str
:ivar display_name: The display name for the static site user.
:vartype display_name: str
:param roles: The roles for the static site user, in free-form string format.
:type roles: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'provider': {'readonly': True},
'user_id': {'readonly': True},
'display_name': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'provider': {'key': 'properties.provider', 'type': 'str'},
'user_id': {'key': 'properties.userId', 'type': 'str'},
'display_name': {'key': 'properties.displayName', 'type': 'str'},
'roles': {'key': 'properties.roles', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
roles: Optional[str] = None,
**kwargs
):
super(StaticSiteUserARMResource, self).__init__(kind=kind, **kwargs)
self.provider = None
self.user_id = None
self.display_name = None
self.roles = roles
class StaticSiteUserCollection(msrest.serialization.Model):
"""Collection of static site custom users.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.StaticSiteUserARMResource]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[StaticSiteUserARMResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["StaticSiteUserARMResource"],
**kwargs
):
super(StaticSiteUserCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class StaticSiteUserInvitationRequestResource(ProxyOnlyResource):
"""Static sites user roles invitation resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param domain: The domain name for the static site custom domain.
:type domain: str
:param provider: The identity provider for the static site user.
:type provider: str
:param user_details: The user id for the static site user.
:type user_details: str
:param roles: The roles for the static site user, in free-form string format.
:type roles: str
:param num_hours_to_expiration: The number of hours the sas token stays valid.
:type num_hours_to_expiration: int
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'domain': {'key': 'properties.domain', 'type': 'str'},
'provider': {'key': 'properties.provider', 'type': 'str'},
'user_details': {'key': 'properties.userDetails', 'type': 'str'},
'roles': {'key': 'properties.roles', 'type': 'str'},
'num_hours_to_expiration': {'key': 'properties.numHoursToExpiration', 'type': 'int'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
domain: Optional[str] = None,
provider: Optional[str] = None,
user_details: Optional[str] = None,
roles: Optional[str] = None,
num_hours_to_expiration: Optional[int] = None,
**kwargs
):
super(StaticSiteUserInvitationRequestResource, self).__init__(kind=kind, **kwargs)
self.domain = domain
self.provider = provider
self.user_details = user_details
self.roles = roles
self.num_hours_to_expiration = num_hours_to_expiration
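# Illustrative sketch, not generated code: an invitation request for a static site user. The
# function name and all literal values are hypothetical.
def _example_user_invitation_request():
    invitation = StaticSiteUserInvitationRequestResource(
        domain="www.contoso.com",
        provider="github",
        user_details="octocat",
        roles="admin,contributor",   # free-form string of role names
        num_hours_to_expiration=24,  # how long the invitation link stays valid
    )
    return invitation.serialize()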
class StaticSiteUserInvitationResponseResource(ProxyOnlyResource):
"""Static sites user roles invitation link resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar expires_on: The expiration time of the invitation.
:vartype expires_on: ~datetime.datetime
:ivar invitation_url: The url for the invitation link.
:vartype invitation_url: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'expires_on': {'readonly': True},
'invitation_url': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'expires_on': {'key': 'properties.expiresOn', 'type': 'iso-8601'},
'invitation_url': {'key': 'properties.invitationUrl', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(StaticSiteUserInvitationResponseResource, self).__init__(kind=kind, **kwargs)
self.expires_on = None
self.invitation_url = None
class StatusCodesBasedTrigger(msrest.serialization.Model):
"""Trigger based on status code.
:param status: HTTP status code.
:type status: int
:param sub_status: Request Sub Status.
:type sub_status: int
:param win32_status: Win32 error code.
:type win32_status: int
:param count: Request Count.
:type count: int
:param time_interval: Time interval.
:type time_interval: str
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'int'},
'sub_status': {'key': 'subStatus', 'type': 'int'},
'win32_status': {'key': 'win32Status', 'type': 'int'},
'count': {'key': 'count', 'type': 'int'},
'time_interval': {'key': 'timeInterval', 'type': 'str'},
}
def __init__(
self,
*,
status: Optional[int] = None,
sub_status: Optional[int] = None,
win32_status: Optional[int] = None,
count: Optional[int] = None,
time_interval: Optional[str] = None,
**kwargs
):
super(StatusCodesBasedTrigger, self).__init__(**kwargs)
self.status = status
self.sub_status = sub_status
self.win32_status = win32_status
self.count = count
self.time_interval = time_interval
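# Illustrative sketch, not generated code: a status-code based trigger of the kind that is
# nested inside auto-heal rules. The thresholds below are arbitrary example values.
def _example_status_codes_trigger():
    trigger = StatusCodesBasedTrigger(
        status=500,                # HTTP status code to watch for
        sub_status=0,
        win32_status=0,
        count=10,                  # fire after 10 matching requests...
        time_interval="00:05:00",  # ...within a 5-minute window
    )
    return trigger.serialize()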
class StorageMigrationOptions(ProxyOnlyResource):
"""Options for app content migration.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param azurefiles_connection_string: AzureFiles connection string.
:type azurefiles_connection_string: str
:param azurefiles_share: AzureFiles share.
:type azurefiles_share: str
    :param switch_site_after_migration: :code:`<code>true</code>` if the app should be switched
over; otherwise, :code:`<code>false</code>`.
:type switch_site_after_migration: bool
:param block_write_access_to_site: :code:`<code>true</code>` if the app should be read only
during copy operation; otherwise, :code:`<code>false</code>`.
:type block_write_access_to_site: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'azurefiles_connection_string': {'key': 'properties.azurefilesConnectionString', 'type': 'str'},
'azurefiles_share': {'key': 'properties.azurefilesShare', 'type': 'str'},
'switch_site_after_migration': {'key': 'properties.switchSiteAfterMigration', 'type': 'bool'},
'block_write_access_to_site': {'key': 'properties.blockWriteAccessToSite', 'type': 'bool'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
azurefiles_connection_string: Optional[str] = None,
azurefiles_share: Optional[str] = None,
switch_site_after_migration: Optional[bool] = False,
block_write_access_to_site: Optional[bool] = False,
**kwargs
):
super(StorageMigrationOptions, self).__init__(kind=kind, **kwargs)
self.azurefiles_connection_string = azurefiles_connection_string
self.azurefiles_share = azurefiles_share
self.switch_site_after_migration = switch_site_after_migration
self.block_write_access_to_site = block_write_access_to_site
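# Illustrative sketch, not generated code: options for migrating app content to Azure Files.
# The function name, connection string, and share name are placeholders.
def _example_storage_migration_options():
    options = StorageMigrationOptions(
        azurefiles_connection_string="DefaultEndpointsProtocol=https;AccountName=<account>;AccountKey=<key>",
        azurefiles_share="app-content",
        switch_site_after_migration=True,  # cut the app over once the copy finishes
        block_write_access_to_site=True,   # keep the site read-only while copying
    )
    return options.serialize()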
class StorageMigrationResponse(ProxyOnlyResource):
"""Response for a migration of app content request.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
    :ivar operation_id: When the server starts the migration process, it returns an operation ID
     identifying that particular migration operation.
:vartype operation_id: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'operation_id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'operation_id': {'key': 'properties.operationId', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(StorageMigrationResponse, self).__init__(kind=kind, **kwargs)
self.operation_id = None
class StringDictionary(ProxyOnlyResource):
"""String dictionary resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param properties: Settings.
:type properties: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{str}'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
**kwargs
):
super(StringDictionary, self).__init__(kind=kind, **kwargs)
self.properties = properties
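# Illustrative sketch, not generated code: StringDictionary is the generic name/value payload
# used for settings such as app settings. The function name and the keys/values below are
# hypothetical examples.
def _example_app_settings_payload():
    settings = StringDictionary(
        properties={
            "WEBSITE_RUN_FROM_PACKAGE": "1",
            "MY_FEATURE_FLAG": "enabled",
        }
    )
    return settings.serialize()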
class SwiftVirtualNetwork(ProxyOnlyResource):
"""Swift Virtual Network Contract. This is used to enable the new Swift way of doing virtual network integration.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param subnet_resource_id: The Virtual Network subnet's resource ID. This is the subnet that
this Web App will join. This subnet must have a delegation to Microsoft.Web/serverFarms defined
first.
:type subnet_resource_id: str
:param swift_supported: A flag that specifies if the scale unit this Web App is on supports
Swift integration.
:type swift_supported: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'subnet_resource_id': {'key': 'properties.subnetResourceId', 'type': 'str'},
'swift_supported': {'key': 'properties.swiftSupported', 'type': 'bool'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
subnet_resource_id: Optional[str] = None,
swift_supported: Optional[bool] = None,
**kwargs
):
super(SwiftVirtualNetwork, self).__init__(kind=kind, **kwargs)
self.subnet_resource_id = subnet_resource_id
self.swift_supported = swift_supported
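# Illustrative sketch, not generated code: a regional VNet integration payload. The subnet
# resource ID is a placeholder; as the docstring above notes, the subnet must already be
# delegated to Microsoft.Web/serverFarms.
def _example_swift_virtual_network():
    vnet = SwiftVirtualNetwork(
        subnet_resource_id=(
            "/subscriptions/<sub-id>/resourceGroups/<rg>/providers/"
            "Microsoft.Network/virtualNetworks/<vnet>/subnets/<subnet>"
        ),
    )
    return vnet.serialize()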
class TldLegalAgreement(msrest.serialization.Model):
"""Legal agreement for a top level domain.
All required parameters must be populated in order to send to Azure.
:param agreement_key: Required. Unique identifier for the agreement.
:type agreement_key: str
:param title: Required. Agreement title.
:type title: str
:param content: Required. Agreement details.
:type content: str
:param url: URL where a copy of the agreement details is hosted.
:type url: str
"""
_validation = {
'agreement_key': {'required': True},
'title': {'required': True},
'content': {'required': True},
}
_attribute_map = {
'agreement_key': {'key': 'agreementKey', 'type': 'str'},
'title': {'key': 'title', 'type': 'str'},
'content': {'key': 'content', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
}
def __init__(
self,
*,
agreement_key: str,
title: str,
content: str,
url: Optional[str] = None,
**kwargs
):
super(TldLegalAgreement, self).__init__(**kwargs)
self.agreement_key = agreement_key
self.title = title
self.content = content
self.url = url
class TldLegalAgreementCollection(msrest.serialization.Model):
"""Collection of top-level domain legal agreements.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.TldLegalAgreement]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[TldLegalAgreement]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["TldLegalAgreement"],
**kwargs
):
super(TldLegalAgreementCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class TokenStore(ProxyOnlyResource):
"""TokenStore.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param enabled:
:type enabled: bool
:param token_refresh_extension_hours:
:type token_refresh_extension_hours: float
:param file_system:
:type file_system: ~azure.mgmt.web.v2020_06_01.models.FileSystemTokenStore
:param azure_blob_storage:
:type azure_blob_storage: ~azure.mgmt.web.v2020_06_01.models.BlobStorageTokenStore
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
'token_refresh_extension_hours': {'key': 'properties.tokenRefreshExtensionHours', 'type': 'float'},
'file_system': {'key': 'properties.fileSystem', 'type': 'FileSystemTokenStore'},
'azure_blob_storage': {'key': 'properties.azureBlobStorage', 'type': 'BlobStorageTokenStore'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
enabled: Optional[bool] = None,
token_refresh_extension_hours: Optional[float] = None,
file_system: Optional["FileSystemTokenStore"] = None,
azure_blob_storage: Optional["BlobStorageTokenStore"] = None,
**kwargs
):
super(TokenStore, self).__init__(kind=kind, **kwargs)
self.enabled = enabled
self.token_refresh_extension_hours = token_refresh_extension_hours
self.file_system = file_system
self.azure_blob_storage = azure_blob_storage
class TopLevelDomain(ProxyOnlyResource):
"""A top level domain object.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param privacy: If :code:`<code>true</code>`, then the top level domain supports domain
privacy; otherwise, :code:`<code>false</code>`.
:type privacy: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'privacy': {'key': 'properties.privacy', 'type': 'bool'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
privacy: Optional[bool] = None,
**kwargs
):
super(TopLevelDomain, self).__init__(kind=kind, **kwargs)
self.privacy = privacy
class TopLevelDomainAgreementOption(msrest.serialization.Model):
"""Options for retrieving the list of top level domain legal agreements.
:param include_privacy: If :code:`<code>true</code>`, then the list of agreements will include
agreements for domain privacy as well; otherwise, :code:`<code>false</code>`.
:type include_privacy: bool
:param for_transfer: If :code:`<code>true</code>`, then the list of agreements will include
agreements for domain transfer as well; otherwise, :code:`<code>false</code>`.
:type for_transfer: bool
"""
_attribute_map = {
'include_privacy': {'key': 'includePrivacy', 'type': 'bool'},
'for_transfer': {'key': 'forTransfer', 'type': 'bool'},
}
def __init__(
self,
*,
include_privacy: Optional[bool] = None,
for_transfer: Optional[bool] = None,
**kwargs
):
super(TopLevelDomainAgreementOption, self).__init__(**kwargs)
self.include_privacy = include_privacy
self.for_transfer = for_transfer
class TopLevelDomainCollection(msrest.serialization.Model):
"""Collection of Top-level domains.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.TopLevelDomain]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[TopLevelDomain]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["TopLevelDomain"],
**kwargs
):
super(TopLevelDomainCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class TriggeredJobHistory(ProxyOnlyResource):
"""Triggered Web Job History. List of Triggered Web Job Run Information elements.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param runs: List of triggered web job runs.
:type runs: list[~azure.mgmt.web.v2020_06_01.models.TriggeredJobRun]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'runs': {'key': 'properties.runs', 'type': '[TriggeredJobRun]'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
runs: Optional[List["TriggeredJobRun"]] = None,
**kwargs
):
super(TriggeredJobHistory, self).__init__(kind=kind, **kwargs)
self.runs = runs
class TriggeredJobHistoryCollection(msrest.serialization.Model):
"""Collection of Kudu continuous web job information elements.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.TriggeredJobHistory]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[TriggeredJobHistory]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["TriggeredJobHistory"],
**kwargs
):
super(TriggeredJobHistoryCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class TriggeredJobRun(ProxyOnlyResource):
"""Triggered Web Job Run Information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param web_job_id: Job ID.
:type web_job_id: str
:param web_job_name: Job name.
:type web_job_name: str
:param status: Job status. Possible values include: "Success", "Failed", "Error".
:type status: str or ~azure.mgmt.web.v2020_06_01.models.TriggeredWebJobStatus
:param start_time: Start time.
:type start_time: ~datetime.datetime
:param end_time: End time.
:type end_time: ~datetime.datetime
:param duration: Job duration.
:type duration: str
:param output_url: Output URL.
:type output_url: str
:param error_url: Error URL.
:type error_url: str
:param url: Job URL.
:type url: str
:param job_name: Job name.
:type job_name: str
:param trigger: Job trigger.
:type trigger: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'web_job_id': {'key': 'properties.web_job_id', 'type': 'str'},
'web_job_name': {'key': 'properties.web_job_name', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'str'},
'start_time': {'key': 'properties.start_time', 'type': 'iso-8601'},
'end_time': {'key': 'properties.end_time', 'type': 'iso-8601'},
'duration': {'key': 'properties.duration', 'type': 'str'},
'output_url': {'key': 'properties.output_url', 'type': 'str'},
'error_url': {'key': 'properties.error_url', 'type': 'str'},
'url': {'key': 'properties.url', 'type': 'str'},
'job_name': {'key': 'properties.job_name', 'type': 'str'},
'trigger': {'key': 'properties.trigger', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
web_job_id: Optional[str] = None,
web_job_name: Optional[str] = None,
status: Optional[Union[str, "TriggeredWebJobStatus"]] = None,
start_time: Optional[datetime.datetime] = None,
end_time: Optional[datetime.datetime] = None,
duration: Optional[str] = None,
output_url: Optional[str] = None,
error_url: Optional[str] = None,
url: Optional[str] = None,
job_name: Optional[str] = None,
trigger: Optional[str] = None,
**kwargs
):
super(TriggeredJobRun, self).__init__(kind=kind, **kwargs)
self.web_job_id = web_job_id
self.web_job_name = web_job_name
self.status = status
self.start_time = start_time
self.end_time = end_time
self.duration = duration
self.output_url = output_url
self.error_url = error_url
self.url = url
self.job_name = job_name
self.trigger = trigger
class TriggeredWebJob(ProxyOnlyResource):
"""Triggered Web Job Information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param latest_run: Latest job run information.
:type latest_run: ~azure.mgmt.web.v2020_06_01.models.TriggeredJobRun
:param history_url: History URL.
:type history_url: str
:param scheduler_logs_url: Scheduler Logs URL.
:type scheduler_logs_url: str
:param run_command: Run command.
:type run_command: str
:param url: Job URL.
:type url: str
:param extra_info_url: Extra Info URL.
:type extra_info_url: str
:param web_job_type: Job type. Possible values include: "Continuous", "Triggered".
:type web_job_type: str or ~azure.mgmt.web.v2020_06_01.models.WebJobType
:param error: Error information.
:type error: str
    :param using_sdk: Using SDK?
:type using_sdk: bool
:param settings: Job settings.
:type settings: dict[str, any]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'latest_run': {'key': 'properties.latest_run', 'type': 'TriggeredJobRun'},
'history_url': {'key': 'properties.history_url', 'type': 'str'},
'scheduler_logs_url': {'key': 'properties.scheduler_logs_url', 'type': 'str'},
'run_command': {'key': 'properties.run_command', 'type': 'str'},
'url': {'key': 'properties.url', 'type': 'str'},
'extra_info_url': {'key': 'properties.extra_info_url', 'type': 'str'},
'web_job_type': {'key': 'properties.web_job_type', 'type': 'str'},
'error': {'key': 'properties.error', 'type': 'str'},
'using_sdk': {'key': 'properties.using_sdk', 'type': 'bool'},
'settings': {'key': 'properties.settings', 'type': '{object}'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
latest_run: Optional["TriggeredJobRun"] = None,
history_url: Optional[str] = None,
scheduler_logs_url: Optional[str] = None,
run_command: Optional[str] = None,
url: Optional[str] = None,
extra_info_url: Optional[str] = None,
web_job_type: Optional[Union[str, "WebJobType"]] = None,
error: Optional[str] = None,
using_sdk: Optional[bool] = None,
settings: Optional[Dict[str, Any]] = None,
**kwargs
):
super(TriggeredWebJob, self).__init__(kind=kind, **kwargs)
self.latest_run = latest_run
self.history_url = history_url
self.scheduler_logs_url = scheduler_logs_url
self.run_command = run_command
self.url = url
self.extra_info_url = extra_info_url
self.web_job_type = web_job_type
self.error = error
self.using_sdk = using_sdk
self.settings = settings
class TriggeredWebJobCollection(msrest.serialization.Model):
"""Collection of Kudu continuous web job information elements.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.TriggeredWebJob]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[TriggeredWebJob]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["TriggeredWebJob"],
**kwargs
):
super(TriggeredWebJobCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class Twitter(ProxyOnlyResource):
"""Twitter.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param enabled:
:type enabled: bool
:param registration:
:type registration: ~azure.mgmt.web.v2020_06_01.models.TwitterRegistration
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
'registration': {'key': 'properties.registration', 'type': 'TwitterRegistration'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
enabled: Optional[bool] = None,
registration: Optional["TwitterRegistration"] = None,
**kwargs
):
super(Twitter, self).__init__(kind=kind, **kwargs)
self.enabled = enabled
self.registration = registration
class TwitterRegistration(ProxyOnlyResource):
"""TwitterRegistration.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param consumer_key:
:type consumer_key: str
:param consumer_secret_setting_name:
:type consumer_secret_setting_name: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'consumer_key': {'key': 'properties.consumerKey', 'type': 'str'},
'consumer_secret_setting_name': {'key': 'properties.consumerSecretSettingName', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
consumer_key: Optional[str] = None,
consumer_secret_setting_name: Optional[str] = None,
**kwargs
):
super(TwitterRegistration, self).__init__(kind=kind, **kwargs)
self.consumer_key = consumer_key
self.consumer_secret_setting_name = consumer_secret_setting_name
class Usage(ProxyOnlyResource):
"""Usage of the quota resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar display_name: Friendly name shown in the UI.
:vartype display_name: str
:ivar resource_name: Name of the quota resource.
:vartype resource_name: str
:ivar unit: Units of measurement for the quota resource.
:vartype unit: str
:ivar current_value: The current value of the resource counter.
:vartype current_value: long
:ivar limit: The resource limit.
:vartype limit: long
:ivar next_reset_time: Next reset time for the resource counter.
:vartype next_reset_time: ~datetime.datetime
:ivar compute_mode: Compute mode used for this usage. Possible values include: "Shared",
"Dedicated", "Dynamic".
:vartype compute_mode: str or ~azure.mgmt.web.v2020_06_01.models.ComputeModeOptions
:ivar site_mode: Site mode used for this usage.
:vartype site_mode: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'display_name': {'readonly': True},
'resource_name': {'readonly': True},
'unit': {'readonly': True},
'current_value': {'readonly': True},
'limit': {'readonly': True},
'next_reset_time': {'readonly': True},
'compute_mode': {'readonly': True},
'site_mode': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'display_name': {'key': 'properties.displayName', 'type': 'str'},
'resource_name': {'key': 'properties.resourceName', 'type': 'str'},
'unit': {'key': 'properties.unit', 'type': 'str'},
'current_value': {'key': 'properties.currentValue', 'type': 'long'},
'limit': {'key': 'properties.limit', 'type': 'long'},
'next_reset_time': {'key': 'properties.nextResetTime', 'type': 'iso-8601'},
'compute_mode': {'key': 'properties.computeMode', 'type': 'str'},
'site_mode': {'key': 'properties.siteMode', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(Usage, self).__init__(kind=kind, **kwargs)
self.display_name = None
self.resource_name = None
self.unit = None
self.current_value = None
self.limit = None
self.next_reset_time = None
self.compute_mode = None
self.site_mode = None
class UsageCollection(msrest.serialization.Model):
"""Collection of usages.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.Usage]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Usage]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["Usage"],
**kwargs
):
super(UsageCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class User(ProxyOnlyResource):
"""User credentials used for publishing activity.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param publishing_user_name: Username used for publishing.
:type publishing_user_name: str
:param publishing_password: Password used for publishing.
:type publishing_password: str
:param publishing_password_hash: Password hash used for publishing.
:type publishing_password_hash: str
:param publishing_password_hash_salt: Password hash salt used for publishing.
:type publishing_password_hash_salt: str
:param scm_uri: Url of SCM site.
:type scm_uri: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'publishing_user_name': {'key': 'properties.publishingUserName', 'type': 'str'},
'publishing_password': {'key': 'properties.publishingPassword', 'type': 'str'},
'publishing_password_hash': {'key': 'properties.publishingPasswordHash', 'type': 'str'},
'publishing_password_hash_salt': {'key': 'properties.publishingPasswordHashSalt', 'type': 'str'},
'scm_uri': {'key': 'properties.scmUri', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
publishing_user_name: Optional[str] = None,
publishing_password: Optional[str] = None,
publishing_password_hash: Optional[str] = None,
publishing_password_hash_salt: Optional[str] = None,
scm_uri: Optional[str] = None,
**kwargs
):
super(User, self).__init__(kind=kind, **kwargs)
self.publishing_user_name = publishing_user_name
        self.publishing_password = publishing_password
        self.publishing_password_hash = publishing_password_hash
self.publishing_password_hash_salt = publishing_password_hash_salt
self.scm_uri = scm_uri
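# Illustrative sketch, not generated code: round-tripping publishing credentials. The function
# name and the dict below are hypothetical; the dict only mimics the shape of an ARM response.
def _example_publishing_user_roundtrip():
    payload = {
        "properties": {
            "publishingUserName": "$contoso-app",
            "scmUri": "https://contoso-app.scm.azurewebsites.net",
        }
    }
    # deserialize() (inherited from msrest.serialization.Model) maps the nested wire keys back
    # onto the snake_case attributes declared above.
    user = User.deserialize(payload)
    return user.publishing_user_name, user.scm_uri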
class ValidateRequest(msrest.serialization.Model):
"""Resource validation request content.
All required parameters must be populated in order to send to Azure.
:param name: Required. Resource name to verify.
:type name: str
:param type: Required. Resource type used for verification. Possible values include:
"ServerFarm", "Site".
:type type: str or ~azure.mgmt.web.v2020_06_01.models.ValidateResourceTypes
:param location: Required. Expected location of the resource.
:type location: str
:param server_farm_id: ARM resource ID of an App Service plan that would host the app.
:type server_farm_id: str
:param sku_name: Name of the target SKU for the App Service plan.
:type sku_name: str
:param need_linux_workers: :code:`<code>true</code>` if App Service plan is for Linux workers;
otherwise, :code:`<code>false</code>`.
:type need_linux_workers: bool
:param is_spot: :code:`<code>true</code>` if App Service plan is for Spot instances; otherwise,
:code:`<code>false</code>`.
:type is_spot: bool
:param capacity: Target capacity of the App Service plan (number of VMs).
:type capacity: int
:param hosting_environment: Name of App Service Environment where app or App Service plan
should be created.
:type hosting_environment: str
:param is_xenon: :code:`<code>true</code>` if App Service plan is running as a windows
container.
:type is_xenon: bool
:param container_registry_base_url: Base URL of the container registry.
:type container_registry_base_url: str
:param container_registry_username: Username to access the container registry.
:type container_registry_username: str
:param container_registry_password: Password to access the container registry.
:type container_registry_password: str
:param container_image_repository: Repository name (image name).
:type container_image_repository: str
:param container_image_tag: Image tag.
:type container_image_tag: str
:param container_image_platform: Platform (windows or linux).
:type container_image_platform: str
"""
_validation = {
'name': {'required': True},
'type': {'required': True},
'location': {'required': True},
'capacity': {'minimum': 1},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'server_farm_id': {'key': 'properties.serverFarmId', 'type': 'str'},
'sku_name': {'key': 'properties.skuName', 'type': 'str'},
'need_linux_workers': {'key': 'properties.needLinuxWorkers', 'type': 'bool'},
'is_spot': {'key': 'properties.isSpot', 'type': 'bool'},
'capacity': {'key': 'properties.capacity', 'type': 'int'},
'hosting_environment': {'key': 'properties.hostingEnvironment', 'type': 'str'},
'is_xenon': {'key': 'properties.isXenon', 'type': 'bool'},
'container_registry_base_url': {'key': 'properties.containerRegistryBaseUrl', 'type': 'str'},
'container_registry_username': {'key': 'properties.containerRegistryUsername', 'type': 'str'},
'container_registry_password': {'key': 'properties.containerRegistryPassword', 'type': 'str'},
'container_image_repository': {'key': 'properties.containerImageRepository', 'type': 'str'},
'container_image_tag': {'key': 'properties.containerImageTag', 'type': 'str'},
'container_image_platform': {'key': 'properties.containerImagePlatform', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
type: Union[str, "ValidateResourceTypes"],
location: str,
server_farm_id: Optional[str] = None,
sku_name: Optional[str] = None,
need_linux_workers: Optional[bool] = None,
is_spot: Optional[bool] = None,
capacity: Optional[int] = None,
hosting_environment: Optional[str] = None,
is_xenon: Optional[bool] = None,
container_registry_base_url: Optional[str] = None,
container_registry_username: Optional[str] = None,
container_registry_password: Optional[str] = None,
container_image_repository: Optional[str] = None,
container_image_tag: Optional[str] = None,
container_image_platform: Optional[str] = None,
**kwargs
):
super(ValidateRequest, self).__init__(**kwargs)
self.name = name
self.type = type
self.location = location
self.server_farm_id = server_farm_id
self.sku_name = sku_name
self.need_linux_workers = need_linux_workers
self.is_spot = is_spot
self.capacity = capacity
self.hosting_environment = hosting_environment
self.is_xenon = is_xenon
self.container_registry_base_url = container_registry_base_url
self.container_registry_username = container_registry_username
self.container_registry_password = container_registry_password
self.container_image_repository = container_image_repository
self.container_image_tag = container_image_tag
self.container_image_platform = container_image_platform
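# Illustrative sketch (not part of the generated module): a minimal ValidateRequest for
# checking whether a site can be created on an existing App Service plan. The name,
# location, and resource ID below are hypothetical.
#
#   validate_request = ValidateRequest(
#       name="my-web-app",
#       type="Site",
#       location="westus2",
#       server_farm_id="/subscriptions/<sub-id>/resourceGroups/<rg>/providers/Microsoft.Web/serverfarms/<plan>",
#   )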
class ValidateResponse(msrest.serialization.Model):
"""Describes the result of resource validation.
:param status: Result of validation.
:type status: str
:param error: Error details for the case when validation fails.
:type error: ~azure.mgmt.web.v2020_06_01.models.ValidateResponseError
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'error': {'key': 'error', 'type': 'ValidateResponseError'},
}
def __init__(
self,
*,
status: Optional[str] = None,
error: Optional["ValidateResponseError"] = None,
**kwargs
):
super(ValidateResponse, self).__init__(**kwargs)
self.status = status
self.error = error
class ValidateResponseError(msrest.serialization.Model):
"""Error details for when validation fails.
:param code: Validation error code.
:type code: str
:param message: Validation error message.
:type message: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
code: Optional[str] = None,
message: Optional[str] = None,
**kwargs
):
super(ValidateResponseError, self).__init__(**kwargs)
self.code = code
self.message = message
class VirtualApplication(msrest.serialization.Model):
"""Virtual application in an app.
:param virtual_path: Virtual path.
:type virtual_path: str
:param physical_path: Physical path.
:type physical_path: str
:param preload_enabled: :code:`<code>true</code>` if preloading is enabled; otherwise,
:code:`<code>false</code>`.
:type preload_enabled: bool
:param virtual_directories: Virtual directories for virtual application.
:type virtual_directories: list[~azure.mgmt.web.v2020_06_01.models.VirtualDirectory]
"""
_attribute_map = {
'virtual_path': {'key': 'virtualPath', 'type': 'str'},
'physical_path': {'key': 'physicalPath', 'type': 'str'},
'preload_enabled': {'key': 'preloadEnabled', 'type': 'bool'},
'virtual_directories': {'key': 'virtualDirectories', 'type': '[VirtualDirectory]'},
}
def __init__(
self,
*,
virtual_path: Optional[str] = None,
physical_path: Optional[str] = None,
preload_enabled: Optional[bool] = None,
virtual_directories: Optional[List["VirtualDirectory"]] = None,
**kwargs
):
super(VirtualApplication, self).__init__(**kwargs)
self.virtual_path = virtual_path
self.physical_path = physical_path
self.preload_enabled = preload_enabled
self.virtual_directories = virtual_directories
class VirtualDirectory(msrest.serialization.Model):
"""Directory for virtual application.
:param virtual_path: Path to virtual application.
:type virtual_path: str
:param physical_path: Physical path.
:type physical_path: str
"""
_attribute_map = {
'virtual_path': {'key': 'virtualPath', 'type': 'str'},
'physical_path': {'key': 'physicalPath', 'type': 'str'},
}
def __init__(
self,
*,
virtual_path: Optional[str] = None,
physical_path: Optional[str] = None,
**kwargs
):
super(VirtualDirectory, self).__init__(**kwargs)
self.virtual_path = virtual_path
self.physical_path = physical_path
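# Illustrative sketch (not part of the generated module): a VirtualApplication that
# nests a VirtualDirectory, as it could appear in a site configuration. Paths are
# hypothetical.
#
#   virtual_app = VirtualApplication(
#       virtual_path="/",
#       physical_path="site\\wwwroot",
#       preload_enabled=True,
#       virtual_directories=[
#           VirtualDirectory(virtual_path="/static", physical_path="site\\wwwroot\\static"),
#       ],
#   )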
class VirtualIPMapping(msrest.serialization.Model):
"""Virtual IP mapping.
:param virtual_ip: Virtual IP address.
:type virtual_ip: str
:param internal_http_port: Internal HTTP port.
:type internal_http_port: int
:param internal_https_port: Internal HTTPS port.
:type internal_https_port: int
:param in_use: Is virtual IP mapping in use.
:type in_use: bool
:param service_name: Name of the service that the virtual IP is assigned to.
:type service_name: str
"""
_attribute_map = {
'virtual_ip': {'key': 'virtualIP', 'type': 'str'},
'internal_http_port': {'key': 'internalHttpPort', 'type': 'int'},
'internal_https_port': {'key': 'internalHttpsPort', 'type': 'int'},
'in_use': {'key': 'inUse', 'type': 'bool'},
'service_name': {'key': 'serviceName', 'type': 'str'},
}
def __init__(
self,
*,
virtual_ip: Optional[str] = None,
internal_http_port: Optional[int] = None,
internal_https_port: Optional[int] = None,
in_use: Optional[bool] = None,
service_name: Optional[str] = None,
**kwargs
):
super(VirtualIPMapping, self).__init__(**kwargs)
self.virtual_ip = virtual_ip
self.internal_http_port = internal_http_port
self.internal_https_port = internal_https_port
self.in_use = in_use
self.service_name = service_name
class VirtualNetworkProfile(msrest.serialization.Model):
"""Specification for using a Virtual Network.
Variables are only populated by the server, and will be ignored when sending a request.
:param id: Resource id of the Virtual Network.
:type id: str
:ivar name: Name of the Virtual Network (read-only).
:vartype name: str
:ivar type: Resource type of the Virtual Network (read-only).
:vartype type: str
:param subnet: Subnet within the Virtual Network.
:type subnet: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'subnet': {'key': 'subnet', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
subnet: Optional[str] = None,
**kwargs
):
super(VirtualNetworkProfile, self).__init__(**kwargs)
self.id = id
self.name = None
self.type = None
self.subnet = subnet
class VnetGateway(ProxyOnlyResource):
"""The Virtual Network gateway contract. This is used to give the Virtual Network gateway access to the VPN package.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param vnet_name: The Virtual Network name.
:type vnet_name: str
:param vpn_package_uri: The URI where the VPN package can be downloaded.
:type vpn_package_uri: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'vnet_name': {'key': 'properties.vnetName', 'type': 'str'},
'vpn_package_uri': {'key': 'properties.vpnPackageUri', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
vnet_name: Optional[str] = None,
vpn_package_uri: Optional[str] = None,
**kwargs
):
super(VnetGateway, self).__init__(kind=kind, **kwargs)
self.vnet_name = vnet_name
self.vpn_package_uri = vpn_package_uri
class VnetInfo(ProxyOnlyResource):
"""Virtual Network information contract.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param vnet_resource_id: The Virtual Network's resource ID.
:type vnet_resource_id: str
:ivar cert_thumbprint: The client certificate thumbprint.
:vartype cert_thumbprint: str
:param cert_blob: A certificate file (.cer) blob containing the public key of the private key
used to authenticate a
Point-To-Site VPN connection.
:type cert_blob: str
:ivar routes: The routes that this Virtual Network connection uses.
:vartype routes: list[~azure.mgmt.web.v2020_06_01.models.VnetRoute]
:ivar resync_required: :code:`<code>true</code>` if a resync is required; otherwise,
:code:`<code>false</code>`.
:vartype resync_required: bool
:param dns_servers: DNS servers to be used by this Virtual Network. This should be a
comma-separated list of IP addresses.
:type dns_servers: str
:param is_swift: Flag that denotes whether this is VNET injection.
:type is_swift: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'cert_thumbprint': {'readonly': True},
'routes': {'readonly': True},
'resync_required': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'vnet_resource_id': {'key': 'properties.vnetResourceId', 'type': 'str'},
'cert_thumbprint': {'key': 'properties.certThumbprint', 'type': 'str'},
'cert_blob': {'key': 'properties.certBlob', 'type': 'str'},
'routes': {'key': 'properties.routes', 'type': '[VnetRoute]'},
'resync_required': {'key': 'properties.resyncRequired', 'type': 'bool'},
'dns_servers': {'key': 'properties.dnsServers', 'type': 'str'},
'is_swift': {'key': 'properties.isSwift', 'type': 'bool'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
vnet_resource_id: Optional[str] = None,
cert_blob: Optional[str] = None,
dns_servers: Optional[str] = None,
is_swift: Optional[bool] = None,
**kwargs
):
super(VnetInfo, self).__init__(kind=kind, **kwargs)
self.vnet_resource_id = vnet_resource_id
self.cert_thumbprint = None
self.cert_blob = cert_blob
self.routes = None
self.resync_required = None
self.dns_servers = dns_servers
self.is_swift = is_swift
class VnetParameters(ProxyOnlyResource):
"""The required set of inputs to validate a VNET.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param vnet_resource_group: The Resource Group of the VNET to be validated.
:type vnet_resource_group: str
:param vnet_name: The name of the VNET to be validated.
:type vnet_name: str
:param vnet_subnet_name: The subnet name to be validated.
:type vnet_subnet_name: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'vnet_resource_group': {'key': 'properties.vnetResourceGroup', 'type': 'str'},
'vnet_name': {'key': 'properties.vnetName', 'type': 'str'},
'vnet_subnet_name': {'key': 'properties.vnetSubnetName', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
vnet_resource_group: Optional[str] = None,
vnet_name: Optional[str] = None,
vnet_subnet_name: Optional[str] = None,
**kwargs
):
super(VnetParameters, self).__init__(kind=kind, **kwargs)
self.vnet_resource_group = vnet_resource_group
self.vnet_name = vnet_name
self.vnet_subnet_name = vnet_subnet_name
class VnetRoute(ProxyOnlyResource):
"""Virtual Network route contract used to pass routing information for a Virtual Network.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param start_address: The starting address for this route. This may also include a CIDR
notation, in which case the end address must not be specified.
:type start_address: str
:param end_address: The ending address for this route. If the start address is specified in
CIDR notation, this must be omitted.
:type end_address: str
:param route_type: The type of route this is:
DEFAULT - By default, every app has routes to the local address ranges specified by RFC1918
INHERITED - Routes inherited from the real Virtual Network routes
STATIC - Static route set on the app only
These values will be used for syncing an app's routes with those from a Virtual Network.
Possible values include: "DEFAULT", "INHERITED", "STATIC".
:type route_type: str or ~azure.mgmt.web.v2020_06_01.models.RouteType
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'start_address': {'key': 'properties.startAddress', 'type': 'str'},
'end_address': {'key': 'properties.endAddress', 'type': 'str'},
'route_type': {'key': 'properties.routeType', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
start_address: Optional[str] = None,
end_address: Optional[str] = None,
route_type: Optional[Union[str, "RouteType"]] = None,
**kwargs
):
super(VnetRoute, self).__init__(kind=kind, **kwargs)
self.start_address = start_address
self.end_address = end_address
self.route_type = route_type
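# Illustrative sketch (not part of the generated module): a static route expressed in
# CIDR notation; per the docstring above, end_address is omitted when start_address
# uses CIDR. The address range is hypothetical.
#
#   route = VnetRoute(start_address="10.0.0.0/24", route_type="STATIC")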
class VnetValidationFailureDetails(ProxyOnlyResource):
"""A class that describes the reason for a validation failure.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param failed: A flag describing whether or not validation failed.
:type failed: bool
:param failed_tests: A list of tests that failed in the validation.
:type failed_tests: list[~azure.mgmt.web.v2020_06_01.models.VnetValidationTestFailure]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'failed': {'key': 'properties.failed', 'type': 'bool'},
'failed_tests': {'key': 'properties.failedTests', 'type': '[VnetValidationTestFailure]'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
failed: Optional[bool] = None,
failed_tests: Optional[List["VnetValidationTestFailure"]] = None,
**kwargs
):
super(VnetValidationFailureDetails, self).__init__(kind=kind, **kwargs)
self.failed = failed
self.failed_tests = failed_tests
class VnetValidationTestFailure(ProxyOnlyResource):
"""A class that describes a test that failed during NSG and UDR validation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param test_name: The name of the test that failed.
:type test_name: str
:param details: The details of what caused the failure, e.g. the blocking rule name, etc.
:type details: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'test_name': {'key': 'properties.testName', 'type': 'str'},
'details': {'key': 'properties.details', 'type': 'str'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
test_name: Optional[str] = None,
details: Optional[str] = None,
**kwargs
):
super(VnetValidationTestFailure, self).__init__(kind=kind, **kwargs)
self.test_name = test_name
self.details = details
class WebAppCollection(msrest.serialization.Model):
"""Collection of App Service apps.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.Site]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Site]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["Site"],
**kwargs
):
super(WebAppCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class WebAppInstanceStatusCollection(msrest.serialization.Model):
"""Collection of app instances.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.WebSiteInstanceStatus]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[WebSiteInstanceStatus]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["WebSiteInstanceStatus"],
**kwargs
):
super(WebAppInstanceStatusCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class WebJob(ProxyOnlyResource):
"""Web Job Information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param run_command: Run command.
:type run_command: str
:param url: Job URL.
:type url: str
:param extra_info_url: Extra Info URL.
:type extra_info_url: str
:param web_job_type: Job type. Possible values include: "Continuous", "Triggered".
:type web_job_type: str or ~azure.mgmt.web.v2020_06_01.models.WebJobType
:param error: Error information.
:type error: str
:param using_sdk: Using SDK?
:type using_sdk: bool
:param settings: Job settings.
:type settings: dict[str, any]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'run_command': {'key': 'properties.run_command', 'type': 'str'},
'url': {'key': 'properties.url', 'type': 'str'},
'extra_info_url': {'key': 'properties.extra_info_url', 'type': 'str'},
'web_job_type': {'key': 'properties.web_job_type', 'type': 'str'},
'error': {'key': 'properties.error', 'type': 'str'},
'using_sdk': {'key': 'properties.using_sdk', 'type': 'bool'},
'settings': {'key': 'properties.settings', 'type': '{object}'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
run_command: Optional[str] = None,
url: Optional[str] = None,
extra_info_url: Optional[str] = None,
web_job_type: Optional[Union[str, "WebJobType"]] = None,
error: Optional[str] = None,
using_sdk: Optional[bool] = None,
settings: Optional[Dict[str, Any]] = None,
**kwargs
):
super(WebJob, self).__init__(kind=kind, **kwargs)
self.run_command = run_command
self.url = url
self.extra_info_url = extra_info_url
self.web_job_type = web_job_type
self.error = error
self.using_sdk = using_sdk
self.settings = settings
class WebJobCollection(msrest.serialization.Model):
"""Collection of Kudu web job information elements.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.WebJob]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[WebJob]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["WebJob"],
**kwargs
):
super(WebJobCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class WebSiteInstanceStatus(ProxyOnlyResource):
"""WebSiteInstanceStatus.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param state: Possible values include: "READY", "STOPPED", "UNKNOWN".
:type state: str or ~azure.mgmt.web.v2020_06_01.models.SiteRuntimeState
:param status_url: Link to the GetStatusApi in Kudu.
:type status_url: str
:param detector_url: Link to the Diagnose and Solve Portal.
:type detector_url: str
:param console_url: Link to the console of the web app instance.
:type console_url: str
:param health_check_url: Link to the health check of the web app instance.
:type health_check_url: str
:param containers: Dictionary of :code:`<ContainerInfo>`.
:type containers: dict[str, ~azure.mgmt.web.v2020_06_01.models.ContainerInfo]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'state': {'key': 'properties.state', 'type': 'str'},
'status_url': {'key': 'properties.statusUrl', 'type': 'str'},
'detector_url': {'key': 'properties.detectorUrl', 'type': 'str'},
'console_url': {'key': 'properties.consoleUrl', 'type': 'str'},
'health_check_url': {'key': 'properties.healthCheckUrl', 'type': 'str'},
'containers': {'key': 'properties.containers', 'type': '{ContainerInfo}'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
state: Optional[Union[str, "SiteRuntimeState"]] = None,
status_url: Optional[str] = None,
detector_url: Optional[str] = None,
console_url: Optional[str] = None,
health_check_url: Optional[str] = None,
containers: Optional[Dict[str, "ContainerInfo"]] = None,
**kwargs
):
super(WebSiteInstanceStatus, self).__init__(kind=kind, **kwargs)
self.state = state
self.status_url = status_url
self.detector_url = detector_url
self.console_url = console_url
self.health_check_url = health_check_url
self.containers = containers
class WorkerPool(msrest.serialization.Model):
"""Worker pool of an App Service Environment.
Variables are only populated by the server, and will be ignored when sending a request.
:param worker_size_id: Worker size ID for referencing this worker pool.
:type worker_size_id: int
:param compute_mode: Shared or dedicated app hosting. Possible values include: "Shared",
"Dedicated", "Dynamic".
:type compute_mode: str or ~azure.mgmt.web.v2020_06_01.models.ComputeModeOptions
:param worker_size: VM size of the worker pool instances.
:type worker_size: str
:param worker_count: Number of instances in the worker pool.
:type worker_count: int
:ivar instance_names: Names of all instances in the worker pool (read only).
:vartype instance_names: list[str]
"""
_validation = {
'instance_names': {'readonly': True},
}
_attribute_map = {
'worker_size_id': {'key': 'workerSizeId', 'type': 'int'},
'compute_mode': {'key': 'computeMode', 'type': 'str'},
'worker_size': {'key': 'workerSize', 'type': 'str'},
'worker_count': {'key': 'workerCount', 'type': 'int'},
'instance_names': {'key': 'instanceNames', 'type': '[str]'},
}
def __init__(
self,
*,
worker_size_id: Optional[int] = None,
compute_mode: Optional[Union[str, "ComputeModeOptions"]] = None,
worker_size: Optional[str] = None,
worker_count: Optional[int] = None,
**kwargs
):
super(WorkerPool, self).__init__(**kwargs)
self.worker_size_id = worker_size_id
self.compute_mode = compute_mode
self.worker_size = worker_size
self.worker_count = worker_count
self.instance_names = None
class WorkerPoolCollection(msrest.serialization.Model):
"""Collection of worker pools.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param value: Required. Collection of resources.
:type value: list[~azure.mgmt.web.v2020_06_01.models.WorkerPoolResource]
:ivar next_link: Link to next page of resources.
:vartype next_link: str
"""
_validation = {
'value': {'required': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[WorkerPoolResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["WorkerPoolResource"],
**kwargs
):
super(WorkerPoolCollection, self).__init__(**kwargs)
self.value = value
self.next_link = None
class WorkerPoolResource(ProxyOnlyResource):
"""Worker pool of an App Service Environment ARM resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param sku: Description of a SKU for a scalable resource.
:type sku: ~azure.mgmt.web.v2020_06_01.models.SkuDescription
:param worker_size_id: Worker size ID for referencing this worker pool.
:type worker_size_id: int
:param compute_mode: Shared or dedicated app hosting. Possible values include: "Shared",
"Dedicated", "Dynamic".
:type compute_mode: str or ~azure.mgmt.web.v2020_06_01.models.ComputeModeOptions
:param worker_size: VM size of the worker pool instances.
:type worker_size: str
:param worker_count: Number of instances in the worker pool.
:type worker_count: int
:ivar instance_names: Names of all instances in the worker pool (read only).
:vartype instance_names: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'instance_names': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'SkuDescription'},
'worker_size_id': {'key': 'properties.workerSizeId', 'type': 'int'},
'compute_mode': {'key': 'properties.computeMode', 'type': 'str'},
'worker_size': {'key': 'properties.workerSize', 'type': 'str'},
'worker_count': {'key': 'properties.workerCount', 'type': 'int'},
'instance_names': {'key': 'properties.instanceNames', 'type': '[str]'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
sku: Optional["SkuDescription"] = None,
worker_size_id: Optional[int] = None,
compute_mode: Optional[Union[str, "ComputeModeOptions"]] = None,
worker_size: Optional[str] = None,
worker_count: Optional[int] = None,
**kwargs
):
super(WorkerPoolResource, self).__init__(kind=kind, **kwargs)
self.sku = sku
self.worker_size_id = worker_size_id
self.compute_mode = compute_mode
self.worker_size = worker_size
self.worker_count = worker_count
self.instance_names = None
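# Illustrative sketch (not part of the generated module): a dedicated worker pool
# definition for an App Service Environment. The size values are hypothetical and
# would normally be chosen from the SKUs the environment reports as available.
#
#   worker_pool = WorkerPoolResource(
#       worker_size_id=0,
#       compute_mode="Dedicated",
#       worker_size="Small",
#       worker_count=2,
#   )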
```
#### File: batch/operations/_pool_operations.py
```python
import uuid
from msrest.pipeline import ClientRawResponse
from .. import models
class PoolOperations(object):
"""PoolOperations operations.
You should not instantiate this class directly. Instead, create a Client instance that will create it for you and attach it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API Version. Constant value: "2020-09-01.12.0".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2020-09-01.12.0"
self.config = config
def list_usage_metrics(
self, pool_list_usage_metrics_options=None, custom_headers=None, raw=False, **operation_config):
"""Lists the usage metrics, aggregated by Pool across individual time
intervals, for the specified Account.
If you do not specify a $filter clause including a poolId, the response
includes all Pools that existed in the Account in the time range of the
returned aggregation intervals. If you do not specify a $filter clause
including a startTime or endTime, these filters default to the start and
end times of the last aggregation interval currently available; that
is, only the last aggregation interval is returned.
:param pool_list_usage_metrics_options: Additional parameters for the
operation
:type pool_list_usage_metrics_options:
~azure.batch.models.PoolListUsageMetricsOptions
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of PoolUsageMetrics
:rtype:
~azure.batch.models.PoolUsageMetricsPaged[~azure.batch.models.PoolUsageMetrics]
:raises:
:class:`BatchErrorException<azure.batch.models.BatchErrorException>`
"""
start_time = None
if pool_list_usage_metrics_options is not None:
start_time = pool_list_usage_metrics_options.start_time
end_time = None
if pool_list_usage_metrics_options is not None:
end_time = pool_list_usage_metrics_options.end_time
filter = None
if pool_list_usage_metrics_options is not None:
filter = pool_list_usage_metrics_options.filter
max_results = None
if pool_list_usage_metrics_options is not None:
max_results = pool_list_usage_metrics_options.max_results
timeout = None
if pool_list_usage_metrics_options is not None:
timeout = pool_list_usage_metrics_options.timeout
client_request_id = None
if pool_list_usage_metrics_options is not None:
client_request_id = pool_list_usage_metrics_options.client_request_id
return_client_request_id = None
if pool_list_usage_metrics_options is not None:
return_client_request_id = pool_list_usage_metrics_options.return_client_request_id
ocp_date = None
if pool_list_usage_metrics_options is not None:
ocp_date = pool_list_usage_metrics_options.ocp_date
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_usage_metrics.metadata['url']
path_format_arguments = {
'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if start_time is not None:
query_parameters['starttime'] = self._serialize.query("start_time", start_time, 'iso-8601')
if end_time is not None:
query_parameters['endtime'] = self._serialize.query("end_time", end_time, 'iso-8601')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if max_results is not None:
query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1)
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if client_request_id is not None:
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
if return_client_request_id is not None:
header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
if ocp_date is not None:
header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.BatchErrorException(self._deserialize, response)
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.PoolUsageMetricsPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list_usage_metrics.metadata = {'url': '/poolusagemetrics'}
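# Illustrative usage sketch (assumption: `batch_client` is a configured
# azure.batch.BatchServiceClient). The paged result can be iterated directly.
#
#   for metrics in batch_client.pool.list_usage_metrics():
#       print(metrics.pool_id, metrics.vm_size, metrics.total_core_hours)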
def get_all_lifetime_statistics(
self, pool_get_all_lifetime_statistics_options=None, custom_headers=None, raw=False, **operation_config):
"""Gets lifetime summary statistics for all of the Pools in the specified
Account.
Statistics are aggregated across all Pools that have ever existed in
the Account, from Account creation to the last update time of the
statistics. The statistics may not be immediately available. The Batch
service performs periodic roll-up of statistics. The typical delay is
about 30 minutes.
:param pool_get_all_lifetime_statistics_options: Additional parameters
for the operation
:type pool_get_all_lifetime_statistics_options:
~azure.batch.models.PoolGetAllLifetimeStatisticsOptions
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PoolStatistics or ClientRawResponse if raw=true
:rtype: ~azure.batch.models.PoolStatistics or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`BatchErrorException<azure.batch.models.BatchErrorException>`
"""
timeout = None
if pool_get_all_lifetime_statistics_options is not None:
timeout = pool_get_all_lifetime_statistics_options.timeout
client_request_id = None
if pool_get_all_lifetime_statistics_options is not None:
client_request_id = pool_get_all_lifetime_statistics_options.client_request_id
return_client_request_id = None
if pool_get_all_lifetime_statistics_options is not None:
return_client_request_id = pool_get_all_lifetime_statistics_options.return_client_request_id
ocp_date = None
if pool_get_all_lifetime_statistics_options is not None:
ocp_date = pool_get_all_lifetime_statistics_options.ocp_date
# Construct URL
url = self.get_all_lifetime_statistics.metadata['url']
path_format_arguments = {
'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if client_request_id is not None:
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
if return_client_request_id is not None:
header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
if ocp_date is not None:
header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.BatchErrorException(self._deserialize, response)
header_dict = {}
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PoolStatistics', response)
header_dict = {
'client-request-id': 'str',
'request-id': 'str',
'ETag': 'str',
'Last-Modified': 'rfc-1123',
}
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
client_raw_response.add_headers(header_dict)
return client_raw_response
return deserialized
get_all_lifetime_statistics.metadata = {'url': '/lifetimepoolstats'}
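# Illustrative usage sketch (assumption: `batch_client` is a configured
# azure.batch.BatchServiceClient; the attribute names on the returned PoolStatistics
# are recalled from the models and should be verified). Statistics may lag by roughly
# 30 minutes, as noted above.
#
#   stats = batch_client.pool.get_all_lifetime_statistics()
#   print(stats.usage_stats.dedicated_core_time)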
def add(
self, pool, pool_add_options=None, custom_headers=None, raw=False, **operation_config):
"""Adds a Pool to the specified Account.
When naming Pools, avoid including sensitive information such as user
names or secret project names. This information may appear in telemetry
logs accessible to Microsoft Support engineers.
:param pool: The Pool to be added.
:type pool: ~azure.batch.models.PoolAddParameter
:param pool_add_options: Additional parameters for the operation
:type pool_add_options: ~azure.batch.models.PoolAddOptions
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`BatchErrorException<azure.batch.models.BatchErrorException>`
"""
timeout = None
if pool_add_options is not None:
timeout = pool_add_options.timeout
client_request_id = None
if pool_add_options is not None:
client_request_id = pool_add_options.client_request_id
return_client_request_id = None
if pool_add_options is not None:
return_client_request_id = pool_add_options.return_client_request_id
ocp_date = None
if pool_add_options is not None:
ocp_date = pool_add_options.ocp_date
# Construct URL
url = self.add.metadata['url']
path_format_arguments = {
'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if client_request_id is not None:
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
if return_client_request_id is not None:
header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
if ocp_date is not None:
header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
# Construct body
body_content = self._serialize.body(pool, 'PoolAddParameter')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [201]:
raise models.BatchErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'client-request-id': 'str',
'request-id': 'str',
'ETag': 'str',
'Last-Modified': 'rfc-1123',
'DataServiceId': 'str',
})
return client_raw_response
add.metadata = {'url': '/pools'}
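# Illustrative usage sketch (assumption: `batch_client` is a configured
# azure.batch.BatchServiceClient; the pool ID, VM size, and image details are
# hypothetical examples).
#
#   new_pool = models.PoolAddParameter(
#       id="example-pool",
#       vm_size="standard_d2s_v3",
#       virtual_machine_configuration=models.VirtualMachineConfiguration(
#           image_reference=models.ImageReference(
#               publisher="canonical", offer="ubuntuserver", sku="18.04-lts"),
#           node_agent_sku_id="batch.node.ubuntu 18.04"),
#       target_dedicated_nodes=2,
#   )
#   batch_client.pool.add(new_pool)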
def list(
self, pool_list_options=None, custom_headers=None, raw=False, **operation_config):
"""Lists all of the Pools in the specified Account.
:param pool_list_options: Additional parameters for the operation
:type pool_list_options: ~azure.batch.models.PoolListOptions
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of CloudPool
:rtype:
~azure.batch.models.CloudPoolPaged[~azure.batch.models.CloudPool]
:raises:
:class:`BatchErrorException<azure.batch.models.BatchErrorException>`
"""
filter = None
if pool_list_options is not None:
filter = pool_list_options.filter
select = None
if pool_list_options is not None:
select = pool_list_options.select
expand = None
if pool_list_options is not None:
expand = pool_list_options.expand
max_results = None
if pool_list_options is not None:
max_results = pool_list_options.max_results
timeout = None
if pool_list_options is not None:
timeout = pool_list_options.timeout
client_request_id = None
if pool_list_options is not None:
client_request_id = pool_list_options.client_request_id
return_client_request_id = None
if pool_list_options is not None:
return_client_request_id = pool_list_options.return_client_request_id
ocp_date = None
if pool_list_options is not None:
ocp_date = pool_list_options.ocp_date
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
if max_results is not None:
query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1)
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if client_request_id is not None:
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
if return_client_request_id is not None:
header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
if ocp_date is not None:
header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.BatchErrorException(self._deserialize, response)
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.CloudPoolPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list.metadata = {'url': '/pools'}
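# Illustrative usage sketch (assumption: `batch_client` is a configured
# azure.batch.BatchServiceClient; the OData filter is an example only).
#
#   options = models.PoolListOptions(filter="state eq 'active'")
#   for cloud_pool in batch_client.pool.list(pool_list_options=options):
#       print(cloud_pool.id, cloud_pool.current_dedicated_nodes)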
def delete(
self, pool_id, pool_delete_options=None, custom_headers=None, raw=False, **operation_config):
"""Deletes a Pool from the specified Account.
When you request that a Pool be deleted, the following actions occur:
the Pool state is set to deleting; any ongoing resize operation on the
Pool is stopped; the Batch service starts resizing the Pool to zero
Compute Nodes; any Tasks running on existing Compute Nodes are
terminated and requeued (as if a resize Pool operation had been
requested with the default requeue option); finally, the Pool is
removed from the system. Because running Tasks are requeued, the user
can rerun these Tasks by updating their Job to target a different Pool.
The Tasks can then run on the new Pool. If you want to override the
requeue behavior, then you should call resize Pool explicitly to shrink
the Pool to zero size before deleting the Pool. If you call an Update,
Patch or Delete API on a Pool in the deleting state, it will fail with
HTTP status code 409 with error code PoolBeingDeleted.
:param pool_id: The ID of the Pool to delete.
:type pool_id: str
:param pool_delete_options: Additional parameters for the operation
:type pool_delete_options: ~azure.batch.models.PoolDeleteOptions
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`BatchErrorException<azure.batch.models.BatchErrorException>`
"""
timeout = None
if pool_delete_options is not None:
timeout = pool_delete_options.timeout
client_request_id = None
if pool_delete_options is not None:
client_request_id = pool_delete_options.client_request_id
return_client_request_id = None
if pool_delete_options is not None:
return_client_request_id = pool_delete_options.return_client_request_id
ocp_date = None
if pool_delete_options is not None:
ocp_date = pool_delete_options.ocp_date
if_match = None
if pool_delete_options is not None:
if_match = pool_delete_options.if_match
if_none_match = None
if pool_delete_options is not None:
if_none_match = pool_delete_options.if_none_match
if_modified_since = None
if pool_delete_options is not None:
if_modified_since = pool_delete_options.if_modified_since
if_unmodified_since = None
if pool_delete_options is not None:
if_unmodified_since = pool_delete_options.if_unmodified_since
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True),
'poolId': self._serialize.url("pool_id", pool_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if client_request_id is not None:
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
if return_client_request_id is not None:
header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
if ocp_date is not None:
header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
if if_modified_since is not None:
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
if if_unmodified_since is not None:
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [202]:
raise models.BatchErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'client-request-id': 'str',
'request-id': 'str',
})
return client_raw_response
delete.metadata = {'url': '/pools/{poolId}'}
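# Illustrative usage sketch (assumption: `batch_client` is a configured
# azure.batch.BatchServiceClient; the pool ID and ETag are hypothetical). Supplying
# if_match makes the delete conditional on the pool's current ETag.
#
#   options = models.PoolDeleteOptions(if_match='"0x8D2C1234ABCDEF0"')
#   batch_client.pool.delete("example-pool", pool_delete_options=options)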
def exists(
self, pool_id, pool_exists_options=None, custom_headers=None, raw=False, **operation_config):
"""Gets basic properties of a Pool.
:param pool_id: The ID of the Pool to get.
:type pool_id: str
:param pool_exists_options: Additional parameters for the operation
:type pool_exists_options: ~azure.batch.models.PoolExistsOptions
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: bool or ClientRawResponse if raw=true
:rtype: bool or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`BatchErrorException<azure.batch.models.BatchErrorException>`
"""
timeout = None
if pool_exists_options is not None:
timeout = pool_exists_options.timeout
client_request_id = None
if pool_exists_options is not None:
client_request_id = pool_exists_options.client_request_id
return_client_request_id = None
if pool_exists_options is not None:
return_client_request_id = pool_exists_options.return_client_request_id
ocp_date = None
if pool_exists_options is not None:
ocp_date = pool_exists_options.ocp_date
if_match = None
if pool_exists_options is not None:
if_match = pool_exists_options.if_match
if_none_match = None
if pool_exists_options is not None:
if_none_match = pool_exists_options.if_none_match
if_modified_since = None
if pool_exists_options is not None:
if_modified_since = pool_exists_options.if_modified_since
if_unmodified_since = None
if pool_exists_options is not None:
if_unmodified_since = pool_exists_options.if_unmodified_since
# Construct URL
url = self.exists.metadata['url']
path_format_arguments = {
'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True),
'poolId': self._serialize.url("pool_id", pool_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if client_request_id is not None:
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
if return_client_request_id is not None:
header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
if ocp_date is not None:
header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
if if_modified_since is not None:
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
if if_unmodified_since is not None:
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
# Construct and send request
request = self._client.head(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 404]:
raise models.BatchErrorException(self._deserialize, response)
deserialized = (response.status_code == 200)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
client_raw_response.add_headers({
'client-request-id': 'str',
'request-id': 'str',
'ETag': 'str',
'Last-Modified': 'rfc-1123',
})
return client_raw_response
return deserialized
exists.metadata = {'url': '/pools/{poolId}'}
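# Illustrative usage sketch (assumption: `batch_client` is a configured
# azure.batch.BatchServiceClient). As implemented above, exists() maps HTTP 200 to
# True and 404 to False.
#
#   if not batch_client.pool.exists("example-pool"):
#       print("pool not found")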
def get(
self, pool_id, pool_get_options=None, custom_headers=None, raw=False, **operation_config):
"""Gets information about the specified Pool.
:param pool_id: The ID of the Pool to get.
:type pool_id: str
:param pool_get_options: Additional parameters for the operation
:type pool_get_options: ~azure.batch.models.PoolGetOptions
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: CloudPool or ClientRawResponse if raw=true
:rtype: ~azure.batch.models.CloudPool or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`BatchErrorException<azure.batch.models.BatchErrorException>`
"""
select = None
if pool_get_options is not None:
select = pool_get_options.select
expand = None
if pool_get_options is not None:
expand = pool_get_options.expand
timeout = None
if pool_get_options is not None:
timeout = pool_get_options.timeout
client_request_id = None
if pool_get_options is not None:
client_request_id = pool_get_options.client_request_id
return_client_request_id = None
if pool_get_options is not None:
return_client_request_id = pool_get_options.return_client_request_id
ocp_date = None
if pool_get_options is not None:
ocp_date = pool_get_options.ocp_date
if_match = None
if pool_get_options is not None:
if_match = pool_get_options.if_match
if_none_match = None
if pool_get_options is not None:
if_none_match = pool_get_options.if_none_match
if_modified_since = None
if pool_get_options is not None:
if_modified_since = pool_get_options.if_modified_since
if_unmodified_since = None
if pool_get_options is not None:
if_unmodified_since = pool_get_options.if_unmodified_since
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True),
'poolId': self._serialize.url("pool_id", pool_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if client_request_id is not None:
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
if return_client_request_id is not None:
header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
if ocp_date is not None:
header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
if if_modified_since is not None:
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
if if_unmodified_since is not None:
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.BatchErrorException(self._deserialize, response)
header_dict = {}
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('CloudPool', response)
header_dict = {
'client-request-id': 'str',
'request-id': 'str',
'ETag': 'str',
'Last-Modified': 'rfc-1123',
}
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
client_raw_response.add_headers(header_dict)
return client_raw_response
return deserialized
get.metadata = {'url': '/pools/{poolId}'}
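# Usage sketch (not part of the generated client): assuming a configured
# BatchServiceClient named `batch_client` and an existing pool id, `get`
# returns a models.CloudPool on success, e.g.:
#   pool = batch_client.pool.get("demo-pool")
#   print(pool.id, pool.state)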
def patch(
self, pool_id, pool_patch_parameter, pool_patch_options=None, custom_headers=None, raw=False, **operation_config):
"""Updates the properties of the specified Pool.
This only replaces the Pool properties specified in the request. For
example, if the Pool has a StartTask associated with it, and a request
does not specify a StartTask element, then the Pool keeps the existing
StartTask.
:param pool_id: The ID of the Pool to update.
:type pool_id: str
:param pool_patch_parameter: The parameters for the request.
:type pool_patch_parameter: ~azure.batch.models.PoolPatchParameter
:param pool_patch_options: Additional parameters for the operation
:type pool_patch_options: ~azure.batch.models.PoolPatchOptions
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`BatchErrorException<azure.batch.models.BatchErrorException>`
"""
timeout = None
if pool_patch_options is not None:
timeout = pool_patch_options.timeout
client_request_id = None
if pool_patch_options is not None:
client_request_id = pool_patch_options.client_request_id
return_client_request_id = None
if pool_patch_options is not None:
return_client_request_id = pool_patch_options.return_client_request_id
ocp_date = None
if pool_patch_options is not None:
ocp_date = pool_patch_options.ocp_date
if_match = None
if pool_patch_options is not None:
if_match = pool_patch_options.if_match
if_none_match = None
if pool_patch_options is not None:
if_none_match = pool_patch_options.if_none_match
if_modified_since = None
if pool_patch_options is not None:
if_modified_since = pool_patch_options.if_modified_since
if_unmodified_since = None
if pool_patch_options is not None:
if_unmodified_since = pool_patch_options.if_unmodified_since
# Construct URL
url = self.patch.metadata['url']
path_format_arguments = {
'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True),
'poolId': self._serialize.url("pool_id", pool_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if client_request_id is not None:
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
if return_client_request_id is not None:
header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
if ocp_date is not None:
header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
if if_modified_since is not None:
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
if if_unmodified_since is not None:
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
# Construct body
body_content = self._serialize.body(pool_patch_parameter, 'PoolPatchParameter')
# Construct and send request
request = self._client.patch(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.BatchErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'client-request-id': 'str',
'request-id': 'str',
'ETag': 'str',
'Last-Modified': 'rfc-1123',
'DataServiceId': 'str',
})
return client_raw_response
patch.metadata = {'url': '/pools/{poolId}'}
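# Usage sketch (not part of the generated client): `patch` only replaces the
# properties present in the request body, so a PoolPatchParameter that sets
# only metadata leaves the StartTask and other properties untouched. The
# client name and pool id below are placeholders:
#   batch_client.pool.patch(
#       "demo-pool",
#       models.PoolPatchParameter(
#           metadata=[models.MetadataItem(name="owner", value="ids-team")]))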
def disable_auto_scale(
self, pool_id, pool_disable_auto_scale_options=None, custom_headers=None, raw=False, **operation_config):
"""Disables automatic scaling for a Pool.
:param pool_id: The ID of the Pool on which to disable automatic
scaling.
:type pool_id: str
:param pool_disable_auto_scale_options: Additional parameters for the
operation
:type pool_disable_auto_scale_options:
~azure.batch.models.PoolDisableAutoScaleOptions
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`BatchErrorException<azure.batch.models.BatchErrorException>`
"""
timeout = None
if pool_disable_auto_scale_options is not None:
timeout = pool_disable_auto_scale_options.timeout
client_request_id = None
if pool_disable_auto_scale_options is not None:
client_request_id = pool_disable_auto_scale_options.client_request_id
return_client_request_id = None
if pool_disable_auto_scale_options is not None:
return_client_request_id = pool_disable_auto_scale_options.return_client_request_id
ocp_date = None
if pool_disable_auto_scale_options is not None:
ocp_date = pool_disable_auto_scale_options.ocp_date
# Construct URL
url = self.disable_auto_scale.metadata['url']
path_format_arguments = {
'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True),
'poolId': self._serialize.url("pool_id", pool_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if client_request_id is not None:
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
if return_client_request_id is not None:
header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
if ocp_date is not None:
header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.BatchErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'client-request-id': 'str',
'request-id': 'str',
'ETag': 'str',
'Last-Modified': 'rfc-1123',
'DataServiceId': 'str',
})
return client_raw_response
disable_auto_scale.metadata = {'url': '/pools/{poolId}/disableautoscale'}
def enable_auto_scale(
self, pool_id, auto_scale_formula=None, auto_scale_evaluation_interval=None, pool_enable_auto_scale_options=None, custom_headers=None, raw=False, **operation_config):
"""Enables automatic scaling for a Pool.
You cannot enable automatic scaling on a Pool if a resize operation is
in progress on the Pool. If automatic scaling of the Pool is currently
disabled, you must specify a valid autoscale formula as part of the
request. If automatic scaling of the Pool is already enabled, you may
specify a new autoscale formula and/or a new evaluation interval. You
cannot call this API for the same Pool more than once every 30 seconds.
:param pool_id: The ID of the Pool on which to enable automatic
scaling.
:type pool_id: str
:param auto_scale_formula: The formula for the desired number of
Compute Nodes in the Pool. The formula is checked for validity before
it is applied to the Pool. If the formula is not valid, the Batch
service rejects the request with detailed error information. For more
information about specifying this formula, see Automatically scale
Compute Nodes in an Azure Batch Pool
(https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling).
:type auto_scale_formula: str
:param auto_scale_evaluation_interval: The time interval at which to
automatically adjust the Pool size according to the autoscale formula.
The default value is 15 minutes. The minimum and maximum values are 5
minutes and 168 hours respectively. If you specify a value less than 5
minutes or greater than 168 hours, the Batch service rejects the
request with an invalid property value error; if you are calling the
REST API directly, the HTTP status code is 400 (Bad Request). If you
specify a new interval, then the existing autoscale evaluation
schedule will be stopped and a new autoscale evaluation schedule will
be started, with its starting time being the time when this request
was issued.
:type auto_scale_evaluation_interval: timedelta
:param pool_enable_auto_scale_options: Additional parameters for the
operation
:type pool_enable_auto_scale_options:
~azure.batch.models.PoolEnableAutoScaleOptions
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`BatchErrorException<azure.batch.models.BatchErrorException>`
"""
timeout = None
if pool_enable_auto_scale_options is not None:
timeout = pool_enable_auto_scale_options.timeout
client_request_id = None
if pool_enable_auto_scale_options is not None:
client_request_id = pool_enable_auto_scale_options.client_request_id
return_client_request_id = None
if pool_enable_auto_scale_options is not None:
return_client_request_id = pool_enable_auto_scale_options.return_client_request_id
ocp_date = None
if pool_enable_auto_scale_options is not None:
ocp_date = pool_enable_auto_scale_options.ocp_date
if_match = None
if pool_enable_auto_scale_options is not None:
if_match = pool_enable_auto_scale_options.if_match
if_none_match = None
if pool_enable_auto_scale_options is not None:
if_none_match = pool_enable_auto_scale_options.if_none_match
if_modified_since = None
if pool_enable_auto_scale_options is not None:
if_modified_since = pool_enable_auto_scale_options.if_modified_since
if_unmodified_since = None
if pool_enable_auto_scale_options is not None:
if_unmodified_since = pool_enable_auto_scale_options.if_unmodified_since
pool_enable_auto_scale_parameter = models.PoolEnableAutoScaleParameter(auto_scale_formula=auto_scale_formula, auto_scale_evaluation_interval=auto_scale_evaluation_interval)
# Construct URL
url = self.enable_auto_scale.metadata['url']
path_format_arguments = {
'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True),
'poolId': self._serialize.url("pool_id", pool_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if client_request_id is not None:
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
if return_client_request_id is not None:
header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
if ocp_date is not None:
header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
if if_modified_since is not None:
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
if if_unmodified_since is not None:
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
# Construct body
body_content = self._serialize.body(pool_enable_auto_scale_parameter, 'PoolEnableAutoScaleParameter')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.BatchErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'client-request-id': 'str',
'request-id': 'str',
'ETag': 'str',
'Last-Modified': 'rfc-1123',
'DataServiceId': 'str',
})
return client_raw_response
enable_auto_scale.metadata = {'url': '/pools/{poolId}/enableautoscale'}
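# Usage sketch (not part of the generated client): enabling autoscale with a
# minimal formula and a 15 minute evaluation interval; the formula, pool id
# and client name are illustrative placeholders:
#   import datetime
#   batch_client.pool.enable_auto_scale(
#       "demo-pool",
#       auto_scale_formula="$TargetDedicatedNodes = 2;",
#       auto_scale_evaluation_interval=datetime.timedelta(minutes=15))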
def evaluate_auto_scale(
self, pool_id, auto_scale_formula, pool_evaluate_auto_scale_options=None, custom_headers=None, raw=False, **operation_config):
"""Gets the result of evaluating an automatic scaling formula on the Pool.
This API is primarily for validating an autoscale formula, as it simply
returns the result without applying the formula to the Pool. The Pool
must have auto scaling enabled in order to evaluate a formula.
:param pool_id: The ID of the Pool on which to evaluate the automatic
scaling formula.
:type pool_id: str
:param auto_scale_formula: The formula for the desired number of
Compute Nodes in the Pool. The formula is validated and its results
calculated, but it is not applied to the Pool. To apply the formula to
the Pool, use the 'Enable automatic scaling on a Pool' API. For more information
about specifying this formula, see Automatically scale Compute Nodes
in an Azure Batch Pool
(https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling).
:type auto_scale_formula: str
:param pool_evaluate_auto_scale_options: Additional parameters for the
operation
:type pool_evaluate_auto_scale_options:
~azure.batch.models.PoolEvaluateAutoScaleOptions
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: AutoScaleRun or ClientRawResponse if raw=true
:rtype: ~azure.batch.models.AutoScaleRun or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`BatchErrorException<azure.batch.models.BatchErrorException>`
"""
timeout = None
if pool_evaluate_auto_scale_options is not None:
timeout = pool_evaluate_auto_scale_options.timeout
client_request_id = None
if pool_evaluate_auto_scale_options is not None:
client_request_id = pool_evaluate_auto_scale_options.client_request_id
return_client_request_id = None
if pool_evaluate_auto_scale_options is not None:
return_client_request_id = pool_evaluate_auto_scale_options.return_client_request_id
ocp_date = None
if pool_evaluate_auto_scale_options is not None:
ocp_date = pool_evaluate_auto_scale_options.ocp_date
pool_evaluate_auto_scale_parameter = models.PoolEvaluateAutoScaleParameter(auto_scale_formula=auto_scale_formula)
# Construct URL
url = self.evaluate_auto_scale.metadata['url']
path_format_arguments = {
'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True),
'poolId': self._serialize.url("pool_id", pool_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if client_request_id is not None:
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
if return_client_request_id is not None:
header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
if ocp_date is not None:
header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
# Construct body
body_content = self._serialize.body(pool_evaluate_auto_scale_parameter, 'PoolEvaluateAutoScaleParameter')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.BatchErrorException(self._deserialize, response)
header_dict = {}
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AutoScaleRun', response)
header_dict = {
'client-request-id': 'str',
'request-id': 'str',
'ETag': 'str',
'Last-Modified': 'rfc-1123',
'DataServiceId': 'str',
}
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
client_raw_response.add_headers(header_dict)
return client_raw_response
return deserialized
evaluate_auto_scale.metadata = {'url': '/pools/{poolId}/evaluateautoscale'}
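# Usage sketch (not part of the generated client): evaluating a formula
# without applying it; the pool must already have autoscale enabled. The
# returned models.AutoScaleRun carries the evaluation results:
#   run = batch_client.pool.evaluate_auto_scale(
#       "demo-pool", auto_scale_formula="$TargetDedicatedNodes = 4;")
#   print(run.timestamp, run.results)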
def resize(
self, pool_id, pool_resize_parameter, pool_resize_options=None, custom_headers=None, raw=False, **operation_config):
"""Changes the number of Compute Nodes that are assigned to a Pool.
You can only resize a Pool when its allocation state is steady. If the
Pool is already resizing, the request fails with status code 409. When
you resize a Pool, the Pool's allocation state changes from steady to
resizing. You cannot resize Pools which are configured for automatic
scaling. If you try to do this, the Batch service returns status code 409 (Conflict).
If you resize a Pool downwards, the Batch service chooses which Compute
Nodes to remove. To remove specific Compute Nodes, use the Pool remove
Compute Nodes API instead.
:param pool_id: The ID of the Pool to resize.
:type pool_id: str
:param pool_resize_parameter: The parameters for the request.
:type pool_resize_parameter: ~azure.batch.models.PoolResizeParameter
:param pool_resize_options: Additional parameters for the operation
:type pool_resize_options: ~azure.batch.models.PoolResizeOptions
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`BatchErrorException<azure.batch.models.BatchErrorException>`
"""
timeout = None
if pool_resize_options is not None:
timeout = pool_resize_options.timeout
client_request_id = None
if pool_resize_options is not None:
client_request_id = pool_resize_options.client_request_id
return_client_request_id = None
if pool_resize_options is not None:
return_client_request_id = pool_resize_options.return_client_request_id
ocp_date = None
if pool_resize_options is not None:
ocp_date = pool_resize_options.ocp_date
if_match = None
if pool_resize_options is not None:
if_match = pool_resize_options.if_match
if_none_match = None
if pool_resize_options is not None:
if_none_match = pool_resize_options.if_none_match
if_modified_since = None
if pool_resize_options is not None:
if_modified_since = pool_resize_options.if_modified_since
if_unmodified_since = None
if pool_resize_options is not None:
if_unmodified_since = pool_resize_options.if_unmodified_since
# Construct URL
url = self.resize.metadata['url']
path_format_arguments = {
'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True),
'poolId': self._serialize.url("pool_id", pool_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if client_request_id is not None:
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
if return_client_request_id is not None:
header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
if ocp_date is not None:
header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
if if_modified_since is not None:
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
if if_unmodified_since is not None:
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
# Construct body
body_content = self._serialize.body(pool_resize_parameter, 'PoolResizeParameter')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [202]:
raise models.BatchErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'client-request-id': 'str',
'request-id': 'str',
'ETag': 'str',
'Last-Modified': 'rfc-1123',
'DataServiceId': 'str',
})
return client_raw_response
resize.metadata = {'url': '/pools/{poolId}/resize'}
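# Usage sketch (not part of the generated client): a manual resize is only
# accepted while the pool's allocation state is steady and autoscale is
# disabled. Pool id and node count are placeholders:
#   batch_client.pool.resize(
#       "demo-pool",
#       models.PoolResizeParameter(target_dedicated_nodes=5))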
def stop_resize(
self, pool_id, pool_stop_resize_options=None, custom_headers=None, raw=False, **operation_config):
"""Stops an ongoing resize operation on the Pool.
This does not restore the Pool to its previous state before the resize
operation: it only stops any further changes being made, and the Pool
maintains its current state. After stopping, the Pool stabilizes at the
number of Compute Nodes it was at when the stop operation was done.
During the stop operation, the Pool allocation state changes first to
stopping and then to steady. A resize operation need not be an explicit
resize Pool request; this API can also be used to halt the initial
sizing of the Pool when it is created.
:param pool_id: The ID of the Pool whose resizing you want to stop.
:type pool_id: str
:param pool_stop_resize_options: Additional parameters for the
operation
:type pool_stop_resize_options:
~azure.batch.models.PoolStopResizeOptions
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`BatchErrorException<azure.batch.models.BatchErrorException>`
"""
timeout = None
if pool_stop_resize_options is not None:
timeout = pool_stop_resize_options.timeout
client_request_id = None
if pool_stop_resize_options is not None:
client_request_id = pool_stop_resize_options.client_request_id
return_client_request_id = None
if pool_stop_resize_options is not None:
return_client_request_id = pool_stop_resize_options.return_client_request_id
ocp_date = None
if pool_stop_resize_options is not None:
ocp_date = pool_stop_resize_options.ocp_date
if_match = None
if pool_stop_resize_options is not None:
if_match = pool_stop_resize_options.if_match
if_none_match = None
if pool_stop_resize_options is not None:
if_none_match = pool_stop_resize_options.if_none_match
if_modified_since = None
if pool_stop_resize_options is not None:
if_modified_since = pool_stop_resize_options.if_modified_since
if_unmodified_since = None
if pool_stop_resize_options is not None:
if_unmodified_since = pool_stop_resize_options.if_unmodified_since
# Construct URL
url = self.stop_resize.metadata['url']
path_format_arguments = {
'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True),
'poolId': self._serialize.url("pool_id", pool_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if client_request_id is not None:
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
if return_client_request_id is not None:
header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
if ocp_date is not None:
header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
if if_modified_since is not None:
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
if if_unmodified_since is not None:
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [202]:
raise models.BatchErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'client-request-id': 'str',
'request-id': 'str',
'ETag': 'str',
'Last-Modified': 'rfc-1123',
'DataServiceId': 'str',
})
return client_raw_response
stop_resize.metadata = {'url': '/pools/{poolId}/stopresize'}
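# Usage sketch (not part of the generated client): halting an in-flight
# resize (including the initial sizing of a newly created pool); the pool
# keeps whatever node count it has reached:
#   batch_client.pool.stop_resize("demo-pool")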
def update_properties(
self, pool_id, pool_update_properties_parameter, pool_update_properties_options=None, custom_headers=None, raw=False, **operation_config):
"""Updates the properties of the specified Pool.
This fully replaces all the updatable properties of the Pool. For
example, if the Pool has a StartTask associated with it and if
StartTask is not specified with this request, then the Batch service
will remove the existing StartTask.
:param pool_id: The ID of the Pool to update.
:type pool_id: str
:param pool_update_properties_parameter: The parameters for the
request.
:type pool_update_properties_parameter:
~azure.batch.models.PoolUpdatePropertiesParameter
:param pool_update_properties_options: Additional parameters for the
operation
:type pool_update_properties_options:
~azure.batch.models.PoolUpdatePropertiesOptions
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`BatchErrorException<azure.batch.models.BatchErrorException>`
"""
timeout = None
if pool_update_properties_options is not None:
timeout = pool_update_properties_options.timeout
client_request_id = None
if pool_update_properties_options is not None:
client_request_id = pool_update_properties_options.client_request_id
return_client_request_id = None
if pool_update_properties_options is not None:
return_client_request_id = pool_update_properties_options.return_client_request_id
ocp_date = None
if pool_update_properties_options is not None:
ocp_date = pool_update_properties_options.ocp_date
# Construct URL
url = self.update_properties.metadata['url']
path_format_arguments = {
'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True),
'poolId': self._serialize.url("pool_id", pool_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if client_request_id is not None:
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
if return_client_request_id is not None:
header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
if ocp_date is not None:
header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
# Construct body
body_content = self._serialize.body(pool_update_properties_parameter, 'PoolUpdatePropertiesParameter')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [204]:
raise models.BatchErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'client-request-id': 'str',
'request-id': 'str',
'ETag': 'str',
'Last-Modified': 'rfc-1123',
'DataServiceId': 'str',
})
return client_raw_response
update_properties.metadata = {'url': '/pools/{poolId}/updateproperties'}
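# Usage sketch (not part of the generated client): unlike `patch`, this call
# fully replaces the updatable properties, so the list-valued parameters must
# be supplied (empty lists clear them) and omitting start_task removes any
# existing StartTask:
#   batch_client.pool.update_properties(
#       "demo-pool",
#       models.PoolUpdatePropertiesParameter(
#           certificate_references=[],
#           application_package_references=[],
#           metadata=[models.MetadataItem(name="owner", value="ids-team")]))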
def remove_nodes(
self, pool_id, node_remove_parameter, pool_remove_nodes_options=None, custom_headers=None, raw=False, **operation_config):
"""Removes Compute Nodes from the specified Pool.
This operation can only run when the allocation state of the Pool is
steady. When this operation runs, the allocation state changes from
steady to resizing.
:param pool_id: The ID of the Pool from which you want to remove
Compute Nodes.
:type pool_id: str
:param node_remove_parameter: The parameters for the request.
:type node_remove_parameter: ~azure.batch.models.NodeRemoveParameter
:param pool_remove_nodes_options: Additional parameters for the
operation
:type pool_remove_nodes_options:
~azure.batch.models.PoolRemoveNodesOptions
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`BatchErrorException<azure.batch.models.BatchErrorException>`
"""
timeout = None
if pool_remove_nodes_options is not None:
timeout = pool_remove_nodes_options.timeout
client_request_id = None
if pool_remove_nodes_options is not None:
client_request_id = pool_remove_nodes_options.client_request_id
return_client_request_id = None
if pool_remove_nodes_options is not None:
return_client_request_id = pool_remove_nodes_options.return_client_request_id
ocp_date = None
if pool_remove_nodes_options is not None:
ocp_date = pool_remove_nodes_options.ocp_date
if_match = None
if pool_remove_nodes_options is not None:
if_match = pool_remove_nodes_options.if_match
if_none_match = None
if pool_remove_nodes_options is not None:
if_none_match = pool_remove_nodes_options.if_none_match
if_modified_since = None
if pool_remove_nodes_options is not None:
if_modified_since = pool_remove_nodes_options.if_modified_since
if_unmodified_since = None
if pool_remove_nodes_options is not None:
if_unmodified_since = pool_remove_nodes_options.if_unmodified_since
# Construct URL
url = self.remove_nodes.metadata['url']
path_format_arguments = {
'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True),
'poolId': self._serialize.url("pool_id", pool_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if client_request_id is not None:
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
if return_client_request_id is not None:
header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
if ocp_date is not None:
header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
if if_modified_since is not None:
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
if if_unmodified_since is not None:
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
# Construct body
body_content = self._serialize.body(node_remove_parameter, 'NodeRemoveParameter')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [202]:
raise models.BatchErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'client-request-id': 'str',
'request-id': 'str',
'ETag': 'str',
'Last-Modified': 'rfc-1123',
'DataServiceId': 'str',
})
return client_raw_response
remove_nodes.metadata = {'url': '/pools/{poolId}/removenodes'}
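# Usage sketch (not part of the generated client): removing specific nodes by
# id; the node id below is a placeholder that would normally come from
# batch_client.compute_node.list("demo-pool"):
#   batch_client.pool.remove_nodes(
#       "demo-pool",
#       models.NodeRemoveParameter(node_list=["<compute-node-id>"]))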
```
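The pool operations above are driven through a `BatchServiceClient`. The sketch below shows one plausible way to wire it up with shared-key credentials; the account name, key, endpoint and pool id are placeholders, and the exact constructor arguments may differ between azure-batch releases.
```python
# Illustrative only: placeholder credentials and endpoint, not a working account.
from azure.batch import BatchServiceClient, batch_auth

credentials = batch_auth.SharedKeyCredentials("mybatchaccount", "<shared-key>")
batch_client = BatchServiceClient(
    credentials, batch_url="https://mybatchaccount.<region>.batch.azure.com")

pool_id = "demo-pool"
if batch_client.pool.exists(pool_id):
    pool = batch_client.pool.get(pool_id)   # returns a models.CloudPool
    print(pool.id, pool.state)
else:
    print("pool not found:", pool_id)
```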
#### File: containerinstance/models/_models_py3.py
```python
from typing import Dict, List, Optional, Union
import msrest.serialization
from ._container_instance_management_client_enums import *
class AzureFileVolume(msrest.serialization.Model):
"""The properties of the Azure File volume. Azure File shares are mounted as volumes.
All required parameters must be populated in order to send to Azure.
:param share_name: Required. The name of the Azure File share to be mounted as a volume.
:type share_name: str
:param read_only: The flag indicating whether the Azure File share mounted as a volume is
read-only.
:type read_only: bool
:param storage_account_name: Required. The name of the storage account that contains the Azure
File share.
:type storage_account_name: str
:param storage_account_key: The storage account access key used to access the Azure File share.
:type storage_account_key: str
"""
_validation = {
'share_name': {'required': True},
'storage_account_name': {'required': True},
}
_attribute_map = {
'share_name': {'key': 'shareName', 'type': 'str'},
'read_only': {'key': 'readOnly', 'type': 'bool'},
'storage_account_name': {'key': 'storageAccountName', 'type': 'str'},
'storage_account_key': {'key': 'storageAccountKey', 'type': 'str'},
}
def __init__(
self,
*,
share_name: str,
storage_account_name: str,
read_only: Optional[bool] = None,
storage_account_key: Optional[str] = None,
**kwargs
):
super(AzureFileVolume, self).__init__(**kwargs)
self.share_name = share_name
self.read_only = read_only
self.storage_account_name = storage_account_name
self.storage_account_key = storage_account_key
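# Construction sketch (illustrative only): an Azure File share mounted read-only,
# wrapped in a Volume (defined elsewhere in this module); the share, account
# and key names are placeholders:
#   volume = Volume(
#       name="config-share",
#       azure_file=AzureFileVolume(
#           share_name="myshare",
#           storage_account_name="mystorageaccount",
#           storage_account_key="<storage-key>",
#           read_only=True))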
class CachedImages(msrest.serialization.Model):
"""The cached image and OS type.
All required parameters must be populated in order to send to Azure.
:param os_type: Required. The OS type of the cached image.
:type os_type: str
:param image: Required. The cached image name.
:type image: str
"""
_validation = {
'os_type': {'required': True},
'image': {'required': True},
}
_attribute_map = {
'os_type': {'key': 'osType', 'type': 'str'},
'image': {'key': 'image', 'type': 'str'},
}
def __init__(
self,
*,
os_type: str,
image: str,
**kwargs
):
super(CachedImages, self).__init__(**kwargs)
self.os_type = os_type
self.image = image
class CachedImagesListResult(msrest.serialization.Model):
"""The response containing cached images.
:param value: The list of cached images.
:type value: list[~azure.mgmt.containerinstance.models.CachedImages]
:param next_link: The URI to fetch the next page of cached images.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[CachedImages]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["CachedImages"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(CachedImagesListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class Capabilities(msrest.serialization.Model):
"""The regional capabilities.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar resource_type: The resource type that this capability describes.
:vartype resource_type: str
:ivar os_type: The OS type that this capability describes.
:vartype os_type: str
:ivar location: The resource location.
:vartype location: str
:ivar ip_address_type: The ip address type that this capability describes.
:vartype ip_address_type: str
:ivar gpu: The GPU sku that this capability describes.
:vartype gpu: str
:ivar capabilities: The supported capabilities.
:vartype capabilities: ~azure.mgmt.containerinstance.models.CapabilitiesAutoGenerated
"""
_validation = {
'resource_type': {'readonly': True},
'os_type': {'readonly': True},
'location': {'readonly': True},
'ip_address_type': {'readonly': True},
'gpu': {'readonly': True},
'capabilities': {'readonly': True},
}
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'os_type': {'key': 'osType', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'ip_address_type': {'key': 'ipAddressType', 'type': 'str'},
'gpu': {'key': 'gpu', 'type': 'str'},
'capabilities': {'key': 'capabilities', 'type': 'CapabilitiesAutoGenerated'},
}
def __init__(
self,
**kwargs
):
super(Capabilities, self).__init__(**kwargs)
self.resource_type = None
self.os_type = None
self.location = None
self.ip_address_type = None
self.gpu = None
self.capabilities = None
class CapabilitiesAutoGenerated(msrest.serialization.Model):
"""The supported capabilities.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar max_memory_in_gb: The maximum allowed memory request in GB.
:vartype max_memory_in_gb: float
:ivar max_cpu: The maximum allowed CPU request in cores.
:vartype max_cpu: float
:ivar max_gpu_count: The maximum allowed GPU count.
:vartype max_gpu_count: float
"""
_validation = {
'max_memory_in_gb': {'readonly': True},
'max_cpu': {'readonly': True},
'max_gpu_count': {'readonly': True},
}
_attribute_map = {
'max_memory_in_gb': {'key': 'maxMemoryInGB', 'type': 'float'},
'max_cpu': {'key': 'maxCpu', 'type': 'float'},
'max_gpu_count': {'key': 'maxGpuCount', 'type': 'float'},
}
def __init__(
self,
**kwargs
):
super(CapabilitiesAutoGenerated, self).__init__(**kwargs)
self.max_memory_in_gb = None
self.max_cpu = None
self.max_gpu_count = None
class CapabilitiesListResult(msrest.serialization.Model):
"""The response containing list of capabilities.
:param value: The list of capabilities.
:type value: list[~azure.mgmt.containerinstance.models.Capabilities]
:param next_link: The URI to fetch the next page of capabilities.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Capabilities]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["Capabilities"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(CapabilitiesListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class CloudErrorBody(msrest.serialization.Model):
"""An error response from the Container Instance service.
:param code: An identifier for the error. Codes are invariant and are intended to be consumed
programmatically.
:type code: str
:param message: A message describing the error, intended to be suitable for display in a user
interface.
:type message: str
:param target: The target of the particular error. For example, the name of the property in
error.
:type target: str
:param details: A list of additional details about the error.
:type details: list[~azure.mgmt.containerinstance.models.CloudErrorBody]
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[CloudErrorBody]'},
}
def __init__(
self,
*,
code: Optional[str] = None,
message: Optional[str] = None,
target: Optional[str] = None,
details: Optional[List["CloudErrorBody"]] = None,
**kwargs
):
super(CloudErrorBody, self).__init__(**kwargs)
self.code = code
self.message = message
self.target = target
self.details = details
class Components10Wh5UdSchemasContainergroupidentityPropertiesUserassignedidentitiesAdditionalproperties(msrest.serialization.Model):
"""Components10Wh5UdSchemasContainergroupidentityPropertiesUserassignedidentitiesAdditionalproperties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar principal_id: The principal id of user assigned identity.
:vartype principal_id: str
:ivar client_id: The client id of user assigned identity.
:vartype client_id: str
"""
_validation = {
'principal_id': {'readonly': True},
'client_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'client_id': {'key': 'clientId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Components10Wh5UdSchemasContainergroupidentityPropertiesUserassignedidentitiesAdditionalproperties, self).__init__(**kwargs)
self.principal_id = None
self.client_id = None
class Container(msrest.serialization.Model):
"""A container instance.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param name: Required. The user-provided name of the container instance.
:type name: str
:param image: Required. The name of the image used to create the container instance.
:type image: str
:param command: The commands to execute within the container instance in exec form.
:type command: list[str]
:param ports: The exposed ports on the container instance.
:type ports: list[~azure.mgmt.containerinstance.models.ContainerPort]
:param environment_variables: The environment variables to set in the container instance.
:type environment_variables: list[~azure.mgmt.containerinstance.models.EnvironmentVariable]
:ivar instance_view: The instance view of the container instance. Only valid in response.
:vartype instance_view: ~azure.mgmt.containerinstance.models.ContainerPropertiesInstanceView
:param resources: Required. The resource requirements of the container instance.
:type resources: ~azure.mgmt.containerinstance.models.ResourceRequirements
:param volume_mounts: The volume mounts available to the container instance.
:type volume_mounts: list[~azure.mgmt.containerinstance.models.VolumeMount]
:param liveness_probe: The liveness probe.
:type liveness_probe: ~azure.mgmt.containerinstance.models.ContainerProbe
:param readiness_probe: The readiness probe.
:type readiness_probe: ~azure.mgmt.containerinstance.models.ContainerProbe
"""
_validation = {
'name': {'required': True},
'image': {'required': True},
'instance_view': {'readonly': True},
'resources': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'image': {'key': 'properties.image', 'type': 'str'},
'command': {'key': 'properties.command', 'type': '[str]'},
'ports': {'key': 'properties.ports', 'type': '[ContainerPort]'},
'environment_variables': {'key': 'properties.environmentVariables', 'type': '[EnvironmentVariable]'},
'instance_view': {'key': 'properties.instanceView', 'type': 'ContainerPropertiesInstanceView'},
'resources': {'key': 'properties.resources', 'type': 'ResourceRequirements'},
'volume_mounts': {'key': 'properties.volumeMounts', 'type': '[VolumeMount]'},
'liveness_probe': {'key': 'properties.livenessProbe', 'type': 'ContainerProbe'},
'readiness_probe': {'key': 'properties.readinessProbe', 'type': 'ContainerProbe'},
}
def __init__(
self,
*,
name: str,
image: str,
resources: "ResourceRequirements",
command: Optional[List[str]] = None,
ports: Optional[List["ContainerPort"]] = None,
environment_variables: Optional[List["EnvironmentVariable"]] = None,
volume_mounts: Optional[List["VolumeMount"]] = None,
liveness_probe: Optional["ContainerProbe"] = None,
readiness_probe: Optional["ContainerProbe"] = None,
**kwargs
):
super(Container, self).__init__(**kwargs)
self.name = name
self.image = image
self.command = command
self.ports = ports
self.environment_variables = environment_variables
self.instance_view = None
self.resources = resources
self.volume_mounts = volume_mounts
self.liveness_probe = liveness_probe
self.readiness_probe = readiness_probe
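# Construction sketch (illustrative only): a minimal container definition.
# ResourceRequirements, ResourceRequests and ContainerPort are defined elsewhere
# in this module; the image name and resource sizes are placeholders:
#   container = Container(
#       name="web",
#       image="mcr.microsoft.com/azuredocs/aci-helloworld",
#       resources=ResourceRequirements(
#           requests=ResourceRequests(memory_in_gb=1.5, cpu=1.0)),
#       ports=[ContainerPort(port=80)])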
class ContainerExec(msrest.serialization.Model):
"""The container execution command, for liveness or readiness probe.
:param command: The commands to execute within the container.
:type command: list[str]
"""
_attribute_map = {
'command': {'key': 'command', 'type': '[str]'},
}
def __init__(
self,
*,
command: Optional[List[str]] = None,
**kwargs
):
super(ContainerExec, self).__init__(**kwargs)
self.command = command
class ContainerExecRequest(msrest.serialization.Model):
"""The container exec request.
:param command: The command to be executed.
:type command: str
:param terminal_size: The size of the terminal.
:type terminal_size: ~azure.mgmt.containerinstance.models.ContainerExecRequestTerminalSize
"""
_attribute_map = {
'command': {'key': 'command', 'type': 'str'},
'terminal_size': {'key': 'terminalSize', 'type': 'ContainerExecRequestTerminalSize'},
}
def __init__(
self,
*,
command: Optional[str] = None,
terminal_size: Optional["ContainerExecRequestTerminalSize"] = None,
**kwargs
):
super(ContainerExecRequest, self).__init__(**kwargs)
self.command = command
self.terminal_size = terminal_size
class ContainerExecRequestTerminalSize(msrest.serialization.Model):
"""The size of the terminal.
:param rows: The row size of the terminal.
:type rows: int
:param cols: The column size of the terminal.
:type cols: int
"""
_attribute_map = {
'rows': {'key': 'rows', 'type': 'int'},
'cols': {'key': 'cols', 'type': 'int'},
}
def __init__(
self,
*,
rows: Optional[int] = None,
cols: Optional[int] = None,
**kwargs
):
super(ContainerExecRequestTerminalSize, self).__init__(**kwargs)
self.rows = rows
self.cols = cols
class ContainerExecResponse(msrest.serialization.Model):
"""The information for the container exec command.
:param web_socket_uri: The uri for the exec websocket.
:type web_socket_uri: str
:param password: The password to start the exec command.
:type password: str
"""
_attribute_map = {
'web_socket_uri': {'key': 'webSocketUri', 'type': 'str'},
'password': {'key': 'password', 'type': 'str'},
}
def __init__(
self,
*,
web_socket_uri: Optional[str] = None,
password: Optional[str] = None,
**kwargs
):
super(ContainerExecResponse, self).__init__(**kwargs)
self.web_socket_uri = web_socket_uri
self.password = password
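# Construction sketch (illustrative only): requesting an interactive /bin/sh
# session with an 80x24 terminal through the container exec API; the matching
# ContainerExecResponse carries the websocket URI and password to connect with:
#   exec_request = ContainerExecRequest(
#       command="/bin/sh",
#       terminal_size=ContainerExecRequestTerminalSize(rows=24, cols=80))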
class Resource(msrest.serialization.Model):
"""The Resource model definition.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource id.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:param location: The resource location.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = location
self.tags = tags
class ContainerGroup(Resource):
"""A container group.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource id.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:param location: The resource location.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
:param identity: The identity of the container group, if configured.
:type identity: ~azure.mgmt.containerinstance.models.ContainerGroupIdentity
:ivar provisioning_state: The provisioning state of the container group. This only appears in
the response.
:vartype provisioning_state: str
:param containers: Required. The containers within the container group.
:type containers: list[~azure.mgmt.containerinstance.models.Container]
    :param image_registry_credentials: The image registry credentials used to create the container group.
:type image_registry_credentials:
list[~azure.mgmt.containerinstance.models.ImageRegistryCredential]
:param restart_policy: Restart policy for all containers within the container group.
* ``Always`` Always restart
* ``OnFailure`` Restart on failure
* ``Never`` Never restart. Possible values include: "Always", "OnFailure", "Never".
:type restart_policy: str or ~azure.mgmt.containerinstance.models.ContainerGroupRestartPolicy
:param ip_address: The IP address type of the container group.
:type ip_address: ~azure.mgmt.containerinstance.models.IpAddress
:param os_type: Required. The operating system type required by the containers in the container
group. Possible values include: "Windows", "Linux".
:type os_type: str or ~azure.mgmt.containerinstance.models.OperatingSystemTypes
:param volumes: The list of volumes that can be mounted by containers in this container group.
:type volumes: list[~azure.mgmt.containerinstance.models.Volume]
:ivar instance_view: The instance view of the container group. Only valid in response.
:vartype instance_view:
~azure.mgmt.containerinstance.models.ContainerGroupPropertiesInstanceView
:param diagnostics: The diagnostic information for a container group.
:type diagnostics: ~azure.mgmt.containerinstance.models.ContainerGroupDiagnostics
:param network_profile: The network profile information for a container group.
:type network_profile: ~azure.mgmt.containerinstance.models.ContainerGroupNetworkProfile
:param dns_config: The DNS config information for a container group.
:type dns_config: ~azure.mgmt.containerinstance.models.DnsConfiguration
:param sku: The SKU for a container group. Possible values include: "Standard", "Dedicated".
:type sku: str or ~azure.mgmt.containerinstance.models.ContainerGroupSku
:param encryption_properties: The encryption properties for a container group.
:type encryption_properties: ~azure.mgmt.containerinstance.models.EncryptionProperties
:param init_containers: The init containers for a container group.
:type init_containers: list[~azure.mgmt.containerinstance.models.InitContainerDefinition]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
'containers': {'required': True},
'os_type': {'required': True},
'instance_view': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'identity': {'key': 'identity', 'type': 'ContainerGroupIdentity'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'containers': {'key': 'properties.containers', 'type': '[Container]'},
'image_registry_credentials': {'key': 'properties.imageRegistryCredentials', 'type': '[ImageRegistryCredential]'},
'restart_policy': {'key': 'properties.restartPolicy', 'type': 'str'},
'ip_address': {'key': 'properties.ipAddress', 'type': 'IpAddress'},
'os_type': {'key': 'properties.osType', 'type': 'str'},
'volumes': {'key': 'properties.volumes', 'type': '[Volume]'},
'instance_view': {'key': 'properties.instanceView', 'type': 'ContainerGroupPropertiesInstanceView'},
'diagnostics': {'key': 'properties.diagnostics', 'type': 'ContainerGroupDiagnostics'},
'network_profile': {'key': 'properties.networkProfile', 'type': 'ContainerGroupNetworkProfile'},
'dns_config': {'key': 'properties.dnsConfig', 'type': 'DnsConfiguration'},
'sku': {'key': 'properties.sku', 'type': 'str'},
'encryption_properties': {'key': 'properties.encryptionProperties', 'type': 'EncryptionProperties'},
'init_containers': {'key': 'properties.initContainers', 'type': '[InitContainerDefinition]'},
}
def __init__(
self,
*,
containers: List["Container"],
os_type: Union[str, "OperatingSystemTypes"],
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
identity: Optional["ContainerGroupIdentity"] = None,
image_registry_credentials: Optional[List["ImageRegistryCredential"]] = None,
restart_policy: Optional[Union[str, "ContainerGroupRestartPolicy"]] = None,
ip_address: Optional["IpAddress"] = None,
volumes: Optional[List["Volume"]] = None,
diagnostics: Optional["ContainerGroupDiagnostics"] = None,
network_profile: Optional["ContainerGroupNetworkProfile"] = None,
dns_config: Optional["DnsConfiguration"] = None,
sku: Optional[Union[str, "ContainerGroupSku"]] = None,
encryption_properties: Optional["EncryptionProperties"] = None,
init_containers: Optional[List["InitContainerDefinition"]] = None,
**kwargs
):
super(ContainerGroup, self).__init__(location=location, tags=tags, **kwargs)
self.identity = identity
self.provisioning_state = None
self.containers = containers
self.image_registry_credentials = image_registry_credentials
self.restart_policy = restart_policy
self.ip_address = ip_address
self.os_type = os_type
self.volumes = volumes
self.instance_view = None
self.diagnostics = diagnostics
self.network_profile = network_profile
self.dns_config = dns_config
self.sku = sku
self.encryption_properties = encryption_properties
self.init_containers = init_containers
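# --- Illustrative sketch, not part of the generated SDK ---
# Builds the smallest ContainerGroup that the validation map above allows: only
# `containers` and `os_type` are required. It assumes the `Container` model
# defined elsewhere in this module accepts name/image/resources keyword
# arguments, and it uses the ResourceRequests/ResourceRequirements models
# defined further below. Image name, location, and sizes are placeholders.
def _example_container_group():
    container = Container(
        name="app",
        image="mcr.microsoft.com/azuredocs/aci-helloworld",
        resources=ResourceRequirements(
            requests=ResourceRequests(memory_in_gb=1.5, cpu=1.0)
        ),
    )
    return ContainerGroup(
        location="westus",
        containers=[container],
        os_type="Linux",
        restart_policy="OnFailure",
    )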
class ContainerGroupDiagnostics(msrest.serialization.Model):
"""Container group diagnostic information.
:param log_analytics: Container group log analytics information.
:type log_analytics: ~azure.mgmt.containerinstance.models.LogAnalytics
"""
_attribute_map = {
'log_analytics': {'key': 'logAnalytics', 'type': 'LogAnalytics'},
}
def __init__(
self,
*,
log_analytics: Optional["LogAnalytics"] = None,
**kwargs
):
super(ContainerGroupDiagnostics, self).__init__(**kwargs)
self.log_analytics = log_analytics
class ContainerGroupIdentity(msrest.serialization.Model):
"""Identity for the container group.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar principal_id: The principal id of the container group identity. This property will only
be provided for a system assigned identity.
:vartype principal_id: str
:ivar tenant_id: The tenant id associated with the container group. This property will only be
provided for a system assigned identity.
:vartype tenant_id: str
:param type: The type of identity used for the container group. The type 'SystemAssigned,
UserAssigned' includes both an implicitly created identity and a set of user assigned
identities. The type 'None' will remove any identities from the container group. Possible
values include: "SystemAssigned", "UserAssigned", "SystemAssigned, UserAssigned", "None".
:type type: str or ~azure.mgmt.containerinstance.models.ResourceIdentityType
:param user_assigned_identities: The list of user identities associated with the container
group. The user identity dictionary key references will be ARM resource ids in the form:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
:type user_assigned_identities: dict[str,
~azure.mgmt.containerinstance.models.Components10Wh5UdSchemasContainergroupidentityPropertiesUserassignedidentitiesAdditionalproperties]
"""
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{Components10Wh5UdSchemasContainergroupidentityPropertiesUserassignedidentitiesAdditionalproperties}'},
}
def __init__(
self,
*,
type: Optional[Union[str, "ResourceIdentityType"]] = None,
user_assigned_identities: Optional[Dict[str, "Components10Wh5UdSchemasContainergroupidentityPropertiesUserassignedidentitiesAdditionalproperties"]] = None,
**kwargs
):
super(ContainerGroupIdentity, self).__init__(**kwargs)
self.principal_id = None
self.tenant_id = None
self.type = type
self.user_assigned_identities = user_assigned_identities
class ContainerGroupListResult(msrest.serialization.Model):
"""The container group list response that contains the container group properties.
:param value: The list of container groups.
:type value: list[~azure.mgmt.containerinstance.models.ContainerGroup]
:param next_link: The URI to fetch the next page of container groups.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ContainerGroup]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["ContainerGroup"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(ContainerGroupListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class ContainerGroupNetworkProfile(msrest.serialization.Model):
"""Container group network profile information.
All required parameters must be populated in order to send to Azure.
:param id: Required. The identifier for a network profile.
:type id: str
"""
_validation = {
'id': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
*,
id: str,
**kwargs
):
super(ContainerGroupNetworkProfile, self).__init__(**kwargs)
self.id = id
class ContainerGroupPropertiesInstanceView(msrest.serialization.Model):
"""The instance view of the container group. Only valid in response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar events: The events of this container group.
:vartype events: list[~azure.mgmt.containerinstance.models.Event]
:ivar state: The state of the container group. Only valid in response.
:vartype state: str
"""
_validation = {
'events': {'readonly': True},
'state': {'readonly': True},
}
_attribute_map = {
'events': {'key': 'events', 'type': '[Event]'},
'state': {'key': 'state', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContainerGroupPropertiesInstanceView, self).__init__(**kwargs)
self.events = None
self.state = None
class ContainerHttpGet(msrest.serialization.Model):
"""The container Http Get settings, for liveness or readiness probe.
All required parameters must be populated in order to send to Azure.
:param path: The path to probe.
:type path: str
:param port: Required. The port number to probe.
:type port: int
:param scheme: The scheme. Possible values include: "http", "https".
:type scheme: str or ~azure.mgmt.containerinstance.models.Scheme
"""
_validation = {
'port': {'required': True},
}
_attribute_map = {
'path': {'key': 'path', 'type': 'str'},
'port': {'key': 'port', 'type': 'int'},
'scheme': {'key': 'scheme', 'type': 'str'},
}
def __init__(
self,
*,
port: int,
path: Optional[str] = None,
scheme: Optional[Union[str, "Scheme"]] = None,
**kwargs
):
super(ContainerHttpGet, self).__init__(**kwargs)
self.path = path
self.port = port
self.scheme = scheme
class ContainerPort(msrest.serialization.Model):
"""The port exposed on the container instance.
All required parameters must be populated in order to send to Azure.
:param protocol: The protocol associated with the port. Possible values include: "TCP", "UDP".
:type protocol: str or ~azure.mgmt.containerinstance.models.ContainerNetworkProtocol
:param port: Required. The port number exposed within the container group.
:type port: int
"""
_validation = {
'port': {'required': True},
}
_attribute_map = {
'protocol': {'key': 'protocol', 'type': 'str'},
'port': {'key': 'port', 'type': 'int'},
}
def __init__(
self,
*,
port: int,
protocol: Optional[Union[str, "ContainerNetworkProtocol"]] = None,
**kwargs
):
super(ContainerPort, self).__init__(**kwargs)
self.protocol = protocol
self.port = port
class ContainerProbe(msrest.serialization.Model):
"""The container probe, for liveness or readiness.
:param exec_property: The execution command to probe.
:type exec_property: ~azure.mgmt.containerinstance.models.ContainerExec
:param http_get: The Http Get settings to probe.
:type http_get: ~azure.mgmt.containerinstance.models.ContainerHttpGet
:param initial_delay_seconds: The initial delay seconds.
:type initial_delay_seconds: int
:param period_seconds: The period seconds.
:type period_seconds: int
:param failure_threshold: The failure threshold.
:type failure_threshold: int
:param success_threshold: The success threshold.
:type success_threshold: int
:param timeout_seconds: The timeout seconds.
:type timeout_seconds: int
"""
_attribute_map = {
'exec_property': {'key': 'exec', 'type': 'ContainerExec'},
'http_get': {'key': 'httpGet', 'type': 'ContainerHttpGet'},
'initial_delay_seconds': {'key': 'initialDelaySeconds', 'type': 'int'},
'period_seconds': {'key': 'periodSeconds', 'type': 'int'},
'failure_threshold': {'key': 'failureThreshold', 'type': 'int'},
'success_threshold': {'key': 'successThreshold', 'type': 'int'},
'timeout_seconds': {'key': 'timeoutSeconds', 'type': 'int'},
}
def __init__(
self,
*,
exec_property: Optional["ContainerExec"] = None,
http_get: Optional["ContainerHttpGet"] = None,
initial_delay_seconds: Optional[int] = None,
period_seconds: Optional[int] = None,
failure_threshold: Optional[int] = None,
success_threshold: Optional[int] = None,
timeout_seconds: Optional[int] = None,
**kwargs
):
super(ContainerProbe, self).__init__(**kwargs)
self.exec_property = exec_property
self.http_get = http_get
self.initial_delay_seconds = initial_delay_seconds
self.period_seconds = period_seconds
self.failure_threshold = failure_threshold
self.success_threshold = success_threshold
self.timeout_seconds = timeout_seconds
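# --- Illustrative sketch, not part of the generated SDK ---
# An HTTP liveness/readiness probe built from ContainerProbe and the
# ContainerHttpGet model above: probe GET /healthz on port 8080 every
# 10 seconds, starting 5 seconds after boot. All numbers are placeholders.
def _example_liveness_probe():
    return ContainerProbe(
        http_get=ContainerHttpGet(path="/healthz", port=8080, scheme="http"),
        initial_delay_seconds=5,
        period_seconds=10,
        failure_threshold=3,
    )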
class ContainerPropertiesInstanceView(msrest.serialization.Model):
"""The instance view of the container instance. Only valid in response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar restart_count: The number of times that the container instance has been restarted.
:vartype restart_count: int
:ivar current_state: Current container instance state.
:vartype current_state: ~azure.mgmt.containerinstance.models.ContainerState
:ivar previous_state: Previous container instance state.
:vartype previous_state: ~azure.mgmt.containerinstance.models.ContainerState
:ivar events: The events of the container instance.
:vartype events: list[~azure.mgmt.containerinstance.models.Event]
"""
_validation = {
'restart_count': {'readonly': True},
'current_state': {'readonly': True},
'previous_state': {'readonly': True},
'events': {'readonly': True},
}
_attribute_map = {
'restart_count': {'key': 'restartCount', 'type': 'int'},
'current_state': {'key': 'currentState', 'type': 'ContainerState'},
'previous_state': {'key': 'previousState', 'type': 'ContainerState'},
'events': {'key': 'events', 'type': '[Event]'},
}
def __init__(
self,
**kwargs
):
super(ContainerPropertiesInstanceView, self).__init__(**kwargs)
self.restart_count = None
self.current_state = None
self.previous_state = None
self.events = None
class ContainerState(msrest.serialization.Model):
"""The container instance state.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar state: The state of the container instance.
:vartype state: str
:ivar start_time: The date-time when the container instance state started.
:vartype start_time: ~datetime.datetime
:ivar exit_code: The container instance exit codes correspond to those from the ``docker run``
command.
:vartype exit_code: int
:ivar finish_time: The date-time when the container instance state finished.
:vartype finish_time: ~datetime.datetime
:ivar detail_status: The human-readable status of the container instance state.
:vartype detail_status: str
"""
_validation = {
'state': {'readonly': True},
'start_time': {'readonly': True},
'exit_code': {'readonly': True},
'finish_time': {'readonly': True},
'detail_status': {'readonly': True},
}
_attribute_map = {
'state': {'key': 'state', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'exit_code': {'key': 'exitCode', 'type': 'int'},
'finish_time': {'key': 'finishTime', 'type': 'iso-8601'},
'detail_status': {'key': 'detailStatus', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContainerState, self).__init__(**kwargs)
self.state = None
self.start_time = None
self.exit_code = None
self.finish_time = None
self.detail_status = None
class DnsConfiguration(msrest.serialization.Model):
"""DNS configuration for the container group.
All required parameters must be populated in order to send to Azure.
:param name_servers: Required. The DNS servers for the container group.
:type name_servers: list[str]
:param search_domains: The DNS search domains for hostname lookup in the container group.
:type search_domains: str
:param options: The DNS options for the container group.
:type options: str
"""
_validation = {
'name_servers': {'required': True},
}
_attribute_map = {
'name_servers': {'key': 'nameServers', 'type': '[str]'},
'search_domains': {'key': 'searchDomains', 'type': 'str'},
'options': {'key': 'options', 'type': 'str'},
}
def __init__(
self,
*,
name_servers: List[str],
search_domains: Optional[str] = None,
options: Optional[str] = None,
**kwargs
):
super(DnsConfiguration, self).__init__(**kwargs)
self.name_servers = name_servers
self.search_domains = search_domains
self.options = options
class EncryptionProperties(msrest.serialization.Model):
"""The container group encryption properties.
All required parameters must be populated in order to send to Azure.
:param vault_base_url: Required. The keyvault base url.
:type vault_base_url: str
:param key_name: Required. The encryption key name.
:type key_name: str
:param key_version: Required. The encryption key version.
:type key_version: str
"""
_validation = {
'vault_base_url': {'required': True},
'key_name': {'required': True},
'key_version': {'required': True},
}
_attribute_map = {
'vault_base_url': {'key': 'vaultBaseUrl', 'type': 'str'},
'key_name': {'key': 'keyName', 'type': 'str'},
'key_version': {'key': 'keyVersion', 'type': 'str'},
}
def __init__(
self,
*,
vault_base_url: str,
key_name: str,
key_version: str,
**kwargs
):
super(EncryptionProperties, self).__init__(**kwargs)
self.vault_base_url = vault_base_url
self.key_name = key_name
self.key_version = key_version
class EnvironmentVariable(msrest.serialization.Model):
"""The environment variable to set within the container instance.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the environment variable.
:type name: str
:param value: The value of the environment variable.
:type value: str
:param secure_value: The value of the secure environment variable.
:type secure_value: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
'secure_value': {'key': 'secureValue', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
value: Optional[str] = None,
secure_value: Optional[str] = None,
**kwargs
):
super(EnvironmentVariable, self).__init__(**kwargs)
self.name = name
self.value = value
self.secure_value = secure_value
class Event(msrest.serialization.Model):
"""A container group or container instance event.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar count: The count of the event.
:vartype count: int
:ivar first_timestamp: The date-time of the earliest logged event.
:vartype first_timestamp: ~datetime.datetime
:ivar last_timestamp: The date-time of the latest logged event.
:vartype last_timestamp: ~datetime.datetime
:ivar name: The event name.
:vartype name: str
:ivar message: The event message.
:vartype message: str
:ivar type: The event type.
:vartype type: str
"""
_validation = {
'count': {'readonly': True},
'first_timestamp': {'readonly': True},
'last_timestamp': {'readonly': True},
'name': {'readonly': True},
'message': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
'first_timestamp': {'key': 'firstTimestamp', 'type': 'iso-8601'},
'last_timestamp': {'key': 'lastTimestamp', 'type': 'iso-8601'},
'name': {'key': 'name', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Event, self).__init__(**kwargs)
self.count = None
self.first_timestamp = None
self.last_timestamp = None
self.name = None
self.message = None
self.type = None
class GitRepoVolume(msrest.serialization.Model):
"""Represents a volume that is populated with the contents of a git repository.
All required parameters must be populated in order to send to Azure.
:param directory: Target directory name. Must not contain or start with '..'. If '.' is
supplied, the volume directory will be the git repository. Otherwise, if specified, the volume
will contain the git repository in the subdirectory with the given name.
:type directory: str
:param repository: Required. Repository URL.
:type repository: str
:param revision: Commit hash for the specified revision.
:type revision: str
"""
_validation = {
'repository': {'required': True},
}
_attribute_map = {
'directory': {'key': 'directory', 'type': 'str'},
'repository': {'key': 'repository', 'type': 'str'},
'revision': {'key': 'revision', 'type': 'str'},
}
def __init__(
self,
*,
repository: str,
directory: Optional[str] = None,
revision: Optional[str] = None,
**kwargs
):
super(GitRepoVolume, self).__init__(**kwargs)
self.directory = directory
self.repository = repository
self.revision = revision
class GpuResource(msrest.serialization.Model):
"""The GPU resource.
All required parameters must be populated in order to send to Azure.
:param count: Required. The count of the GPU resource.
:type count: int
:param sku: Required. The SKU of the GPU resource. Possible values include: "K80", "P100",
"V100".
:type sku: str or ~azure.mgmt.containerinstance.models.GpuSku
"""
_validation = {
'count': {'required': True},
'sku': {'required': True},
}
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
'sku': {'key': 'sku', 'type': 'str'},
}
def __init__(
self,
*,
count: int,
sku: Union[str, "GpuSku"],
**kwargs
):
super(GpuResource, self).__init__(**kwargs)
self.count = count
self.sku = sku
class ImageRegistryCredential(msrest.serialization.Model):
"""Image registry credential.
All required parameters must be populated in order to send to Azure.
    :param server: Required. The Docker image registry server, without a protocol such as "http" or "https".
:type server: str
:param username: Required. The username for the private registry.
:type username: str
:param password: The password for the private registry.
:type password: str
"""
_validation = {
'server': {'required': True},
'username': {'required': True},
}
_attribute_map = {
'server': {'key': 'server', 'type': 'str'},
'username': {'key': 'username', 'type': 'str'},
'password': {'key': 'password', 'type': 'str'},
}
def __init__(
self,
*,
server: str,
username: str,
password: Optional[str] = None,
**kwargs
):
super(ImageRegistryCredential, self).__init__(**kwargs)
self.server = server
self.username = username
self.password = password
class InitContainerDefinition(msrest.serialization.Model):
"""The init container definition.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name for the init container.
:type name: str
:param image: The image of the init container.
:type image: str
:param command: The command to execute within the init container in exec form.
:type command: list[str]
:param environment_variables: The environment variables to set in the init container.
:type environment_variables: list[~azure.mgmt.containerinstance.models.EnvironmentVariable]
:ivar instance_view: The instance view of the init container. Only valid in response.
:vartype instance_view:
~azure.mgmt.containerinstance.models.InitContainerPropertiesDefinitionInstanceView
:param volume_mounts: The volume mounts available to the init container.
:type volume_mounts: list[~azure.mgmt.containerinstance.models.VolumeMount]
"""
_validation = {
'name': {'required': True},
'instance_view': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'image': {'key': 'properties.image', 'type': 'str'},
'command': {'key': 'properties.command', 'type': '[str]'},
'environment_variables': {'key': 'properties.environmentVariables', 'type': '[EnvironmentVariable]'},
'instance_view': {'key': 'properties.instanceView', 'type': 'InitContainerPropertiesDefinitionInstanceView'},
'volume_mounts': {'key': 'properties.volumeMounts', 'type': '[VolumeMount]'},
}
def __init__(
self,
*,
name: str,
image: Optional[str] = None,
command: Optional[List[str]] = None,
environment_variables: Optional[List["EnvironmentVariable"]] = None,
volume_mounts: Optional[List["VolumeMount"]] = None,
**kwargs
):
super(InitContainerDefinition, self).__init__(**kwargs)
self.name = name
self.image = image
self.command = command
self.environment_variables = environment_variables
self.instance_view = None
self.volume_mounts = volume_mounts
class InitContainerPropertiesDefinitionInstanceView(msrest.serialization.Model):
"""The instance view of the init container. Only valid in response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar restart_count: The number of times that the init container has been restarted.
:vartype restart_count: int
:ivar current_state: The current state of the init container.
:vartype current_state: ~azure.mgmt.containerinstance.models.ContainerState
:ivar previous_state: The previous state of the init container.
:vartype previous_state: ~azure.mgmt.containerinstance.models.ContainerState
:ivar events: The events of the init container.
:vartype events: list[~azure.mgmt.containerinstance.models.Event]
"""
_validation = {
'restart_count': {'readonly': True},
'current_state': {'readonly': True},
'previous_state': {'readonly': True},
'events': {'readonly': True},
}
_attribute_map = {
'restart_count': {'key': 'restartCount', 'type': 'int'},
'current_state': {'key': 'currentState', 'type': 'ContainerState'},
'previous_state': {'key': 'previousState', 'type': 'ContainerState'},
'events': {'key': 'events', 'type': '[Event]'},
}
def __init__(
self,
**kwargs
):
super(InitContainerPropertiesDefinitionInstanceView, self).__init__(**kwargs)
self.restart_count = None
self.current_state = None
self.previous_state = None
self.events = None
class IpAddress(msrest.serialization.Model):
"""IP address for the container group.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param ports: Required. The list of ports exposed on the container group.
:type ports: list[~azure.mgmt.containerinstance.models.Port]
:param type: Required. Specifies if the IP is exposed to the public internet or private VNET.
Possible values include: "Public", "Private".
:type type: str or ~azure.mgmt.containerinstance.models.ContainerGroupIpAddressType
:param ip: The IP exposed to the public internet.
:type ip: str
    :param dns_name_label: The DNS name label for the IP.
:type dns_name_label: str
:ivar fqdn: The FQDN for the IP.
:vartype fqdn: str
"""
_validation = {
'ports': {'required': True},
'type': {'required': True},
'fqdn': {'readonly': True},
}
_attribute_map = {
'ports': {'key': 'ports', 'type': '[Port]'},
'type': {'key': 'type', 'type': 'str'},
'ip': {'key': 'ip', 'type': 'str'},
'dns_name_label': {'key': 'dnsNameLabel', 'type': 'str'},
'fqdn': {'key': 'fqdn', 'type': 'str'},
}
def __init__(
self,
*,
ports: List["Port"],
type: Union[str, "ContainerGroupIpAddressType"],
ip: Optional[str] = None,
dns_name_label: Optional[str] = None,
**kwargs
):
super(IpAddress, self).__init__(**kwargs)
self.ports = ports
self.type = type
self.ip = ip
self.dns_name_label = dns_name_label
self.fqdn = None
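# --- Illustrative sketch, not part of the generated SDK ---
# A public IP address exposing TCP port 80 with a DNS name label, using the
# Port model defined further below in this module. The resulting `fqdn` is
# read-only and is filled in by the service in the response; the label value
# is a placeholder.
def _example_public_ip():
    return IpAddress(
        ports=[Port(port=80, protocol="TCP")],
        type="Public",
        dns_name_label="my-aci-demo",
    )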
class LogAnalytics(msrest.serialization.Model):
"""Container group log analytics information.
All required parameters must be populated in order to send to Azure.
:param workspace_id: Required. The workspace id for log analytics.
:type workspace_id: str
:param workspace_key: Required. The workspace key for log analytics.
:type workspace_key: str
:param log_type: The log type to be used. Possible values include: "ContainerInsights",
"ContainerInstanceLogs".
:type log_type: str or ~azure.mgmt.containerinstance.models.LogAnalyticsLogType
:param metadata: Metadata for log analytics.
:type metadata: dict[str, str]
"""
_validation = {
'workspace_id': {'required': True},
'workspace_key': {'required': True},
}
_attribute_map = {
'workspace_id': {'key': 'workspaceId', 'type': 'str'},
'workspace_key': {'key': 'workspaceKey', 'type': 'str'},
'log_type': {'key': 'logType', 'type': 'str'},
'metadata': {'key': 'metadata', 'type': '{str}'},
}
def __init__(
self,
*,
workspace_id: str,
workspace_key: str,
log_type: Optional[Union[str, "LogAnalyticsLogType"]] = None,
metadata: Optional[Dict[str, str]] = None,
**kwargs
):
super(LogAnalytics, self).__init__(**kwargs)
self.workspace_id = workspace_id
self.workspace_key = workspace_key
self.log_type = log_type
self.metadata = metadata
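# --- Illustrative sketch, not part of the generated SDK ---
# Wires a Log Analytics workspace into ContainerGroupDiagnostics (defined
# earlier in this module); the workspace id and key values are placeholders.
def _example_diagnostics():
    return ContainerGroupDiagnostics(
        log_analytics=LogAnalytics(
            workspace_id="00000000-0000-0000-0000-000000000000",
            workspace_key="<workspace-key>",
            log_type="ContainerInstanceLogs",
        )
    )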
class Logs(msrest.serialization.Model):
"""The logs.
:param content: The content of the log.
:type content: str
"""
_attribute_map = {
'content': {'key': 'content', 'type': 'str'},
}
def __init__(
self,
*,
content: Optional[str] = None,
**kwargs
):
super(Logs, self).__init__(**kwargs)
self.content = content
class Operation(msrest.serialization.Model):
"""An operation for Azure Container Instance service.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the operation.
:type name: str
:param display: Required. The display information of the operation.
:type display: ~azure.mgmt.containerinstance.models.OperationDisplay
:param properties: The additional properties.
:type properties: object
:param origin: The intended executor of the operation. Possible values include: "User",
"System".
:type origin: str or ~azure.mgmt.containerinstance.models.ContainerInstanceOperationsOrigin
"""
_validation = {
'name': {'required': True},
'display': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
'properties': {'key': 'properties', 'type': 'object'},
'origin': {'key': 'origin', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
display: "OperationDisplay",
properties: Optional[object] = None,
origin: Optional[Union[str, "ContainerInstanceOperationsOrigin"]] = None,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.name = name
self.display = display
self.properties = properties
self.origin = origin
class OperationDisplay(msrest.serialization.Model):
"""The display information of the operation.
:param provider: The name of the provider of the operation.
:type provider: str
:param resource: The name of the resource type of the operation.
:type resource: str
:param operation: The friendly name of the operation.
:type operation: str
:param description: The description of the operation.
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
*,
provider: Optional[str] = None,
resource: Optional[str] = None,
operation: Optional[str] = None,
description: Optional[str] = None,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = provider
self.resource = resource
self.operation = operation
self.description = description
class OperationListResult(msrest.serialization.Model):
"""The operation list response that contains all operations for Azure Container Instance service.
:param value: The list of operations.
:type value: list[~azure.mgmt.containerinstance.models.Operation]
:param next_link: The URI to fetch the next page of operations.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["Operation"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(OperationListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class Port(msrest.serialization.Model):
"""The port exposed on the container group.
All required parameters must be populated in order to send to Azure.
:param protocol: The protocol associated with the port. Possible values include: "TCP", "UDP".
:type protocol: str or ~azure.mgmt.containerinstance.models.ContainerGroupNetworkProtocol
:param port: Required. The port number.
:type port: int
"""
_validation = {
'port': {'required': True},
}
_attribute_map = {
'protocol': {'key': 'protocol', 'type': 'str'},
'port': {'key': 'port', 'type': 'int'},
}
def __init__(
self,
*,
port: int,
protocol: Optional[Union[str, "ContainerGroupNetworkProtocol"]] = None,
**kwargs
):
super(Port, self).__init__(**kwargs)
self.protocol = protocol
self.port = port
class ResourceLimits(msrest.serialization.Model):
"""The resource limits.
:param memory_in_gb: The memory limit in GB of this container instance.
:type memory_in_gb: float
:param cpu: The CPU limit of this container instance.
:type cpu: float
:param gpu: The GPU limit of this container instance.
:type gpu: ~azure.mgmt.containerinstance.models.GpuResource
"""
_attribute_map = {
'memory_in_gb': {'key': 'memoryInGB', 'type': 'float'},
'cpu': {'key': 'cpu', 'type': 'float'},
'gpu': {'key': 'gpu', 'type': 'GpuResource'},
}
def __init__(
self,
*,
memory_in_gb: Optional[float] = None,
cpu: Optional[float] = None,
gpu: Optional["GpuResource"] = None,
**kwargs
):
super(ResourceLimits, self).__init__(**kwargs)
self.memory_in_gb = memory_in_gb
self.cpu = cpu
self.gpu = gpu
class ResourceRequests(msrest.serialization.Model):
"""The resource requests.
All required parameters must be populated in order to send to Azure.
:param memory_in_gb: Required. The memory request in GB of this container instance.
:type memory_in_gb: float
:param cpu: Required. The CPU request of this container instance.
:type cpu: float
:param gpu: The GPU request of this container instance.
:type gpu: ~azure.mgmt.containerinstance.models.GpuResource
"""
_validation = {
'memory_in_gb': {'required': True},
'cpu': {'required': True},
}
_attribute_map = {
'memory_in_gb': {'key': 'memoryInGB', 'type': 'float'},
'cpu': {'key': 'cpu', 'type': 'float'},
'gpu': {'key': 'gpu', 'type': 'GpuResource'},
}
def __init__(
self,
*,
memory_in_gb: float,
cpu: float,
gpu: Optional["GpuResource"] = None,
**kwargs
):
super(ResourceRequests, self).__init__(**kwargs)
self.memory_in_gb = memory_in_gb
self.cpu = cpu
self.gpu = gpu
class ResourceRequirements(msrest.serialization.Model):
"""The resource requirements.
All required parameters must be populated in order to send to Azure.
:param requests: Required. The resource requests of this container instance.
:type requests: ~azure.mgmt.containerinstance.models.ResourceRequests
:param limits: The resource limits of this container instance.
:type limits: ~azure.mgmt.containerinstance.models.ResourceLimits
"""
_validation = {
'requests': {'required': True},
}
_attribute_map = {
'requests': {'key': 'requests', 'type': 'ResourceRequests'},
'limits': {'key': 'limits', 'type': 'ResourceLimits'},
}
def __init__(
self,
*,
requests: "ResourceRequests",
limits: Optional["ResourceLimits"] = None,
**kwargs
):
super(ResourceRequirements, self).__init__(**kwargs)
self.requests = requests
self.limits = limits
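# --- Illustrative sketch, not part of the generated SDK ---
# Resource requests are required while limits are optional; the sizes below
# are placeholder values chosen only to show the shape of the two models.
def _example_resources():
    return ResourceRequirements(
        requests=ResourceRequests(memory_in_gb=2.0, cpu=1.0),
        limits=ResourceLimits(memory_in_gb=4.0, cpu=2.0),
    )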
class Usage(msrest.serialization.Model):
"""A single usage result.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar unit: Unit of the usage result.
:vartype unit: str
:ivar current_value: The current usage of the resource.
:vartype current_value: int
:ivar limit: The maximum permitted usage of the resource.
:vartype limit: int
:ivar name: The name object of the resource.
:vartype name: ~azure.mgmt.containerinstance.models.UsageName
"""
_validation = {
'unit': {'readonly': True},
'current_value': {'readonly': True},
'limit': {'readonly': True},
'name': {'readonly': True},
}
_attribute_map = {
'unit': {'key': 'unit', 'type': 'str'},
'current_value': {'key': 'currentValue', 'type': 'int'},
'limit': {'key': 'limit', 'type': 'int'},
'name': {'key': 'name', 'type': 'UsageName'},
}
def __init__(
self,
**kwargs
):
super(Usage, self).__init__(**kwargs)
self.unit = None
self.current_value = None
self.limit = None
self.name = None
class UsageListResult(msrest.serialization.Model):
"""The response containing the usage data.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The usage data.
:vartype value: list[~azure.mgmt.containerinstance.models.Usage]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Usage]'},
}
def __init__(
self,
**kwargs
):
super(UsageListResult, self).__init__(**kwargs)
self.value = None
class UsageName(msrest.serialization.Model):
"""The name object of the resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The name of the resource.
:vartype value: str
:ivar localized_value: The localized name of the resource.
:vartype localized_value: str
"""
_validation = {
'value': {'readonly': True},
'localized_value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'localized_value': {'key': 'localizedValue', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UsageName, self).__init__(**kwargs)
self.value = None
self.localized_value = None
class Volume(msrest.serialization.Model):
"""The properties of the volume.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the volume.
:type name: str
:param azure_file: The Azure File volume.
:type azure_file: ~azure.mgmt.containerinstance.models.AzureFileVolume
:param empty_dir: The empty directory volume.
:type empty_dir: object
:param secret: The secret volume.
:type secret: dict[str, str]
:param git_repo: The git repo volume.
:type git_repo: ~azure.mgmt.containerinstance.models.GitRepoVolume
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'azure_file': {'key': 'azureFile', 'type': 'AzureFileVolume'},
'empty_dir': {'key': 'emptyDir', 'type': 'object'},
'secret': {'key': 'secret', 'type': '{str}'},
'git_repo': {'key': 'gitRepo', 'type': 'GitRepoVolume'},
}
def __init__(
self,
*,
name: str,
azure_file: Optional["AzureFileVolume"] = None,
empty_dir: Optional[object] = None,
secret: Optional[Dict[str, str]] = None,
git_repo: Optional["GitRepoVolume"] = None,
**kwargs
):
super(Volume, self).__init__(**kwargs)
self.name = name
self.azure_file = azure_file
self.empty_dir = empty_dir
self.secret = secret
self.git_repo = git_repo
class VolumeMount(msrest.serialization.Model):
"""The properties of the volume mount.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the volume mount.
:type name: str
:param mount_path: Required. The path within the container where the volume should be mounted.
Must not contain colon (:).
:type mount_path: str
:param read_only: The flag indicating whether the volume mount is read-only.
:type read_only: bool
"""
_validation = {
'name': {'required': True},
'mount_path': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'mount_path': {'key': 'mountPath', 'type': 'str'},
'read_only': {'key': 'readOnly', 'type': 'bool'},
}
def __init__(
self,
*,
name: str,
mount_path: str,
read_only: Optional[bool] = None,
**kwargs
):
super(VolumeMount, self).__init__(**kwargs)
self.name = name
self.mount_path = mount_path
self.read_only = read_only
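# --- Illustrative sketch, not part of the generated SDK ---
# Pairs an emptyDir volume (declared at the container group level) with the
# VolumeMount a container would reference by the same name; note the mount
# path must not contain a colon. The names and path are placeholders.
def _example_scratch_volume():
    volume = Volume(name="scratch", empty_dir={})
    mount = VolumeMount(name="scratch", mount_path="/mnt/scratch", read_only=False)
    return volume, mount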
```
#### File: azure-eventgrid/tests/test_eg_publisher_client_async.py
```python
import logging
import asyncio
import sys
import os
import json
import pytest
from datetime import timedelta
from msrest.serialization import UTC
from urllib.parse import urlparse
import datetime as dt
from devtools_testutils import AzureMgmtTestCase, CachedResourceGroupPreparer
from azure_devtools.scenario_tests import ReplayableTest
from azure.core.credentials import AzureKeyCredential, AzureSasCredential
from azure.core.messaging import CloudEvent
from azure.core.serialization import NULL
from azure.eventgrid import EventGridEvent, generate_sas
from azure.eventgrid.aio import EventGridPublisherClient
from azure.eventgrid._helpers import _cloud_event_to_generated
from eventgrid_preparer import (
CachedEventGridTopicPreparer
)
class EventGridPublisherClientTests(AzureMgmtTestCase):
FILTER_HEADERS = ReplayableTest.FILTER_HEADERS + ['aeg-sas-key', 'aeg-sas-token']
@CachedResourceGroupPreparer(name_prefix='eventgridtest')
@CachedEventGridTopicPreparer(name_prefix='eventgridtest')
@pytest.mark.asyncio
async def test_send_event_grid_event_data_dict(self, resource_group, eventgrid_topic, eventgrid_topic_primary_key, eventgrid_topic_endpoint):
akc_credential = AzureKeyCredential(eventgrid_topic_primary_key)
client = EventGridPublisherClient(eventgrid_topic_endpoint, akc_credential)
eg_event = EventGridEvent(
subject="sample",
data={"sample": "eventgridevent"},
event_type="Sample.EventGrid.Event",
data_version="2.0"
)
await client.send(eg_event)
@CachedResourceGroupPreparer(name_prefix='eventgridtest')
@CachedEventGridTopicPreparer(name_prefix='eventgridtest')
@pytest.mark.asyncio
async def test_send_event_grid_event_data_as_list(self, resource_group, eventgrid_topic, eventgrid_topic_primary_key, eventgrid_topic_endpoint):
akc_credential = AzureKeyCredential(eventgrid_topic_primary_key)
client = EventGridPublisherClient(eventgrid_topic_endpoint, akc_credential)
eg_event1 = EventGridEvent(
subject="sample",
data="eventgridevent",
event_type="Sample.EventGrid.Event",
data_version="2.0"
)
eg_event2 = EventGridEvent(
subject="sample2",
data="eventgridevent2",
event_type="Sample.EventGrid.Event",
data_version="2.0"
)
await client.send([eg_event1, eg_event2])
@CachedResourceGroupPreparer(name_prefix='eventgridtest')
@CachedEventGridTopicPreparer(name_prefix='eventgridtest')
@pytest.mark.asyncio
async def test_send_event_grid_event_fails_without_full_url(self, resource_group, eventgrid_topic, eventgrid_topic_primary_key, eventgrid_topic_endpoint):
akc_credential = AzureKeyCredential(eventgrid_topic_primary_key)
parsed_url = urlparse(eventgrid_topic_endpoint)
client = EventGridPublisherClient(parsed_url.netloc, akc_credential)
eg_event = EventGridEvent(
subject="sample",
data={"sample": "eventgridevent"},
event_type="Sample.EventGrid.Event",
data_version="2.0"
)
with pytest.raises(ValueError):
await client.send(eg_event)
@CachedResourceGroupPreparer(name_prefix='eventgridtest')
@CachedEventGridTopicPreparer(name_prefix='eventgridtest')
@pytest.mark.asyncio
async def test_send_event_grid_event_data_str(self, resource_group, eventgrid_topic, eventgrid_topic_primary_key, eventgrid_topic_endpoint):
akc_credential = AzureKeyCredential(eventgrid_topic_primary_key)
client = EventGridPublisherClient(eventgrid_topic_endpoint, akc_credential)
eg_event = EventGridEvent(
subject="sample",
data="eventgridevent",
event_type="Sample.EventGrid.Event",
data_version="2.0"
)
await client.send(eg_event)
@CachedResourceGroupPreparer(name_prefix='eventgridtest')
@CachedEventGridTopicPreparer(name_prefix='eventgridtest')
@pytest.mark.asyncio
async def test_send_event_grid_event_data_bytes(self, resource_group, eventgrid_topic, eventgrid_topic_primary_key, eventgrid_topic_endpoint):
akc_credential = AzureKeyCredential(eventgrid_topic_primary_key)
client = EventGridPublisherClient(eventgrid_topic_endpoint, akc_credential)
eg_event = EventGridEvent(
subject="sample",
data=b"eventgridevent",
event_type="Sample.EventGrid.Event",
data_version="2.0"
)
with pytest.raises(TypeError, match="Data in EventGridEvent cannot be bytes*"):
await client.send(eg_event)
@CachedResourceGroupPreparer(name_prefix='eventgridtest')
@CachedEventGridTopicPreparer(name_prefix='eventgridtest')
@pytest.mark.asyncio
async def test_send_event_grid_event_dict_data_bytes(self, resource_group, eventgrid_topic, eventgrid_topic_primary_key, eventgrid_topic_endpoint):
akc_credential = AzureKeyCredential(eventgrid_topic_primary_key)
client = EventGridPublisherClient(eventgrid_topic_endpoint, akc_credential)
eg_event = {
"subject":"sample",
"data":b"eventgridevent",
"eventType":"Sample.EventGrid.Event",
"dataVersion":"2.0",
"id": "123-ddf-133-324255ffd",
"eventTime": dt.datetime.utcnow()
}
with pytest.raises(TypeError, match="Data in EventGridEvent cannot be bytes*"):
await client.send(eg_event)
@CachedResourceGroupPreparer(name_prefix='eventgridtest')
@CachedEventGridTopicPreparer(name_prefix='cloudeventgridtest')
@pytest.mark.asyncio
async def test_send_cloud_event_data_dict(self, resource_group, eventgrid_topic, eventgrid_topic_primary_key, eventgrid_topic_endpoint):
akc_credential = AzureKeyCredential(eventgrid_topic_primary_key)
client = EventGridPublisherClient(eventgrid_topic_endpoint, akc_credential)
cloud_event = CloudEvent(
source = "http://samplesource.dev",
data = {"sample": "cloudevent"},
type="Sample.Cloud.Event"
)
await client.send(cloud_event)
@CachedResourceGroupPreparer(name_prefix='eventgridtest')
@CachedEventGridTopicPreparer(name_prefix='cloudeventgridtest')
@pytest.mark.asyncio
async def test_send_cloud_event_data_str(self, resource_group, eventgrid_topic, eventgrid_topic_primary_key, eventgrid_topic_endpoint):
akc_credential = AzureKeyCredential(eventgrid_topic_primary_key)
client = EventGridPublisherClient(eventgrid_topic_endpoint, akc_credential)
cloud_event = CloudEvent(
source = "http://samplesource.dev",
data = "cloudevent",
type="Sample.Cloud.Event"
)
await client.send(cloud_event)
@CachedResourceGroupPreparer(name_prefix='eventgridtest')
@CachedEventGridTopicPreparer(name_prefix='cloudeventgridtest')
@pytest.mark.asyncio
async def test_send_cloud_event_data_bytes(self, resource_group, eventgrid_topic, eventgrid_topic_primary_key, eventgrid_topic_endpoint):
akc_credential = AzureKeyCredential(eventgrid_topic_primary_key)
client = EventGridPublisherClient(eventgrid_topic_endpoint, akc_credential)
cloud_event = CloudEvent(
source = "http://samplesource.dev",
data = b"cloudevent",
type="Sample.Cloud.Event"
)
await client.send(cloud_event)
@CachedResourceGroupPreparer(name_prefix='eventgridtest')
@CachedEventGridTopicPreparer(name_prefix='cloudeventgridtest')
@pytest.mark.asyncio
async def test_send_cloud_event_data_as_list(self, resource_group, eventgrid_topic, eventgrid_topic_primary_key, eventgrid_topic_endpoint):
akc_credential = AzureKeyCredential(eventgrid_topic_primary_key)
client = EventGridPublisherClient(eventgrid_topic_endpoint, akc_credential)
cloud_event = CloudEvent(
source = "http://samplesource.dev",
data = "cloudevent",
type="Sample.Cloud.Event"
)
await client.send([cloud_event])
@CachedResourceGroupPreparer(name_prefix='eventgridtest')
@CachedEventGridTopicPreparer(name_prefix='cloudeventgridtest')
@pytest.mark.asyncio
async def test_send_cloud_event_data_with_extensions(self, resource_group, eventgrid_topic, eventgrid_topic_primary_key, eventgrid_topic_endpoint):
akc_credential = AzureKeyCredential(eventgrid_topic_primary_key)
client = EventGridPublisherClient(eventgrid_topic_endpoint, akc_credential)
cloud_event = CloudEvent(
source = "http://samplesource.dev",
data = "cloudevent",
type="Sample.Cloud.Event",
extensions={
'reasoncode':204,
'extension':'hello'
}
)
await client.send([cloud_event])
internal = _cloud_event_to_generated(cloud_event).serialize()
assert 'reasoncode' in internal
assert 'extension' in internal
assert internal['reasoncode'] == 204
@CachedResourceGroupPreparer(name_prefix='eventgridtest')
@CachedEventGridTopicPreparer(name_prefix='cloudeventgridtest')
@pytest.mark.asyncio
async def test_send_cloud_event_dict(self, resource_group, eventgrid_topic, eventgrid_topic_primary_key, eventgrid_topic_endpoint):
akc_credential = AzureKeyCredential(eventgrid_topic_primary_key)
client = EventGridPublisherClient(eventgrid_topic_endpoint, akc_credential)
cloud_event1 = {
"id": "1234",
"source": "http://samplesource.dev",
"specversion": "1.0",
"data": "cloudevent",
"type": "Sample.Cloud.Event"
}
await client.send(cloud_event1)
@CachedResourceGroupPreparer(name_prefix='eventgridtest')
@CachedEventGridTopicPreparer(name_prefix='cloudeventgridtest')
@pytest.mark.asyncio
async def test_send_cloud_event_data_none(self, resource_group, eventgrid_topic, eventgrid_topic_primary_key, eventgrid_topic_endpoint):
akc_credential = AzureKeyCredential(eventgrid_topic_primary_key)
client = EventGridPublisherClient(eventgrid_topic_endpoint, akc_credential)
cloud_event = CloudEvent(
source = "http://samplesource.dev",
data = None,
type="Sample.Cloud.Event"
)
await client.send(cloud_event)
@pytest.mark.skip("https://github.com/Azure/azure-sdk-for-python/issues/16993")
@CachedResourceGroupPreparer(name_prefix='eventgridtest')
@CachedEventGridTopicPreparer(name_prefix='cloudeventgridtest')
@pytest.mark.asyncio
async def test_send_cloud_event_data_NULL(self, resource_group, eventgrid_topic, eventgrid_topic_primary_key, eventgrid_topic_endpoint):
akc_credential = AzureKeyCredential(eventgrid_topic_primary_key)
client = EventGridPublisherClient(eventgrid_topic_endpoint, akc_credential)
cloud_event = CloudEvent(
source = "http://samplesource.dev",
data = NULL,
type="Sample.Cloud.Event"
)
def callback(request):
req = json.loads(request.http_request.body)
assert req[0].get("data") is None
await client.send(cloud_event, raw_request_hook=callback)
@CachedResourceGroupPreparer(name_prefix='eventgridtest')
@CachedEventGridTopicPreparer(name_prefix='eventgridtest')
@pytest.mark.asyncio
async def test_send_signature_credential(self, resource_group, eventgrid_topic, eventgrid_topic_primary_key, eventgrid_topic_endpoint):
expiration_date_utc = dt.datetime.now(UTC()) + timedelta(hours=1)
signature = generate_sas(eventgrid_topic_endpoint, eventgrid_topic_primary_key, expiration_date_utc)
credential = AzureSasCredential(signature)
client = EventGridPublisherClient(eventgrid_topic_endpoint, credential)
eg_event = EventGridEvent(
subject="sample",
data={"sample": "eventgridevent"},
event_type="Sample.EventGrid.Event",
data_version="2.0"
)
await client.send(eg_event)
@CachedResourceGroupPreparer(name_prefix='eventgridtest')
@CachedEventGridTopicPreparer(name_prefix='customeventgridtest')
@pytest.mark.asyncio
async def test_send_custom_schema_event(self, resource_group, eventgrid_topic, eventgrid_topic_primary_key, eventgrid_topic_endpoint):
akc_credential = AzureKeyCredential(eventgrid_topic_primary_key)
client = EventGridPublisherClient(eventgrid_topic_endpoint, akc_credential)
custom_event = {
"customSubject": "sample",
"customEventType": "sample.event",
"customDataVersion": "2.0",
"customId": "1234",
"customEventTime": dt.datetime.now(UTC()).isoformat(),
"customData": "sample data"
}
await client.send(custom_event)
@CachedResourceGroupPreparer(name_prefix='eventgridtest')
@CachedEventGridTopicPreparer(name_prefix='customeventgridtest')
@pytest.mark.asyncio
async def test_send_custom_schema_event_as_list(self, resource_group, eventgrid_topic, eventgrid_topic_primary_key, eventgrid_topic_endpoint):
akc_credential = AzureKeyCredential(eventgrid_topic_primary_key)
client = EventGridPublisherClient(eventgrid_topic_endpoint, akc_credential)
custom_event1 = {
"customSubject": "sample",
"customEventType": "sample.event",
"customDataVersion": "2.0",
"customId": "1234",
"customEventTime": dt.datetime.now(UTC()).isoformat(),
"customData": "sample data"
}
custom_event2 = {
"customSubject": "sample2",
"customEventType": "sample.event",
"customDataVersion": "2.0",
"customId": "12345",
"customEventTime": dt.datetime.now(UTC()).isoformat(),
"customData": "sample data 2"
}
await client.send([custom_event1, custom_event2])
@CachedResourceGroupPreparer(name_prefix='eventgridtest')
@CachedEventGridTopicPreparer(name_prefix='cloudeventgridtest')
@pytest.mark.asyncio
async def test_send_and_close_async_session(self, resource_group, eventgrid_topic, eventgrid_topic_primary_key, eventgrid_topic_endpoint):
akc_credential = AzureKeyCredential(eventgrid_topic_primary_key)
client = EventGridPublisherClient(eventgrid_topic_endpoint, akc_credential)
async with client: # this throws if client can't close
cloud_event = CloudEvent(
source = "http://samplesource.dev",
data = "cloudevent",
type="Sample.Cloud.Event"
)
await client.send(cloud_event)
@CachedResourceGroupPreparer(name_prefix='eventgridtest')
@CachedEventGridTopicPreparer(name_prefix='cloudeventgridtest')
@pytest.mark.asyncio
def test_send_NONE_credential_async(self, resource_group, eventgrid_topic, eventgrid_topic_primary_key, eventgrid_topic_endpoint):
with pytest.raises(ValueError, match="Parameter 'self._credential' must not be None."):
client = EventGridPublisherClient(eventgrid_topic_endpoint, None)
```
#### File: identity/_credentials/vscode.py
```python
import abc
import os
import sys
from typing import cast, TYPE_CHECKING
from .._exceptions import CredentialUnavailableError
from .._constants import AzureAuthorityHosts, AZURE_VSCODE_CLIENT_ID, EnvironmentVariables
from .._internal import normalize_authority, validate_tenant_id
from .._internal.aad_client import AadClient
from .._internal.get_token_mixin import GetTokenMixin
if sys.platform.startswith("win"):
from .._internal.win_vscode_adapter import get_refresh_token, get_user_settings
elif sys.platform.startswith("darwin"):
from .._internal.macos_vscode_adapter import get_refresh_token, get_user_settings
else:
from .._internal.linux_vscode_adapter import get_refresh_token, get_user_settings
if TYPE_CHECKING:
# pylint:disable=unused-import,ungrouped-imports
from typing import Any, Dict, Optional
from azure.core.credentials import AccessToken
from .._internal.aad_client import AadClientBase
try:
ABC = abc.ABC
except AttributeError: # Python 2.7, abc exists, but not ABC
ABC = abc.ABCMeta("ABC", (object,), {"__slots__": ()}) # type: ignore
class _VSCodeCredentialBase(ABC):
def __init__(self, **kwargs):
# type: (**Any) -> None
super(_VSCodeCredentialBase, self).__init__()
user_settings = get_user_settings()
self._cloud = user_settings.get("azure.cloud", "AzureCloud")
self._refresh_token = None
self._unavailable_reason = ""
self._client = kwargs.get("_client")
if not self._client:
self._initialize(user_settings, **kwargs)
if not (self._client or self._unavailable_reason):
self._unavailable_reason = "Initialization failed"
@abc.abstractmethod
def _get_client(self, **kwargs):
# type: (**Any) -> AadClientBase
pass
def _get_refresh_token(self):
# type: () -> str
if not self._refresh_token:
self._refresh_token = get_refresh_token(self._cloud)
if not self._refresh_token:
raise CredentialUnavailableError(message="Failed to get Azure user details from Visual Studio Code.")
return self._refresh_token
def _initialize(self, vscode_user_settings, **kwargs):
# type: (Dict, **Any) -> None
"""Build a client from kwargs merged with VS Code user settings.
The first stable version of this credential defaulted to Public Cloud and the "organizations"
tenant when it failed to read VS Code user settings. That behavior is preserved here.
"""
# Precedence for authority:
# 1) VisualStudioCodeCredential(authority=...)
# 2) $AZURE_AUTHORITY_HOST
# 3) authority matching VS Code's "azure.cloud" setting
# 4) default: Public Cloud
authority = kwargs.pop("authority", None) or os.environ.get(EnvironmentVariables.AZURE_AUTHORITY_HOST)
if not authority:
# the application didn't specify an authority, so we figure it out from VS Code settings
if self._cloud == "AzureCloud":
authority = AzureAuthorityHosts.AZURE_PUBLIC_CLOUD
elif self._cloud == "AzureChinaCloud":
authority = AzureAuthorityHosts.AZURE_CHINA
elif self._cloud == "AzureGermanCloud":
authority = AzureAuthorityHosts.AZURE_GERMANY
elif self._cloud == "AzureUSGovernment":
authority = AzureAuthorityHosts.AZURE_GOVERNMENT
else:
# If the value is anything else ("AzureCustomCloud" is the only other known value),
# we need the user to provide the authority because VS Code has no setting for it and
# we can't guess confidently.
self._unavailable_reason = (
'VS Code is configured to use a custom cloud. Set keyword argument "authority"'
+ ' with the Azure Active Directory endpoint for cloud "{}"'.format(self._cloud)
)
return
# Precedence for tenant ID:
# 1) VisualStudioCodeCredential(tenant_id=...)
# 2) "azure.tenant" in VS Code user settings
# 3) default: organizations
tenant_id = kwargs.pop("tenant_id", None) or vscode_user_settings.get("azure.tenant", "organizations")
validate_tenant_id(tenant_id)
if tenant_id.lower() == "adfs":
self._unavailable_reason = "VisualStudioCodeCredential authentication unavailable. ADFS is not supported."
return
self._client = self._get_client(
authority=normalize_authority(authority), client_id=AZURE_VSCODE_CLIENT_ID, tenant_id=tenant_id, **kwargs
)
class VisualStudioCodeCredential(_VSCodeCredentialBase, GetTokenMixin):
"""Authenticates as the Azure user signed in to Visual Studio Code.
:keyword str authority: authority of an Azure Active Directory endpoint, for example "login.microsoftonline.com".
This argument is required for a custom cloud and usually unnecessary otherwise. Defaults to the authority
matching the "Azure: Cloud" setting in VS Code's user settings or, when that setting has no value, the
authority for Azure Public Cloud.
:keyword str tenant_id: ID of the tenant the credential should authenticate in. Defaults to the "Azure: Tenant"
setting in VS Code's user settings or, when that setting has no value, the "organizations" tenant, which
supports only Azure Active Directory work or school accounts.
"""
def get_token(self, *scopes, **kwargs):
# type: (*str, **Any) -> AccessToken
"""Request an access token for `scopes` as the user currently signed in to Visual Studio Code.
This method is called automatically by Azure SDK clients.
:param str scopes: desired scopes for the access token. This method requires at least one scope.
:rtype: :class:`azure.core.credentials.AccessToken`
:raises ~azure.identity.CredentialUnavailableError: the credential cannot retrieve user details from Visual
Studio Code
"""
if self._unavailable_reason:
raise CredentialUnavailableError(message=self._unavailable_reason)
return super(VisualStudioCodeCredential, self).get_token(*scopes, **kwargs)
def _acquire_token_silently(self, *scopes):
# type: (*str) -> Optional[AccessToken]
self._client = cast(AadClient, self._client)
return self._client.get_cached_access_token(scopes)
def _request_token(self, *scopes, **kwargs):
# type: (*str, **Any) -> AccessToken
refresh_token = self._get_refresh_token()
self._client = cast(AadClient, self._client)
return self._client.obtain_token_by_refresh_token(scopes, refresh_token, **kwargs)
def _get_client(self, **kwargs):
# type: (**Any) -> AadClient
return AadClient(**kwargs)
```
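A brief usage sketch of the credential defined above. The keyword arguments override the values read from VS Code user settings, following the precedence described in `_initialize`; the tenant ID below is a placeholder.
```python
# Usage sketch: explicit keyword arguments take precedence over VS Code's
# "azure.cloud" / "azure.tenant" settings, per the precedence order in _initialize.
from azure.identity import AzureAuthorityHosts, VisualStudioCodeCredential

credential = VisualStudioCodeCredential(
    authority=AzureAuthorityHosts.AZURE_GOVERNMENT,  # overrides the "azure.cloud" setting
    tenant_id="my-tenant-id",                        # placeholder; overrides "azure.tenant"
)
# Raises CredentialUnavailableError if no Azure user is signed in to VS Code.
token = credential.get_token("https://management.azure.com/.default")
```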
#### File: azure-identity/tests/test_vscode_credential_async.py
```python
import time
from unittest import mock
from urllib.parse import urlparse
from azure.core.credentials import AccessToken
from azure.identity import AzureAuthorityHosts, CredentialUnavailableError
from azure.identity._constants import EnvironmentVariables
from azure.identity._internal.user_agent import USER_AGENT
from azure.identity.aio import VisualStudioCodeCredential
from azure.core.pipeline.policies import SansIOHTTPPolicy
import pytest
from helpers import build_aad_response, mock_response, Request
from helpers_async import async_validating_transport, wrap_in_future
from test_vscode_credential import GET_REFRESH_TOKEN, GET_USER_SETTINGS
def get_credential(user_settings=None, **kwargs):
# defaulting to empty user settings ensures tests work when real user settings are available
with mock.patch(GET_USER_SETTINGS, lambda: user_settings or {}):
return VisualStudioCodeCredential(**kwargs)
@pytest.mark.asyncio
async def test_tenant_id():
def get_transport(expected_tenant):
return async_validating_transport(
requests=[
Request(base_url="https://{}/{}".format(AzureAuthorityHosts.AZURE_PUBLIC_CLOUD, expected_tenant))
],
responses=[mock_response(json_payload=build_aad_response(access_token="**"))],
)
# credential should default to "organizations" tenant
transport = get_transport("organizations")
credential = get_credential(transport=transport)
with mock.patch(GET_REFRESH_TOKEN, lambda _: "**"):
await credential.get_token("scope")
assert transport.send.call_count == 1
# ... unless VS Code has a tenant configured
user_settings = {"azure.tenant": "vs-code-setting"}
transport = get_transport(user_settings["azure.tenant"])
credential = get_credential(user_settings, transport=transport)
with mock.patch(GET_REFRESH_TOKEN, lambda _: "**"):
await credential.get_token("scope")
assert transport.send.call_count == 1
# ... and a tenant specified by the application prevails over VS Code configuration
transport = get_transport("from-application")
credential = get_credential(user_settings, tenant_id="from-application", transport=transport)
with mock.patch(GET_REFRESH_TOKEN, lambda _: "**"):
await credential.get_token("scope")
assert transport.send.call_count == 1
def test_tenant_id_validation():
"""The credential should raise ValueError when given an invalid tenant_id"""
valid_ids = {"c878a2ab-8ef4-413b-83a0-199afb84d7fb", "contoso.onmicrosoft.com", "organizations", "common"}
for tenant in valid_ids:
get_credential(tenant_id=tenant)
invalid_ids = {"my tenant", "my_tenant", "/", "\\", '"my-tenant"', "'my-tenant'"}
for tenant in invalid_ids:
with pytest.raises(ValueError):
get_credential(tenant_id=tenant)
@pytest.mark.asyncio
async def test_no_scopes():
"""The credential should raise ValueError when get_token is called with no scopes"""
credential = get_credential()
with pytest.raises(ValueError):
await credential.get_token()
@pytest.mark.asyncio
async def test_policies_configurable():
policy = mock.Mock(spec_set=SansIOHTTPPolicy, on_request=mock.Mock())
async def send(*_, **__):
return mock_response(json_payload=build_aad_response(access_token="**"))
credential = get_credential(policies=[policy], transport=mock.Mock(send=send))
with mock.patch(GET_REFRESH_TOKEN, lambda _: "**"):
await credential.get_token("scope")
assert policy.on_request.called
@pytest.mark.asyncio
async def test_user_agent():
transport = async_validating_transport(
requests=[Request(required_headers={"User-Agent": USER_AGENT})],
responses=[mock_response(json_payload=build_aad_response(access_token="**"))],
)
credential = get_credential(transport=transport)
with mock.patch(GET_REFRESH_TOKEN, lambda _: "**"):
await credential.get_token("scope")
@pytest.mark.asyncio
@pytest.mark.parametrize("authority", ("localhost", "https://localhost"))
async def test_request_url(authority):
"""the credential should accept an authority, with or without scheme, as an argument or environment variable"""
tenant_id = "expected-tenant"
access_token = "***"
parsed_authority = urlparse(authority)
expected_netloc = parsed_authority.netloc or authority # "localhost" parses to netloc "", path "localhost"
expected_refresh_token = "<PASSWORD>"
async def mock_send(request, **kwargs):
actual = urlparse(request.url)
assert actual.scheme == "https"
assert actual.netloc == expected_netloc
assert actual.path.startswith("/" + tenant_id)
assert request.body["refresh_token"] == expected_refresh_token
return mock_response(json_payload={"token_type": "Bearer", "expires_in": 42, "access_token": access_token})
credential = get_credential(
tenant_id=tenant_id, transport=mock.Mock(send=mock_send), authority=authority
)
with mock.patch(GET_REFRESH_TOKEN, return_value=expected_refresh_token):
token = await credential.get_token("scope")
assert token.token == access_token
# authority can be configured via environment variable
with mock.patch.dict("os.environ", {EnvironmentVariables.AZURE_AUTHORITY_HOST: authority}, clear=True):
credential = get_credential(tenant_id=tenant_id, transport=mock.Mock(send=mock_send))
with mock.patch(
GET_REFRESH_TOKEN, return_value=expected_refresh_token
):
await credential.get_token("scope")
assert token.token == access_token
@pytest.mark.asyncio
async def test_credential_unavailable_error():
credential = get_credential()
with mock.patch(GET_REFRESH_TOKEN, return_value=None):
with pytest.raises(CredentialUnavailableError):
await credential.get_token("scope")
@pytest.mark.asyncio
async def test_redeem_token():
expected_token = AccessToken("token", 42)
expected_value = "value"
mock_client = mock.Mock(spec=object)
token_by_refresh_token = mock.Mock(return_value=expected_token)
mock_client.obtain_token_by_refresh_token = wrap_in_future(token_by_refresh_token)
mock_client.get_cached_access_token = mock.Mock(return_value=None)
with mock.patch(GET_REFRESH_TOKEN, return_value=expected_value):
credential = get_credential(_client=mock_client)
token = await credential.get_token("scope")
assert token is expected_token
token_by_refresh_token.assert_called_with(("scope",), expected_value)
@pytest.mark.asyncio
async def test_cache_refresh_token():
expected_token = AccessToken("token", 42)
mock_client = mock.Mock(spec=object)
token_by_refresh_token = mock.Mock(return_value=expected_token)
mock_client.obtain_token_by_refresh_token = wrap_in_future(token_by_refresh_token)
mock_client.get_cached_access_token = mock.Mock(return_value=None)
mock_get_credentials = mock.Mock(return_value="VALUE")
credential = get_credential(_client=mock_client)
with mock.patch(GET_REFRESH_TOKEN, mock_get_credentials):
await credential.get_token("scope")
assert mock_get_credentials.call_count == 1
await credential.get_token("scope")
assert mock_get_credentials.call_count == 1
@pytest.mark.asyncio
async def test_no_obtain_token_if_cached():
expected_token = AccessToken("token", time.time() + 3600)
token_by_refresh_token = mock.Mock(return_value=expected_token)
mock_client = mock.Mock(
get_cached_access_token=mock.Mock(return_value=expected_token),
obtain_token_by_refresh_token=wrap_in_future(token_by_refresh_token)
)
credential = get_credential(_client=mock_client)
with mock.patch(
GET_REFRESH_TOKEN,
mock.Mock(side_effect=Exception("credential should not acquire a new token")),
):
token = await credential.get_token("scope")
assert token_by_refresh_token.call_count == 0
assert token.token == expected_token.token
assert token.expires_on == expected_token.expires_on
@pytest.mark.asyncio
async def test_adfs():
"""The credential should raise CredentialUnavailableError when configured for ADFS"""
credential = get_credential(tenant_id="adfs")
with pytest.raises(CredentialUnavailableError) as ex:
await credential.get_token("scope")
assert "adfs" in ex.value.message.lower()
@pytest.mark.asyncio
@pytest.mark.parametrize(
"cloud,authority",
(
("AzureCloud", AzureAuthorityHosts.AZURE_PUBLIC_CLOUD),
("AzureChinaCloud", AzureAuthorityHosts.AZURE_CHINA),
("AzureGermanCloud", AzureAuthorityHosts.AZURE_GERMANY),
("AzureUSGovernment", AzureAuthorityHosts.AZURE_GOVERNMENT),
),
)
async def test_reads_cloud_settings(cloud, authority):
"""the credential should read authority and tenant from VS Code settings when an application doesn't specify them"""
expected_tenant = "tenant-id"
user_settings = {"azure.cloud": cloud, "azure.tenant": expected_tenant}
transport = async_validating_transport(
requests=[Request(base_url="https://{}/{}".format(authority, expected_tenant))],
responses=[mock_response(json_payload=build_aad_response(access_token="**"))],
)
credential = get_credential(user_settings, transport=transport)
with mock.patch(GET_REFRESH_TOKEN, lambda _: "**"):
await credential.get_token("scope")
assert transport.send.call_count == 1
@pytest.mark.asyncio
async def test_no_user_settings():
"""the credential should default to Public Cloud and "organizations" tenant when it can't read VS Code settings"""
transport = async_validating_transport(
requests=[Request(base_url="https://{}/{}".format(AzureAuthorityHosts.AZURE_PUBLIC_CLOUD, "organizations"))],
responses=[mock_response(json_payload=build_aad_response(access_token="**"))],
)
credential = get_credential(transport=transport)
with mock.patch(GET_REFRESH_TOKEN, lambda _: "**"):
await credential.get_token("scope")
assert transport.send.call_count == 1
```
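The GET_REFRESH_TOKEN and GET_USER_SETTINGS names imported from test_vscode_credential are mock.patch targets. Their exact values are not shown in this file; they most likely point at the adapter functions that vscode.py imports into its own namespace, roughly as sketched below (an assumption, not verified here).
```python
# Assumed patch targets (sketch only): patching the names inside the vscode module affects
# both the sync and async credentials, since both resolve get_refresh_token /
# get_user_settings through that module.
GET_REFRESH_TOKEN = "azure.identity._credentials.vscode.get_refresh_token"
GET_USER_SETTINGS = "azure.identity._credentials.vscode.get_user_settings"
```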
#### File: keyvault/keys/_client.py
```python
from functools import partial
from azure.core.tracing.decorator import distributed_trace
from ._shared import KeyVaultClientBase
from ._shared.exceptions import error_map as _error_map
from ._shared._polling import DeleteRecoverPollingMethod, KeyVaultOperationPoller
from ._models import KeyVaultKey, KeyProperties, DeletedKey
try:
from typing import TYPE_CHECKING
except ImportError:
TYPE_CHECKING = False
if TYPE_CHECKING:
# pylint:disable=unused-import
from typing import Any, Optional, Union
from azure.core.paging import ItemPaged
from ._models import JsonWebKey
class KeyClient(KeyVaultClientBase):
"""A high-level interface for managing a vault's keys.
:param str vault_url: URL of the vault the client will access. This is also called the vault's "DNS Name".
:param credential: An object which can provide an access token for the vault, such as a credential from
:mod:`azure.identity`
:keyword api_version: version of the Key Vault API to use. Defaults to the most recent.
:paramtype api_version: ~azure.keyvault.keys.ApiVersion
:keyword transport: transport to use. Defaults to :class:`~azure.core.pipeline.transport.RequestsTransport`.
:paramtype transport: ~azure.core.pipeline.transport.HttpTransport
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START create_key_client]
:end-before: [END create_key_client]
:language: python
:caption: Create a new ``KeyClient``
:dedent: 4
"""
# pylint:disable=protected-access
@distributed_trace
def create_key(self, name, key_type, **kwargs):
# type: (str, Union[str, azure.keyvault.keys.KeyType], **Any) -> KeyVaultKey
"""Create a key or, if ``name`` is already in use, create a new version of the key.
Requires keys/create permission.
:param str name: The name of the new key.
:param key_type: The type of key to create
:type key_type: ~azure.keyvault.keys.KeyType or str
:keyword int size: Key size in bits. Applies only to RSA and symmetric keys. Consider using
:func:`create_rsa_key` or :func:`create_oct_key` instead.
:keyword curve: Elliptic curve name. Applies only to elliptic curve keys. Defaults to the NIST P-256
elliptic curve. To create an elliptic curve key, consider using :func:`create_ec_key` instead.
:paramtype curve: ~azure.keyvault.keys.KeyCurveName or str
:keyword int public_exponent: The RSA public exponent to use. Applies only to RSA keys created in a Managed HSM.
:keyword key_operations: Allowed key operations
:paramtype key_operations: list[~azure.keyvault.keys.KeyOperation or str]
:keyword bool enabled: Whether the key is enabled for use.
:keyword tags: Application specific metadata in the form of key-value pairs.
:paramtype tags: dict[str, str]
:keyword ~datetime.datetime not_before: Not before date of the key in UTC
:keyword ~datetime.datetime expires_on: Expiry date of the key in UTC
:returns: The created key
:rtype: ~azure.keyvault.keys.KeyVaultKey
:raises: :class:`~azure.core.exceptions.HttpResponseError`
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START create_key]
:end-before: [END create_key]
:language: python
:caption: Create a key
:dedent: 8
"""
enabled = kwargs.pop("enabled", None)
not_before = kwargs.pop("not_before", None)
expires_on = kwargs.pop("expires_on", None)
if enabled is not None or not_before is not None or expires_on is not None:
attributes = self._models.KeyAttributes(enabled=enabled, not_before=not_before, expires=expires_on)
else:
attributes = None
parameters = self._models.KeyCreateParameters(
kty=key_type,
key_size=kwargs.pop("size", None),
key_attributes=attributes,
key_ops=kwargs.pop("key_operations", None),
tags=kwargs.pop("tags", None),
curve=kwargs.pop("curve", None),
public_exponent=kwargs.pop("public_exponent", None)
)
bundle = self._client.create_key(
vault_base_url=self.vault_url,
key_name=name,
parameters=parameters,
error_map=_error_map,
**kwargs
)
return KeyVaultKey._from_key_bundle(bundle)
@distributed_trace
def create_rsa_key(self, name, **kwargs):
# type: (str, **Any) -> KeyVaultKey
"""Create a new RSA key or, if ``name`` is already in use, create a new version of the key
Requires the keys/create permission.
:param str name: The name for the new key.
:keyword int size: Key size in bits, for example 2048, 3072, or 4096.
:keyword int public_exponent: The RSA public exponent to use. Applies only to RSA keys created in a Managed HSM.
:keyword bool hardware_protected: Whether the key should be created in a hardware security module.
Defaults to ``False``.
:keyword key_operations: Allowed key operations
:paramtype key_operations: list[~azure.keyvault.keys.KeyOperation or str]
:keyword bool enabled: Whether the key is enabled for use.
:keyword tags: Application specific metadata in the form of key-value pairs.
:paramtype tags: dict[str, str]
:keyword ~datetime.datetime not_before: Not before date of the key in UTC
:keyword ~datetime.datetime expires_on: Expiry date of the key in UTC
:returns: The created key
:rtype: ~azure.keyvault.keys.KeyVaultKey
:raises: :class:`~azure.core.exceptions.HttpResponseError`
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START create_rsa_key]
:end-before: [END create_rsa_key]
:language: python
:caption: Create RSA key
:dedent: 8
"""
hsm = kwargs.pop("hardware_protected", False)
return self.create_key(name, key_type="RSA-HSM" if hsm else "RSA", **kwargs)
@distributed_trace
def create_ec_key(self, name, **kwargs):
# type: (str, **Any) -> KeyVaultKey
"""Create a new elliptic curve key or, if ``name`` is already in use, create a new version of the key.
Requires the keys/create permission.
:param str name: The name for the new key.
:keyword curve: Elliptic curve name. Defaults to the NIST P-256 elliptic curve.
:paramtype curve: ~azure.keyvault.keys.KeyCurveName or str
:keyword key_operations: Allowed key operations
:paramtype key_operations: list[~azure.keyvault.keys.KeyOperation or str]
:keyword bool hardware_protected: Whether the key should be created in a hardware security module.
Defaults to ``False``.
:keyword bool enabled: Whether the key is enabled for use.
:keyword tags: Application specific metadata in the form of key-value pairs.
:paramtype tags: dict[str, str]
:keyword ~datetime.datetime not_before: Not before date of the key in UTC
:keyword ~datetime.datetime expires_on: Expiry date of the key in UTC
:returns: The created key
:rtype: ~azure.keyvault.keys.KeyVaultKey
:raises: :class:`~azure.core.exceptions.HttpResponseError`
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START create_ec_key]
:end-before: [END create_ec_key]
:language: python
:caption: Create an elliptic curve key
:dedent: 8
"""
hsm = kwargs.pop("hardware_protected", False)
return self.create_key(name, key_type="EC-HSM" if hsm else "EC", **kwargs)
@distributed_trace
def create_oct_key(self, name, **kwargs):
# type: (str, **Any) -> KeyVaultKey
"""Create a new octet sequence (symmetric) key or, if ``name`` is in use, create a new version of the key.
Requires the keys/create permission.
:param str name: The name for the new key.
:keyword int size: Key size in bits, for example 128, 192, or 256.
:keyword key_operations: Allowed key operations.
:paramtype key_operations: list[~azure.keyvault.keys.KeyOperation or str]
:keyword bool hardware_protected: Whether the key should be created in a hardware security module.
Defaults to ``False``.
:keyword bool enabled: Whether the key is enabled for use.
:keyword tags: Application specific metadata in the form of key-value pairs.
:paramtype tags: dict[str, str]
:keyword ~datetime.datetime not_before: Not before date of the key in UTC
:keyword ~datetime.datetime expires_on: Expiry date of the key in UTC
:returns: The created key
:rtype: ~azure.keyvault.keys.KeyVaultKey
:raises: :class:`~azure.core.exceptions.HttpResponseError`
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START create_oct_key]
:end-before: [END create_oct_key]
:language: python
:caption: Create an octet sequence (symmetric) key
:dedent: 8
"""
hsm = kwargs.pop("hardware_protected", False)
return self.create_key(name, key_type="oct-HSM" if hsm else "oct", **kwargs)
@distributed_trace
def begin_delete_key(self, name, **kwargs):
# type: (str, **Any) -> DeletedKey
"""Delete all versions of a key and its cryptographic material.
Requires keys/delete permission. When this method returns Key Vault has begun deleting the key. Deletion may
take several seconds in a vault with soft-delete enabled. This method therefore returns a poller enabling you to
wait for deletion to complete.
:param str name: The name of the key to delete.
:returns: A poller for the delete key operation. The poller's `result` method returns the
:class:`~azure.keyvault.keys.DeletedKey` without waiting for deletion to complete. If the vault has
soft-delete enabled and you want to permanently delete the key with :func:`purge_deleted_key`, call the
poller's `wait` method first. It will block until the deletion is complete. The `wait` method requires
keys/get permission.
:rtype: ~azure.core.polling.LROPoller[~azure.keyvault.keys.DeletedKey]
:raises:
:class:`~azure.core.exceptions.ResourceNotFoundError` if the key doesn't exist,
:class:`~azure.core.exceptions.HttpResponseError` for other errors
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START delete_key]
:end-before: [END delete_key]
:language: python
:caption: Delete a key
:dedent: 8
"""
polling_interval = kwargs.pop("_polling_interval", None)
if polling_interval is None:
polling_interval = 2
deleted_key = DeletedKey._from_deleted_key_bundle(
self._client.delete_key(self.vault_url, name, error_map=_error_map, **kwargs)
)
command = partial(self.get_deleted_key, name=name, **kwargs)
polling_method = DeleteRecoverPollingMethod(
# no recovery ID means soft-delete is disabled, in which case we initialize the poller as finished
finished=deleted_key.recovery_id is None,
command=command,
final_resource=deleted_key,
interval=polling_interval,
)
return KeyVaultOperationPoller(polling_method)
@distributed_trace
def get_key(self, name, version=None, **kwargs):
# type: (str, Optional[str], **Any) -> KeyVaultKey
"""Get a key's attributes and, if it's an asymmetric key, its public material.
Requires keys/get permission.
:param str name: The name of the key to get.
:param str version: (optional) A specific version of the key to get. If not specified, gets the latest version
of the key.
:rtype: ~azure.keyvault.keys.KeyVaultKey
:raises:
:class:`~azure.core.exceptions.ResourceNotFoundError` if the key doesn't exist,
:class:`~azure.core.exceptions.HttpResponseError` for other errors
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START get_key]
:end-before: [END get_key]
:language: python
:caption: Get a key
:dedent: 8
"""
bundle = self._client.get_key(self.vault_url, name, key_version=version or "", error_map=_error_map, **kwargs)
return KeyVaultKey._from_key_bundle(bundle)
@distributed_trace
def get_deleted_key(self, name, **kwargs):
# type: (str, **Any) -> DeletedKey
"""Get a deleted key. Possible only in a vault with soft-delete enabled.
Requires keys/get permission.
:param str name: The name of the key
:returns: The deleted key
:rtype: ~azure.keyvault.keys.DeletedKey
:raises:
:class:`~azure.core.exceptions.ResourceNotFoundError` if the key doesn't exist,
:class:`~azure.core.exceptions.HttpResponseError` for other errors
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START get_deleted_key]
:end-before: [END get_deleted_key]
:language: python
:caption: Get a deleted key
:dedent: 8
"""
bundle = self._client.get_deleted_key(self.vault_url, name, error_map=_error_map, **kwargs)
return DeletedKey._from_deleted_key_bundle(bundle)
@distributed_trace
def list_deleted_keys(self, **kwargs):
# type: (**Any) -> ItemPaged[DeletedKey]
"""List all deleted keys, including the public part of each. Possible only in a vault with soft-delete enabled.
Requires keys/list permission.
:returns: An iterator of deleted keys
:rtype: ~azure.core.paging.ItemPaged[~azure.keyvault.keys.DeletedKey]
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START list_deleted_keys]
:end-before: [END list_deleted_keys]
:language: python
:caption: List all the deleted keys
:dedent: 8
"""
return self._client.get_deleted_keys(
self._vault_url,
maxresults=kwargs.pop("max_page_size", None),
cls=lambda objs: [DeletedKey._from_deleted_key_item(x) for x in objs],
error_map=_error_map,
**kwargs
)
@distributed_trace
def list_properties_of_keys(self, **kwargs):
# type: (**Any) -> ItemPaged[KeyProperties]
"""List identifiers and properties of all keys in the vault.
Requires keys/list permission.
:returns: An iterator of keys without their cryptographic material or version information
:rtype: ~azure.core.paging.ItemPaged[~azure.keyvault.keys.KeyProperties]
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START list_keys]
:end-before: [END list_keys]
:language: python
:caption: List all keys
:dedent: 8
"""
return self._client.get_keys(
self._vault_url,
maxresults=kwargs.pop("max_page_size", None),
cls=lambda objs: [KeyProperties._from_key_item(x) for x in objs],
error_map=_error_map,
**kwargs
)
@distributed_trace
def list_properties_of_key_versions(self, name, **kwargs):
# type: (str, **Any) -> ItemPaged[KeyProperties]
"""List the identifiers and properties of a key's versions.
Requires keys/list permission.
:param str name: The name of the key
:returns: An iterator of keys without their cryptographic material
:rtype: ~azure.core.paging.ItemPaged[~azure.keyvault.keys.KeyProperties]
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START list_properties_of_key_versions]
:end-before: [END list_properties_of_key_versions]
:language: python
:caption: List all versions of a key
:dedent: 8
"""
return self._client.get_key_versions(
self._vault_url,
name,
maxresults=kwargs.pop("max_page_size", None),
cls=lambda objs: [KeyProperties._from_key_item(x) for x in objs],
error_map=_error_map,
**kwargs
)
@distributed_trace
def purge_deleted_key(self, name, **kwargs):
# type: (str, **Any) -> None
"""Permanently deletes a deleted key. Only possible in a vault with soft-delete enabled.
Performs an irreversible deletion of the specified key, without
possibility for recovery. The operation is not available if the
:py:attr:`~azure.keyvault.keys.KeyProperties.recovery_level` does not specify 'Purgeable'.
This method is only necessary for purging a key before its
:py:attr:`~azure.keyvault.keys.DeletedKey.scheduled_purge_date`.
Requires keys/purge permission.
:param str name: The name of the deleted key to purge
:returns: None
:raises: :class:`~azure.core.exceptions.HttpResponseError`
Example:
.. code-block:: python
# if the vault has soft-delete enabled, purge permanently deletes a deleted key
# (with soft-delete disabled, begin_delete_key is permanent)
key_client.purge_deleted_key("key-name")
"""
self._client.purge_deleted_key(vault_base_url=self.vault_url, key_name=name, error_map=_error_map, **kwargs)
@distributed_trace
def begin_recover_deleted_key(self, name, **kwargs):
# type: (str, **Any) -> KeyVaultKey
"""Recover a deleted key to its latest version. Possible only in a vault with soft-delete enabled.
Requires keys/recover permission.
When this method returns Key Vault has begun recovering the key. Recovery may take several seconds. This
method therefore returns a poller enabling you to wait for recovery to complete. Waiting is only necessary when
you want to use the recovered key in another operation immediately.
:param str name: The name of the deleted key to recover
:returns: A poller for the recovery operation. The poller's `result` method returns the recovered
:class:`~azure.keyvault.keys.KeyVaultKey` without waiting for recovery to complete. If you want to use the
recovered key immediately, call the poller's `wait` method, which blocks until the key is ready to use. The
`wait` method requires keys/get permission.
:rtype: ~azure.core.polling.LROPoller[~azure.keyvault.keys.KeyVaultKey]
:raises: :class:`~azure.core.exceptions.HttpResponseError`
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START recover_deleted_key]
:end-before: [END recover_deleted_key]
:language: python
:caption: Recover a deleted key
:dedent: 8
"""
polling_interval = kwargs.pop("_polling_interval", None)
if polling_interval is None:
polling_interval = 2
recovered_key = KeyVaultKey._from_key_bundle(
self._client.recover_deleted_key(
vault_base_url=self.vault_url, key_name=name, error_map=_error_map, **kwargs
)
)
command = partial(self.get_key, name=name, **kwargs)
polling_method = DeleteRecoverPollingMethod(
finished=False, command=command, final_resource=recovered_key, interval=polling_interval,
)
return KeyVaultOperationPoller(polling_method)
@distributed_trace
def update_key_properties(self, name, version=None, **kwargs):
# type: (str, Optional[str], **Any) -> KeyVaultKey
"""Change a key's properties (not its cryptographic material).
Requires keys/update permission.
:param str name: The name of key to update
:param str version: (optional) The version of the key to update. If unspecified, the latest version is updated.
:keyword key_operations: Allowed key operations
:paramtype key_operations: list[~azure.keyvault.keys.KeyOperation or str]
:keyword bool enabled: Whether the key is enabled for use.
:keyword tags: Application specific metadata in the form of key-value pairs.
:paramtype tags: dict[str, str]
:keyword ~datetime.datetime not_before: Not before date of the key in UTC
:keyword ~datetime.datetime expires_on: Expiry date of the key in UTC
:returns: The updated key
:rtype: ~azure.keyvault.keys.KeyVaultKey
:raises:
:class:`~azure.core.exceptions.ResourceNotFoundError` if the key doesn't exist,
:class:`~azure.core.exceptions.HttpResponseError` for other errors
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START update_key]
:end-before: [END update_key]
:language: python
:caption: Update a key's attributes
:dedent: 8
"""
enabled = kwargs.pop("enabled", None)
not_before = kwargs.pop("not_before", None)
expires_on = kwargs.pop("expires_on", None)
if enabled is not None or not_before is not None or expires_on is not None:
attributes = self._models.KeyAttributes(enabled=enabled, not_before=not_before, expires=expires_on)
else:
attributes = None
parameters = self._models.KeyUpdateParameters(
key_ops=kwargs.pop("key_operations", None),
key_attributes=attributes,
tags=kwargs.pop("tags", None)
)
bundle = self._client.update_key(
self.vault_url,
name,
key_version=version or "",
parameters=parameters,
error_map=_error_map,
**kwargs
)
return KeyVaultKey._from_key_bundle(bundle)
@distributed_trace
def backup_key(self, name, **kwargs):
# type: (str, **Any) -> bytes
"""Back up a key in a protected form useable only by Azure Key Vault.
Requires keys/backup permission.
This is intended to allow copying a key from one vault to another. Both vaults must be owned by the same Azure
subscription. Also, backup / restore cannot be performed across geopolitical boundaries. For example, a backup
from a vault in a USA region cannot be restored to a vault in an EU region.
:param str name: The name of the key to back up
:rtype: bytes
:raises:
:class:`~azure.core.exceptions.ResourceNotFoundError` if the key doesn't exist,
:class:`~azure.core.exceptions.HttpResponseError` for other errors
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START backup_key]
:end-before: [END backup_key]
:language: python
:caption: Get a key backup
:dedent: 8
"""
backup_result = self._client.backup_key(self.vault_url, name, error_map=_error_map, **kwargs)
return backup_result.value
@distributed_trace
def restore_key_backup(self, backup, **kwargs):
# type: (bytes, **Any) -> KeyVaultKey
"""Restore a key backup to the vault.
Requires keys/restore permission.
This imports all versions of the key, with its name, attributes, and access control policies. If the key's name
is already in use, restoring it will fail. Also, the target vault must be owned by the same Microsoft Azure
subscription as the source vault.
:param bytes backup: A key backup as returned by :func:`backup_key`
:returns: The restored key
:rtype: ~azure.keyvault.keys.KeyVaultKey
:raises:
:class:`~azure.core.exceptions.ResourceExistsError` if the backed up key's name is already in use,
:class:`~azure.core.exceptions.HttpResponseError` for other errors
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START restore_key_backup]
:end-before: [END restore_key_backup]
:language: python
:caption: Restore a key backup
:dedent: 8
"""
bundle = self._client.restore_key(
self.vault_url,
parameters=self._models.KeyRestoreParameters(key_bundle_backup=backup),
error_map=_error_map,
**kwargs
)
return KeyVaultKey._from_key_bundle(bundle)
@distributed_trace
def import_key(self, name, key, **kwargs):
# type: (str, JsonWebKey, **Any) -> KeyVaultKey
"""Import a key created externally.
Requires keys/import permission. If ``name`` is already in use, the key will be imported as a new version.
:param str name: Name for the imported key
:param key: The JSON web key to import
:type key: ~azure.keyvault.keys.JsonWebKey
:keyword bool hardware_protected: Whether the key should be backed by a hardware security module
:keyword bool enabled: Whether the key is enabled for use.
:keyword tags: Application specific metadata in the form of key-value pairs.
:paramtype tags: dict[str, str]
:keyword ~datetime.datetime not_before: Not before date of the key in UTC
:keyword ~datetime.datetime expires_on: Expiry date of the key in UTC
:returns: The imported key
:rtype: ~azure.keyvault.keys.KeyVaultKey
:raises: :class:`~azure.core.exceptions.HttpResponseError`
"""
enabled = kwargs.pop("enabled", None)
not_before = kwargs.pop("not_before", None)
expires_on = kwargs.pop("expires_on", None)
if enabled is not None or not_before is not None or expires_on is not None:
attributes = self._models.KeyAttributes(enabled=enabled, not_before=not_before, expires=expires_on)
else:
attributes = None
parameters = self._models.KeyImportParameters(
key=key._to_generated_model(),
key_attributes=attributes,
hsm=kwargs.pop("hardware_protected", None),
tags=kwargs.pop("tags", None)
)
bundle = self._client.import_key(
self.vault_url,
name,
parameters=parameters,
error_map=_error_map,
**kwargs
)
return KeyVaultKey._from_key_bundle(bundle)
```
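A hedged usage sketch for the KeyClient above; the vault URL and key name are placeholders, and DefaultAzureCredential is just one credential type that satisfies the `credential` parameter.
```python
# Hypothetical usage sketch for KeyClient (placeholders for vault URL and key name).
from azure.identity import DefaultAzureCredential
from azure.keyvault.keys import KeyClient

credential = DefaultAzureCredential()
client = KeyClient(vault_url="https://my-vault.vault.azure.net", credential=credential)

# create an RSA key, fetch it, then delete it and wait for deletion to finish
rsa_key = client.create_rsa_key("example-key", size=2048)
fetched = client.get_key(rsa_key.name)

poller = client.begin_delete_key(rsa_key.name)
deleted_key = poller.result()  # DeletedKey, returned before deletion completes
poller.wait()                  # block until Key Vault finishes deleting (requires keys/get)
```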
#### File: samples/async_samples/sample_metric_definitions_async.py
```python
import os
import asyncio
from azure.monitor.query.aio import MetricsQueryClient
from azure.identity.aio import ClientSecretCredential
async def list_namespaces():
credential = ClientSecretCredential(
client_id = os.environ['AZURE_CLIENT_ID'],
client_secret = os.environ['AZURE_CLIENT_SECRET'],
tenant_id = os.environ['AZURE_TENANT_ID']
)
client = MetricsQueryClient(credential)
metrics_uri = os.environ['METRICS_RESOURCE_URI']
response = client.list_metric_definitions(metrics_uri)
async for item in response:
print(item)
for availability in item.metric_availabilities:
print(availability.time_grain)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(list_namespaces())
```
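A variant of the sample above, sketched under the assumption that explicit cleanup is wanted: async context managers close the client and credential sessions when the coroutine exits.
```python
# Variant sketch (assumption: explicit cleanup is desired; not part of the original sample).
import os
import asyncio
from azure.identity.aio import DefaultAzureCredential
from azure.monitor.query.aio import MetricsQueryClient

async def list_metric_definitions_with_cleanup():
    metrics_uri = os.environ['METRICS_RESOURCE_URI']
    async with DefaultAzureCredential() as credential:
        async with MetricsQueryClient(credential) as client:
            async for item in client.list_metric_definitions(metrics_uri):
                print(item)

if __name__ == '__main__':
    asyncio.run(list_metric_definitions_with_cleanup())
```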
#### File: tests/stress_tests/stress_test_base.py
```python
import time
import threading
from datetime import datetime, timedelta
import concurrent
import sys
import uuid
import logging
try:
import psutil
except ImportError:
pass # If psutil isn't installed, simply does not capture process stats.
from azure.servicebus import ServiceBusClient, ServiceBusMessage, ServiceBusMessageBatch
from azure.servicebus.exceptions import MessageAlreadySettled
from utilities import _build_logger
_logger = _build_logger("stress-test", logging.WARN)
class ReceiveType:
push="push"
pull="pull"
none=None
class StressTestResults(object):
def __init__(self):
self.total_sent=0
self.total_received=0
self.time_elapsed=None
self.state_by_sender={}
self.state_by_receiver={}
def __repr__(self):
return str(vars(self))
class StressTestRunnerState(object):
'''Per-runner state, e.g. if you spawn 3 senders each will have this as their state object,
which will be coalesced at completion into StressTestResults'''
def __init__(self):
self.total_sent=0
self.total_received=0
self.cpu_percent = None
self.memory_bytes = None
self.timestamp = None
self.exceptions = []
def __repr__(self):
return str(vars(self))
def populate_process_stats(self):
self.timestamp = datetime.utcnow()
try:
self.cpu_percent = psutil.cpu_percent()
self.memory_bytes = psutil.virtual_memory().total
except NameError:
return # psutil was not installed, fall back to simply not capturing these stats.
class StressTestRunner:
'''Framework for running a service bus stress test.
    Duration can be overridden via the --stress_test_duration_seconds flag from the command line'''
def __init__(self,
senders,
receivers,
duration = timedelta(minutes=15),
receive_type = ReceiveType.push,
send_batch_size = None,
message_size = 10,
max_wait_time = 10,
send_delay = .01,
receive_delay = 0,
should_complete_messages = True,
max_message_count = 1,
send_session_id = None,
fail_on_exception = True):
self.senders = senders
self.receivers = receivers
self.duration=duration
self.receive_type = receive_type
self.message_size = message_size
self.send_batch_size = send_batch_size
self.max_wait_time = max_wait_time
self.send_delay = send_delay
self.receive_delay = receive_delay
self.should_complete_messages = should_complete_messages
self.max_message_count = max_message_count
self.fail_on_exception = fail_on_exception
self.send_session_id = send_session_id
# Because of pickle we need to create a state object and not just pass around ourselves.
# If we ever require multiple runs of this one after another, just make Run() reset this.
self._state = StressTestRunnerState()
self._duration_override = None
for arg in sys.argv:
if arg.startswith('--stress_test_duration_seconds='):
self._duration_override = timedelta(seconds=int(arg.split('=')[1]))
self._should_stop = False
# Plugin functions the caller can override to further tailor the test.
def on_send(self, state, sent_message, sender):
'''Called on every successful send, per message'''
pass
def on_receive(self, state, received_message, receiver):
'''Called on every successful receive, per message'''
pass
def on_receive_batch(self, state, batch, receiver):
'''Called on every successful receive, at the batch or iterator level rather than per-message'''
pass
def post_receive(self, state, receiver):
'''Called after completion of every successful receive'''
pass
def on_complete(self, send_results=[], receive_results=[]):
'''Called on stress test run completion'''
pass
def pre_process_message(self, message):
'''Allows user to transform the message before batching or sending it.'''
pass
    def pre_process_message_batch(self, batch):
'''Allows user to transform the batch before sending it.'''
pass
def pre_process_message_body(self, payload):
'''Allows user to transform message payload before sending it.'''
return payload
def _schedule_interval_logger(self, end_time, description="", interval_seconds=30):
def _do_interval_logging():
if end_time > datetime.utcnow() and not self._should_stop:
self._state.populate_process_stats()
_logger.critical("{} RECURRENT STATUS: {}".format(description, self._state))
self._schedule_interval_logger(end_time, description, interval_seconds)
t = threading.Timer(interval_seconds, _do_interval_logging)
t.start()
def _construct_message(self):
if self.send_batch_size != None:
batch = ServiceBusMessageBatch()
for _ in range(self.send_batch_size):
message = ServiceBusMessage(self.pre_process_message_body("a" * self.message_size))
self.pre_process_message(message)
batch.add_message(message)
            self.pre_process_message_batch(batch)
return batch
else:
message = ServiceBusMessage(self.pre_process_message_body("a" * self.message_size))
self.pre_process_message(message)
return message
def _send(self, sender, end_time):
self._schedule_interval_logger(end_time, "Sender " + str(self))
try:
_logger.info("STARTING SENDER")
with sender:
while end_time > datetime.utcnow() and not self._should_stop:
_logger.info("SENDING")
try:
message = self._construct_message()
if self.send_session_id != None:
message.session_id = self.send_session_id
sender.send_messages(message)
self.on_send(self._state, message, sender)
except Exception as e:
_logger.exception("Exception during send: {}".format(e))
self._state.exceptions.append(e)
if self.fail_on_exception:
raise
self._state.total_sent += 1
time.sleep(self.send_delay)
self._state.timestamp = datetime.utcnow()
return self._state
except Exception as e:
_logger.exception("Exception in sender: {}".format(e))
self._should_stop = True
raise
def _receive(self, receiver, end_time):
self._schedule_interval_logger(end_time, "Receiver " + str(self))
try:
with receiver:
while end_time > datetime.utcnow() and not self._should_stop:
_logger.info("RECEIVE LOOP")
try:
if self.receive_type == ReceiveType.pull:
batch = receiver.receive_messages(max_message_count=self.max_message_count, max_wait_time=self.max_wait_time)
elif self.receive_type == ReceiveType.push:
batch = receiver._get_streaming_message_iter(max_wait_time=self.max_wait_time)
else:
batch = []
for message in batch:
self.on_receive(self._state, message, receiver)
try:
if self.should_complete_messages:
receiver.complete_message(message)
except MessageAlreadySettled: # It may have been settled in the plugin callback.
pass
self._state.total_received += 1
#TODO: Get EnqueuedTimeUtc out of broker properties and calculate latency. Should properties/app properties be mostly None?
if end_time <= datetime.utcnow():
break
time.sleep(self.receive_delay)
self.post_receive(self._state, receiver)
except Exception as e:
_logger.exception("Exception during receive: {}".format(e))
self._state.exceptions.append(e)
if self.fail_on_exception:
raise
self._state.timestamp = datetime.utcnow()
return self._state
except Exception as e:
_logger.exception("Exception in receiver {}".format(e))
self._should_stop = True
raise
def run(self):
start_time = datetime.utcnow()
end_time = start_time + (self._duration_override or self.duration)
sent_messages = 0
received_messages = 0
with concurrent.futures.ThreadPoolExecutor(max_workers=4) as proc_pool:
_logger.info("STARTING PROC POOL")
senders = [proc_pool.submit(self._send, sender, end_time) for sender in self.senders]
receivers = [proc_pool.submit(self._receive, receiver, end_time) for receiver in self.receivers]
result = StressTestResults()
for each in concurrent.futures.as_completed(senders + receivers):
_logger.info("SOMETHING FINISHED")
if each in senders:
result.state_by_sender[each] = each.result()
if each in receivers:
result.state_by_receiver[each] = each.result()
# TODO: do as_completed in one batch to provide a way to short-circuit on failure.
result.state_by_sender = {s:f.result() for s,f in zip(self.senders, concurrent.futures.as_completed(senders))}
result.state_by_receiver = {r:f.result() for r,f in zip(self.receivers, concurrent.futures.as_completed(receivers))}
_logger.info("got receiever results")
result.total_sent = sum([r.total_sent for r in result.state_by_sender.values()])
result.total_received = sum([r.total_received for r in result.state_by_receiver.values()])
result.time_elapsed = end_time - start_time
_logger.critical("Stress test completed. Results:\n{}".format(result))
return result
```
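A hypothetical driver for the framework above, assuming StressTestRunner is importable from this module; the connection string and queue name environment variables are placeholders.
```python
# Hypothetical driver sketch for StressTestRunner (placeholders for env var names).
import os
from datetime import timedelta
from azure.servicebus import ServiceBusClient

connection_string = os.environ["SERVICE_BUS_CONNECTION_STR"]
queue_name = os.environ["SERVICE_BUS_QUEUE_NAME"]

sb_client = ServiceBusClient.from_connection_string(connection_string)
runner = StressTestRunner(
    senders=[sb_client.get_queue_sender(queue_name)],
    receivers=[sb_client.get_queue_receiver(queue_name)],
    duration=timedelta(minutes=1),
)
result = runner.run()
print("sent:", result.total_sent, "received:", result.total_received)
```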
#### File: azure-ai-textanalytics/tests/test_json_pointer.py
```python
import pytest
from azure.ai.textanalytics._models import (
AnalyzeSentimentResult,
TargetSentiment,
AssessmentSentiment,
SentenceSentiment,
_get_indices,
)
from azure.ai.textanalytics._response_handlers import sentiment_result
from azure.ai.textanalytics._generated.v3_1_preview_5 import models as _generated_models
@pytest.fixture
def generated_target_assessment_confidence_scores():
return _generated_models.TargetConfidenceScoreLabel(
positive=1.0,
neutral=0.0,
negative=0.0,
)
@pytest.fixture
def generated_sentiment_confidence_score():
return _generated_models.SentimentConfidenceScorePerLabel(
positive=1.0,
neutral=0.0,
negative=0.0,
)
@pytest.fixture
def generated_target_relation():
return _generated_models.TargetRelation(
relation_type="assessment",
ref="#/documents/0/sentences/1/assessments/0"
)
@pytest.fixture
def generated_target(generated_target_assessment_confidence_scores, generated_target_relation):
return _generated_models.SentenceTarget(
text="aspect",
sentiment="positive",
confidence_scores=generated_target_assessment_confidence_scores,
offset=0,
length=6,
relations=[generated_target_relation],
)
@pytest.fixture
def generated_assessment(generated_target_assessment_confidence_scores):
return _generated_models.SentenceAssessment(
text="good",
sentiment="positive",
confidence_scores=generated_target_assessment_confidence_scores,
offset=0,
length=4,
is_negated=False,
)
def generated_sentence_sentiment(generated_sentiment_confidence_score, index, targets=[], assessments=[]):
return _generated_models.SentenceSentiment(
text="not relevant",
sentiment="positive",
confidence_scores=generated_sentiment_confidence_score,
offset=0,
length=12,
targets=targets,
assessments=assessments,
)
@pytest.fixture
def generated_document_sentiment(generated_target, generated_assessment, generated_sentiment_confidence_score):
target_sentence = generated_sentence_sentiment(generated_sentiment_confidence_score, index=0, targets=[generated_target])
assessment_sentence = generated_sentence_sentiment(generated_sentiment_confidence_score, index=1, assessments=[generated_assessment])
return _generated_models.DocumentSentiment(
id=1,
sentiment="positive",
confidence_scores=generated_sentiment_confidence_score,
sentences=[target_sentence, assessment_sentence],
warnings=[],
)
@pytest.fixture
def generated_sentiment_response(generated_document_sentiment):
return _generated_models.SentimentResponse(
documents=[generated_document_sentiment],
errors=[],
model_version="0000-00-00",
)
class TestJsonPointer():
def test_json_pointer_parsing(self):
assert [1, 0, 15] == _get_indices("#/documents/1/sentences/0/assessments/15")
def test_opinion_different_sentence_target(self, generated_sentiment_response):
# the first sentence has the target, and the second sentence has the assessment
        # the desired behavior is that the first wrapped sentence object has a target, and its assessment
        # is in the second sentence.
# the second sentence will have no mined opinions, since we define that as a target and assessment duo
wrapped_sentiment = sentiment_result("not relevant", generated_sentiment_response, {})[0]
assert wrapped_sentiment.sentences[0].mined_opinions[0].assessments[0].text == "good"
assert not wrapped_sentiment.sentences[1].mined_opinions
```
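The parsing these tests exercise is small enough to sketch. Assuming `_get_indices` simply extracts the numeric path segments from a JSON-pointer-style reference, a minimal equivalent looks like this (a sketch, not the library's actual implementation):
```python
# Minimal sketch of the index extraction the tests above rely on (assumption about the
# real _get_indices implementation).
import re

def get_indices_sketch(relation: str):
    return [int(segment) for segment in re.findall(r"\d+", relation)]

assert get_indices_sketch("#/documents/1/sentences/0/assessments/15") == [1, 0, 15]
```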
#### File: azure-ai-translation-document/samples/sample_check_document_statuses.py
```python
def sample_document_status_checks():
# [START list_all_document_statuses]
import os
import time
from azure.core.credentials import AzureKeyCredential
from azure.ai.translation.document import DocumentTranslationClient
endpoint = os.environ["AZURE_DOCUMENT_TRANSLATION_ENDPOINT"]
key = os.environ["AZURE_DOCUMENT_TRANSLATION_KEY"]
source_container_url = os.environ["AZURE_SOURCE_CONTAINER_URL"]
target_container_url = os.environ["AZURE_TARGET_CONTAINER_URL"]
client = DocumentTranslationClient(endpoint, AzureKeyCredential(key))
poller = client.begin_translation(source_container_url, target_container_url, "es")
completed_docs = []
while not poller.done():
time.sleep(30)
doc_statuses = client.list_all_document_statuses(poller.id)
for document in doc_statuses:
if document.id not in completed_docs:
if document.status == "Succeeded":
print("Document at {} was translated to {} language. You can find translated document at {}".format(
document.source_document_url, document.translated_to, document.translated_document_url
))
completed_docs.append(document.id)
if document.status == "Failed":
print("Document at {} failed translation. Error Code: {}, Message: {}".format(
document.source_document_url, document.error.code, document.error.message
))
completed_docs.append(document.id)
if document.status == "Running":
print("Document ID: {}, translation progress is {} percent".format(
document.id, document.translation_progress * 100
))
print("\nTranslation completed.")
# [END list_all_document_statuses]
if __name__ == '__main__':
sample_document_status_checks()
```
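For comparison, a simpler blocking variant, sketched under the assumption that the poller's result() yields the final per-document statuses; it reuses the same environment variables as the sample above.
```python
# Blocking variant sketch (assumption: poller.result() yields per-document statuses).
import os
from azure.core.credentials import AzureKeyCredential
from azure.ai.translation.document import DocumentTranslationClient

client = DocumentTranslationClient(
    os.environ["AZURE_DOCUMENT_TRANSLATION_ENDPOINT"],
    AzureKeyCredential(os.environ["AZURE_DOCUMENT_TRANSLATION_KEY"]),
)
poller = client.begin_translation(
    os.environ["AZURE_SOURCE_CONTAINER_URL"],
    os.environ["AZURE_TARGET_CONTAINER_URL"],
    "es",
)
for document in poller.result():  # blocks until the translation operation finishes
    print(document.id, document.status)
```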
#### File: azure-ai-translation-document/tests/test_list_translations_async.py
```python
import pytest
import pytz
from datetime import datetime
import functools
from asynctestcase import AsyncDocumentTranslationTest
from preparer import DocumentTranslationPreparer, DocumentTranslationClientPreparer as _DocumentTranslationClientPreparer
from azure.ai.translation.document.aio import DocumentTranslationClient
DocumentTranslationClientPreparer = functools.partial(_DocumentTranslationClientPreparer, DocumentTranslationClient)
TOTAL_DOC_COUNT_IN_translation = 1
class TestSubmittedTranslations(AsyncDocumentTranslationTest):
@DocumentTranslationPreparer()
@DocumentTranslationClientPreparer()
async def test_list_translations(self, client):
# create some translations
operations_count = 5
docs_per_operation = 5
await self._begin_multiple_translations_async(client, operations_count, docs_per_operation=docs_per_operation, wait=False)
# list translations
submitted_translations = client.list_all_translation_statuses()
self.assertIsNotNone(submitted_translations)
# check statuses
async for translation in submitted_translations:
self._validate_translations(translation)
@DocumentTranslationPreparer()
@DocumentTranslationClientPreparer()
async def test_list_translations_with_pagination(self, client):
# prepare data
operations_count = 5
docs_per_operation = 2
results_per_page = 2
# create some translations
await self._begin_multiple_translations_async(client, operations_count, docs_per_operation=docs_per_operation, wait=False)
# list translations
submitted_translations_pages = client.list_all_translation_statuses(results_per_page=results_per_page).by_page()
self.assertIsNotNone(submitted_translations_pages)
# iterate by page
async for page in submitted_translations_pages:
page_translations = []
async for translation in page:
page_translations.append(translation)
self._validate_translations(translation)
self.assertLessEqual(len(page_translations), results_per_page)
@DocumentTranslationPreparer()
@DocumentTranslationClientPreparer()
async def test_list_translations_with_skip(self, client):
# prepare data
operations_count = 10
docs_per_operation = 2
skip = 5
# create some translations
await self._begin_multiple_translations_async(client, operations_count, wait=False, docs_per_operation=docs_per_operation)
# list translations - unable to assert skip!!
all_translations = client.list_all_translation_statuses()
all_operations_count = 0
async for translation in all_translations:
all_operations_count += 1
translations_with_skip = client.list_all_translation_statuses(skip=skip)
translations_with_skip_count = 0
async for translation in translations_with_skip:
translations_with_skip_count += 1
assert all_operations_count - translations_with_skip_count == skip
@DocumentTranslationPreparer()
@DocumentTranslationClientPreparer()
async def test_list_translations_filter_by_status(self, client):
operations_count = 5
docs_per_operation = 1
# create some translations with the status 'Succeeded'
completed_translation_ids = await self._begin_multiple_translations_async(client, operations_count, wait=True, docs_per_operation=docs_per_operation)
# create some translations with the status 'Cancelled'
translation_ids = await self._begin_multiple_translations_async(client, operations_count, wait=False, docs_per_operation=docs_per_operation)
for id in translation_ids:
await client.cancel_translation(id)
self.wait(10) # wait for 'cancelled' to propagate
# list translations with status filter
statuses = ["Cancelled"]
submitted_translations = client.list_all_translation_statuses(statuses=statuses)
# check statuses
async for translation in submitted_translations:
self.assertIn(translation.status, statuses)
self.assertNotIn(translation.id, completed_translation_ids)
@DocumentTranslationPreparer()
@DocumentTranslationClientPreparer()
async def test_list_translations_filter_by_ids(self, client):
operations_count = 3
docs_per_operation = 2
# create some translations
translation_ids = await self._begin_multiple_translations_async(client, operations_count, wait=False, docs_per_operation=docs_per_operation)
# list translations
submitted_translations = client.list_all_translation_statuses(translation_ids=translation_ids)
self.assertIsNotNone(submitted_translations)
# check statuses
async for translation in submitted_translations:
self.assertIn(translation.id, translation_ids)
@pytest.mark.live_test_only
@DocumentTranslationPreparer()
@DocumentTranslationClientPreparer()
async def test_list_translations_filter_by_created_after(self, client):
# create some translations
operations_count = 3
docs_per_operation = 2
# create some translations
start = datetime.utcnow()
translation_ids = await self._begin_multiple_translations_async(client, operations_count, wait=False, docs_per_operation=docs_per_operation)
# list translations
submitted_translations = client.list_all_translation_statuses(created_after=start)
self.assertIsNotNone(submitted_translations)
# check statuses
async for translation in submitted_translations:
self.assertIn(translation.id, translation_ids)
assert(translation.created_on.replace(tzinfo=None) >= start.replace(tzinfo=None))
@pytest.mark.live_test_only
@DocumentTranslationPreparer()
@DocumentTranslationClientPreparer()
async def test_list_translations_filter_by_created_before(self, client):
'''
        NOTE: this test depends on 'end' matching the service's time zone;
        'end' must be timezone-aware!
'''
operations_count = 5
docs_per_operation = 1
# create some translations
await self._begin_multiple_translations_async(client, operations_count, wait=True, docs_per_operation=docs_per_operation)
end = datetime.utcnow().replace(tzinfo=pytz.utc)
translation_ids = await self._begin_multiple_translations_async(client, operations_count, wait=True, docs_per_operation=docs_per_operation)
# list translations
submitted_translations = client.list_all_translation_statuses(created_before=end)
self.assertIsNotNone(submitted_translations)
# check statuses
async for translation in submitted_translations:
self.assertLessEqual(translation.created_on.replace(tzinfo=None), end.replace(tzinfo=None))
self.assertNotIn(translation.id, translation_ids)
@DocumentTranslationPreparer()
@DocumentTranslationClientPreparer()
async def test_list_translations_order_by_creation_time_asc(self, client):
operations_count = 3
docs_per_operation = 2
# create some translations
await self._begin_multiple_translations_async(client, operations_count, wait=False, docs_per_operation=docs_per_operation)
# list translations
submitted_translations = client.list_all_translation_statuses(order_by=["createdDateTimeUtc asc"])
self.assertIsNotNone(submitted_translations)
# check statuses
curr = datetime.min
async for translation in submitted_translations:
assert(translation.created_on.replace(tzinfo=None) >= curr.replace(tzinfo=None))
curr = translation.created_on
@DocumentTranslationPreparer()
@DocumentTranslationClientPreparer()
async def test_list_translations_order_by_creation_time_desc(self, client):
operations_count = 3
docs_per_operation = 2
# create some translations
await self._begin_multiple_translations_async(client, operations_count, wait=False, docs_per_operation=docs_per_operation)
# list translations
submitted_translations = client.list_all_translation_statuses(order_by=["createdDateTimeUtc desc"])
self.assertIsNotNone(submitted_translations)
# check statuses
curr = datetime.max
async for translation in submitted_translations:
assert(translation.created_on.replace(tzinfo=None) <= curr.replace(tzinfo=None))
curr = translation.created_on
@pytest.mark.skip(reason="not working! - list returned is empty")
@DocumentTranslationPreparer()
@DocumentTranslationClientPreparer()
async def test_list_translations_mixed_filters(self, client):
# create some translations
operations_count = 15
docs_per_operation = 1
results_per_page = 2
statuses = ["Cancelled"]
skip = 2
# create some translations
start = datetime.utcnow().replace(tzinfo=pytz.utc)
successful_translation_ids = await self._begin_multiple_translations_async(client, operations_count, wait=True, docs_per_operation=docs_per_operation)
cancelled_translation_ids = await self._begin_multiple_translations_async(client, operations_count, wait=False, docs_per_operation=docs_per_operation)
for translation_id in cancelled_translation_ids:
await client.cancel_translation(translation_id)
self.wait(15) # wait for status to propagate
end = datetime.utcnow().replace(tzinfo=pytz.utc)
# list translations
submitted_translations = client.list_all_translation_statuses(
# filters
statuses=statuses,
created_after=start,
created_before=end,
# ordering
order_by=["createdDateTimeUtc asc"],
# paging
skip=skip,
results_per_page=results_per_page
).by_page()
# check statuses
curr_time = datetime.min
async for page in submitted_translations:
counter = 0
async for translation in page:
counter += 1
# assert id
self.assertIn(translation.id, cancelled_translation_ids)
self.assertNotIn(translation.id, successful_translation_ids)
# assert ordering
assert(translation.created_on.replace(tzinfo=None) >= curr_time.replace(tzinfo=None))
curr_time = translation.created_on
# assert filters
assert(translation.created_on.replace(tzinfo=None) <= end.replace(tzinfo=None))
assert(translation.created_on.replace(tzinfo=None) >= start.replace(tzinfo=None))
self.assertIn(translation.status, statuses)
self.assertLessEqual(counter, results_per_page) # assert paging
``` |
{
"source": "JianpingZeng/xcc",
"score": 3
} |
#### File: lit/test/ProgressBar.py
```python
import sys, re, time
class TerminalController:
"""
A class that can be used to portably generate formatted output to
a terminal.
`TerminalController` defines a set of instance variables whose
values are initialized to the control sequence necessary to
perform a given action. These can be simply included in normal
output to the terminal:
>>> term = TerminalController()
>>> print 'This is '+term.GREEN+'green'+term.NORMAL
    Alternatively, the `render()` method can be used, which replaces
'${action}' with the string required to perform 'action':
>>> term = TerminalController()
>>> print term.render('This is ${GREEN}green${NORMAL}')
If the terminal doesn't support a given action, then the value of
the corresponding instance variable will be set to ''. As a
result, the above code will still work on terminals that do not
support color, except that their output will not be colored.
Also, this means that you can test whether the terminal supports a
given action by simply testing the truth value of the
corresponding instance variable:
>>> term = TerminalController()
>>> if term.CLEAR_SCREEN:
    ... print 'This terminal supports clearing the screen.'
Finally, if the width and height of the terminal are known, then
they will be stored in the `COLS` and `LINES` attributes.
"""
# Cursor movement:
BOL = '' #: Move the cursor to the beginning of the line
UP = '' #: Move the cursor up one line
DOWN = '' #: Move the cursor down one line
LEFT = '' #: Move the cursor left one char
RIGHT = '' #: Move the cursor right one char
# Deletion:
CLEAR_SCREEN = '' #: Clear the screen and move to home position
CLEAR_EOL = '' #: Clear to the end of the line.
CLEAR_BOL = '' #: Clear to the beginning of the line.
CLEAR_EOS = '' #: Clear to the end of the screen
# Output modes:
BOLD = '' #: Turn on bold mode
BLINK = '' #: Turn on blink mode
DIM = '' #: Turn on half-bright mode
REVERSE = '' #: Turn on reverse-video mode
NORMAL = '' #: Turn off all modes
# Cursor display:
HIDE_CURSOR = '' #: Make the cursor invisible
SHOW_CURSOR = '' #: Make the cursor visible
# Terminal size:
COLS = None #: Width of the terminal (None for unknown)
LINES = None #: Height of the terminal (None for unknown)
# Foreground colors:
BLACK = BLUE = GREEN = CYAN = RED = MAGENTA = YELLOW = WHITE = ''
# Background colors:
BG_BLACK = BG_BLUE = BG_GREEN = BG_CYAN = ''
BG_RED = BG_MAGENTA = BG_YELLOW = BG_WHITE = ''
_STRING_CAPABILITIES = """
BOL=cr UP=cuu1 DOWN=cud1 LEFT=cub1 RIGHT=cuf1
CLEAR_SCREEN=clear CLEAR_EOL=el CLEAR_BOL=el1 CLEAR_EOS=ed BOLD=bold
BLINK=blink DIM=dim REVERSE=rev UNDERLINE=smul NORMAL=sgr0
    HIDE_CURSOR=civis SHOW_CURSOR=cnorm""".split()
_COLORS = """BLACK BLUE GREEN CYAN RED MAGENTA YELLOW WHITE""".split()
_ANSICOLORS = "BLACK RED GREEN YELLOW BLUE MAGENTA CYAN WHITE".split()
def __init__(self, term_stream=sys.stdout):
"""
Create a `TerminalController` and initialize its attributes
with appropriate values for the current terminal.
`term_stream` is the stream that will be used for terminal
output; if this stream is not a tty, then the terminal is
assumed to be a dumb terminal (i.e., have no capabilities).
"""
# Curses isn't available on all platforms
try: import curses
except: return
# If the stream isn't a tty, then assume it has no capabilities.
if not term_stream.isatty(): return
# Check the terminal type. If we fail, then assume that the
# terminal has no capabilities.
try: curses.setupterm()
except: return
# Look up numeric capabilities.
self.COLS = curses.tigetnum('cols')
self.LINES = curses.tigetnum('lines')
# Look up string capabilities.
for capability in self._STRING_CAPABILITIES:
(attrib, cap_name) = capability.split('=')
setattr(self, attrib, self._tigetstr(cap_name) or '')
# Colors
set_fg = self._tigetstr('setf')
if set_fg:
for i,color in zip(range(len(self._COLORS)), self._COLORS):
setattr(self, color, curses.tparm(set_fg, i) or '')
set_fg_ansi = self._tigetstr('setaf')
if set_fg_ansi:
for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
setattr(self, color, curses.tparm(set_fg_ansi, i) or '')
set_bg = self._tigetstr('setb')
if set_bg:
for i,color in zip(range(len(self._COLORS)), self._COLORS):
setattr(self, 'BG_'+color, curses.tparm(set_bg, i) or '')
set_bg_ansi = self._tigetstr('setab')
if set_bg_ansi:
for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
setattr(self, 'BG_'+color, curses.tparm(set_bg_ansi, i) or '')
def _tigetstr(self, cap_name):
# String capabilities can include "delays" of the form "$<2>".
# For any modern terminal, we should be able to just ignore
# these, so strip them out.
import curses
cap = curses.tigetstr(cap_name) or ''
return re.sub(r'\$<\d+>[/*]?', '', cap)
def render(self, template):
"""
        Replace each $-substitution in the given template string with
the corresponding terminal control string (if it's defined) or
'' (if it's not).
"""
return re.sub(r'\$\$|\${\w+}', self._render_sub, template)
def _render_sub(self, match):
s = match.group()
if s == '$$': return s
else: return getattr(self, s[2:-1])
#######################################################################
# Example use case: progress bar
#######################################################################
class ProgressBar:
"""
A 3-line progress bar, which looks like::
Header
20% [===========----------------------------------]
progress message
The progress bar is colored, if the terminal supports color
output; and adjusts to the width of the terminal.
"""
BAR = '%s${GREEN}[${BOLD}%s%s${NORMAL}${GREEN}]${NORMAL}%s\n'
HEADER = '${BOLD}${CYAN}%s${NORMAL}\n\n'
def __init__(self, term, header, useETA=True):
self.term = term
if not (self.term.CLEAR_EOL and self.term.UP and self.term.BOL):
            raise ValueError("Terminal isn't capable enough -- you "
                             "should use a simpler progress display.")
self.width = self.term.COLS or 75
self.bar = term.render(self.BAR)
self.header = self.term.render(self.HEADER % header.center(self.width))
self.cleared = 1 #: true if we haven't drawn the bar yet.
self.useETA = useETA
if self.useETA:
self.startTime = time.time()
self.update(0, '')
def update(self, percent, message):
if self.cleared:
sys.stdout.write(self.header)
self.cleared = 0
prefix = '%3d%% ' % (percent*100,)
suffix = ''
if self.useETA:
elapsed = time.time() - self.startTime
if percent > .0001 and elapsed > 1:
total = elapsed / percent
eta = int(total - elapsed)
h = eta//3600.
m = (eta//60) % 60
s = eta % 60
suffix = ' ETA: %02d:%02d:%02d'%(h,m,s)
barWidth = self.width - len(prefix) - len(suffix) - 2
n = int(barWidth*percent)
if len(message) < self.width:
message = message + ' '*(self.width - len(message))
else:
message = '... ' + message[-(self.width-4):]
sys.stdout.write(
self.term.BOL + self.term.UP + self.term.CLEAR_EOL +
(self.bar % (prefix, '='*n, '-'*(barWidth-n), suffix)) +
self.term.CLEAR_EOL + message)
def clear(self):
if not self.cleared:
sys.stdout.write(self.term.BOL + self.term.CLEAR_EOL +
self.term.UP + self.term.CLEAR_EOL +
self.term.UP + self.term.CLEAR_EOL)
self.cleared = 1
def test():
import time
tc = TerminalController()
p = ProgressBar(tc, 'Tests')
for i in range(101):
p.update(i/100., str(i))
time.sleep(.3)
if __name__=='__main__':
test()
```
#### File: lit/test/TestRunner.py
```python
import os
import platform
import re
import signal
import subprocess
import sys
import ShUtil
import Util
kSystemName = platform.system()
class TestStatus:
Pass = 0
XFail = 1
Fail = 2
XPass = 3
Invalid = 4
kNames = ['Pass','XFail','Fail','XPass','Invalid']
@staticmethod
def getName(code):
return TestStatus.kNames[code]
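# executeShCmd walks the command tree produced by ShUtil: Seq nodes are handled
# by recursing into both sides according to the operator (';', '||', '&&'),
# and Pipeline nodes are run by chaining subprocess.Popen objects and honoring
# the simple redirects. A (command, stdout, stderr, exit code) tuple is
# appended to `results` for every command, and the pipeline's exit code
# (negated if the command is negated) is returned.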
def executeShCmd(cmd, cfg, cwd, results):
if isinstance(cmd, ShUtil.Seq):
if cmd.op == ';':
res = executeShCmd(cmd.lhs, cfg, cwd, results)
return executeShCmd(cmd.rhs, cfg, cwd, results)
if cmd.op == '&':
raise NotImplementedError,"unsupported test command: '&'"
if cmd.op == '||':
res = executeShCmd(cmd.lhs, cfg, cwd, results)
if res != 0:
res = executeShCmd(cmd.rhs, cfg, cwd, results)
return res
if cmd.op == '&&':
res = executeShCmd(cmd.lhs, cfg, cwd, results)
if res is None:
return res
if res == 0:
res = executeShCmd(cmd.rhs, cfg, cwd, results)
return res
raise ValueError,'Unknown shell command: %r' % cmd.op
assert isinstance(cmd, ShUtil.Pipeline)
procs = []
input = subprocess.PIPE
for j in cmd.commands:
# FIXME: This is broken, it doesn't account for the accumulative nature
# of redirects.
stdin = input
stdout = stderr = subprocess.PIPE
for r in j.redirects:
if r[0] == ('>',2):
stderr = open(r[1], 'w')
elif r[0] == ('>&',2) and r[1] == '1':
stderr = subprocess.STDOUT
elif r[0] == ('>',):
stdout = open(r[1], 'w')
elif r[0] == ('<',):
stdin = open(r[1], 'r')
else:
raise NotImplementedError,"Unsupported redirect: %r" % r
procs.append(subprocess.Popen(j.args, cwd=cwd,
stdin = stdin,
stdout = stdout,
stderr = stderr,
env = cfg.environment))
# Immediately close stdin for any process taking stdin from us.
if stdin == subprocess.PIPE:
procs[-1].stdin.close()
procs[-1].stdin = None
if stdout == subprocess.PIPE:
input = procs[-1].stdout
else:
input = subprocess.PIPE
# FIXME: There is a potential for deadlock here, when we have a pipe and
# some process other than the last one ends up blocked on stderr.
procData = [None] * len(procs)
procData[-1] = procs[-1].communicate()
for i in range(len(procs) - 1):
if procs[i].stdout is not None:
out = procs[i].stdout.read()
else:
out = ''
if procs[i].stderr is not None:
err = procs[i].stderr.read()
else:
err = ''
procData[i] = (out,err)
# FIXME: Fix tests to work with pipefail, and make exitCode max across
# procs.
for i,(out,err) in enumerate(procData):
exitCode = res = procs[i].wait()
results.append((cmd.commands[i], out, err, res))
if cmd.negate:
exitCode = not exitCode
return exitCode
def executeScriptInternal(cfg, commands, cwd):
cmd = ShUtil.ShParser(' &&\n'.join(commands),
kSystemName == 'Windows').parse()
results = []
try:
exitCode = executeShCmd(cmd, cfg, cwd, results)
except:
import traceback
out = ''
err = 'Exception during script execution:\n%s\n' % traceback.format_exc()
return out, err, 127
out = err = ''
for i,(cmd, cmd_out,cmd_err,res) in enumerate(results):
out += 'Command %d: %s\n' % (i, ' '.join('"%s"' % s for s in cmd.args))
out += 'Command %d Result: %r\n' % (i, res)
out += 'Command %d Output:\n%s\n\n' % (i, cmd_out)
out += 'Command %d Stderr:\n%s\n\n' % (i, cmd_err)
return out, err, exitCode
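# executeScript is the external-shell fallback: the RUN lines are written to a
# script file (joined with '&&' on POSIX, or with ERRORLEVEL checks on Windows),
# optionally wrapped in valgrind, and executed via /bin/sh or cmd; stdout,
# stderr and the exit code are returned.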
def executeScript(cfg, script, commands, cwd):
# Write script file
f = open(script,'w')
if kSystemName == 'Windows':
f.write('\nif %ERRORLEVEL% NEQ 0 EXIT\n'.join(commands))
else:
f.write(' &&\n'.join(commands))
f.write('\n')
f.close()
if kSystemName == 'Windows':
command = ['cmd','/c', script]
else:
command = ['/bin/sh', script]
if cfg.useValgrind:
# FIXME: Running valgrind on sh is overkill. We probably could just
# run on jlang with no real loss.
command = ['valgrind', '-q',
'--tool=memcheck', '--leak-check=no', '--trace-children=yes',
'--error-exitcode=123'] + command
p = subprocess.Popen(command, cwd=cwd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=cfg.environment)
out,err = p.communicate()
exitCode = p.wait()
return out, err, exitCode
import StringIO
def runOneTest(cfg, testPath, tmpBase):
# Make paths absolute.
tmpBase = os.path.abspath(tmpBase)
testPath = os.path.abspath(testPath)
# Create the output directory if it does not already exist.
Util.mkdir_p(os.path.dirname(tmpBase))
script = tmpBase + '.script'
if kSystemName == 'Windows':
script += '.bat'
substitutions = [('%s', testPath),
('%S', os.path.dirname(testPath)),
('%t', tmpBase + '.tmp'),
(' jlang ', ' ' + cfg.jlang + ' '),
(' jlang-cc ', ' ' + cfg.jlangcc + ' ')]
# Collect the test lines from the script.
scriptLines = []
xfailLines = []
for ln in open(testPath):
if 'RUN:' in ln:
# Isolate the command to run.
index = ln.index('RUN:')
                ln = ln[index+4:]  # trailing newline/whitespace is stripped later in processLine
scriptLines.append(ln)
elif 'XFAIL' in ln:
xfailLines.append(ln)
# FIXME: Support something like END, in case we need to process large
# files.
# Verify the script contains a run line.
if not scriptLines:
return (TestStatus.Fail, "Test has no run line!")
# Apply substitutions to the script.
def processLine(ln):
# Apply substitutions
for a,b in substitutions:
ln = ln.replace(a,b)
# Strip the trailing newline and any extra whitespace.
return ln.strip()
scriptLines = map(processLine, scriptLines)
# Validate interior lines for '&&', a lovely historical artifact.
for i in range(len(scriptLines) - 1):
ln = scriptLines[i]
if not ln.endswith('&&'):
return (TestStatus.Fail,
("MISSING \'&&\': %s\n" +
"FOLLOWED BY : %s\n") % (ln, scriptLines[i + 1]))
# Strip off '&&'
scriptLines[i] = ln[:-2]
if not cfg.useExternalShell:
res = executeScriptInternal(cfg, scriptLines, os.path.dirname(testPath))
if res is not None:
out, err, exitCode = res
elif True:
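            # NOTE: because of the 'elif True' above, the 'else' branch below is
            # unreachable; failures of the internal shell always bail out here.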
return (TestStatus.Fail,
"Unable to execute internally:\n%s\n"
% '\n'.join(scriptLines))
else:
out, err, exitCode = executeScript(cfg, script, scriptLines,
os.path.dirname(testPath))
else:
out, err, exitCode = executeScript(cfg, script, scriptLines,
os.path.dirname(testPath))
# Detect Ctrl-C in subprocess.
if exitCode == -signal.SIGINT:
raise KeyboardInterrupt
if xfailLines:
ok = exitCode != 0
status = (TestStatus.XPass, TestStatus.XFail)[ok]
else:
ok = exitCode == 0
status = (TestStatus.Fail, TestStatus.Pass)[ok]
if ok:
return (status,'')
output = StringIO.StringIO()
print >>output, "Script:"
print >>output, "--"
print >>output, '\n'.join(scriptLines)
print >>output, "--"
print >>output, "Exit Code: %r" % exitCode
print >>output, "Command Output (stdout):"
print >>output, "--"
output.write(out)
print >>output, "--"
print >>output, "Command Output (stderr):"
print >>output, "--"
output.write(err)
print >>output, "--"
return (status, output.getvalue())
def capture(args):
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out,_ = p.communicate()
return out
def inferJlang(cfg):
# Determine which jlang to use.
jlang = os.getenv('JLANG')
# If the user set jlang in the environment, definitely use that and don't
# try to validate.
if jlang:
return jlang
# Otherwise look in the path.
jlang = Util.which('jlang', cfg.environment['PATH'])
if not jlang:
print >>sys.stderr, "error: couldn't find 'jlang' program, try setting JLANG in your environment"
sys.exit(1)
return jlang
def inferJlangCC(cfg, jlang):
jlangcc = os.getenv('JLANGCC')
# If the user set jlang in the environment, definitely use that and don't
# try to validate.
if jlangcc:
return jlangcc
# Otherwise try adding -cc since we expect to be looking in a build
# directory.
if jlang.endswith('.exe'):
jlangccName = jlang[:-4] + '-cc.exe'
else:
jlangccName = jlang + '-cc'
jlangcc = Util.which(jlangccName, cfg.environment['PATH'])
if not jlangcc:
# Otherwise ask jlang.
res = capture([jlang, '-print-prog-name=jlang-cc'])
res = res.strip()
if res and os.path.exists(res):
jlangcc = res
if not jlangcc:
print >>sys.stderr, "error: couldn't find 'jlang-cc' program, try setting JLANGCC in your environment"
sys.exit(1)
return jlangcc
def getTestOutputBase(dir, testpath):
"""getTestOutputBase(dir, testpath) - Get the full path for temporary files
corresponding to the given test path."""
# Form the output base out of the test parent directory name and the test
# name. FIXME: Find a better way to organize test results.
return os.path.join(dir,
os.path.basename(os.path.dirname(testpath)),
os.path.basename(testpath))
``` |
{
"source": "JianPpan/untitled_ClassManagement",
"score": 2
} |
#### File: untitled_ClassManagement/leaveManagement/views.py
```python
from django.shortcuts import render
from .forms import UserForm
from .forms import LeaveForm
# from django.views.decorators.csrf import csrf_exempt
import pymysql
# Create your views here.
def conn_db():
    # Get a database connection
    global conn
    conn = pymysql.connect(host='localhost', user='root', passwd='<PASSWORD>', db='student', port=3306, charset='utf8')
    cursor = conn.cursor()  # Get a cursor
return cursor
def leaveManagement(request):
username = request.session['username']
cursor = conn_db()
sql = "select * from student.leave"
cursor.execute(sql)
# conn.commit()
row_list = cursor.fetchall()
cursor.close()
return render(request, 'leaveManagement.html', {"username": username, "row_list": row_list})
def leaveAdd(request):
username = request.session['username']
if request.method == 'POST':
        leaveform = LeaveForm(request.POST)  # the form holds the submitted form data
if leaveform.is_valid():
lid = leaveform.cleaned_data['lid']
ltype = leaveform.cleaned_data['ltype']
ltime = leaveform.cleaned_data['ltime']
ldate = leaveform.cleaned_data['ldate']
lplace = leaveform.cleaned_data['lplace']
cursor = conn_db()
sql1 = "insert into student.leave values('%s','%s','%s','%s','%s')" \
% (lid, ltype, ltime, ldate, lplace)
cursor.execute(sql1)
conn.commit()
sql2 = "select * from student.leave"
cursor.execute(sql2)
row_list = cursor.fetchall()
cursor.close()
conn.close()
return render(request, 'leaveManagement.html', {"username": username, 'row_list': row_list})
def leaveUpdated(request):
username = request.session['username']
if request.method=='POST':
        leaveform = LeaveForm(request.POST)  # the form holds the submitted form data
if leaveform.is_valid():
lid = leaveform.cleaned_data['lid']
ltype = leaveform.cleaned_data['ltype']
ltime = leaveform.cleaned_data['ltime']
ldate = leaveform.cleaned_data['ldate']
lplace = leaveform.cleaned_data['lplace']
sql1 = "update student.leave set ltype = '%s', ltime = '%s', ldate = '%s', lplace = '%s'\
where lid = '%s' " %(ltype, ltime, ldate, lplace, lid)
cursor = conn_db()
cursor.execute(sql1)
conn.commit()
sql2 = "select * from student.leave"
cursor.execute(sql2)
row_list = cursor.fetchall()
cursor.close()
conn.close()
return render(request, 'leaveManagement.html',{"username":username, 'row_list':row_list})
else:
return render(request, 'leaveManagement.html',{"username":username})
else:
return render(request, 'leaveManagement.html',{"username":username})
def leaveDelete(request,lid):
username = request.session['username']
cursor = conn_db()
sql1 = "delete from student.leave where lid='%s'" %(lid)
cursor.execute(sql1)
conn.commit()
sql2 = "select * from student.leave"
cursor.execute(sql2)
row_list = cursor.fetchall()
cursor.close()
conn.close()
return render(request, 'leaveManagement.html',{"username":username, 'row_list':row_list})
def leaveSearch(request):
if request.method == 'POST':
username = request.session['username']
ltype = request.POST['ltype']
cursor = conn_db()
if ltype != '':
sql1 = "select * from student.leave where ltype = '%s'" % (ltype)
cursor.execute(sql1)
conn.commit()
row_list = cursor.fetchall()
return render(request, 'leaveManagement.html', {"username": username, 'row_list': row_list})
else:
sql2 = "select * from student.leave"
cursor.execute(sql2)
conn.commit()
row_list = cursor.fetchall()
return render(request, 'leaveManagement.html', {"username": username, 'row_list': row_list})
cursor.close()
```
#### File: untitled_ClassManagement/login/views.py
```python
from .forms import UserForm  # "." refers to the current package
from django.shortcuts import render
import pymysql
# Create your views here.
def conn():
    # Get a database connection
    conn = pymysql.connect(host='localhost', user='root', passwd='<PASSWORD>', db='student', port=3306, charset='utf8')
    cursor = conn.cursor()  # Get a cursor
return cursor
def login(request):
if request.method == 'POST':
        userform = UserForm(request.POST)  # the form holds the submitted form data
if userform.is_valid():
username = userform.cleaned_data['username']
password = userform.cleaned_data['password']
# role = userform.cleaned_data['role']
request.session['username'] = username
cursor = conn()
sql = "select * from teacher where sname = '%s' and password = '%s'" % (username, password)
try:
cursor.execute(sql)
user = cursor.fetchone()
                cursor.close()  # Release the cursor
if user:
return render(request, "studentManagement.html", {"username": username})
else:
return render(request, 'loginError.html')
except Exception as e:
print(e)
return render(request, 'login.html')
else:
return render(request, 'login.html')
```
#### File: untitled_ClassManagement/studentManagement/views.py
```python
from django.shortcuts import render
from .forms import UserForm
from .forms import StudentForm
import pymysql
# Create your views here.
def conn_db():
    # Get a database connection
    global conn
    conn = pymysql.connect(host='localhost', user='root', passwd='<PASSWORD>', db='student', port=3306, charset='utf8')
    cursor = conn.cursor()  # Get a cursor
return cursor
def studentManagement(request):
username = request.session['username']
cursor = conn_db()
sql = "select * from student"
cursor.execute(sql)
# conn.commit()
row_list = cursor.fetchall()
cursor.close()
return render(request, 'studentManagement.html', {"username": username, "row_list": row_list})
def studentAdd(request):
username = request.session['username']
if request.method == 'POST':
        studentform = StudentForm(request.POST)  # the form holds the submitted form data
if studentform.is_valid():
sno = studentform.cleaned_data['sno']
sname = studentform.cleaned_data['sname']
spassword = studentform.cleaned_data['spassword']
ssage = studentform.cleaned_data['ssage']
ssex = studentform.cleaned_data['ssex']
politics = studentform.cleaned_data['politics']
phone = studentform.cleaned_data['phone']
homeph = studentform.cleaned_data['homeph']
address = studentform.cleaned_data['address']
cet = studentform.cleaned_data['cet']
compgrade = studentform.cleaned_data['compgrade']
teachgrade = studentform.cleaned_data['teachgrade']
mandarin = studentform.cleaned_data['mandarin']
cursor = conn_db()
sql1 = "insert into student values('%s','%s','%s','%d','%s','%s','%s','%s','%s','%s','%s','%s','%s')" \
% (sno, sname, spassword, ssage, ssex, politics, phone, homeph, address, cet, compgrade, teachgrade,
mandarin)
cursor.execute(sql1)
conn.commit()
sql2 = "select * from student"
cursor.execute(sql2)
row_list = cursor.fetchall()
cursor.close()
conn.close()
return render(request, 'studentManagement.html', {"username": username, 'row_list': row_list})
def studentUpdated(request):
username = request.session['username']
if request.method == 'POST':
        studentform = StudentForm(request.POST)  # the form holds the submitted form data
if studentform.is_valid():
            sno = studentform.cleaned_data['sno']  # value comes from the form input named 'sno'
sname = studentform.cleaned_data['sname']
spassword = studentform.cleaned_data['spassword']
ssage = studentform.cleaned_data['ssage']
ssex = studentform.cleaned_data['ssex']
politics = studentform.cleaned_data['politics']
phone = studentform.cleaned_data['phone']
homeph = studentform.cleaned_data['homeph']
address = studentform.cleaned_data['address']
cet = studentform.cleaned_data['cet']
compgrade = studentform.cleaned_data['compgrade']
teachgrade = studentform.cleaned_data['teachgrade']
mandarin = studentform.cleaned_data['mandarin']
sql1 = "update student set sname = '%s', password = <PASSWORD>', ssage = '%d', ssex = '%s', \
politics = '%s', phone = '%s', homeph = '%s', address = '%s', \
cet = '%s', compgrade = '%s', teachgrade = '%s', mandarin = '%s' \
where sno = '%s' " % (sname, spassword, ssage, ssex, politics, phone, \
homeph, address, cet, compgrade, teachgrade, mandarin, sno)
cursor = conn_db()
cursor.execute(sql1)
conn.commit()
sql2 = "select * from student"
cursor.execute(sql2)
row_list = cursor.fetchall()
cursor.close()
conn.close()
return render(request, 'studentManagement.html', {"username": username, 'row_list': row_list})
else:
return render(request, 'studentManagement.html', {"username": username})
else:
return render(request, 'studentManagement.html', {"username": username})
def studentDelete(request, sno):
username = request.session['username']
cursor = conn_db()
sql1 = "delete from student where sno = '%s'" % (sno)
cursor.execute(sql1)
conn.commit()
sql2 = "select * from student"
cursor.execute(sql2)
row_list = cursor.fetchall()
cursor.close()
conn.close()
return render(request, 'studentManagement.html', {"username": username, 'row_list': row_list})
def studentSearch(request):
if request.method == 'POST':
username = request.session['username']
sname = request.POST['sname']
cursor = conn_db()
if sname != '':
sql1 = "select * from student where sname = '%s'" % (sname)
cursor.execute(sql1)
conn.commit()
row_list = cursor.fetchall()
return render(request, 'studentManagement.html', {"username": username, 'row_list': row_list})
else:
sql2 = "select * from student"
cursor.execute(sql2)
conn.commit()
row_list = cursor.fetchall()
return render(request, 'studentManagement.html', {"username": username, 'row_list': row_list})
cursor.close()
``` |
{
"source": "JianqiangRen/AAMS",
"score": 2
} |
#### File: AAMS/net/utils.py
```python
import tensorflow as tf
import vgg
from tensorflow.python.ops import control_flow_ops
import tensorflow.contrib.slim as slim
_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94
_RESIZE_SIDE_MIN = 256
_RESIZE_SIDE_MAX = 512
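# zca_normalization performs ZCA whitening on a batch of feature maps: it
# centers the features, estimates the per-image channel covariance from the
# Gram matrix, and uses its SVD to whiten the features. It returns the
# whitened features together with the colorization kernel and channel means
# that zca_colorization needs to invert the transform.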
def zca_normalization(features):
shape = tf.shape(features)
# reshape the features to orderless feature vectors
mean_features = tf.reduce_mean(features, axis=[1, 2], keep_dims=True)
unbiased_features = tf.reshape(features - mean_features, shape=(shape[0], -1, shape[3]))
# get the covariance matrix
gram = tf.matmul(unbiased_features, unbiased_features, transpose_a=True)
gram /= tf.reduce_prod(tf.cast(shape[1:3], tf.float32))
# converting the feature spaces
s, u, v = tf.svd(gram, compute_uv=True)
s = tf.expand_dims(s, axis=1) # let it be active in the last dimension
# get the effective singular values
valid_index = tf.cast(s > 0.00001, dtype=tf.float32)
s_effective = tf.maximum(s, 0.00001)
sqrt_s_effective = tf.sqrt(s_effective) * valid_index
sqrt_inv_s_effective = tf.sqrt(1.0/s_effective) * valid_index
# colorization functions
colorization_kernel = tf.matmul(tf.multiply(u, sqrt_s_effective), v, transpose_b=True)
# normalized features
normalized_features = tf.matmul(unbiased_features, u)
normalized_features = tf.multiply(normalized_features, sqrt_inv_s_effective)
normalized_features = tf.matmul(normalized_features, v, transpose_b=True)
normalized_features = tf.reshape(normalized_features, shape=shape)
return normalized_features, colorization_kernel, mean_features
def zca_colorization(normalized_features, colorization_kernel, mean_features):
# broadcasting the tensors for matrix multiplication
shape = tf.shape(normalized_features)
normalized_features = tf.reshape(
normalized_features, shape=(shape[0], -1, shape[3]))
colorized_features = tf.matmul(normalized_features, colorization_kernel)
colorized_features = tf.reshape(colorized_features, shape=shape) + mean_features
return colorized_features
def adain_normalization(features):
epsilon = 1e-7
mean_features, colorization_kernels = tf.nn.moments(features, [1, 2], keep_dims=True)
normalized_features = tf.div(
tf.subtract(features, mean_features), tf.sqrt(tf.add(colorization_kernels, epsilon)))
return normalized_features, colorization_kernels, mean_features
def adain_colorization(normalized_features, colorization_kernels, mean_features):
return tf.sqrt(colorization_kernels) * normalized_features + mean_features
def project_features(features, projection_module='ZCA'):
if projection_module == 'ZCA':
return zca_normalization(features)
elif projection_module == 'AdaIN':
return adain_normalization(features)
else:
return features, None, None
def reconstruct_features(projected_features, feature_kernels, mean_features, reconstruction_module='ZCA'):
if reconstruction_module == 'ZCA':
return zca_colorization(projected_features, feature_kernels, mean_features)
elif reconstruction_module == 'AdaIN':
return adain_colorization(projected_features, feature_kernels, mean_features)
else:
return projected_features
def instance_norm(inputs, epsilon=1e-10):
inst_mean, inst_var = tf.nn.moments(inputs, [1, 2], keep_dims=True)
normalized_inputs = tf.div( tf.subtract(inputs, inst_mean), tf.sqrt(tf.add(inst_var, epsilon)))
return normalized_inputs
def adaptive_instance_normalization(content_feature, style_feature):
normalized_content_feature = instance_norm(content_feature)
inst_mean, inst_var = tf.nn.moments(style_feature, [1, 2], keep_dims=True)
return tf.sqrt(inst_var) * normalized_content_feature + inst_mean
def hw_flatten(x):
return tf.reshape(x, shape=[x.shape[0], -1, x.shape[-1]])
def conv(x, channels, kernel=3, stride=1, pad=1, pad_type='zero', scope='conv_0'):
with tf.variable_scope(scope):
if pad_type == 'zero':
x = tf.pad(x, [[0, 0], [pad, pad], [pad, pad], [0, 0]])
if pad_type == 'reflect':
x = tf.pad(x, [[0, 0], [pad, pad], [pad, pad], [0, 0]], mode='REFLECT')
x = tf.layers.conv2d(x, channels, kernel, stride, kernel_initializer=tf.contrib.layers.xavier_initializer())
return x
def upsampling(x, stride, scope='upsample_0'):
with tf.variable_scope(scope):
stride_larger_than_one = tf.greater(stride, 1)
height = tf.shape(x)[1]
width = tf.shape(x)[2]
new_height, new_width = tf.cond(
stride_larger_than_one,
lambda: (height * stride, width * stride),
lambda: (height, width))
x = tf.image.resize_nearest_neighbor(x, [new_height, new_width])
return x
def avg_pooling(x, size, stride, scope='pool_0'):
with tf.variable_scope(scope):
x = tf.layers.average_pooling2d(x, size, stride, 'same')
return x
def mean_image_subtraction(images, means=(_R_MEAN, _G_MEAN, _B_MEAN)):
num_channels = 3
channels = tf.split(images, num_channels, axis=2)
for i in range(num_channels):
channels[i] -= means[i]
return tf.concat(channels, axis=2)
def mean_image_summation(image, means=(_R_MEAN, _G_MEAN, _B_MEAN)):
num_channels = 3
channels = tf.split(image, num_channels, axis=2)
for i in range(num_channels):
channels[i] += means[i]
return tf.concat(channels, axis=2)
def batch_mean_image_subtraction(images, means=(_R_MEAN, _G_MEAN, _B_MEAN)):
if images.get_shape().ndims != 4:
        raise ValueError('Input must be of size [batch, height, width, C>0]')
num_channels = images.get_shape().as_list()[-1]
if len(means) != num_channels:
raise ValueError('len(means) must match the number of channels')
channels = tf.split(images, num_channels, axis=3)
for i in range(num_channels):
channels[i] -= means[i]
return tf.concat(channels, axis=3)
def batch_mean_image_summation(images, means=(_R_MEAN, _G_MEAN, _B_MEAN)):
if images.get_shape().ndims != 4:
        raise ValueError('Input must be of size [batch, height, width, C>0]')
num_channels = images.get_shape().as_list()[-1]
if len(means) != num_channels:
raise ValueError('len(means) must match the number of channels')
channels = tf.split(images, num_channels, axis=3)
for i in range(num_channels):
channels[i] += means[i]
return tf.concat(channels, axis=3)
def extract_image_features(inputs, reuse=True):
with slim.arg_scope(vgg.vgg_arg_scope()):
_, end_points = vgg.vgg_19(inputs, spatial_squeeze=False, is_training=False, reuse=reuse)
return end_points
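# compute_total_variation_loss_l1 penalises the absolute difference between
# vertically and horizontally adjacent pixels, encouraging spatially smooth
# generated images.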
def compute_total_variation_loss_l1(inputs, weights=1, scope=None):
inputs_shape = tf.shape(inputs)
height = inputs_shape[1]
width = inputs_shape[2]
with tf.variable_scope(scope, 'total_variation_loss', [inputs]):
loss_y = tf.losses.absolute_difference(
tf.slice(inputs, [0, 0, 0, 0], [-1, height-1, -1, -1]),
tf.slice(inputs, [0, 1, 0, 0], [-1, -1, -1, -1]),
weights=weights,
scope='loss_y')
loss_x = tf.losses.absolute_difference(
tf.slice(inputs, [0, 0, 0, 0], [-1, -1, width-1, -1]),
tf.slice(inputs, [0, 0, 1, 0], [-1, -1, -1, -1]),
weights=weights,
scope='loss_x')
loss = loss_y + loss_x
return loss
def _smallest_size_at_least(height, width, smallest_side):
smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
height = tf.to_float(height)
width = tf.to_float(width)
smallest_side = tf.to_float(smallest_side)
scale = tf.cond(tf.greater(height, width),
lambda: smallest_side / width,
lambda: smallest_side / height)
new_height = tf.to_int32(height * scale)
new_width = tf.to_int32(width * scale)
return new_height, new_width
def _aspect_preserving_resize(image, smallest_side):
smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
shape = tf.shape(image)
height = shape[0]
width = shape[1]
new_height, new_width = _smallest_size_at_least(height, width, smallest_side)
image = tf.expand_dims(image, 0)
resized_image = tf.image.resize_bilinear(image, [new_height, new_width],
align_corners=False)
resized_image = tf.squeeze(resized_image)
resized_image.set_shape([None, None, 3])
return resized_image
def _mean_image_subtraction(image, means=(_R_MEAN, _G_MEAN, _B_MEAN)):
if image.get_shape().ndims != 3:
raise ValueError('Input must be of size [height, width, C>0]')
num_channels = image.get_shape().as_list()[-1]
if len(means) != num_channels:
raise ValueError('len(means) must match the number of channels')
channels = tf.split(axis=2, num_or_size_splits=num_channels, value=image)
for i in range(num_channels):
channels[i] -= means[i]
return tf.concat(axis=2, values=channels)
def preprocessing_for_train(image, output_height, output_width, resize_side):
image = _aspect_preserving_resize(image, resize_side)
image = _random_crop([image], output_height, output_width)[0]
image.set_shape([output_height, output_width, 3])
image = tf.to_float(image)
return _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN])
def preprocessing_for_eval(image, output_height, output_width, resize_side):
image = _aspect_preserving_resize(image, resize_side)
image = _central_crop([image], output_height, output_width)[0]
image.set_shape([output_height, output_width, 3])
image = tf.to_float(image)
return _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN])
def preprocessing_image(image, output_height, output_width,
resize_side=_RESIZE_SIDE_MIN, is_training=False):
if is_training:
return preprocessing_for_train(image, output_height, output_width, resize_side)
else:
return preprocessing_for_eval(image, output_height, output_width, resize_side)
def _crop(image, offset_height, offset_width, crop_height, crop_width):
original_shape = tf.shape(image)
rank_assertion = tf.Assert(
tf.equal(tf.rank(image), 3),
['Rank of image must be equal to 3.'])
cropped_shape = control_flow_ops.with_dependencies(
[rank_assertion],
tf.stack([crop_height, crop_width, original_shape[2]]))
size_assertion = tf.Assert(
tf.logical_and(
tf.greater_equal(original_shape[0], crop_height),
tf.greater_equal(original_shape[1], crop_width)),
['Crop size greater than the image size.'])
offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))
# Use tf.slice instead of crop_to_bounding box as it accepts tensors to
# define the crop size.
image = control_flow_ops.with_dependencies(
[size_assertion],
tf.slice(image, offsets, cropped_shape))
return tf.reshape(image, cropped_shape)
def _random_crop(image_list, crop_height, crop_width):
if not image_list:
raise ValueError('Empty image_list.')
# Compute the rank assertions.
rank_assertions = []
for i in range(len(image_list)):
image_rank = tf.rank(image_list[i])
rank_assert = tf.Assert(
tf.equal(image_rank, 3),
['Wrong rank for tensor %s [expected] [actual]',
image_list[i].name, 3, image_rank])
rank_assertions.append(rank_assert)
image_shape = control_flow_ops.with_dependencies(
[rank_assertions[0]],
tf.shape(image_list[0]))
image_height = image_shape[0]
image_width = image_shape[1]
crop_size_assert = tf.Assert(
tf.logical_and(
tf.greater_equal(image_height, crop_height),
tf.greater_equal(image_width, crop_width)),
['Crop size greater than the image size.'])
asserts = [rank_assertions[0], crop_size_assert]
for i in range(1, len(image_list)):
image = image_list[i]
asserts.append(rank_assertions[i])
shape = control_flow_ops.with_dependencies([rank_assertions[i]],
tf.shape(image))
height = shape[0]
width = shape[1]
height_assert = tf.Assert(
tf.equal(height, image_height),
['Wrong height for tensor %s [expected][actual]',
image.name, height, image_height])
width_assert = tf.Assert(
tf.equal(width, image_width),
['Wrong width for tensor %s [expected][actual]',
image.name, width, image_width])
asserts.extend([height_assert, width_assert])
# Create a random bounding box.
#
# Use tf.random_uniform and not numpy.random.rand as doing the former would
# generate random numbers at graph eval time, unlike the latter which
# generates random numbers at graph definition time.
max_offset_height = control_flow_ops.with_dependencies(
asserts, tf.reshape(image_height - crop_height + 1, []))
max_offset_width = control_flow_ops.with_dependencies(
asserts, tf.reshape(image_width - crop_width + 1, []))
offset_height = tf.random_uniform(
[], maxval=max_offset_height, dtype=tf.int32)
offset_width = tf.random_uniform(
[], maxval=max_offset_width, dtype=tf.int32)
return [_crop(image, offset_height, offset_width,
crop_height, crop_width) for image in image_list]
def _central_crop(image_list, crop_height, crop_width):
outputs = []
for image in image_list:
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
offset_height = (image_height - crop_height) / 2
offset_width = (image_width - crop_width) / 2
outputs.append(_crop(image, offset_height, offset_width,
crop_height, crop_width))
return outputs
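# k_means below is a small TensorFlow implementation of 1-D k-means on the
# flattened pixel intensities: centroids are initialised from a random shuffle
# of the points, refined for a fixed 80 iterations, and returned sorted via
# top_k.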
def k_means(image, clusters_num):
image = tf.squeeze(image)
print("k_means", image.shape)
_points = tf.reshape(image, (-1, 1))
centroids = tf.slice(tf.random_shuffle(_points), [0, 0], [clusters_num, -1])
points_expanded = tf.expand_dims(_points, 0)
for i in xrange(80):
centroids_expanded = tf.expand_dims(centroids, 1)
distances = tf.reduce_sum(tf.square(tf.subtract(points_expanded, centroids_expanded)), 2)
assignments = tf.argmin(distances, 0)
centroids = tf.concat(
[tf.reduce_mean(tf.gather(_points, tf.reshape(tf.where(tf.equal(assignments, c)), [1, -1])), axis=1) for c
in
xrange(clusters_num)], 0)
centroids = tf.squeeze(centroids)
centroids = -tf.nn.top_k(-centroids, clusters_num)[0] # sort
return centroids
if __name__ == "__main__":
import cv2
img = cv2.imread('lenna_cropped.jpg', cv2.IMREAD_GRAYSCALE)
points = tf.cast(tf.convert_to_tensor(img), tf.float32)
print(points.shape)
centroids = k_means(points, 4)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
print(sess.run(centroids))
```
#### File: JianqiangRen/AAMS/test.py
```python
import tensorflow as tf
import scipy.misc
import numpy as np
from PIL import Image
import argparse
import os
import errno
import shutil
import cv2
parser = argparse.ArgumentParser()
parser.add_argument("--model", dest='model', type=str)
parser.add_argument("--content", dest='content', type=str)
parser.add_argument("--style", dest='style', type=str)
parser.add_argument("--get_sal", dest='get_sal', type=bool, default=False)
parser.add_argument("--inter_weight", dest='inter_weight', type=float, default=1.0)
args = parser.parse_args()
max_length = 800
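# single_img_test loads a frozen inference graph (.pb), looks up the named
# tensors ("content", "style", "stylized_output", "attention_map",
# "inter_weight", "centroids"), resizes the content image so its longer side
# is at most max_length, runs stylization, and writes both the stylized image
# and a colour-mapped mean attention map under images/test_result/.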
def single_img_test(model_path, content_path, style_path, inter_weight_value=1.0):
f = tf.gfile.FastGFile(model_path, 'rb')
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
persisted_graph = tf.import_graph_def(graph_def, name='')
sess = tf.InteractiveSession(graph=persisted_graph)
content = tf.get_default_graph().get_tensor_by_name("content:0")
style = tf.get_default_graph().get_tensor_by_name("style:0")
output = tf.get_default_graph().get_tensor_by_name("stylized_output:0")
attention = tf.get_default_graph().get_tensor_by_name('attention_map:0')
inter_weight = tf.get_default_graph().get_tensor_by_name("inter_weight:0")
centroids = tf.get_default_graph().get_tensor_by_name("centroids:0")
content_feed = image_reader(content_path)
style_feed = image_reader(style_path)
if np.shape(content_feed)[0] >max_length or np.shape(content_feed)[1]>max_length:
h = np.shape(content_feed)[0]
w = np.shape(content_feed)[1]
if h > w:
content_feed = cv2.resize(content_feed, (max_length * w / h, max_length))
else:
content_feed = cv2.resize(content_feed, (max_length, max_length * h / w))
output_value, attention_value, centroids_value = sess.run([output, attention, centroids], feed_dict={content: content_feed,
style: style_feed,
inter_weight: inter_weight_value
})
print('content size:', np.shape(content_feed))
print('style size:', np.shape(style_feed))
print('output size:', np.shape(output_value))
print('attention size:', np.shape(attention_value))
print('centroids',centroids_value)
prepare_dir('images/test_result')
filename = 'images/test_result/{}_stylized_{}.{}'.format(
content_path.split('/')[-1].split('.')[0],
style_path.split('/')[-1].split('.')[0],
content_path.split('.')[-1]
)
output_image = output_value[0]
output_image = np.clip(output_image, 0, 255).astype(np.uint8)
imsave(filename, output_image.astype(np.uint8))
print('saving {}'.format(filename))
''' save attention map'''
mean_sal = 0
for i in xrange(attention_value.shape[3]):
mean_sal += attention_value[0, :, :, i]
mean_sal = mean_sal * 1.0 / attention_value.shape[3]
from matplotlib import pyplot as plt
from matplotlib import cm
plt.switch_backend('agg')
mean_sal = mean_sal - np.min(mean_sal)
mean_sal = mean_sal * 1.0 / np.max(mean_sal)
plt.imshow(mean_sal, cmap=cm.get_cmap('rainbow', 1000))
plt.colorbar()
plt.axis('off')
print('mean_sal size:', np.shape(mean_sal))
filename = 'images/test_result/{}_mean_atten.png'.format(
content_path.split('/')[-1].split('.')[0])
plt.savefig(filename, bbox_inches="tight")
print('attention mean:{}, min:{}, max:{}'.format(np.mean(mean_sal), np.min(mean_sal), np.max(mean_sal)))
sess.close()
print('single image test done')
def imsave(filename, img):
Image.fromarray(img).save(filename, quality=95)
def empty_dir(path):
for the_file in os.listdir(path):
file_path = os.path.join(path, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print 'Warning: {}'.format(e)
def create_dir(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
def prepare_dir(path, empty=False):
if not os.path.exists(path):
create_dir(path)
if empty:
empty_dir(path)
def image_reader(filename):
img = scipy.misc.imread(filename).astype(np.float)
if len(img.shape) == 2:
img = np.dstack((img, img, img))
elif img.shape[2] == 4:
img = img[:, :, :3]
return img
'''
usage:
python test.py --model models/author/avatar.pb
--content data/contents/images/woman_side_portrait.jpg
--style data/styles/brushstrokers.jpg
--inter_weight 1.0
'''
if __name__ == "__main__":
single_img_test(args.model, args.content, args.style, args.inter_weight)
```
#### File: JianqiangRen/AAMS/train.py
```python
import tensorflow as tf
import glob
import os
from net import utils, aams
import tensorflow.contrib.slim as slim
from tensorflow.python.ops import control_flow_ops
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", dest='dataset', type=str)
args = parser.parse_args()
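# _get_init_fn builds a slim init function that restores the pretrained VGG-19
# weights from vgg_19.ckpt, skipping everything under 'vgg_19/fc', so only the
# encoder comes from the checkpoint while just the 'self_attention' and
# 'decoder' scopes are optimised (see variables_to_train below).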
def _get_init_fn():
vgg_checkpoint_path = "vgg_19.ckpt"
if tf.gfile.IsDirectory(vgg_checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(vgg_checkpoint_path)
else:
checkpoint_path = vgg_checkpoint_path
variables_to_restore = []
for var in slim.get_model_variables():
tf.logging.info('model_var: %s' % var)
excluded = False
for exclusion in ['vgg_19/fc']:
if var.op.name.startswith(exclusion):
excluded = True
tf.logging.info('exclude:%s' % exclusion)
break
if not excluded:
variables_to_restore.append(var)
tf.logging.info('Fine-tuning from %s' % checkpoint_path)
return slim.assign_from_checkpoint_fn(
checkpoint_path,
variables_to_restore,
ignore_missing_vars=True)
if __name__ == "__main__":
with tf.Graph().as_default():
global_step = slim.create_global_step()
img_paths = glob.glob(os.path.join(args.dataset, "*.jpg"))
path_queue = tf.train.string_input_producer(img_paths, shuffle=True)
reader = tf.WholeFileReader()
paths, contents = reader.read(path_queue)
raw_img = tf.image.decode_jpeg(contents)
raw_img = 255.0 * tf.image.convert_image_dtype(raw_img, dtype=tf.float32)
image_clip = utils.preprocessing_image(
raw_img,
256, 256, 512,
is_training=True)
image_batch = tf.train.shuffle_batch([image_clip], batch_size=8, capacity=50000, num_threads=4,
min_after_dequeue=10000)
model = aams.AAMS()
total_loss = model.build_graph(image_batch)
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
scopes = ['self_attention', 'decoder']
variables_to_train = []
for scope in scopes:
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
variables_to_train.extend(variables)
train_op = model.get_training_op(global_step, variables_to_train)
update_ops.append(train_op)
summaries |= set(model.summaries)
update_op = tf.group(*update_ops)
watched_loss = control_flow_ops.with_dependencies([update_op], total_loss, name="train_op")
# merge the summaries
summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES))
summary_op = tf.summary.merge(list(summaries), name='summary_op')
def train_step_fn(session, *args, **kwargs):
total_loss, should_stop = slim.learning.train_step(session, *args, **kwargs)
train_step_fn.step += 1
if train_step_fn.step % 200 == 0:
texts = ['aams step is :{}, total loss: {}'.format(train_step_fn.step, total_loss)]
print(texts)
return [total_loss, should_stop]
train_step_fn.step = 0
print("start learning\n" + '_' * 30)
sess_config = tf.ConfigProto()
sess_config.gpu_options.per_process_gpu_memory_fraction = 0.8
slim.learning.train(
watched_loss,
train_step_fn = train_step_fn,
logdir= './tfmodel',
init_fn=_get_init_fn(),
summary_op = summary_op,
number_of_steps= 80000,
log_every_n_steps= 100,
save_interval_secs= 600,
save_summaries_secs= 120,
session_config= sess_config
)
``` |
{
"source": "jianqianyan/dingding_robot",
"score": 3
} |
#### File: jianqianyan/dingding_robot/scheduler.py
```python
from apscheduler.schedulers.blocking import BlockingScheduler
import datetime
from chinese_calendar import is_workday, is_holiday
import time
import requests
# url.txt stores the DingTalk webhook URL; the file contains a single line
ding_url = open('url.txt').read().strip()
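# workday_msg returns an hourly reminder on workdays: "该喝水啦" ("time to drink
# some water"), "该互动一下啦" ("time to interact a bit") and "该睡觉啦" ("time to
# go to bed"); holiday_msg deliberately returns None so no reminders are sent
# on holidays. alarm_clock posts the chosen message to the DingTalk webhook and
# @-mentions everyone.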
def workday_msg(localtime):
if localtime.tm_hour in [10,14,15,16,19,20]:
return "该喝水啦"
if localtime.tm_hour in [11,15,16,17,19,20]:
return "该互动一下啦"
    if localtime.tm_hour == 0:  # midnight; time.struct_time.tm_hour ranges 0-23
return "该睡觉啦"
return None
def holiday_msg(localtime):
return None
sched = BlockingScheduler()
def alarm_clock():
date = datetime.date.today()
localtime = time.localtime(time.time())
if is_holiday(date):
text = holiday_msg(localtime)
elif is_workday(date):
text = workday_msg(localtime)
if text is not None:
msg_body = {"msgtype": "text",
"text": {"content": text},
"at": { "isAtAll": True}
}
ret = requests.post(ding_url, json = msg_body)
sched.add_job(func=alarm_clock, trigger='cron', hour='0-23', start_date='2018-10-30 14:00:00')
sched.start()
``` |
{
"source": "JianqiaoJIN/master_thesis",
"score": 3
} |
#### File: master_thesis/grangerCausality/evaluate.py
```python
import pandas as pd
import numpy as np
from sklearn import metrics
import json, pickle, copy
def writeJSON(filePath, data):
"""Dumps data to a nicely formatted json at filePath."""
with open(filePath, "w") as outFile:
outFile.write(json.dumps(data,
sort_keys=True,
indent=4,
separators=(',', ': ')))
"""
drop A[i,i]
"""
def preProcess(A_true, A_est):
A_true = A_true.values
A_est = A_est.values
r,c = A_true.shape
for i in range(r):
A_true[i, i] = -1
A_est[i, i] = -1
A_true_temp = np.reshape(A_true, newshape=(1, r*c))[0]
A_est_temp = list(np.reshape(A_est, newshape=(1, r*c))[0])
A_true_temp = list(filter(lambda a: a != -1, A_true_temp))
A_est_temp = list(filter(lambda a: a != -1, A_est_temp))
return A_true_temp, A_est_temp
"""
calculate AUC
"""
def compute_AUC(A_true, A_est):
A_true, A_est = preProcess(A_true, A_est)
fpr, tpr, thresholds = metrics.roc_curve(A_true, A_est)
return metrics.auc(fpr, tpr)
"""
calculate F1-score
"""
def compute_F1Score(A_true, A_est):
A_true, A_est = preProcess(A_true, A_est)
    #print ("F1: %f" % metrics.f1_score(A_est, A_true, average="binary"))
    return metrics.f1_score(A_est, A_true, average="binary")
if __name__ == "__main__":
GC_types = ["bivariateGC","conditionalGC","groupLassoGC", "mlpGC"]
dirs = ['var_system', 'henon_system']
N_set = [1000]
D_set = [5, 30]
results_AUC = {}
for d in dirs:
D_GC_AUC = {}
for D in D_set:
N_D_GC = {}
for N in N_set:
A_true = pd.read_csv("data/"+d+"/A_true_"+str(D) + "_" + str(N)+".csv")
N_D_GC_AUC = {}
for GC in GC_types:
file_name = "results/"+d+"/"+GC+"/A_est_"+ str(D) + "_" + str(N) +".csv"
A_est = pd.read_csv(file_name)
AUC = compute_AUC(A_true, A_est)
N_D_GC_AUC[GC] = AUC
N_D_GC[N] = N_D_GC_AUC
D_GC_AUC[D] = N_D_GC
results_AUC[d] = D_GC_AUC
writeJSON("results_auc.json", results_AUC)
``` |
{
"source": "jianqiaoSE/ceGan",
"score": 3
} |
#### File: ceGan/analy/draw_all.py
```python
import re
from re import Pattern
from typing import AnyStr
import matplotlib.pyplot as plt
base_url = '/home/b8313/coding/py/gan-torch-text/log/'
bleu2mode = re.compile('BLEU-\[2, 3, 4, 5\] = \[(.*?),')
nll_gen_mode = re.compile('NLL_gen = (.*?),')
nll_div_mode = re.compile('NLL_div = (.*?),')
filename = 'log_1102_1543_10_exp+log.txt'
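# get_nll scans the training log for every value captured by `mode` (one of
# the regexes above), drops the first four matches (presumably the entries
# logged before adversarial training), converts the rest to floats, and builds
# an x-axis spaced every 50 epochs (the adv_log_step used in the run scripts),
# with the last point nudged back by one epoch.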
def get_nll(mode) -> list:
with open(base_url + filename, "r") as f:
content = f.read()
# match = re.findall(nll_gen_mode, content)
match = re.findall(mode, content)
for count in range(0, 4):
del match[0]
datas = []
for data in match:
data = float(data)
datas.append(data)
# print(len(datas))
print(filename + str(datas))
x = []
for count in range(0, len(datas)):
x.append(count * 50)
# print(len(x))
x[len(datas) - 1] -= 1
print(x)
# plt.title('BLEU-2')
# plt.xlabel("epoch")
# # plt.ylabel("BLEU2")
#
# # plt.plot(x, datas, label=filename, color=cmap(color))
# plt.plot(x, datas, label=filename)
#
# plt.grid()
# plt.legend()
return datas
def main():
data_ora = get_nll(nll_div_mode)
data_gen= get_nll(nll_gen_mode)
plt.figure(figsize=(8, 6))
# plt.plot([50 * i for i in range(len(data_ora))], data_ora)
plt.plot([50 * i for i in range(len(data_ora))], data_ora)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
plt.title('NLL-oracle',fontsize=25)
plt.show()
if __name__ == '__main__':
main()
```
#### File: ceGan/analy/drawpics.py
```python
import matplotlib.pyplot as plt
import re
bleu2mode = re.compile('BLEU-\[2, 3, 4, 5\] = \[(.*?),')
nll_gen_mode = re.compile('NLL_gen = (.*?),')
nll_div_mode = re.compile('NLL_div = (.*?),')
base_url = '/home/b8313/coding/py/gan-torch-text/log/'
# base_url = 'D:\coding\py\gan-torch-text\log\\'
# N = 160
# def get_cmap(n, name='hsv'):
# '''Returns a function that maps each index in 0, 1, ..., n-1 to a distinct
# RGB color; the keyword argument name must be a standard mpl colormap name.'''
# return plt.cm.get_cmap(name, n)
if __name__ == '__main__':
files_list = [
# 'log_1023_2112_01_ce_vec0_8.txt',
# 'log_1023_2326_06_ce_vec0_9.txt',
# 'log_1023_2334_14_ce_vec0_7.txt',
# 'log_1024_0757_24_ce_vec0_6.txt',
# 'log_1024_1424_26_ce_vec0_2.txt',
# 'log_1024_1631_49_ce_vec0_1.txt',
# 'log_1024_1633_52_ce_vec0_0.txt',
# # 'log_1025_0904_05_ce_vec0_5.txt',
# 'log_1024_2306_48_ce_vec0_0.txt',
# 'log_1025_1126_03_ce_vec1_0.txt',
# 'log_1025_1445_17_ce_vec1_0.txt',
# 'log_1025_2112_11_ce_vec0_0.txt',
# 'log_1025_2259_34_ce_vec0_0.txt',
# "log_1026_0853_19_ce_vec0_0.txt",
# 'log_1026_0859_47_ce_vec0_0.txt',
# 'log_1026_1702_17_ce_vec0_0.txt',
# 'log_1026_1702_17_ce_vec1_0.txt',
# "log_1026_1919_58_ce_vec1_0.txt",
# 'log_1026_1959_51_ce_vec1_0.txt',
# 'log_1026_2002_09_ce_vec1_0.txt',
# 'log_1027_1751_51_ce_vec1_0_log.txt',
# 'log_1027_1947_04_ce_vec0_0_log.txt',
# 'log_1027_2245_37_ce_vec0_5_log.txt'
# 'log_1028_1143_34_ce_vec1_0_log_temp250.txt',
# 'log_1028_2026_28_ce_vec0_5_log_temp250.txt',
# 'log_1029_0827_34_ce_vec0_0_log_temp250.txt',
# # 'log_1028_0826_32_ce_vec0_5_exp.txt',
# 'log_1029_0945_17_ce_vec_0_8_log_temp250.txt',
# 'log_1029_1825_45_ex_vec0_5.txt',
# 'log_1030_0409_17_ce_vec0_5_log.txt',
# 'log_1030_0858_17_ce_vec0_5_sqrt_temp50.txt',
# 'log_1030_1318_16_ce_vec0_5_log_temp50.txt',
# 'log_1030_1735_42_ce_vec0_5_exp_temp50.txt',
# 'log_1030_2312_59_exp+log_temp1000.txt',
# 'log_1031_0334_28_log_temp1000.txt',
# 'log_1031_0757_39_exp_temp1000.txt'
# 'log_1029_1825_45_ex_vec0_5.txt',
# 'log_1030_0409_17_ce_vec0_5_log.txt',
# 'log_1030_0858_17_ce_vec0_5_sqrt_temp50.txt',
# 'log_1030_1318_16_ce_vec0_5_log_temp50.txt',
# 'log_1030_1735_42_ce_vec0_5_exp_temp50.txt'
# 'log_1102_0946_31_--mu_temp exp --fn_mu_temp "exp sigmoid quad" temp150.txt'
# 'log_1102_1543_10_exp+log.txt'
# 'log_1102_2012_32_exp.txt'
'log_1102_1543_10_exp+log.txt',
'log_1102_2031_32_exp+log.txt',
'log_1103_0052_47_exp+log.txt',
'log_1103_0516_11_exp+log.txt',
# 'log_1103_1056_28_exp+log.txt',
# 'log_1103_1100_09_exp+log.txt',
# 'log_1103_1703_45_exp.txt',
# 'log_1103_2123_34_exp.txt',
'log_1104_0217_47_exp.txt',
'log_1104_0635_14_exp.txt'
]
# cmap = get_cmap(N)
colors = ['black', 'red', 'darkorange', 'yellow', 'green', 'cyan', 'blue', 'purple', 'saddlebrown', 'lime',
'hotpink', 'deepskyblue', 'fuchsia', 'gold', 'olive', 'navy', 'gray']
color = 0
plt.figure(figsize=(12.8, 9.6))
for filename in files_list:
with open(base_url + filename, "r") as f:
content = f.read()
# match = re.findall(nll_gen_mode, content)
match = re.findall(nll_div_mode , content)
for count in range(0, 4):
del match[0]
datas = []
for data in match:
data = float(data)
datas.append(data)
# print(len(datas))
print(filename + str(datas))
x = []
for count in range(0, len(datas)):
x.append(count * 50)
# print(len(x))
x[len(datas) - 1] -= 1
print(x)
# plt.title('BLEU-2')
plt.xlabel("epoch")
# plt.ylabel("BLEU2")
# plt.plot(x, datas, label=filename, color=cmap(color))
plt.plot(x, datas, label=filename, color=colors[color])
color += 1
# color += 16
plt.grid()
plt.legend()
# plt.show()
plt.show()
```
#### File: ceGan/analy/exp.py
```python
import matplotlib.pyplot as plt
import numpy as np
temper=100
N=3000
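# Quick comparison of two temperature schedules over N adversarial epochs: an
# exponential ramp temper**(i/N) and a shifted sigmoid ramp that saturates
# near `temper`; both curves are plotted in __main__ below.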
def get_exp_list(temper :int)->list:
return [temper ** (i / 3000) for i in range(3000)]
if __name__ == '__main__':
# x=[i for i in range(3000)]
exp=[100 ** (i / 3000) for i in range(3000)]
sigmoid=[(temper - 1) * 1 / (1 + np.exp((N / 2 - i) * 20 / N)) + 1 for i in range(3000)]
plt.figure(figsize=(25.6, 19.2))
# datas=[]
# datas.append(exp)
# datas.append(sigmoid)
# plt.plot([i for i in range(N)],datas)
plt.plot(sigmoid)
plt.plot(exp)
plt.show()
```
#### File: ceGan/run/run_cegan.py
```python
import sys
from subprocess import call
import argparse
import os
"""
this part of code was copied from rel-gan
"""
# Job id and gpu_id
# if len(sys.argv) > 2:
# job_id = int(sys.argv[1])
# gpu_id = str(sys.argv[2])
# print('job_id: {}, gpu_id: {}'.format(job_id, gpu_id))
# elif len(sys.argv) > 1:
# job_id = int(sys.argv[1])
# gpu_id = 0
# print('job_id: {}, missing gpu_id (use default {})'.format(job_id, gpu_id))
# else:
# job_id = 1
# gpu_id = 0
# print('Missing argument: job_id and gpu_id. Use default job_id: {}, gpu_id: {}'.format(job_id, gpu_id))
job_id = 1
gpu_id = 0
print('Missing argument: job_id and gpu_id. Use default job_id: {}, gpu_id: {}'.format(job_id, gpu_id))
# Executables
executable = 'python' # specify your own python interpreter path here
rootdir = '../'
scriptname = 'main.py'
# ===Program===
if_test = int(False)
run_model = 'cegan'
CUDA = int(True)
oracle_pretrain = int(True)
gen_pretrain = int(False)
dis_pretrain = int(False)
MLE_train_epoch = 30
ADV_train_epoch = 3000
tips = 'CeGAN experiments'
# ===Oracle or Real===
if_real_data = [int(False), int(True), int(True)]
dataset = ['oracle', 'image_coco', 'emnlp_news']
loss_type = 'ragan'
vocab_size = [5000, 0, 0]
temp_adpt = 'sqrt'
temperature = [1, 100, 100]
# ===Basic Param===
data_shuffle = int(False)
model_type = 'RMC'
gen_init = 'truncated_normal'
dis_init = 'uniform'
samples_num = 10000
# batch_size = 32
batch_size = 32
max_seq_len = 20
gen_lr = 0.01
gen_adv_lr = 1e-4
dis_lr = 1e-4
pre_log_step = 10
adv_log_step = 50
mu_temp = 'exp'
# ===Generator===
ADV_g_step = 1
gen_embed_dim = 32
gen_hidden_dim = 32
mem_slots = 1
num_heads = 2
head_size = 256
# ===Discriminator===
ADV_d_step = 5
dis_embed_dim = 64
dis_hidden_dim = 64
num_rep = 64
# ===Metrics===
use_nll_oracle = int(True)
use_nll_gen = int(True)
use_nll_div = int(True)
use_bleu = int(True)
use_self_bleu = int(True)
use_ppl = int(False)
def program_config(parser):
parser.add_argument('--mu_temp', default=mu_temp, type=str)
parser.add_argument('--temp', default=temperature[job_id], type=int)
parser.add_argument('--fn_mu_temp', default=mu_temp, type=str)
return parser
parser = argparse.ArgumentParser()
parser = program_config(parser)
opt = parser.parse_args()
args = [
# Program
'--if_test', if_test,
'--run_model', run_model,
'--cuda', CUDA,
# '--device', gpu_id, # comment for auto GPU
'--ora_pretrain', oracle_pretrain,
'--gen_pretrain', gen_pretrain,
'--dis_pretrain', dis_pretrain,
'--mle_epoch', MLE_train_epoch,
'--adv_epoch', ADV_train_epoch,
'--tips', tips,
# Oracle or Real
'--if_real_data', if_real_data[job_id],
'--dataset', dataset[job_id],
'--loss_type', loss_type,
'--vocab_size', vocab_size[job_id],
'--temp_adpt', temp_adpt,
# '--temperature', temperature[job_id],
'--temperature', opt.temp,
'--mu_temp', opt.mu_temp,
'--fn_mu_temp',opt.fn_mu_temp,
# Basic Param
'--shuffle', data_shuffle,
'--model_type', model_type,
'--gen_init', gen_init,
'--dis_init', dis_init,
'--samples_num', samples_num,
'--batch_size', batch_size,
'--max_seq_len', max_seq_len,
'--gen_lr', gen_lr,
'--gen_adv_lr', gen_adv_lr,
'--dis_lr', dis_lr,
'--pre_log_step', pre_log_step,
'--adv_log_step', adv_log_step,
# Generator
'--adv_g_step', ADV_g_step,
'--gen_embed_dim', gen_embed_dim,
'--gen_hidden_dim', gen_hidden_dim,
'--mem_slots', mem_slots,
'--num_heads', num_heads,
'--head_size', head_size,
# Discriminator
'--adv_d_step', ADV_d_step,
'--dis_embed_dim', dis_embed_dim,
'--dis_hidden_dim', dis_hidden_dim,
'--num_rep', num_rep,
# Metrics
'--use_nll_oracle', use_nll_oracle,
'--use_nll_gen', use_nll_gen,
'--use_nll_div', use_nll_div,
'--use_bleu', use_bleu,
'--use_self_bleu', use_self_bleu,
'--use_ppl', use_ppl,
]
args = list(map(str, args))
my_env = os.environ.copy()
call([executable, scriptname] + args, env=my_env, cwd=rootdir)
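# Resulting invocation (sketch, with the job_id=1 defaults above): roughly
#   python main.py --if_test 0 --run_model cegan --cuda 1 --dataset image_coco --temperature 100 ...
# executed with cwd='../', so relative paths inside main.py resolve against the repo root.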
``` |
{
"source": "JianqingJiang/QoS-floodlight",
"score": 2
} |
#### File: apps/qos/qosmanager2.py
```python
import sys
import os # for file handling
import httplib # basic HTTP library for HTTPS connections
import urllib # used for url-encoding during login request
import simplejson # converts between JSON and python objects
import time # for dates in json
import argparse # more flexible argument parser for v2
def main():
if (len(sys.argv)) <= 1:
print "Type --help for help"
exit()
parser = argparse.ArgumentParser(description="Floodlight Quality of Service Manager")
parser.add_argument('-p','--port',
required=False,
default="8080",
type=str,
dest='p',
metavar="P")
parser.add_argument('-c','--controller',
required=False,
default="127.0.0.1",
dest="c",
type=str,
metavar="C")
parser.add_argument('-e','--enable',
required=False,
dest="qos_op",
action="store_const",
const="enable")
parser.add_argument('-d','--disable',
required=False,
dest="qos_op",
action="store_const",
const="disable")
parser.add_argument('-s','--status',
required=False,
dest="qos_op",
action="store_const",
const="status")
parser.add_argument('-A','--add',
required=False,
dest="action_op",
action="store_const",
const="add")
parser.add_argument('-D','--delete',
required=False,
dest="action_op",
action="store_const",
const="delete")
parser.add_argument('-M','--modify',
required=False,
dest="action_op",
action="store_const",
const="modify")
parser.add_argument('-L','--list',
required=False,
dest="action_op",
action="store_const",
const="list")
parser.add_argument('-t','--type',
required=False,
dest="type",
choices=["policy","service","policies","services"])
parser.add_argument('-O','--json',
required=False,
dest="obj")
args = parser.parse_args()
#Init arguments
c = args.c
p = args.p
obj = args.obj
type = args.type
qos_op = args.qos_op
action_op = args.action_op
#HTTP Helper
helper = httpHelper(__name="QoSHTTPHelper")
helper.connect(c,p)
#Enable / Disable
if qos_op == "enable":
enable(c,p)
exit()
elif qos_op == "disable":
disable(c,p)
exit()
elif qos_op == "status":
qosStatus(c,p)
exit()
#Listing
if action_op == "list":
if type != None:
if "service" in type:
listServices(c,p)
exit()
elif "polic" in type:
listPolicies(c,p)
exit()
else:
print "Unknown type: %s to list" % type
exit()
else:
print "Must include type of to list"
exit()
#Start Add / Modify / Delete
if action_op == "add":
if obj == None:
print "Must include json object"
exit(1)
else:
add(type, obj, c, p, helper)
exit()
if action_op == "delete":
if obj == None:
print "Error, Must include json"
exit(1)
else:
delete(type, obj, c, p, helper)
exit()
if action_op == "modify":
if obj == None:
print "Error, Must include json"
exit(1)
else:
modify(type, obj, c, p, helper)
exit()
else:
er = "Unrecognized commands"
print er
exit(1)
########
#TODO
########
def add(obj_type, json, controller, port, conn):
helper = conn
if obj_type == "service":
print "Trying to add service %s" % json
url = "http://%s:%s/wm/qos/service/json" % (controller,port)
#preserve immutable
_json = json
try:
req = helper.request("POST",url,_json)
print "[CONTROLLER]: %s" % req
r_j = simplejson.loads(req)
if r_j['status'] != "Please enable Quality of Service":
write_add("service",_json)
else:
print "[QoSPusher] please enable QoS on controller"
except Exception as e:
print e
print "Could Not Complete Request"
exit(1)
helper.close_connection()
elif obj_type == "policy":
print "Trying to add policy %s" % json
url = "http://%s:%s/wm/qos/policy/json" % (controller,port)
#preserve immutable
_json = json
try:
req = helper.request("POST",url,_json)
print "[CONTROLLER]: %s" % req
r_j = simplejson.loads(req)
if r_j['status'] != "Please enable Quality of Service":
write_add("policy",_json)
else:
print "[QoSPusher] please enable QoS on controller"
except Exception as e:
print e
print "Could Not Complete Request"
exit(1)
helper.close_connection()
else:
print "Error parsing command %s" % type
exit(1)
######
#TODO
######
def delete(obj_type, json, controller, port, conn):
helper = conn
if json == None:
print "Must include json object"
exit(1)
#preserve immutable
uid_o = json
if obj_type == "service":
print "Trying to delete service %s" % json
url = "http://%s:%s/wm/qos/service/json" % (controller,port)
try:
#Get all services on controller
name_req = helper.request("GET",url,None)
svs = simplejson.loads(name_req)
o = simplejson.loads(uid_o)
u_id_n = None
#Compare
for sv in svs:
if int(sv['sid']) == int(o['sid']):
u_id_n = sv['name']
break
if u_id_n != None:
req = helper.request("DELETE",url,uid_o)
print "[CONTROLLER]: %s" % req
r_j = simplejson.loads(req)
if r_j['status'] != "Please enable Quality of Service":
#remove service
write_remove("service", u_id_n )
else:
print "[QoSManager] please enable QoS on controller"
else:
print "Service not found"
except Exception as e:
print e
print "Could Not Complete Request"
exit(1)
helper.close_connection()
elif obj_type == "policy":
if json == None:
print "Must include json object"
exit(1)
print "Trying to delete policy %s" % json
url = "http://%s:%s/wm/qos/policy/json" % (controller,port)
try:
#Get all policies from controller
name_req = helper.request("GET",url,None)
pols = simplejson.loads(name_req)
o = simplejson.loads(uid_o)
u_id_n = None
#Compare
for pol in pols:
print "comparing %s : %s " % (pol['policyid'],o['policy-id'])
if int(pol['policyid']) == int(o['policy-id']):
u_id_n = pol['name']
break
if u_id_n != None:
req = helper.request("DELETE",url,uid_o)
print "[CONTROLLER]: %s" % req
r_j = simplejson.loads(req)
if r_j['status'] != "Please enable Quality of Service":
#remove policy on match
write_remove("policy", u_id_n )
else:
print "[QoSPusher] please enable QoS on controller"
else:
print "Policy not found"
except Exception as e:
print e
print "Could Not Complete Request"
exit(1)
helper.close_connection()
def modify(obj_type, json, controller, port, conn):
    helper = conn
print "Modify Policy and Service, TODO"
#TODO (futures)
#WRITE JSON TO QOS_STATE JSON
# @OP = service / policy
# @JSON_O = json object to be added
#
# @author = <NAME>
def write_add(op,json_o=None):
conf = "qos-state.json"
pwd = os.getcwd()
try:
if os.path.exists("%s/%s" % (pwd,conf)):
qos_data = open(conf)
else:
print "Does not exists, creating %s in %s " % (conf,pwd)
qos_data = open(conf, 'w+')
qos_data.write('{"services":[],"policies":[]}');
qos_data.close()
qos_data = open(conf)
except ValueError as e:
print "Problem with qos-state file"
print e
exit(1)
#load and encode
data = simplejson.load(qos_data)
sjson = simplejson.JSONEncoder(sort_keys=False,indent=3).encode(data)
jsond = simplejson.JSONDecoder().decode(sjson)
o_data = simplejson.loads(json_o)
o_data["datetime"] = time.asctime()
found = False
if op == "service":
for service in jsond['services']:
if service['name'] == o_data['name']:
found = True
break
if found:
print "[QoSPusher]: Service Already Exists"
else:
print "Writing service to qos-state.json"
jsond['services'].append(o_data)
elif op == "policy":
for policy in jsond['policies']:
#print "checking %s against %s" % (policy['name'] ,o_data['name'])
if policy['name'] == o_data['name']:
found = True
break
if found:
print "[QoSPusher]: Policy Already Exists"
else:
print "Writing policy to qos.state.json"
jsond['policies'].append(o_data)
#deserialize and write back
sjson = simplejson.JSONEncoder(sort_keys=False,indent=3).encode(jsond)
qos_data.close()
newd = open(conf, 'w+')
#housekeeping
sjson = sjson.translate(None,'\\')
sjson = sjson.replace('"{', '{')
sjson = sjson.replace('}"', '}')
    # in case of misrepresented patterns like '" {' or '} "'
sjson = sjson.replace('" {', '{')
sjson = sjson.replace('} "', '}')
newd.write(sjson)
state = os.popen("echo '%s' | python -mjson.tool | more" % sjson).read()
#print state #debug
newd.close()
#DELETE JSON FILE FROM STATE JSON
# @OP = sevice / policy
# @U_ID = unique id of service of policy
#
# @author <NAME>
def write_remove(op,u_id):
conf = "qos-state.json"
    pwd = os.getcwd()
try:
if os.path.exists("%s/%s" % (pwd,conf)):
print "Opening qos-state.json in %s" % pwd
qos_data = open(conf)
else:
print "%s/%s does not exist" %(pwd,conf)
except ValueError as e:
print "Problem with qos-state file"
print e
exit(1)
#load and encode
data = simplejson.load(qos_data)
sjson = simplejson.JSONEncoder(sort_keys=False,indent=3).encode(data)
jsond = simplejson.JSONDecoder().decode(sjson)
if op == "service":
print "Deleting service from qos-state.json"
try:
found = False
for srv in range(len(jsond['services'])):
if u_id == jsond['services'][srv]['name']:
found = True
del jsond['services'][srv]
break;
if not found:
print "Could not find service to delete from %s" % conf
except ValueError as e:
"Could not delete service, does not exist"
elif op == "policy":
print "Deleting policy from qos.state.json"
try:
found = False
for pol in range(len(jsond['policies'])):
if u_id == jsond['policies'][pol]['name']:
found = True
del jsond['policies'][pol]
break;
if not found:
print "Could not find service to delete from %s" % conf
except ValueError as e:
"Could not delete policy, does not exist"
#deserialize and write back
sjson = simplejson.JSONEncoder(sort_keys=False,indent=3).encode(jsond)
qos_data.close()
newd = open(conf, 'w+')
sjson = sjson.translate(None,'\\')
sjson = sjson.replace('"{', '{')
sjson = sjson.replace('}"', '}')
    # in case of misrepresented patterns like '" {' or '} "'
sjson = sjson.replace('" {', '{')
sjson = sjson.replace('} "', '}')
newd.write(sjson)
state = os.popen("echo '%s' | python -mjson.tool | more" % sjson).read()
#print state #debug
newd.close()
#ENABLE QoS ON CONTROLLER
def enable(ip,port):
helper = httpHelper(__name="QoSHTTPHelper")
helper.connect(ip,port)
print "Enabling QoS at %s:%s" % (ip,port)
url = "http://%s:%s/wm/qos/tool/enable/json" % (ip,port)
try:
req = helper.request("GET",url,None)
print "[CONTROLLER]: %s" % req
except Exception as e:
print e
print "Could Not Complete Request"
exit(1)
helper.close_connection()
#DISABLE QoS ON CONTROLLER
def disable(ip,port):
helper = httpHelper(__name="QoSHTTPHelper")
helper.connect(ip,port)
print "Disabling QoS at %s:%s" % (ip,port)
url = "http://%s:%s/wm/qos/tool/disable/json" % (ip,port)
try:
req = helper.request("GET",url,None)
print "[CONTROLLER]: %s" % req
except Exception as e:
print e
print "Could Not Complete Request"
exit(1)
helper.close_connection()
#LIST SERVICE FROM CONTROLLER
def listServices(ip,port):
helper = httpHelper(__name="QoSHTTPHelper")
helper.connect(ip,port)
print "QoS at %s:%s" % (ip,port)
url = "http://%s:%s/wm/qos/service/json" % (ip,port)
try:
req = helper.request("GET",url,None)
print "listing services..."
srvs = os.popen("echo '%s' | python -mjson.tool | more" % req).read()
print "[CONTROLLER]: %s" % srvs
except Exception as e:
print e
print "Could Not Complete Request"
exit(1)
helper.close_connection()
#LIST POLICIES FROM CONTROLLER
def listPolicies(ip,port):
helper = httpHelper(__name="QoSHTTPHelper")
helper.connect(ip,port)
print "QoS at %s:%s" % (ip,port)
url = "http://%s:%s/wm/qos/policy/json" % (ip,port)
try:
req = helper.request("GET",url,None)
print "listing policies"
pols = os.popen("echo '%s' | python -mjson.tool | more" % req).read()
print "[CONTROLLER]: %s" % pols
except Exception as e:
print e
print "Could Not Complete Request"
exit(1)
helper.close_connection()
#GET STATUS OF QoS ON CONTROLLER
def qosStatus(ip,port):
helper = httpHelper(__name="QoSHTTPHelper")
helper.connect(ip,port)
print "QoS at %s:%s" % (ip,port)
url = "http://%s:%s/wm/qos/tool/status/json" % (ip,port)
try:
req = helper.request("GET",url,None)
pols = os.popen("echo '%s' | python -mjson.tool | more" % req).read()
print "[CONTROLLER]: %s" % pols
except Exception as e:
print e
print "Could Not Complete Request"
exit(1)
helper.close_connection()
#HTTP HELPER CLASS
#
# Contains connection parameters and
# a REQUEST helper for sending and
# receiving JSON
#
# @author <NAME>
class httpHelper:
__name = "None"
httpcon = None
#initialize
def __init__(self, **kvargs):
self._attributes = kvargs
def set_attributes(self, key, value):
        self._attributes[key] = value
return
def get_attributes(self, key):
return self._attributes.get(key,None)
def connect(self,ip,port):
try:
self.httpcon = httplib.HTTPConnection(ip,port)
self.httpcon.connect()
except httplib.HTTPException as e:
print "Could not connect to server: %s:%s" % (ip, port)
print e
exit(1)
except Exception as e:
print "Could not connect to server: %s:%s" % (ip, port)
print e
exit(1)
print "Connection Successful"
return self.httpcon
def close_connection(self):
try:
self.httpcon.close()
except httplib.HTTPException:
print "Could not close connection"
except Exception as e:
print "Could not close connection"
print e
print "Closed connection successfully"
def request(self, method, url, body, content_type="application/json"):
headers = { "Content-Type" : content_type }
self.httpcon.request(method, url,body, headers)
response = self.httpcon.getresponse()
s = response.status
ok = httplib.OK
acc = httplib.ACCEPTED
crtd = httplib.CREATED
ncontnt = httplib.NO_CONTENT
if s != ok and s != acc and s != crtd and s != ncontnt:
print "%s to %s got an unexpected response code: %d %s (content = '%s')" \
% (method, url, response.status, response.reason, response.read())
return response.read()
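    # Usage sketch (illustrative values; this is the pattern followed by the module-level
    # helpers such as enable(), listServices() and qosStatus() above):
    #   helper = httpHelper(__name="QoSHTTPHelper")
    #   helper.connect("127.0.0.1", "8080")
    #   body = helper.request("GET", "http://127.0.0.1:8080/wm/qos/tool/status/json", None)
    #   helper.close_connection()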
#Call main :)
if __name__ == "__main__" :
main()
``` |
{
"source": "jianqingxie/RSTNet",
"score": 2
} |
#### File: jianqingxie/RSTNet/feats_process.py
```python
import json
import os
import h5py
import argparse
import numpy
import torch
import torch.nn as nn
from h5py._hl import base
from tqdm import tqdm
class DataProcessor(nn.Module):
def __init__(self):
super(DataProcessor, self).__init__()
self.pool = nn.AdaptiveAvgPool2d((7, 7))
def forward(self, x):
x = self.pool(x)
x = torch.squeeze(x) # [1, d, h, w] => [d, h, w]
x = x.permute(1, 2, 0) # [d, h, w] => [h, w, d]
return x.view(-1, x.size(-1)) # [h*w, d]
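# Minimal shape-check sketch (dummy backbone map assumed; the real inputs are the pre-extracted
# X101 grid features loaded in save_dataset below): any [1, d, h, w] map is pooled to 7x7 and
# flattened to [h*w, d] = [49, d].
def _demo_processor_shapes():
    processor = DataProcessor()
    dummy = torch.zeros(1, 2048, 10, 17)  # [1, d, h, w]
    out = processor(dummy)
    assert out.shape == (49, 2048)
    return out.shape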
def save_dataset(file_path, feat_paths):
    print('save the original grid features as features with the specified size')
processor = DataProcessor()
with h5py.File(file_path, 'w') as f:
for i in tqdm(range(len(feat_paths))):
feat_path = os.path.join('/mnt/data/X101-features', 'train2014', feat_paths[i])
if not os.path.exists(feat_path):
feat_path = os.path.join('/mnt/data/X101-features', 'val2014', feat_paths[i])
img_name = feat_path.split('/')[-1]
img_feat = torch.load(feat_path)
img_id = int(img_name.split('.')[0])
img_feat = processor(img_feat)
data = numpy.asarray(img_feat.numpy(), order="C", dtype=base.guess_dtype(img_feat.numpy()))
f.require_dataset('%d_grids' % img_id, shape=data.shape, dtype=data.dtype, data=img_feat.numpy())
f.close()
def get_feat_paths(dir_to_save_feats, data_split='trainval', test2014_info_path=None):
print('get the paths of grid features')
ans = []
if data_split == 'trainval':
ans = os.listdir(os.path.join(dir_to_save_feats, 'train2014')) + os.listdir(os.path.join(dir_to_save_feats, 'val2014'))
elif data_split == 'test':
assert test2014_info_path is not None
with open(test2014_info_path, 'r') as f:
test2014_info = json.load(f)
for image in test2014_info['images']:
img_id = image['id']
            feat_path = os.path.join(dir_to_save_feats, 'test2015', str(img_id) + '.pth')
assert os.path.exists(feat_path)
ans.append(feat_path)
assert len(ans) == 40775
assert ans # make sure ans list is not empty
return ans
def main(args):
feat_paths = get_feat_paths(args.dir_to_save_feats, args.data_split, args.test2014_info_path)
file_path = os.path.join(args.dir_to_save_feats, 'X101_grid_feats_coco_'+args.data_split+'.hdf5')
save_dataset(file_path, feat_paths)
print('finished!')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='data process')
parser.add_argument('--dir_to_save_feats', type=str, default='/mnt/data/X101-features')
parser.add_argument('--data_split', type=str, default='trainval') # trainval, test
parser.add_argument('--test2014_info_path', type=str, default=None) # None, image_info_test2014.json
args = parser.parse_args()
main(args)
``` |
{
"source": "jianquan79/applied_ds",
"score": 3
} |
#### File: jianquan79/applied_ds/test_io_hw.py
```python
import pytest
import pandas as pd
import numpy as np
import os
import pdb
import io_hw
num = np.random.randint(100000000)
name = 'elephant'
def test_io_hw():
"""
This file grades the homework, io_hw.py
It will check the following:
- The function you wrote loads your dataset
    - The function you wrote saves the first 5 rows of your dataset
"""
df, head_df = io_hw.io_hw('test.csv')
assert os.path.isfile('test.csv')
assert sum(1 for line in open('test.csv')) - 1 == len(head_df)
assert len(df.columns) == len(head_df.columns)
``` |
{
"source": "jianrenw/SOD-TGNN",
"score": 2
} |
#### File: SOD-TGNN/nuscenes_flicker/flicker.py
```python
import numpy as np
import os
import os.path as osp
import json
import argparse
from nuscenes import NuScenes
from .nuscenes_split import nuscenes_split
import copy
import multiprocessing
parser = argparse.ArgumentParser()
parser.add_argument('--det_dir', type=str)
parser.add_argument('--output_dir', type=str)
parser.add_argument('--root_path', type=str, default='/working_dir/nuscenes')
parser.add_argument('--split', type=str, default='unlabeled_train')
parser.add_argument('--frame', type=int, default=5)
parser.add_argument('--distance', type=int, default=10)
# parser.add_argument('--flag', type=str)
meta = {
"use_camera": False,
"use_lidar": True,
"use_radar": False,
"use_map": False,
"use_external": False
}
def flicker(results, all_sample_tokens, all_timestamps, idx, my_obj, frame,
distance):
# search over frame
# search over distance
best_association_score = []
start = max([0, idx - frame])
end = min(idx + frame + 1, len(all_sample_tokens))
for i in range(start, end):
min_dis = distance
local_best = 0
if i == idx:
continue
time_lag = all_timestamps[i] - all_timestamps[idx]
predict_position = time_lag / 1000000 * np.array(
my_obj['velocity'][:2]) + np.array(my_obj['translation'][:2])
if not all_sample_tokens[i] in results:
import pdb; pdb.set_trace()
objs = results[all_sample_tokens[i]]
for obj in objs:
if obj['detection_name'] != my_obj['detection_name']:
continue
cd = np.linalg.norm(
np.array(obj['translation'][:2]) - predict_position)
if cd < min_dis:
min_dis = cd
local_best = obj['detection_score']
best_association_score.append(local_best)
new_score = np.mean(np.array(best_association_score))
return new_score * my_obj['detection_score']
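# Worked example (illustrative numbers): with frame=5 and distance=10, suppose a detection with
# score 0.8 has 8 neighbouring keyframes in range and its best same-class matches within 10 m of
# the velocity-predicted position score [0.9, 0.7, 0.0, 0.0, 0.8, 0.0, 0.6, 0.0]; then the
# temporal term is mean(...) = 0.375 and the returned flicker score is 0.375 * 0.8 = 0.3.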
def format_sample_result(sample_result, new_score):
'''
Output:
sample_result {
"sample_token": <str> -- Foreign key. Identifies the sample/keyframe for which objects are detected.
"translation": <float> [3] -- Estimated bounding box location in meters in the global frame: center_x, center_y, center_z.
"size": <float> [3] -- Estimated bounding box size in meters: width, length, height.
"rotation": <float> [4] -- Estimated bounding box orientation as quaternion in the global frame: w, x, y, z.
"velocity": <float> [2] -- Estimated bounding box velocity in m/s in the global frame: vx, vy.
"detection_name": predicted class for this sample_result, e.g. car, pedestrian.
Note that the tracking_name cannot change throughout a track.
"detection_score": <float> -- Object prediction score between 0 and 1 for the class identified by tracking_name.
We average over frame level scores to compute the track level score.
The score is used to determine positive and negative tracks via thresholding.
"original_score": <float> -- Original detection score.
"attribute_name": str -- Attribute_name
}
'''
sample_result = copy.deepcopy(sample_result)
sample_result["original_score"] = sample_result["detection_score"]
sample_result["detection_score"] = new_score
return sample_result
def main(root_path, det_dir, output_dir, split, frame, distance):
nusc_trainval = NuScenes(version='v1.0-trainval',
dataroot=root_path,
verbose=True)
nusc_test = NuScenes(version='v1.0-test',
dataroot=root_path,
verbose=True)
my_split = nuscenes_split('without_test', nusc_trainval, nusc_test)
token_set = my_split.get_all_sample_tokens(split)
# with open(osp.join(args.det_dir, 'det.json')) as f:
# data = json.load(f)
# results = data['results']
with open(det_dir) as f:
data = json.load(f)
results = data['results']
new_results = {}
# def save_flicker(single_token_set):
# scene_name, token_time = single_token_set
# # for scene_name, token_time in token_set.items():
# sample_tokens, timestamps = token_time[:]
# for idx, sample_token in enumerate(sample_tokens):
# # import pdb; pdb.set_trace()
# objects = results[sample_token]
# for obj in objects:
# new_score = flicker(results, sample_tokens, timestamps, idx, obj,
# args.frame, args.distance)
# if sample_token in new_results:
# new_results[sample_token].append(
# format_sample_result(obj, new_score))
# else:
# new_results[sample_token] = [
# format_sample_result(obj, new_score)
# ]
# pool = multiprocessing.Pool()
# pool.map(save_flicker, token_set.items())
for scene_name, token_time in token_set.items():
sample_tokens, timestamps = token_time[:]
for idx, sample_token in enumerate(sample_tokens):
objects = results[sample_token]
for obj in objects:
new_score = flicker(results, sample_tokens, timestamps, idx, obj,
frame, distance)
if sample_token in new_results:
new_results[sample_token].append(
format_sample_result(obj, new_score))
else:
new_results[sample_token] = [
format_sample_result(obj, new_score)
]
# print(len(new_results))
if not osp.isdir(output_dir):
os.makedirs(output_dir, exist_ok=True)
output_data = {'meta': meta, 'results': new_results}
with open(osp.join(output_dir, 'det_{}.json'.format(split)), 'w') as outfile:
json.dump(output_data, outfile)
if __name__ == '__main__':
args = parser.parse_args()
main(args.root_path, args.det_dir, args.output_dir, args.split, args.frame, args.distance)
```
#### File: jianrenw/SOD-TGNN/preprocess.py
```python
import os
import argparse
import CenterPoint_v1.tools.detection as centerpoint_detector
import CenterPoint_v1.tools.detection_batch as centerpoint_detector_batch
from det3d.datasets.h3d import h3d as h3d_ds
from label_generation import main as label_generation
from isotonic_regression import main as isotonic_regression
from vis_bin_new import main as vis_bin
def create_folder(path):
if not os.path.exists(path):
os.makedirs(path)
def run_detection(pc_path, ckpt_path, config, save_path, split='inference'):
# centerpoint_detector.main(root_path=data_path, save_path=save_path, ckpt_path=ckpt_path)
if split == 'val':
centerpoint_detector_batch.main(
ckpt_path=ckpt_path, config=config, save_path=save_path, split=split)
else:
if os.path.isfile('./h3d_all_infos_inference.pkl'):
os.remove('./h3d_all_infos_inference.pkl')
h3d_ds.create_h3d_inference_infos(pc_path, save_path='./')
centerpoint_detector_batch.main(
ckpt_path=ckpt_path, config=config, save_path=save_path)
def run_flicker(detection_path, save_path):
try:
command = (' ').join(
('./flicker.sh', detection_path, detection_path, save_path))
# print(command)
os.system(command)
except ValueError:
print('run flicker command wrong')
def run_isotonic_regression(gt_data_path, detection_path, isotonic_train_path):
label_generation(gt_data_path, isotonic_train_path + 'inference/', isotonic_train_path + 'label_generation/')
isotonic_regression(isotonic_train_path + 'label_generation/', isotonic_train_path)
vis_bin(isotonic_train_path, detection_path, detection_path)
def hdd_data_prep(pc_path, label_path, save_path, gt_data_path=None):
h3d_ds.create_h3d_infos(pc_path, label_path=label_path,
save_path=save_path, gt_data_path=gt_data_path, calib=False)
# h3d_ds.create_reduced_point_cloud(data_path=data_path, save_path=data_path+"_reduced")
# h3d_ds.create_groundtruth_database(save_path)
# create_groundtruth_database("H3dDataset", data_path, Path(data_path) / "h3d_car_infos_train.pkl")
def h3d_test_prep(data_path, save_path=None):
h3d_ds.create_h3d_test_infos(data_path, save_path=save_path, calib=False)
# h3d_ds.create_reduced_point_cloud(data_path=data_path, save_path=data_path+"_reduced")
# h3d_ds.create_groundtruth_database(save_path)
# create_groundtruth_database("H3dDataset", data_path, Path(data_path) / "h3d_car_infos_train.pkl")
def parse_args():
parser = argparse.ArgumentParser(description="Train a detector")
parser.add_argument("--root_path", required=True, help="root data path")
parser.add_argument("--config", required=True,
help="train config file path")
parser.add_argument("--round", type=int, required=True,
help="current round")
# parser.add_argument("--work_dir", help="the dir to save logs and models")
parser.add_argument(
"--h3d_path", default='/working_dir/h3d_data/icra_bin/', help="h3d data path for test")
parser.add_argument("--resume", type=bool, default=False,
help="the checkpoint file to resume from")
parser.add_argument('--work_dir', type=str,
default='./CenterPoint_v1/work_dirs/round_')
args = parser.parse_args()
return args
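# Per-round pipeline sketch (as wired in main() below): run the current detector over the
# unlabeled HDD split, rescore its boxes with the temporal flicker step, repeat on the held-out
# split to fit the isotonic-regression calibration, then rebuild the pseudo-label training
# infos with hdd_data_prep() for the next self-training round.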
def main():
args = parse_args()
root_path = args.root_path
h3d_path = args.h3d_path
count = args.round
data_path = root_path + '/HDD/'
labels_path = root_path + '/labels/'
if not os.path.isfile(h3d_path + 'h3d_all_infos_test.pkl'):
h3d_test_prep(h3d_path)
try:
current_model_save = os.path.join(
args.work_dir + str(count), args.config.split('/')[-1].split('.')[0], 'latest.pth')
# save_path = './data_02/labels/round_' + str(count) + '/'
save_path = labels_path + '/current_round/'
save_path_val = save_path + 'isotonic_regression_train/'
if not os.path.isfile(save_path + 'h3d_all_dbinfos_train.pkl'):
create_folder(save_path)
create_folder(save_path_val + 'inference/')
run_detection(data_path, current_model_save,args.config, save_path)
run_flicker(save_path, save_path)
run_detection(data_path, current_model_save, args.config, save_path_val + 'inference/', split='val')
run_flicker(save_path_val + 'inference/', save_path_val + 'inference/')
run_isotonic_regression(h3d_path, save_path, save_path_val)
hdd_data_prep(data_path, save_path, save_path, h3d_path)
except ValueError:
print('error!')
if __name__ == '__main__':
main()
```
#### File: SOD-TGNN/tools/create_data.py
```python
import copy
from pathlib import Path
import pickle
import fire, os
from det3d.datasets.nuscenes import nusc_common as nu_ds
from det3d.datasets.utils.create_gt_database import create_groundtruth_database
from det3d.datasets.waymo import waymo_common as waymo_ds
def nuscenes_data_prep(root_path, version, nsweeps=10, filter_zero=True, virtual=False):
nu_ds.create_nuscenes_infos(root_path, version=version, nsweeps=nsweeps, filter_zero=filter_zero)
if version == 'v1.0-trainval':
create_groundtruth_database(
"NUSC",
root_path,
Path(root_path) / "infos_train_{:02d}sweeps_withvelo_filter_{}.pkl".format(nsweeps, filter_zero),
nsweeps=nsweeps,
virtual=virtual
)
def waymo_data_prep(root_path, split, nsweeps=1):
waymo_ds.create_waymo_infos(root_path, split=split, nsweeps=nsweeps)
if split == 'train':
create_groundtruth_database(
"WAYMO",
root_path,
Path(root_path) / "infos_train_{:02d}sweeps_filter_zero_gt.pkl".format(nsweeps),
used_classes=['VEHICLE', 'CYCLIST', 'PEDESTRIAN'],
nsweeps=nsweeps
)
if __name__ == "__main__":
fire.Fire()
``` |
{
"source": "Jianrong-Lu/Head-and-Neck-Tumour-Segmentation-and-Prediction-of-Patient-Survival",
"score": 2
} |
#### File: src/Segmentation_Task/layers.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
class ConvBlock3d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1):
super(ConvBlock3d, self).__init__()
self.conv3d = nn.Conv3d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,
stride=stride, padding=padding)
self.batch_norm = nn.BatchNorm3d(num_features=out_channels)
def forward(self, x):
x = self.conv3d(x)
x = self.batch_norm(x)
x = F.relu(x, inplace=True)
return x
class FastSmoothSENorm(nn.Module):
class SEWeights(nn.Module):
def __init__(self, in_channels, reduction=2):
super().__init__()
self.conv1 = nn.Conv3d(in_channels, in_channels // reduction, kernel_size=1, stride=1, padding=0, bias=True)
self.conv2 = nn.Conv3d(in_channels // reduction, in_channels, kernel_size=1, stride=1, padding=0, bias=True)
def forward(self, x):
b, c, d, h, w = x.size()
out = torch.mean(x.view(b, c, -1), dim=-1).view(b, c, 1, 1, 1) # output_shape: in_channels x (1, 1, 1)
out = F.relu(self.conv1(out))
out = self.conv2(out)
return out
def __init__(self, in_channels, reduction=2):
super(FastSmoothSENorm, self).__init__()
self.norm = nn.InstanceNorm3d(in_channels, affine=False)
self.gamma = self.SEWeights(in_channels, reduction)
self.beta = self.SEWeights(in_channels, reduction)
def forward(self, x):
gamma = torch.sigmoid(self.gamma(x))
beta = torch.tanh(self.beta(x))
x = self.norm(x)
return gamma * x + beta
class FastSmoothSeNormConv3d(nn.Module):
"""
3D normalised squeeze-excitation convolutional blocks described in:
<NAME>., <NAME>., <NAME>. (2021) Squeeze-and-Excitation Normalization for Automated Delineation of Head and Neck Primary Tumors in Combined PET and CT Images.
In: <NAME>., <NAME>., <NAME>. (eds) Head and Neck Tumor Segmentation. HECKTOR 2020. Lecture Notes in Computer Science, vol 12603. Springer, Cham.
https://doi.org/10.1007/978-3-030-67194-5_4
"""
def __init__(self, in_channels, out_channels, reduction=2, **kwargs):
super(FastSmoothSeNormConv3d, self).__init__()
self.conv = nn.Conv3d(in_channels, out_channels, bias=True, **kwargs)
self.norm = FastSmoothSENorm(out_channels, reduction)
def forward(self, x):
x = self.conv(x)
x = F.relu(x, inplace=True)
x = self.norm(x)
return x
class RESseNormConv3d(nn.Module):
"""
3D normalised squeeze-excitation residual blocks described in:
<NAME>., <NAME>., <NAME>. (2021) Squeeze-and-Excitation Normalization for Automated Delineation of Head and Neck Primary Tumors in Combined PET and CT Images.
In: <NAME>., <NAME>., <NAME>. (eds) Head and Neck Tumor Segmentation. HECKTOR 2020. Lecture Notes in Computer Science, vol 12603. Springer, Cham.
https://doi.org/10.1007/978-3-030-67194-5_4
"""
def __init__(self, in_channels, out_channels, reduction=2, **kwargs):
super().__init__()
self.conv1 = FastSmoothSeNormConv3d(in_channels, out_channels, reduction, **kwargs)
if in_channels != out_channels:
self.res_conv = FastSmoothSeNormConv3d(in_channels, out_channels, reduction, kernel_size=1, stride=1, padding=0)
else:
self.res_conv = None
def forward(self, x):
residual = self.res_conv(x) if self.res_conv else x
x = self.conv1(x)
x += residual
return x
class ASPP(nn.Module):
"""
ASPP module adapted from https://github.com/lvpeiqing/SAR-U-Net-liver-segmentation/blob/master/models/se_p_resunet/se_p_resunet.py#L28
"""
def __init__(self, in_dims, out_dims, rate=[6, 12, 18]):
super(ASPP, self).__init__()
self.pool = nn.MaxPool3d(2)
self.aspp_block1 = nn.Sequential(
nn.Conv3d(
in_dims, out_dims, 3, stride=1, padding=rate[0], dilation=rate[0]
),
nn.ReLU(inplace=True),
nn.BatchNorm3d(out_dims),
)
self.aspp_block2 = nn.Sequential(
nn.Conv3d(
in_dims, out_dims, 3, stride=1, padding=rate[1], dilation=rate[1]
),
nn.ReLU(inplace=True),
nn.BatchNorm3d(out_dims),
)
self.aspp_block3 = nn.Sequential(
nn.Conv3d(
in_dims, out_dims, 3, stride=1, padding=rate[2], dilation=rate[2]
),
nn.ReLU(inplace=True),
nn.BatchNorm3d(out_dims),
)
self.output = nn.Conv3d(len(rate) * out_dims, out_dims, 1)
self._init_weights()
def forward(self, x):
x = self.pool(x)
x1 = self.aspp_block1(x)
x2 = self.aspp_block2(x)
x3 = self.aspp_block3(x)
out = torch.cat([x1, x2, x3], dim=1)
return self.output(out)
def _init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv3d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
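# Spatial-size note (sketch, assumed sizes): ASPP first halves the spatial dims with MaxPool3d(2),
# and the three dilated 3x3x3 branches use "same"-style padding (padding == dilation), so an input
# of [b, in_dims, 32, 32, 32] comes out of the final 1x1x1 fusion as [b, out_dims, 16, 16, 16].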
class ChannelSELayer3D(nn.Module):
"""
3D extension of Squeeze-and-Excitation (SE) block described in:
*Hu et al., Squeeze-and-Excitation Networks, arXiv:1709.01507*
*Zhu et al., AnatomyNet, arXiv:arXiv:1808.05238*
"""
def __init__(self, num_channels, reduction_ratio=2):
"""
:param num_channels: No of input channels
:param reduction_ratio: By how much should the num_channels should be reduced
"""
super(ChannelSELayer3D, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool3d(1)
num_channels_reduced = num_channels // reduction_ratio
self.reduction_ratio = reduction_ratio
self.fc1 = nn.Linear(num_channels, num_channels_reduced, bias=True)
self.fc2 = nn.Linear(num_channels_reduced, num_channels, bias=True)
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def forward(self, input_tensor):
"""
:param input_tensor: X, shape = (batch_size, num_channels, D, H, W)
:return: output tensor
"""
batch_size, num_channels, D, H, W = input_tensor.size()
# Average along each channel
squeeze_tensor = self.avg_pool(input_tensor)
# channel excitation
fc_out_1 = self.relu(self.fc1(squeeze_tensor.view(batch_size, num_channels)))
fc_out_2 = self.sigmoid(self.fc2(fc_out_1))
output_tensor = torch.mul(input_tensor, fc_out_2.view(batch_size, num_channels, 1, 1, 1))
return output_tensor
class SpatialSELayer3D(nn.Module):
"""
3D extension of SE block squeezing spatially and exciting channel-wise defined in:
Roy et al., Concurrent Spatial and Channel Squeeze & Excitation in Fully Convolutional Networks, MICCAI 2018*
"""
def __init__(self, num_channels):
"""
:param num_channels: No of input channels
"""
super(SpatialSELayer3D, self).__init__()
self.conv = nn.Conv3d(num_channels, 1, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, input_tensor, weights=None):
"""
:param weights: weights for few shot learning
:param input_tensor: X, shape = (batch_size, num_channels, D, H, W)
:return: output_tensor
"""
# channel squeeze
batch_size, channel, D, H, W = input_tensor.size()
if weights:
weights = weights.view(1, channel, 1, 1)
out = F.conv3d(input_tensor, weights)
else:
out = self.conv(input_tensor)
squeeze_tensor = self.sigmoid(out)
# spatial excitation
output_tensor = torch.mul(input_tensor, squeeze_tensor.view(batch_size, 1, D, H, W))
return output_tensor
class ChannelSpatialSELayer3D(nn.Module):
"""
3D extension of concurrent spatial and channel squeeze & excitation:
*Roy et al., Concurrent Spatial and Channel Squeeze & Excitation in Fully Convolutional Networks, arXiv:1803.02579*
"""
def __init__(self, num_channels, reduction_ratio=2):
"""
:param num_channels: No of input channels
:param reduction_ratio: By how much should the num_channels should be reduced
"""
super(ChannelSpatialSELayer3D, self).__init__()
self.cSE = ChannelSELayer3D(num_channels, reduction_ratio)
self.sSE = SpatialSELayer3D(num_channels)
def forward(self, input_tensor):
"""
:param input_tensor: X, shape = (batch_size, num_channels, D, H, W)
:return: output_tensor
"""
output_tensor = torch.max(self.cSE(input_tensor), self.sSE(input_tensor))
return output_tensor
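# Minimal usage sketch (dummy sizes assumed): both SE variants preserve the input shape, and the
# concurrent layer takes the element-wise max of the channel- and spatial-recalibrated outputs.
def _demo_channel_spatial_se():
    layer = ChannelSpatialSELayer3D(num_channels=8, reduction_ratio=2)
    x = torch.zeros(2, 8, 4, 4, 4)  # (batch, channels, D, H, W)
    out = layer(x)
    assert out.shape == x.shape
    return out.shape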
class up_conv(nn.Module):
def __init__(self, in_channels, out_channels, reduction=2, scale=2):
super().__init__()
self.scale = scale
        # 1x1x1 SE-norm conv to adjust the channel count before trilinear upsampling
        self.conv = FastSmoothSeNormConv3d(in_channels, out_channels, reduction, kernel_size=1, stride=1, padding=0)
def forward(self, x):
x = self.conv(x)
x = F.interpolate(x, scale_factor=self.scale, mode='trilinear', align_corners=False)
return x
def weights_init_normal(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('Linear') != -1:
init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm') != -1:
init.normal_(m.weight.data, 1.0, 0.02)
init.constant_(m.bias.data, 0.0)
def weights_init_kaiming(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif classname.find('Linear') != -1:
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif classname.find('BatchNorm') != -1:
init.normal_(m.weight.data, 1.0, 0.02)
init.constant_(m.bias.data, 0.0)
def weights_init_xavier(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
init.xavier_normal_(m.weight.data, gain=1)
elif classname.find('Linear') != -1:
init.xavier_normal_(m.weight.data, gain=1)
elif classname.find('BatchNorm') != -1:
init.normal_(m.weight.data, 1.0, 0.02)
init.constant_(m.bias.data, 0.0)
def init_weights(net, init_type='normal'):
if init_type == 'normal':
net.apply(weights_init_normal)
elif init_type == 'xavier':
net.apply(weights_init_xavier)
elif init_type == 'kaiming':
net.apply(weights_init_kaiming)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
```
#### File: src/Segmentation_Task/model.py
```python
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from layers import ConvBlock3d, FastSmoothSeNormConv3d, RESseNormConv3d, ChannelSpatialSELayer3D, init_weights
class NormResSEUNet_3Plus(nn.Module):
'''
Inspired from:
-UNet 3+ architecture with full scale inter and intra-skip connections with ground-truth supervision defined in:
Hu<NAME>. et al.(2020). UNet 3+: A Full-Scale Connected UNet for Medical Image Segmentation. 1055-1059. 10.1109/ICASSP40776.2020.9053405.
https://arxiv.org/abs/2004.08790
- 3D normalised squeeze and excitation blocks defined in:
<NAME>., <NAME>., <NAME>. (2021) Squeeze-and-Excitation Normalization for Automated Delineation of Head and Neck Primary Tumors in Combined PET and CT Images.
In: <NAME>., <NAME>., <NAME>. (eds) Head and Neck Tumor Segmentation. HECKTOR 2020. Lecture Notes in Computer Science, vol 12603. Springer, Cham.
https://doi.org/10.1007/978-3-030-67194-5_4
'''
def __init__(self, in_channels=2, n_classes=1, feature_scale=4, reduction=2, is_deconv=True, is_batchnorm=True):
super(NormResSEUNet_3Plus, self).__init__()
self.is_deconv = is_deconv
self.in_channels = in_channels
self.is_batchnorm = is_batchnorm
self.feature_scale = feature_scale
filters = [24,48,96,192,384]
## -------------Encoder--------------
self.block_1_1_left = RESseNormConv3d(self.in_channels, filters[0], reduction, kernel_size=7, stride=1, padding=3)
self.block_1_2_left = RESseNormConv3d(filters[0], filters[0], reduction, kernel_size=3, stride=1, padding=1)
self.pool_1 = nn.MaxPool3d(kernel_size=2, stride=2)
self.block_2_1_left = RESseNormConv3d(filters[0], filters[1], reduction, kernel_size=3, stride=1, padding=1)
self.block_2_2_left = RESseNormConv3d(filters[1], filters[1], reduction, kernel_size=3, stride=1, padding=1)
self.block_2_3_left = RESseNormConv3d(filters[1], filters[1], reduction, kernel_size=3, stride=1, padding=1)
self.pool_2 = nn.MaxPool3d(kernel_size=2, stride=2)
self.block_3_1_left = RESseNormConv3d(filters[1], filters[2], reduction, kernel_size=3, stride=1, padding=1)
self.block_3_2_left = RESseNormConv3d(filters[2], filters[2], reduction, kernel_size=3, stride=1, padding=1)
self.block_3_3_left = RESseNormConv3d(filters[2], filters[2], reduction, kernel_size=3, stride=1, padding=1)
self.pool_3 = nn.MaxPool3d(kernel_size=2, stride=2)
self.block_4_1_left = RESseNormConv3d(filters[2], filters[3], reduction, kernel_size=3, stride=1, padding=1)
self.block_4_2_left = RESseNormConv3d(filters[3], filters[3], reduction, kernel_size=3, stride=1, padding=1)
self.block_4_3_left = RESseNormConv3d(filters[3], filters[3], reduction, kernel_size=3, stride=1, padding=1)
self.pool_4 = nn.MaxPool3d(kernel_size=2, stride=2)
self.block_5_1_left = RESseNormConv3d(filters[3], filters[4], reduction, kernel_size=3, stride=1, padding=1)
self.block_5_2_left = RESseNormConv3d(filters[4], filters[4], reduction, kernel_size=3, stride=1, padding=1)
self.block_5_3_left = RESseNormConv3d(filters[4], filters[4], reduction, kernel_size=3, stride=1, padding=1)
## -------------Decoder--------------
self.CatChannels = filters[0]
self.CatBlocks = 6
self.UpChannels = self.CatChannels * self.CatBlocks
'''stage 4d'''
self.h1_PT_hd4 = nn.MaxPool3d(8, 8, ceil_mode=True)
self.h1_PT_hd4_conv = FastSmoothSeNormConv3d(filters[0], self.CatChannels, reduction, kernel_size=3, stride=1, padding=1)
self.h2_PT_hd4 = nn.MaxPool3d(4, 4, ceil_mode=True)
self.h2_PT_hd4_conv = FastSmoothSeNormConv3d(filters[1], self.CatChannels, reduction, kernel_size=3, stride=1, padding=1)
self.h3_PT_hd4 = nn.MaxPool3d(2, 2, ceil_mode=True)
self.h3_PT_hd4_conv = FastSmoothSeNormConv3d(filters[2], self.CatChannels, reduction, kernel_size=3, stride=1, padding=1)
self.h4_Cat_hd4_conv = FastSmoothSeNormConv3d(filters[3], self.CatChannels, reduction, kernel_size=3, stride=1, padding=1)
self.hd5_UT_hd4 = nn.Upsample(scale_factor=2, mode='trilinear') # 14*14
self.hd5_UT_hd4_conv = FastSmoothSeNormConv3d(filters[4], self.CatChannels, reduction, kernel_size=3, stride=1, padding=1)
self.conv4d_1 = FastSmoothSeNormConv3d(self.UpChannels, self.UpChannels, reduction, kernel_size=3, stride=1, padding=1)
'''stage 3d'''
self.h1_PT_hd3 = nn.MaxPool3d(4, 4, ceil_mode=True)
self.h1_PT_hd3_conv = FastSmoothSeNormConv3d(filters[0], self.CatChannels, reduction, kernel_size=3, stride=1, padding=1)
self.h2_PT_hd3 = nn.MaxPool3d(2, 2, ceil_mode=True)
self.h2_PT_hd3_conv = FastSmoothSeNormConv3d(filters[1], self.CatChannels, reduction, kernel_size=3, stride=1, padding=1)
self.h3_Cat_hd3_conv = FastSmoothSeNormConv3d(filters[2], self.CatChannels, reduction, kernel_size=3, stride=1, padding=1)
self.hd4_UT_hd3 = nn.Upsample(scale_factor=2, mode='trilinear')
self.hd4_UT_hd3_conv = FastSmoothSeNormConv3d(self.UpChannels, self.CatChannels, reduction, kernel_size=3, stride=1, padding=1)
self.hd5_UT_hd3 = nn.Upsample(scale_factor=4, mode='trilinear')
self.hd5_UT_hd3_conv = FastSmoothSeNormConv3d(filters[4], self.CatChannels, reduction, kernel_size=3, stride=1, padding=1)
self.conv3d_1 = FastSmoothSeNormConv3d(self.UpChannels, self.UpChannels, reduction, kernel_size=3, stride=1, padding=1)
'''stage 2d '''
self.h1_PT_hd2 = nn.MaxPool3d(2, 2, ceil_mode=True)
self.h1_PT_hd2_conv = FastSmoothSeNormConv3d(filters[0], self.CatChannels, reduction, kernel_size=3, stride=1, padding=1)
self.h2_Cat_hd2_conv = FastSmoothSeNormConv3d(filters[1], self.CatChannels, reduction, kernel_size=3, stride=1, padding=1)
self.hd3_UT_hd2 = nn.Upsample(scale_factor=2, mode='trilinear')
self.hd3_UT_hd2_conv = FastSmoothSeNormConv3d(self.UpChannels, self.CatChannels, reduction, kernel_size=3, stride=1, padding=1)
self.hd4_UT_hd2 = nn.Upsample(scale_factor=4, mode='trilinear')
self.hd4_UT_hd2_conv = FastSmoothSeNormConv3d(self.UpChannels, self.CatChannels, reduction, kernel_size=3, stride=1, padding=1)
self.hd5_UT_hd2 = nn.Upsample(scale_factor=8, mode='trilinear')
self.hd5_UT_hd2_conv = FastSmoothSeNormConv3d(filters[4], self.CatChannels, reduction, kernel_size=3, stride=1, padding=1)
self.conv2d_1 = FastSmoothSeNormConv3d(self.UpChannels, self.UpChannels, reduction, kernel_size=3, stride=1, padding=1)
'''stage 1d'''
self.h1_Cat_hd1_conv = FastSmoothSeNormConv3d(filters[0], self.CatChannels, reduction, kernel_size=3, stride=1, padding=1)
self.hd2_UT_hd1 = nn.Upsample(scale_factor=2, mode='trilinear')
self.hd2_UT_hd1_conv = FastSmoothSeNormConv3d(self.UpChannels, self.CatChannels, reduction, kernel_size=3, stride=1, padding=1)
self.hd3_UT_hd1 = nn.Upsample(scale_factor=4, mode='trilinear')
self.hd3_UT_hd1_conv = FastSmoothSeNormConv3d(self.UpChannels, self.CatChannels, reduction, kernel_size=3, stride=1, padding=1)
self.hd4_UT_hd1 = nn.Upsample(scale_factor=8, mode='trilinear')
self.hd4_UT_hd1_conv = FastSmoothSeNormConv3d(self.UpChannels, self.CatChannels, reduction, kernel_size=3, stride=1, padding=1)
self.hd5_UT_hd1 = nn.Upsample(scale_factor=16, mode='trilinear')
self.hd5_UT_hd1_conv = FastSmoothSeNormConv3d(filters[4], self.CatChannels, reduction, kernel_size=3, stride=1, padding=1)
self.conv1d_1 = FastSmoothSeNormConv3d(self.UpChannels, self.UpChannels, reduction, kernel_size=3, stride=1, padding=1)
# -------------Trilinear Upsampling--------------
self.upscore6 = nn.Upsample(scale_factor=32,mode='trilinear')
self.upscore5 = nn.Upsample(scale_factor=16,mode='trilinear')
self.upscore4 = nn.Upsample(scale_factor=8,mode='trilinear')
self.upscore3 = nn.Upsample(scale_factor=4,mode='trilinear')
self.upscore2 = nn.Upsample(scale_factor=2, mode='trilinear')
# DeepSup
self.outconv1 = nn.Conv3d(self.UpChannels, n_classes, 3, padding=1)
self.outconv2 = nn.Conv3d(self.UpChannels, n_classes, 3, padding=1)
self.outconv3 = nn.Conv3d(self.UpChannels, n_classes, 3, padding=1)
self.outconv4 = nn.Conv3d(self.UpChannels, n_classes, 3, padding=1)
self.outconv5 = nn.Conv3d(filters[4], n_classes, 3, padding=1)
self.conv1x1 = nn.Conv3d(self.UpChannels, n_classes, kernel_size=1, stride=1, padding=0)
def forward(self, x):
## -------------Encoder-------------
h1 = self.block_1_2_left(self.block_1_1_left(x))
h2 = self.pool_1(h1)
h2 = self.block_2_3_left(self.block_2_2_left(self.block_2_1_left(h2)))
h3 = self.pool_2(h2)
h3 = self.block_3_3_left(self.block_3_2_left(self.block_3_1_left(h3)))
h4 = self.pool_3(h3)
h4 = self.block_4_3_left(self.block_4_2_left(self.block_4_1_left(h4)))
h5 = self.pool_4(h4)
hd5 = self.block_5_3_left(self.block_5_2_left(self.block_5_1_left(h5)))
## -------------Decoder-------------
h1_PT_hd4 = self.h1_PT_hd4_conv(self.h1_PT_hd4(h1))
h2_PT_hd4 = self.h2_PT_hd4_conv(self.h2_PT_hd4(h2))
h3_PT_hd4 = self.h3_PT_hd4_conv(self.h3_PT_hd4(h3))
h4_Cat_hd4 = self.h4_Cat_hd4_conv(h4)
hd5_UT_hd4 = self.hd5_UT_hd4_conv(self.hd5_UT_hd4(hd5))
hd4 = self.conv4d_1(torch.cat((h1_PT_hd4, h2_PT_hd4, h3_PT_hd4, h4_Cat_hd4, hd5_UT_hd4), 1))
h1_PT_hd3 = self.h1_PT_hd3_conv(self.h1_PT_hd3(h1))
h2_PT_hd3 = self.h2_PT_hd3_conv(self.h2_PT_hd3(h2))
h3_Cat_hd3 = self.h3_Cat_hd3_conv(h3)
hd4_UT_hd3 = self.hd4_UT_hd3_conv(self.hd4_UT_hd3(hd4))
hd5_UT_hd3 = self.hd5_UT_hd3_conv(self.hd5_UT_hd3(hd5))
hd3 = self.conv3d_1(torch.cat((h1_PT_hd3, h2_PT_hd3, h3_Cat_hd3, hd4_UT_hd3, hd5_UT_hd3), 1))
h1_PT_hd2 = self.h1_PT_hd2_conv(self.h1_PT_hd2(h1))
h2_Cat_hd2 = self.h2_Cat_hd2_conv(h2)
hd3_UT_hd2 = self.hd3_UT_hd2_conv(self.hd3_UT_hd2(hd3))
hd4_UT_hd2 = self.hd4_UT_hd2_conv(self.hd4_UT_hd2(hd4))
hd5_UT_hd2 = self.hd5_UT_hd2_conv(self.hd5_UT_hd2(hd5))
hd2 = self.conv2d_1(torch.cat((h1_PT_hd2, h2_Cat_hd2, hd3_UT_hd2, hd4_UT_hd2, hd5_UT_hd2), 1))
h1_Cat_hd1 = self.h1_Cat_hd1_conv(h1)
hd2_UT_hd1 = self.hd2_UT_hd1_conv(self.hd2_UT_hd1(hd2))
hd3_UT_hd1 = self.hd3_UT_hd1_conv(self.hd3_UT_hd1(hd3))
hd4_UT_hd1 = self.hd4_UT_hd1_conv(self.hd4_UT_hd1(hd4))
hd5_UT_hd1 = self.hd5_UT_hd1_conv(self.hd5_UT_hd1(hd5))
hd1 = self.conv1d_1(torch.cat((h1_Cat_hd1, hd2_UT_hd1, hd3_UT_hd1, hd4_UT_hd1, hd5_UT_hd1), 1))
d5 = self.outconv5(hd5)
d5 = self.upscore5(d5)
d4 = self.outconv4(hd4)
d4 = self.upscore4(d4)
d3 = self.outconv3(hd3)
d3 = self.upscore3(d3)
d2 = self.outconv2(hd2)
d2 = self.upscore2(d2)
d1 = self.conv1x1(hd1)
return F.sigmoid(d1)
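# Shape note (sketch): for an input of [b, 2, D, H, W] with D, H, W divisible by 16, every decoder
# stage is upsampled back to full resolution, so the returned sigmoid map d1 is [b, n_classes, D, H, W].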
``` |
{
"source": "Jianrong-Lu/MONAI",
"score": 2
} |
#### File: monai/bundle/config_parser.py
```python
import importlib
import json
import re
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, Optional, Sequence, Tuple, Union
from monai.bundle.config_item import ComponentLocator, ConfigComponent, ConfigExpression, ConfigItem
from monai.bundle.reference_resolver import ReferenceResolver
from monai.bundle.utils import ID_SEP_KEY, MACRO_KEY
from monai.config import PathLike
from monai.utils import ensure_tuple, look_up_option, optional_import
yaml, _ = optional_import("yaml")
__all__ = ["ConfigParser"]
class ConfigParser:
"""
The primary configuration parser. It traverses a structured config (in the form of nested Python dict or list),
    creates ``ConfigItem``, and assigns unique IDs according to the structures.
This class provides convenient access to the set of ``ConfigItem`` of the config by ID.
A typical workflow of config parsing is as follows:
- Initialize ``ConfigParser`` with the ``config`` source.
- Call ``get_parsed_content()`` to get expected component with `id`.
.. code-block:: python
from monai.bundle import ConfigParser
config = {
"my_dims": 2,
"dims_1": "$@my_dims + 1",
"my_xform": {"_target_": "LoadImage"},
"my_net": {"_target_": "BasicUNet", "spatial_dims": "@dims_1", "in_channels": 1, "out_channels": 4},
"trainer": {"_target_": "SupervisedTrainer", "network": "@my_net", "preprocessing": "@my_xform"}
}
# in the example $@my_dims + 1 is an expression, which adds 1 to the value of @my_dims
parser = ConfigParser(config)
# get/set configuration content, the set method should happen before calling parse()
print(parser["my_net"]["in_channels"]) # original input channels 1
parser["my_net"]["in_channels"] = 4 # change input channels to 4
print(parser["my_net"]["in_channels"])
# instantiate the network component
parser.parse(True)
net = parser.get_parsed_content("my_net", instantiate=True)
print(net)
# also support to get the configuration content of parsed `ConfigItem`
trainer = parser.get_parsed_content("trainer", instantiate=False)
print(trainer)
Args:
config: input config source to parse.
excludes: when importing modules to instantiate components,
excluding components from modules specified in ``excludes``.
globals: pre-import packages as global variables to ``ConfigExpression``,
so that expressions, for example, ``"$monai.data.list_data_collate"`` can use ``monai`` modules.
The current supported globals and alias names are
``{"monai": "monai", "torch": "torch", "np": "numpy", "numpy": "numpy"}``.
These are MONAI's minimal dependencies.
See also:
- :py:class:`monai.bundle.ConfigItem`
- :py:class:`monai.bundle.scripts.run`
"""
suffixes = ("json", "yaml", "yml")
suffix_match = rf".*\.({'|'.join(suffixes)})"
path_match = rf"({suffix_match}$)"
meta_key = "_meta_" # field key to save metadata
def __init__(
self,
config: Any = None,
excludes: Optional[Union[Sequence[str], str]] = None,
globals: Optional[Dict[str, Any]] = None,
):
self.config = None
self.globals: Dict[str, Any] = {}
globals = {"monai": "monai", "torch": "torch", "np": "numpy", "numpy": "numpy"} if globals is None else globals
if globals is not None:
for k, v in globals.items():
self.globals[k] = importlib.import_module(v) if isinstance(v, str) else v
self.locator = ComponentLocator(excludes=excludes)
self.ref_resolver = ReferenceResolver()
if config is None:
config = {self.meta_key: {}}
self.set(config=config)
def __repr__(self):
return f"{self.config}"
def __getitem__(self, id: Union[str, int]):
"""
Get the config by id.
Args:
id: id of the ``ConfigItem``, ``"#"`` in id are interpreted as special characters to
go one level further into the nested structures.
Use digits indexing from "0" for list or other strings for dict.
For example: ``"xform#5"``, ``"net#channels"``. ``""`` indicates the entire ``self.config``.
"""
if id == "":
return self.config
config = self.config
for k in str(id).split(self.ref_resolver.sep):
if not isinstance(config, (dict, list)):
raise ValueError(f"config must be dict or list for key `{k}`, but got {type(config)}: {config}.")
indexing = k if isinstance(config, dict) else int(k)
config = config[indexing]
return config
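    # Example (sketch): for config {"net": {"channels": [16, 32]}}, parser["net#channels#0"]
    # walks dict key "net", then dict key "channels", then list index 0, and returns 16.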
def __setitem__(self, id: Union[str, int], config: Any):
"""
Set config by ``id``. Note that this method should be used before ``parse()`` or ``get_parsed_content()``
to ensure the updates are included in the parsed content.
Args:
id: id of the ``ConfigItem``, ``"#"`` in id are interpreted as special characters to
go one level further into the nested structures.
Use digits indexing from "0" for list or other strings for dict.
For example: ``"xform#5"``, ``"net#channels"``. ``""`` indicates the entire ``self.config``.
config: config to set at location ``id``.
"""
if id == "":
self.config = config
self.ref_resolver.reset()
return
keys = str(id).split(self.ref_resolver.sep)
# get the last parent level config item and replace it
last_id = self.ref_resolver.sep.join(keys[:-1])
conf_ = self[last_id]
indexing = keys[-1] if isinstance(conf_, dict) else int(keys[-1])
conf_[indexing] = config
self.ref_resolver.reset()
return
def get(self, id: str = "", default: Optional[Any] = None):
"""
Get the config by id.
Args:
id: id to specify the expected position. See also :py:meth:`__getitem__`.
default: default value to return if the specified ``id`` is invalid.
"""
try:
return self[id]
except KeyError:
return default
def set(self, config: Any, id: str = ""):
"""
Set config by ``id``. See also :py:meth:`__setitem__`.
"""
self[id] = config
def parse(self, reset: bool = True):
"""
Recursively resolve `self.config` to replace the macro tokens with target content.
Then recursively parse the config source, add every item as ``ConfigItem`` to the reference resolver.
Args:
reset: whether to reset the ``reference_resolver`` before parsing. Defaults to `True`.
"""
if reset:
self.ref_resolver.reset()
self.resolve_macro()
self._do_parse(config=self.get())
def get_parsed_content(self, id: str = "", **kwargs):
"""
Get the parsed result of ``ConfigItem`` with the specified ``id``.
- If the item is ``ConfigComponent`` and ``instantiate=True``, the result is the instance.
- If the item is ``ConfigExpression`` and ``eval_expr=True``, the result is the evaluated output.
- Else, the result is the configuration content of `ConfigItem`.
Args:
id: id of the ``ConfigItem``, ``"#"`` in id are interpreted as special characters to
go one level further into the nested structures.
Use digits indexing from "0" for list or other strings for dict.
For example: ``"xform#5"``, ``"net#channels"``. ``""`` indicates the entire ``self.config``.
kwargs: additional keyword arguments to be passed to ``_resolve_one_item``.
Currently support ``reset`` (for parse), ``instantiate`` and ``eval_expr``. All defaulting to True.
"""
if not self.ref_resolver.is_resolved():
# not parsed the config source yet, parse it
self.parse(kwargs.get("reset", True))
return self.ref_resolver.get_resolved_content(id=id, **kwargs)
def read_meta(self, f: Union[PathLike, Sequence[PathLike], Dict], **kwargs):
"""
Read the metadata from specified JSON or YAML file.
The metadata as a dictionary will be stored at ``self.config["_meta_"]``.
Args:
f: filepath of the metadata file, the content must be a dictionary,
if providing a list of files, wil merge the content of them.
if providing a dictionary directly, use it as metadata.
kwargs: other arguments for ``json.load`` or ``yaml.safe_load``, depends on the file format.
"""
self.set(self.load_config_files(f, **kwargs), self.meta_key)
def read_config(self, f: Union[PathLike, Sequence[PathLike], Dict], **kwargs):
"""
Read the config from specified JSON or YAML file.
The config content in the `self.config` dictionary.
Args:
f: filepath of the config file, the content must be a dictionary,
                if providing a list of files, will merge their content.
if providing a dictionary directly, use it as config.
kwargs: other arguments for ``json.load`` or ``yaml.safe_load``, depends on the file format.
"""
content = {self.meta_key: self.get(self.meta_key, {})}
content.update(self.load_config_files(f, **kwargs))
self.set(config=content)
def _do_resolve(self, config: Any):
"""
Recursively resolve the config content to replace the macro tokens with target content.
        The macro tokens start with "%" and can refer to content in another structured file, like:
``{"net": "%default_net"}``, ``{"net": "%/data/config.json#net"}``.
Args:
config: input config file to resolve.
"""
if isinstance(config, (dict, list)):
for k, v in enumerate(config) if isinstance(config, list) else config.items():
config[k] = self._do_resolve(v)
if isinstance(config, str) and config.startswith(MACRO_KEY):
path, ids = ConfigParser.split_path_id(config[len(MACRO_KEY) :])
parser = ConfigParser(config=self.get() if not path else ConfigParser.load_config_file(path))
return self._do_resolve(config=deepcopy(parser[ids]))
return config
def resolve_macro(self):
"""
Recursively resolve `self.config` to replace the macro tokens with target content.
The macro tokens are marked by a leading "%" and can refer to content in another structured file, for example:
``"%default_net"``, ``"%/data/config.json#net"``.
"""
self.set(self._do_resolve(config=deepcopy(self.get())))
def _do_parse(self, config, id: str = ""):
"""
Recursively parse the nested data in config source, add every item as `ConfigItem` to the resolver.
Args:
config: config source to parse.
id: id of the ``ConfigItem``, ``"#"`` in id are interpreted as special characters to
go one level further into the nested structures.
Use digits indexing from "0" for list or other strings for dict.
For example: ``"xform#5"``, ``"net#channels"``. ``""`` indicates the entire ``self.config``.
"""
if isinstance(config, (dict, list)):
subs = enumerate(config) if isinstance(config, list) else config.items()
for k, v in subs:
sub_id = f"{id}{self.ref_resolver.sep}{k}" if id != "" else k
self._do_parse(config=v, id=sub_id)
# copy every config item to make them independent and add them to the resolver
item_conf = deepcopy(config)
if ConfigComponent.is_instantiable(item_conf):
self.ref_resolver.add_item(ConfigComponent(config=item_conf, id=id, locator=self.locator))
elif ConfigExpression.is_expression(item_conf):
self.ref_resolver.add_item(ConfigExpression(config=item_conf, id=id, globals=self.globals))
else:
self.ref_resolver.add_item(ConfigItem(config=item_conf, id=id))
@classmethod
def load_config_file(cls, filepath: PathLike, **kwargs):
"""
Load config file with specified file path (currently support JSON and YAML files).
Args:
filepath: path of target file to load, supported postfixes: `.json`, `.yml`, `.yaml`.
kwargs: other arguments for ``json.load`` or ``yaml.safe_load``, depends on the file format.
"""
_filepath: str = str(Path(filepath))
if not re.compile(cls.path_match, re.IGNORECASE).findall(_filepath):
raise ValueError(f'unknown file input: "{filepath}"')
with open(_filepath) as f:
if _filepath.lower().endswith(cls.suffixes[0]):
return json.load(f, **kwargs)
if _filepath.lower().endswith(cls.suffixes[1:]):
return yaml.safe_load(f, **kwargs)
raise ValueError(f"only support JSON or YAML config file so far, got name {_filepath}.")
@classmethod
def load_config_files(cls, files: Union[PathLike, Sequence[PathLike], dict], **kwargs) -> dict:
"""
Load config files into a single config dict.
Args:
files: path of target files to load, supported postfixes: `.json`, `.yml`, `.yaml`.
kwargs: other arguments for ``json.load`` or ``yaml.safe_load``, depends on the file format.
"""
if isinstance(files, dict): # already a config dict
return files
content = {}
for i in ensure_tuple(files):
content.update(cls.load_config_file(i, **kwargs))
return content
@classmethod
def export_config_file(cls, config: Dict, filepath: PathLike, fmt="json", **kwargs):
"""
Export the config content to the specified file path (currently support JSON and YAML files).
Args:
config: source config content to export.
filepath: target file path to save.
fmt: format of config content, currently support ``"json"`` and ``"yaml"``.
kwargs: other arguments for ``json.dump`` or ``yaml.safe_dump``, depends on the file format.
"""
_filepath: str = str(Path(filepath))
writer = look_up_option(fmt.lower(), {"json", "yaml"})
with open(_filepath, "w") as f:
if writer == "json":
return json.dump(config, f, **kwargs)
if writer == "yaml":
return yaml.safe_dump(config, f, **kwargs)
raise ValueError(f"only support JSON or YAML config file so far, got {writer}.")
@classmethod
def split_path_id(cls, src: str) -> Tuple[str, str]:
"""
Split `src` string into two parts: a config file path and component id.
The file path should end with `(json|yaml|yml)`. The component id should be separated by `#` if it exists.
If no path or no id, return "".
Args:
src: source string to split.
"""
result = re.compile(rf"({cls.suffix_match}(?=(?:{ID_SEP_KEY}.*)|$))", re.IGNORECASE).findall(src)
if not result:
return "", src # the src is a pure id
path_name = result[0][0] # at most one path_name
_, ids = src.rsplit(path_name, 1)
return path_name, ids[len(ID_SEP_KEY) :] if ids.startswith(ID_SEP_KEY) else ""
```
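A minimal usage sketch for the parser above (the config keys and values are illustrative, not taken from the source): `#`-separated ids index into nested structures, and `%` macros copy content from another id before parsing.
```python
from monai.bundle import ConfigParser  # assumed import path for the class above

parser = ConfigParser(config={
    "net": {"in_features": 4, "out_features": 2},
    "copy_of_in": "%net#in_features",  # "%" macro: copied from the referenced id
})
parser.parse()
print(parser.get_parsed_content("net#in_features"))  # 4, nested id lookup via "#"
print(parser.get_parsed_content("copy_of_in"))       # 4, macro resolved before parsing
```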
#### File: monai/metrics/rocauc.py
```python
import warnings
from typing import Union, cast
import numpy as np
import torch
from monai.utils import Average, look_up_option
from .metric import CumulativeIterationMetric
class ROCAUCMetric(CumulativeIterationMetric):
"""
Computes Area Under the Receiver Operating Characteristic Curve (ROC AUC). Referring to:
`sklearn.metrics.roc_auc_score <https://scikit-learn.org/stable/modules/generated/
sklearn.metrics.roc_auc_score.html#sklearn.metrics.roc_auc_score>`_.
The inputs `y_pred` and `y` can each be a list of `channel-first` Tensors or a `batch-first` Tensor.
Args:
average: {``"macro"``, ``"weighted"``, ``"micro"``, ``"none"``}
Type of averaging performed if not binary classification.
Defaults to ``"macro"``.
- ``"macro"``: calculate metrics for each label, and find their unweighted mean.
This does not take label imbalance into account.
- ``"weighted"``: calculate metrics for each label, and find their average,
weighted by support (the number of true instances for each label).
- ``"micro"``: calculate metrics globally by considering each element of the label
indicator matrix as a label.
- ``"none"``: the scores for each class are returned.
"""
def __init__(self, average: Union[Average, str] = Average.MACRO) -> None:
super().__init__()
self.average = average
def _compute_tensor(self, y_pred: torch.Tensor, y: torch.Tensor): # type: ignore
return y_pred, y
def aggregate(self):
"""
Because the AUC metric needs to be computed on the overall data, users usually accumulate `y_pred` and `y`
from every iteration, then execute the real computation and reduction on the accumulated data.
"""
y_pred, y = self.get_buffer()
# compute final value and do metric reduction
if not isinstance(y_pred, torch.Tensor) or not isinstance(y, torch.Tensor):
raise ValueError("y_pred and y must be PyTorch Tensor.")
return compute_roc_auc(y_pred=y_pred, y=y, average=self.average)
def _calculate(y_pred: torch.Tensor, y: torch.Tensor) -> float:
if not (y.ndimension() == y_pred.ndimension() == 1 and len(y) == len(y_pred)):
raise AssertionError("y and y_pred must be 1 dimension data with same length.")
y_unique = y.unique()
if len(y_unique) == 1:
warnings.warn(f"y values can not be all {y_unique.item()}, skip AUC computation and return `Nan`.")
return float("nan")
if not y_unique.equal(torch.tensor([0, 1], dtype=y.dtype, device=y.device)):
warnings.warn(f"y values must be 0 or 1, but in {y_unique.tolist()}, skip AUC computation and return `Nan`.")
return float("nan")
n = len(y)
indices = y_pred.argsort()
y = y[indices].cpu().numpy()
y_pred = y_pred[indices].cpu().numpy()
nneg = auc = tmp_pos = tmp_neg = 0.0
for i in range(n):
y_i = cast(float, y[i])
if i + 1 < n and y_pred[i] == y_pred[i + 1]:
tmp_pos += y_i
tmp_neg += 1 - y_i
continue
if tmp_pos + tmp_neg > 0:
tmp_pos += y_i
tmp_neg += 1 - y_i
nneg += tmp_neg
auc += tmp_pos * (nneg - tmp_neg / 2)
tmp_pos = tmp_neg = 0
continue
if y_i == 1:
auc += nneg
else:
nneg += 1
return auc / (nneg * (n - nneg))
def compute_roc_auc(y_pred: torch.Tensor, y: torch.Tensor, average: Union[Average, str] = Average.MACRO):
"""Computes Area Under the Receiver Operating Characteristic Curve (ROC AUC). Referring to:
`sklearn.metrics.roc_auc_score <https://scikit-learn.org/stable/modules/generated/
sklearn.metrics.roc_auc_score.html#sklearn.metrics.roc_auc_score>`_.
Args:
y_pred: input data to compute, typical classification model output.
the first dim must be batch, if multi-classes, it must be in One-Hot format.
for example: shape `[16]` or `[16, 1]` for a binary data, shape `[16, 2]` for 2 classes data.
y: ground truth to compute ROC AUC metric, the first dim must be batch.
if multi-classes, it must be in One-Hot format.
for example: shape `[16]` or `[16, 1]` for a binary data, shape `[16, 2]` for 2 classes data.
average: {``"macro"``, ``"weighted"``, ``"micro"``, ``"none"``}
Type of averaging performed if not binary classification.
Defaults to ``"macro"``.
- ``"macro"``: calculate metrics for each label, and find their unweighted mean.
This does not take label imbalance into account.
- ``"weighted"``: calculate metrics for each label, and find their average,
weighted by support (the number of true instances for each label).
- ``"micro"``: calculate metrics globally by considering each element of the label
indicator matrix as a label.
- ``"none"``: the scores for each class are returned.
Raises:
ValueError: When ``y_pred`` dimension is not one of [1, 2].
ValueError: When ``y`` dimension is not one of [1, 2].
ValueError: When ``average`` is not one of ["macro", "weighted", "micro", "none"].
Note:
ROCAUC expects y to consist of 0's and 1's. `y_pred` must be either probability estimates or confidence values.
"""
y_pred_ndim = y_pred.ndimension()
y_ndim = y.ndimension()
if y_pred_ndim not in (1, 2):
raise ValueError(
"Predictions should be of shape (batch_size, num_classes) or (batch_size, ), got {y_pred.shape}."
)
if y_ndim not in (1, 2):
raise ValueError("Targets should be of shape (batch_size, num_classes) or (batch_size, ), got {y.shape}.")
if y_pred_ndim == 2 and y_pred.shape[1] == 1:
y_pred = y_pred.squeeze(dim=-1)
y_pred_ndim = 1
if y_ndim == 2 and y.shape[1] == 1:
y = y.squeeze(dim=-1)
if y_pred_ndim == 1:
return _calculate(y_pred, y)
if y.shape != y_pred.shape:
raise ValueError("data shapes of y_pred and y do not match, got {y_pred.shape} and {y.shape}.")
average = look_up_option(average, Average)
if average == Average.MICRO:
return _calculate(y_pred.flatten(), y.flatten())
y, y_pred = y.transpose(0, 1), y_pred.transpose(0, 1)
auc_values = [_calculate(y_pred_, y_) for y_pred_, y_ in zip(y_pred, y)]
if average == Average.NONE:
return auc_values
if average == Average.MACRO:
return np.mean(auc_values)
if average == Average.WEIGHTED:
weights = [sum(y_) for y_ in y]
return np.average(auc_values, weights=weights)
raise ValueError(f'Unsupported average: {average}, available options are ["macro", "weighted", "micro", "none"].')
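# --- added usage sketch (not part of the original module) ---------------------
# With the functions above, a tiny binary example could look like:
#   y_pred = torch.tensor([0.1, 0.9, 0.8, 0.3])
#   y = torch.tensor([0, 1, 1, 0])
#   compute_roc_auc(y_pred, y)  # -> 1.0, every positive is ranked above every negative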
``` |
{
"source": "jiansenzheng/python-nats",
"score": 2
} |
#### File: python-nats/test/fixtures.py
```python
import shutil
import subprocess
import tempfile
import logging
import time
import hashlib
import uuid
from OpenSSL import crypto
class NatsServerHelper(object):
def __init__(
self,
base_directory,
proc_name='nats-server'
):
self.base_directory = base_directory
self.proc_name = proc_name
self.daemon = None
self.work_dir = None
def run(self, proc_args=None):
log = logging.getLogger()
directory = tempfile.mkdtemp(
dir = self.base_directory,
prefix = 'python-nats')
log.debug('Created directory %s' % directory)
daemon_args = [
self.proc_name
]
if proc_args:
daemon_args.extend(proc_args)
self.daemon = subprocess.Popen(daemon_args)
self.work_dir = directory
log.debug('Started %d' % self.daemon.pid)
log.debug('Params: %s' % daemon_args)
time.sleep(2)
def stop(self):
log = logging.getLogger()
dir, process = self.work_dir, self.daemon
process.kill()
time.sleep(2)
log.debug('Killed nats pid:%d', process.pid)
shutil.rmtree(dir)
log.debug('Removed directory %s' % dir)
```
#### File: python-nats/test/test_integration.py
```python
from fixtures import NatsServerHelper
import os,sys
import unittest
import tempfile
import shutil
LIB_PATH = os.path.split(os.getcwd())[0]
NATS_PATH = '{}/vendor/nats-server/bin'.format(LIB_PATH)
sys.path.append(LIB_PATH)
sys.path.append(NATS_PATH)
from mock import MagicMock, patch
from nats.client import NatsClient
class NatsClientTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
program = cls._get_exe()
cls.directory = tempfile.mkdtemp(prefix='python-nats')
cls.processHelper = NatsServerHelper(
cls.directory,
proc_name=program)
cls.processHelper.run(proc_args=["-c", "resources/nats_config.yml"])  # pass args as a list so extend() adds whole tokens
addr = "nats://127.0.0.1:4222"
cls.client = NatsClient(uris=addr)
@classmethod
def tearDownClass(cls):
cls.processHelper.stop()
shutil.rmtree(cls.directory)
@classmethod
def _is_exe(cls, fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
@classmethod
def _get_exe(cls):
PROGRAM = 'nats-server'
program_path = None
for path in sys.path:
path = path.strip('"')
exe_file = os.path.join(path, PROGRAM)
if cls._is_exe(exe_file):
program_path = exe_file
break
print(program_path)
if not program_path:
#raise Exception("Nats-Server not in path, skip integration test.")
sys.exit(0)
return program_path
class TestSimple(NatsClientTest):
def test_publish(self):
print "skip"
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jiansfoggy/16-720B",
"score": 3
} |
#### File: hw1/code/visual_words.py
```python
import numpy as np
import multiprocessing as mp
import imageio
import scipy.ndimage
import skimage.color
import skimage.io
import sklearn.cluster
import scipy.spatial.distance
import os,time
import matplotlib.pyplot as plt
import util
import random
def extract_filter_responses(image):
'''
Extracts the filter responses for the given image.
[input]
* image: numpy.ndarray of shape (H,W) or (H,W,3)
[output]
* filter_responses: numpy.ndarray of shape (H,W,3F)
'''
scales = [1,2,4,8,8*np.sqrt(2)]
F = 20
image = image.astype('float')/255
# make grayscale to color
if image.ndim == 2:
image = np.repeat(image[:,:,np.newaxis], 3, axis=2)
filter_responses = np.zeros((image.shape[0], image.shape[1], image.shape[2]*F))
# convert to Lab space
image = skimage.color.rgb2lab(image)
i = -1
for scale in scales:
f1 = scipy.ndimage.gaussian_filter(image[:,:,0], sigma=scale)
i+=1
filter_responses[:,:,i] = f1
f2 = scipy.ndimage.gaussian_filter(image[:,:,1], sigma=scale)
i+=1
filter_responses[:,:,i] = f2
f3 = scipy.ndimage.gaussian_filter(image[:,:,2], sigma=scale)
i+=1
filter_responses[:,:,i] = f3
f1 = scipy.ndimage.gaussian_laplace(image[:,:,0], sigma=scale)
i+=1
filter_responses[:,:,i] = f1
f2 = scipy.ndimage.gaussian_laplace(image[:,:,1], sigma=scale)
i+=1
filter_responses[:,:,i] = f2
f3 = scipy.ndimage.gaussian_laplace(image[:,:,2], sigma=scale)
i+=1
filter_responses[:,:,i] = f3
f1 = scipy.ndimage.gaussian_filter(image[:,:,0], sigma=scale, order = [0,1])
i+=1
filter_responses[:,:,i] = f1
f2 = scipy.ndimage.gaussian_filter(image[:,:,1], sigma=scale, order = [0,1])
i+=1
filter_responses[:,:,i] = f2
f3 = scipy.ndimage.gaussian_filter(image[:,:,2], sigma=scale, order = [0,1])
i+=1
filter_responses[:,:,i] = f3
f1 = scipy.ndimage.gaussian_filter(image[:,:,0], sigma=scale, order = (1,0))
i+=1
filter_responses[:,:,i] = f1
f2 = scipy.ndimage.gaussian_filter(image[:,:,1], sigma=scale, order = (1,0))
i+=1
filter_responses[:,:,i] = f2
f3 = scipy.ndimage.gaussian_filter(image[:,:,2], sigma=scale, order = (1,0))
i+=1
filter_responses[:,:,i] = f3
return filter_responses
def get_visual_words(image,dictionary):
'''
Compute visual words mapping for the given image using the dictionary of visual words.
[input]
* image: numpy.ndarray of shape (H,W) or (H,W,3)
[output]
* wordmap: numpy.ndarray of shape (H,W)
'''
if image.ndim == 3 and image.shape[2] >= 3:
image = image[:,:,:3]
response = extract_filter_responses(image)
width, height, depth = response.shape
image_height, image_width, image_depth = image.shape
response_new = np.reshape(response, (width*height, response.shape[-1]))
distances = scipy.spatial.distance.cdist(response_new, dictionary)
distances = np.argmin(distances, axis=1)
wordmap = np.reshape(distances, (image_height, image_width))
return wordmap
def compute_dictionary_one_image(args):
'''
Extracts random samples of the dictionary entries from an image.
This is a function run by a subprocess.
[input]
* i: index of training image
* alpha: number of random samples
* image_path: path of image file
* time_start: time stamp of start time
[saved]
* sampled_response: numpy.ndarray of shape (alpha,3F)
'''
i,alpha,image_path = args
image = skimage.io.imread('../data/' + image_path)
image = image.astype('float')/255
if image.ndim == 3 and image.shape[2] >= 3:
image = image[:,:,:3]
response = extract_filter_responses(image)
filter_responses = np.random.permutation(response.reshape(image.shape[0]*image.shape[1], -1))[:alpha]
return filter_responses
def compute_dictionary(num_workers=2):
'''
Creates the dictionary of visual words by clustering using k-means.
[input]
* num_workers: number of workers to process in parallel
[saved]
* dictionary: numpy.ndarray of shape (K,3F)
'''
train_data = np.load("../data/train_data.npz")
F = 20
T = train_data['image_names'].shape[0]
alpha = 200
k = 100
pool = mp.Pool(num_workers)
# get all responses
filter_responses = []
for i in range(0, T):
# print (i)
args = [(i, alpha, train_data['image_names'][i][0])]
filter_responses.append(pool.apply_async(compute_dictionary_one_image, args))
# stack them to get a filtered reponses matrix of size (alpha*T,3*F)
features = []
for result in filter_responses:
features.append(result.get())
a = features[0]
for i in range(1, len(features)):
a = np.concatenate((a, features[i]), axis=0)
# save output features
np.save('../outputs/filtered_responses.npy', a)
# perform k-means clustering
kmeans = sklearn.cluster.KMeans(n_clusters=k, n_jobs=-1).fit(a)
dictionary = kmeans.cluster_centers_
print (dictionary.shape)
np.save('../outputs/dictionary.npy', dictionary)
```
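A hedged driver sketch for the module above; the image path is a placeholder and it assumes `compute_dictionary` has already written `../outputs/dictionary.npy`.
```python
# Illustrative only; paths are placeholders.
import numpy as np
import skimage.io

import visual_words

dictionary = np.load('../outputs/dictionary.npy')           # (K, 3F) cluster centers
image = skimage.io.imread('../data/example_image.jpg')      # placeholder image path
wordmap = visual_words.get_visual_words(image, dictionary)  # (H, W) map of word indices
print(wordmap.shape)
```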
#### File: HW3/code/InverseCompositionAffine.py
```python
import numpy as np
from scipy.interpolate import RectBivariateSpline
def validate_coords(y, x, ny, nx):
a = np.logical_and(np.logical_and(x>=0, x<=nx-1), np.logical_and(y>=0, y<=ny-1))
return a.nonzero()[0]
def InverseCompositionAffine(It, It1):
# Input:
# It: template image
# It1: Current image
# Output:
# M: the Affine warp matrix [2x3 numpy array]
# put your implementation here
M = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
h,w = It.shape
x = np.arange(w)
y = np.arange(h)
It_y, It_x = np.gradient(It)
It_spline = RectBivariateSpline(y, x, It)
It1_spline = RectBivariateSpline(y, x, It1)
It_x_spline = RectBivariateSpline(y, x, It_x)
It_y_spline = RectBivariateSpline(y, x, It_y)
xt,yt = np.meshgrid(x, y)
xt = np.reshape(xt, (-1,1))
yt = np.reshape(yt, (-1,1))
template = np.array(It_spline.ev(yt,xt).tolist())
template = template.ravel()
# print ("Template shape is {0}".format(template.shape))
patch_x = np.reshape(np.array(It_x_spline.ev(yt,xt).tolist()), (-1,1))
patch_y = np.reshape(np.array(It_y_spline.ev(yt,xt).tolist()), (-1,1))
A = np.hstack((np.multiply(yt,patch_y), np.multiply(xt,patch_y), patch_y, np.multiply(yt,patch_x), np.multiply(xt,patch_x), patch_x))
a = np.ones((xt.shape[0],1))
xy1 = np.hstack((yt,xt,a))
tol = 0.1
iter1 = 0
while (True):
affine_homogenous = np.matmul(M,xy1.T)
valid_coords = validate_coords(affine_homogenous[0], affine_homogenous[1], h, w)
C = A[valid_coords,:]
H = np.matmul(C.T,C)
yi = affine_homogenous[0, valid_coords]
xi = affine_homogenous[1, valid_coords]
image = np.array(It1_spline.ev(yi, xi).tolist())
temp_template = template[valid_coords]
b = image - temp_template
b = np.matmul(C.T, b)
deltap = np.linalg.lstsq(H,b,rcond=None)[0]
deltaM = np.reshape(deltap, (2,3))
M = M - deltaM
a = np.linalg.norm(deltaM)
if a < tol:
# print (iter1)
break
iter1 += 1
return M
```
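A brief, hedged example of calling the tracker above on two consecutive grayscale frames; the sequence file is a placeholder.
```python
# Illustrative only; assumes an (H, W, T) grayscale sequence with values in [0, 1].
import numpy as np

from InverseCompositionAffine import InverseCompositionAffine

frames = np.load('../data/example_sequence.npy')  # placeholder path
M = InverseCompositionAffine(frames[:, :, 0], frames[:, :, 1])
print(M)  # 2x3 affine warp estimate
```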
#### File: HW3/code/LucasKanadeBasis.py
```python
import numpy as np
from scipy.interpolate import RectBivariateSpline
def LucasKanadeBasis(It, It1, rect, bases):
# Input:
# It: template image
# It1: Current image
# rect: Current position of the car
# (top left, bot right coordinates)
# bases: [n, m, k] where nxm is the size of the template.
# Output:
# p: movement vector [dp_x, dp_y]
# Put your implementation here
p0 = np.zeros(2)
p = p0
# get rectangle coordinates
x1 = rect[0]
x2 = rect[2]
y1 = rect[1]
y2 = rect[3]
tol = 0.5
h, w = It1.shape
x = np.arange(w)
y = np.arange(h)
# get gradient of image
It_y, It_x = np.gradient(It1)
# interpolating code to get I_t
It_spline = RectBivariateSpline(y,x,It)
It1_spline = RectBivariateSpline(y,x,It1)
It_x_spline = RectBivariateSpline(y,x,It_x)
It_y_spline = RectBivariateSpline(y,x,It_y)
# get the template
x_temp = np.arange(x1,x2+0.5)
y_temp = np.arange(y1,y2+0.5)
x,y = np.meshgrid(x_temp, y_temp)
template = np.array(It_spline.ev(y.flatten(), x.flatten()).tolist())
num_bases = bases.shape[2]
shape1 = bases[:,:,0].shape[0]*bases[:,:,0].shape[1]
B = np.zeros((shape1, num_bases))
for i in range(0, num_bases):
base = bases[:,:,i]
base = base.flatten()
B[:,i] = base
M = np.subtract(np.identity(shape1), np.matmul(B, B.T))
iter1 = 0
while (True):
xi_temp = np.arange(x1+p[0], x2+p[0]+0.5)
yi_temp = np.arange(y1+p[1], y2+p[1]+0.5)
xi, yi = np.meshgrid(xi_temp, yi_temp)
patch_x = np.reshape(np.array(It_x_spline.ev(yi.flatten(), xi.flatten()).tolist()), (-1,1))
patch_y = np.reshape(np.array(It_y_spline.ev(yi.flatten(), xi.flatten()).tolist()), (-1,1))
A = np.hstack((patch_x, patch_y))
A_new = np.matmul(M,A)
image = np.array(It1_spline.ev(yi.flatten(), xi.flatten()).tolist())
b = template - image
b_new = np.matmul(M,b)
deltap = np.linalg.lstsq(A_new,b_new,rcond=None)[0]
p = p + deltap
a = np.linalg.norm(deltap)
# print (a)
if a < tol:
# print (iter)
break
iter1 += 1
return p
```
#### File: HW4/code/findM2.py
```python
import numpy as np
import submission as sub
import helper as hp
import matplotlib.image as mpimg
def bestM2(pts1, pts2, M, K1, K2):
F = sub.eightpoint(pts1, pts2, M)
E = sub.essentialMatrix(F, K1, K2)
M1 = np.zeros((3,4))
M1[0,0] = 1
M1[1,1] = 1
M1[2,2] = 1
C1 = np.matmul(K1, M1)
P = []
error = []
num_pos = []
Marray = hp.camera2(E)
h,w,d = Marray.shape
for i in range(0, d):
C2 = np.matmul(K2, Marray[:,:,i])
Pi, erri = sub.triangulate(C1, pts1, C2, pts2)
P.append(Pi)
error.append(erri)
ind = np.where(Pi[:,2] > 0)
num_pos.append(len(ind[0]))
P = np.stack(P, axis=-1)
correct = np.equal(num_pos, P.shape[0])
ind = np.where(correct)[0][0]
M2 = Marray[:,:,ind]
M2 = np.reshape(M2, (M2.shape[0],M2.shape[1]))
P = P[:,:,ind]
P = np.reshape(P, (P.shape[0],P.shape[1]))
C2 = np.matmul(K2,M2)
return M1,C1,M2,C2,P
if __name__ == "__main__":
im1 = mpimg.imread('../data/im1.png')
im2 = mpimg.imread('../data/im2.png')
h,w,d = im1.shape
M = max(h,w)
data = np.load('../data/some_corresp.npz')
pts1 = data['pts1']
pts2 = data['pts2']
data = np.load('../data/intrinsics.npz')
K1 = data['K1']
K2 = data['K2']
M1, C1, M2, C2, P = bestM2(pts1, pts2, M, K1, K2)
np.savez('../results/files/q3_3.npz', M2=M2, C2=C2, P=P)
```
#### File: HW5/code/run_q2.py
```python
import numpy as np
from nn import *
from util import *
import copy
# fake data
# feel free to plot it in 2D
# what do you think these 4 classes are?
g0 = np.random.multivariate_normal([3.6,40],[[0.05,0],[0,10]],10)
g1 = np.random.multivariate_normal([3.9,10],[[0.01,0],[0,5]],10)
g2 = np.random.multivariate_normal([3.4,30],[[0.25,0],[0,5]],10)
g3 = np.random.multivariate_normal([2.0,10],[[0.5,0],[0,10]],10)
x = np.vstack([g0,g1,g2,g3])
# we will do XW + B
# that implies that the data is N x D
# create labels
y_idx = np.array([0 for _ in range(10)] + [1 for _ in range(10)] + [2 for _ in range(10)] + [3 for _ in range(10)])
# print (y_idx)
# turn to one_hot
y = np.zeros((y_idx.shape[0],y_idx.max()+1))
y[np.arange(y_idx.shape[0]),y_idx] = 1
print (y.shape)
# parameters in a dictionary
params = {}
# Q 2.1
# initialize a layer
initialize_weights(2,25,params,'layer1')
initialize_weights(25,4,params,'output')
assert(params['Wlayer1'].shape == (2,25))
assert(params['blayer1'].shape == (25,))
#expect 0, [0.05 to 0.12]
print("{}, {:.2f}".format(params['blayer1'].sum(),params['Wlayer1'].std()**2))
print("{}, {:.2f}".format(params['boutput'].sum(),params['Woutput'].std()**2))
# Q 2.2.1
# implement sigmoid
test = sigmoid(np.array([-1000,1000]))
print('should be zero and one\t',test.min(),test.max())
# implement forward
h1 = forward(x,params,'layer1')
# print(h1.shape)
# # Q 2.2.2
# # implement softmax
probs = forward(h1,params,'output',softmax)
# # make sure you understand these values!
# positive, ~1, ~1, (40,4)
print(probs.min(),min(probs.sum(1)),max(probs.sum(1)),probs.shape)
# # Q 2.2.3
# # implement compute_loss_and_acc
loss, acc = compute_loss_and_acc(y, probs)
# # should be around -np.log(0.25)*40 [~55] and 0.25
# # if it is not, check softmax!
print("{}, {:.2f}".format(loss,acc))
# # here we cheat for you
# # the derivative of cross-entropy(softmax(x)) is probs - 1[correct actions]
delta1 = probs
delta1[np.arange(probs.shape[0]),y_idx] -= 1
# print (delta1)
# # we already did derivative through softmax
# # so we pass in a linear_deriv, which is just a vector of ones
# # to make this a no-op
delta2 = backwards(delta1,params,'output',linear_deriv)
# # Implement backwards!
backwards(delta2,params,'layer1',sigmoid_deriv)
# W and b should match their gradients sizes
for k,v in sorted(list(params.items())):
if 'grad' in k:
name = k.split('_')[1]
print(name,v.shape, params[name].shape)
# Q 2.4
batches = get_random_batches(x,y,5)
# print batch sizes
print([_[0].shape[0] for _ in batches])
batch_num = len(batches)
def apply_gradient(params, name, learning_rate):
W = params['W' + name]
b = params['b' + name]
grad_W = params['grad_W' + name]
grad_b = params['grad_b' + name]
W = W - learning_rate*grad_W
b = b - learning_rate*grad_b
params['W'+name] = W
params['b'+name] = b
# # WRITE A TRAINING LOOP HERE
max_iters = 500
learning_rate = 1e-3
# with default settings, you should get loss < 35 and accuracy > 75%
for itr in range(max_iters):
total_loss = 0
avg_acc = 0
for xb,yb in batches:
# forward
h1 = forward(xb, params, 'layer1')
probs = forward(h1, params, 'output', softmax)
# loss
loss, acc = compute_loss_and_acc(yb, probs)
# be sure to add loss and accuracy to epoch totals
total_loss += loss
avg_acc += acc/batch_num
# backward
delta1 = probs
yb_idx = np.argmax(yb, axis=1)
delta1[np.arange(probs.shape[0]),yb_idx] -= 1
delta2 = backwards(delta1,params,'output',linear_deriv)
backwards(delta2,params,'layer1',sigmoid_deriv)
# apply gradient
apply_gradient(params, 'output', learning_rate)
apply_gradient(params, 'layer1', learning_rate)
if itr % 100 == 0:
print("itr: {:02d} \t loss: {:.2f} \t acc : {:.2f}".format(itr,total_loss,avg_acc))
# # Q 2.5 should be implemented in this file
# # you can do this before or after training the network.
# save the old params
h1 = forward(x, params, 'layer1')
probs = forward(h1, params, 'output', softmax)
loss, acc = compute_loss_and_acc(y, probs)
delta1 = probs
yb_idx = np.argmax(y, axis=1)
delta1[np.arange(probs.shape[0]),y_idx] -= 1
delta2 = backwards(delta1,params,'output',linear_deriv)
backwards(delta2,params,'layer1',sigmoid_deriv)
params_orig = copy.deepcopy(params)
eps = 1e-6
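# (added note) The loop below is a central-difference gradient check: each weight is
# perturbed by +/- eps, the loss is recomputed, and dL/dw is approximated as
# (L(w + eps) - L(w - eps)) / (2 * eps); the numeric gradients overwrite params['grad_*']
# and are compared against the backprop gradients saved in params_orig.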
for k,v in params.items():
if '_' in k:
continue
# we have a real parameter!
# for each value inside the parameter
v_orig = v
for index, j in np.ndenumerate(v):
params[k][index] = v[index]+eps
h1 = forward(x, params, 'layer1')
probs = forward(h1, params, 'output', softmax)
lossplus, _ = compute_loss_and_acc(y, probs)
params[k][index] = v[index]-2*eps
h1 = forward(x, params, 'layer1')
probs = forward(h1, params, 'output', softmax)
lossminus, _ = compute_loss_and_acc(y, probs)
params['grad_' + k][index] = np.divide(np.subtract(lossplus,lossminus), 2*eps)
params[k][index] = v[index]+eps
total_error = 0
for k in params.keys():
if 'grad_' in k:
# relative error
err = np.abs(params[k] - params_orig[k])/np.maximum(np.abs(params[k]),np.abs(params_orig[k]))
err = err.sum()
print('{} {:.2e}'.format(k, err))
total_error += err
# should be less than 1e-4
print('total {:.2e}'.format(total_error))
``` |
{
"source": "jianshitansuantong233/3pxnet-training",
"score": 2
} |
#### File: 3pxnet-training/training/binarized_modules.py
```python
import torch
import torch.nn as nn
import math
from torch.autograd import Variable
from torch.autograd import Function
import torch.nn.functional as F
import numpy as np
import utils_own
def Binarize(tensor,quant_mode='det'):
if quant_mode=='det':
return tensor.sign()
if quant_mode=='bin':
return (tensor>=0).type(type(tensor))*2-1
else:
return tensor.add_(1).div_(2).add_(torch.rand(tensor.size()).add(-0.5)).clamp_(0,1).round().mul_(2).add_(-1)
def Ternarize(tensor, mult = 0.7, mask = None, permute_list = None, pruned = False, align = False, pack = 32):
if type(mask) == type(None):
mask = torch.ones_like(tensor)
# Fix permutation. Tensor needs to be permuted
if not pruned:
tensor_masked = utils_own.permute_from_list(tensor, permute_list)
if len(tensor_masked.size())==4:
tensor_masked = tensor_masked.permute(0,2,3,1)
if not align:
tensor_flat = torch.abs(tensor_masked.contiguous().view(-1)).contiguous()
tensor_split = torch.split(tensor_flat, pack, dim=0)
tensor_split = torch.stack(tensor_split, dim=0)
tensor_sum = torch.sum(tensor_split, dim=1)
tensor_size = tensor_sum.size(0)
tensor_sorted, _ = torch.sort(tensor_sum)
thres = tensor_sorted[int(mult*tensor_size)]
tensor_flag = torch.ones_like(tensor_sum)
tensor_flag[tensor_sum.ge(-thres) * tensor_sum.le(thres)] = 0
tensor_flag = tensor_flag.repeat(pack).reshape(pack,-1).transpose(1,0).reshape_as(tensor_masked)
else:
tensor_flat = torch.abs(tensor_masked.reshape(tensor_masked.size(0),-1)).contiguous()
tensor_split = torch.split(tensor_flat, pack, dim=1)
tensor_split = torch.stack(tensor_split, dim=1)
tensor_sum = torch.sum(tensor_split, dim=2)
tensor_size = tensor_sum.size(1)
tensor_sorted, _ = torch.sort(tensor_sum, dim=1)
tensor_sorted = torch.flip(tensor_sorted, [1])
multiplier = 32./pack
index = int(torch.ceil((1-mult)*tensor_size/multiplier)*multiplier)
thres = tensor_sorted[:, index-1].view(-1,1)
tensor_flag = torch.zeros_like(tensor_sum)
tensor_flag[tensor_sum.ge(thres)] = 1
tensor_flag[tensor_sum.le(-thres)] = 1
tensor_flag = tensor_flag.repeat(1,pack).reshape(tensor_flag.size(0),pack,-1).transpose(2,1).reshape_as(tensor_masked)
if len(tensor_masked.size())==4:
tensor_flag = tensor_flag.permute(0,3,1,2)
tensor_flag = utils_own.permute_from_list(tensor_flag, permute_list, transpose=True)
tensor_bin = tensor.sign() * tensor_flag
else:
tensor_bin = tensor.sign() * mask
return tensor_bin
class BinarizeLinear(nn.Linear):
def __init__(self, *kargs, **kwargs):
super(BinarizeLinear, self).__init__(*kargs, **kwargs)
self.register_buffer('weight_org', self.weight.data.clone())
def forward(self, input):
if (input.size(1) != 784) and (input.size(1) != 3072):
input.data=Binarize(input.data)
self.weight.data=Binarize(self.weight_org)
out = nn.functional.linear(input, self.weight)
if not self.bias is None:
self.bias.org=self.bias.data.clone()
out += self.bias.view(1, -1).expand_as(out)
return out
class TernarizeLinear(nn.Linear):
def __init__(self, thres, *kargs, **kwargs):
try:
pack = kwargs['pack']
except:
pack = 32
else:
del(kwargs['pack'])
try:
permute = kwargs['permute']
except:
permute = 1
else:
del(kwargs['permute'])
try:
self.align=kwargs['align']
except:
self.align=True
else:
del(kwargs['align'])
super(TernarizeLinear, self).__init__(*kargs, **kwargs)
permute = min(permute, self.weight.size(0))
self.register_buffer('pack', torch.LongTensor([pack]))
self.register_buffer('thres', torch.FloatTensor([thres]))
self.register_buffer('mask', torch.ones_like(self.weight.data))
self.register_buffer('permute_list', torch.LongTensor(np.tile(range(self.weight.size(1)), (permute,1))))
self.register_buffer('weight_org', self.weight.data.clone())
def forward(self, input, pruned=False):
if (input.size(1) != 784) and (input.size(1) != 3072):
input.data=Binarize(input.data)
self.weight.data=Ternarize(self.weight_org, self.thres, self.mask, self.permute_list, pruned, align=self.align, pack=self.pack.item())
out = nn.functional.linear(input, self.weight)
if not self.bias is None:
self.bias.org=self.bias.data.clone()
out += self.bias.view(1, -1).expand_as(out)
return out
class BinarizeConv2d(nn.Conv2d):
def __init__(self, *kargs, **kwargs):
super(BinarizeConv2d, self).__init__(*kargs, **kwargs)
self.register_buffer('weight_org', self.weight.data.clone())
def forward(self, input):
if input.size(1) != 3:
input.data = Binarize(input.data)
self.weight.data=Binarize(self.weight_org)
out = nn.functional.conv2d(input, self.weight, None, self.stride,
self.padding, self.dilation, self.groups)
if not self.bias is None:
self.bias.org=self.bias.data.clone()
out += self.bias.view(1, -1, 1, 1).expand_as(out)
return out
class TernarizeConv2d(nn.Conv2d):
def __init__(self, thres, *kargs, **kwargs):
try:
pack = kwargs['pack']
except:
pack = 32
else:
del(kwargs['pack'])
try:
permute = kwargs['permute']
except:
permute = 1
else:
del(kwargs['permute'])
try:
self.align=kwargs['align']
except:
self.align=True
else:
del(kwargs['align'])
super(TernarizeConv2d, self).__init__(*kargs, **kwargs)
permute = min(permute, self.weight.size(0))
self.register_buffer('pack', torch.LongTensor([pack]))
self.register_buffer('thres', torch.FloatTensor([thres]))
self.register_buffer('mask', torch.ones_like(self.weight.data))
self.register_buffer('permute_list', torch.LongTensor(np.tile(range(self.weight.size(1)), (permute,1))))
self.register_buffer('weight_org', self.weight.data.clone())
def forward(self, input, pruned=False):
if input.size(1) != 3:
input.data = Binarize(input.data)
self.weight.data=Ternarize(self.weight_org, self.thres, self.mask, self.permute_list, pruned, align=self.align, pack=self.pack.item())
out = nn.functional.conv2d(input, self.weight, None, self.stride, self.padding, self.dilation, self.groups)
if not self.bias is None:
self.bias.org=self.bias.data.clone()
out += self.bias.view(1, -1, 1, 1).expand_as(out)
return out
```
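A small, hedged sketch exercising the helpers above; values are chosen purely for illustration.
```python
# Illustrative only; assumes the file above is importable as binarized_modules.
import torch

import binarized_modules

w = torch.tensor([[0.3, -0.7], [0.05, -0.2]])
print(binarized_modules.Binarize(w))  # deterministic mode: sign(), entries become +/-1

layer = binarized_modules.BinarizeLinear(784, 512, bias=False)
out = layer(torch.randn(4, 784))      # weights are re-binarized from weight_org on every call
print(out.shape)                      # torch.Size([4, 512])
```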
#### File: 3pxnet-training/training/network.py
```python
import torch
import numpy as np
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import utils
import utils_own
import binarized_modules
class FC_small(nn.Module):
def __init__(self, full=False, binary=True, first_sparsity=0.8, rest_sparsity=0.9, hid=512, ind=784, align=False):
super(FC_small, self).__init__()
self.align = align
self.pruned = False
self.hid = hid
self.ind = ind
self.full = full
self.binary = binary
if full:
self.fc1 = nn.Linear(ind, hid, bias=False)
self.fc2 = nn.Linear(hid, 10, bias=False)
elif binary:
self.fc1 = binarized_modules.BinarizeLinear(ind, hid, bias=False)
self.fc2 = binarized_modules.BinarizeLinear(hid, 10, bias=False)
else:
self.fc1 = binarized_modules.TernarizeLinear(first_sparsity, ind, hid, bias=False, align=align)
self.fc2 = binarized_modules.TernarizeLinear(rest_sparsity, hid, 10, bias=False, align=align)
self.htanh1 = nn.Hardtanh()
self.bn1 = nn.BatchNorm1d(hid)
self.bn2 = nn.BatchNorm1d(10, affine=True)
self.logsoftmax=nn.LogSoftmax(dim=1)
def forward(self, x):
if self.full:
x = x.view(-1, 784)
if x.size(1)==784:
x = x[:,:768]
x = F.relu(self.fc1(x))
x = self.bn1(x)
x = self.fc2(x)
else:
x = x.view(-1, 784)
if x.size(1)==784:
x = x[:,:768]
if self.binary:
x = self.fc1(x)
else:
x = self.fc1(x, self.pruned)
x = self.bn1(x)
x = self.htanh1(x)
if self.binary:
x = self.fc2(x)
else:
x = self.fc2(x, self.pruned)
x = self.bn2(x)
return x
class FC_large(nn.Module):
def __init__(self, full=False, binary=True, first_sparsity=0.8, rest_sparsity=0.9, hid=4096, ind=768, align=False):
super(FC_large, self).__init__()
self.align = align
self.pruned = False
self.hid = hid
self.ind = ind
self.full = full
self.binary = binary
if full:
self.fc1 = nn.Linear(ind, hid, bias=False)
self.fc2 = nn.Linear(hid, hid, bias=False)
self.fc3 = nn.Linear(hid, hid, bias=False)
self.fc4 = nn.Linear(hid, 10, bias=False)
elif binary:
self.fc1 = binarized_modules.BinarizeLinear(ind, hid, bias=False)
self.fc2 = binarized_modules.BinarizeLinear(hid, hid, bias=False)
self.fc3 = binarized_modules.BinarizeLinear(hid, hid, bias=False)
self.fc4 = binarized_modules.BinarizeLinear(hid, 10, bias=False)
else:
self.fc1 = binarized_modules.TernarizeLinear(first_sparsity, ind, hid, bias=False, align=align)
self.fc2 = binarized_modules.TernarizeLinear(rest_sparsity, hid, hid, bias=False, align=align)
self.fc3 = binarized_modules.TernarizeLinear(rest_sparsity, hid, hid, bias=False, align=align)
self.fc4 = binarized_modules.TernarizeLinear(rest_sparsity, hid, 10, bias=False, align=align)
self.htanh1 = nn.Hardtanh()
self.bn1 = nn.BatchNorm1d(hid)
self.htanh2 = nn.Hardtanh()
self.bn2 = nn.BatchNorm1d(hid)
self.htanh3 = nn.Hardtanh()
self.bn3 = nn.BatchNorm1d(hid)
self.bn4 = nn.BatchNorm1d(10, affine=True)
self.logsoftmax=nn.LogSoftmax(dim=1)
def forward(self, x):
if self.full:
x = x.view(-1, 784)
if x.size(1)==784:
x = x[:,:768]
x = F.relu(self.fc1(x))
x = self.bn1(x)
x = F.relu(self.fc2(x))
x = self.bn2(x)
x = F.relu(self.fc3(x))
x = self.bn3(x)
x = self.fc4(x)
else:
x = x.view(-1, 784)
if x.size(1)==784:
x = x[:,:768]
if self.binary:
x = self.fc1(x)
else:
x = self.fc1(x, self.pruned)
x = self.bn1(x)
x = self.htanh1(x)
if self.binary:
x = self.fc2(x)
else:
x = self.fc2(x, self.pruned)
x = self.bn2(x)
x = self.htanh2(x)
if self.binary:
x = self.fc3(x)
else:
x = self.fc3(x, self.pruned)
x = self.bn3(x)
x = self.htanh3(x)
if self.binary:
x = self.fc4(x)
else:
x = self.fc4(x, self.pruned)
x = self.bn4(x)
return self.logsoftmax(x)
class CNN_medium(nn.Module):
def __init__(self, full=False, binary=True, conv_thres=0.7, fc_thres=0.9, align=False, pad=0):
super(CNN_medium, self).__init__()
self.pruned = False
self.full = full
self.binary = binary
self.pad = pad
self.conv1 = binarized_modules.BinarizeConv2d(3, 128, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(128)
self.htanh1 = nn.Hardtanh(inplace=True)
if full:
self.conv1 = nn.Conv2d(3, 128, kernel_size=3, stride=1, padding=1, bias=False)
self.conv2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=False)
self.conv3 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1, bias=False)
self.conv4 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False)
self.conv5 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1, bias=False)
self.conv6 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False)
self.fc1 = nn.Linear(512*4*4, 10, bias=False)
elif binary:
self.conv1 = binarized_modules.BinarizeConv2d(3, 128, kernel_size=3, stride=1, padding=0, bias=False)
self.conv2 = binarized_modules.BinarizeConv2d(128, 128, kernel_size=3, stride=1, padding=0, bias=False)
self.conv3 = binarized_modules.BinarizeConv2d(128, 256, kernel_size=3, stride=1, padding=0, bias=False)
self.conv4 = binarized_modules.BinarizeConv2d(256, 256, kernel_size=3, stride=1, padding=0, bias=False)
self.conv5 = binarized_modules.BinarizeConv2d(256, 512, kernel_size=3, stride=1, padding=0, bias=False)
self.conv6 = binarized_modules.BinarizeConv2d(512, 512, kernel_size=3, stride=1, padding=0, bias=False)
self.conv7 = binarized_modules.BinarizeConv2d(512, 10, kernel_size=4, padding=0, bias=False)
self.fc1 = binarized_modules.BinarizeLinear(1024, 1024, bias=False)
else:
self.conv1 = binarized_modules.BinarizeConv2d(3, 128, kernel_size=3, stride=1, padding=0, bias=False)
self.conv2 = binarized_modules.TernarizeConv2d(conv_thres, 128, 128, kernel_size=3, padding=0, bias=False, align=align)
self.conv3 = binarized_modules.TernarizeConv2d(conv_thres, 128, 256, kernel_size=3, padding=0, bias=False, align=align)
self.conv4 = binarized_modules.TernarizeConv2d(conv_thres, 256, 256, kernel_size=3, padding=0, bias=False, align=align)
self.conv5 = binarized_modules.TernarizeConv2d(conv_thres, 256, 512, kernel_size=3, padding=0, bias=False, align=align)
self.conv6 = binarized_modules.TernarizeConv2d(conv_thres, 512, 512, kernel_size=3, padding=0, bias=False, align=align)
self.conv7 = binarized_modules.TernarizeConv2d(0.49, 512, 10, kernel_size=4, padding=0, bias=False, align=align)
self.fc1 = binarized_modules.BinarizeLinear(1024, 1024, bias=False)
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.bn2 = nn.BatchNorm2d(128)
self.htanh2 = nn.Hardtanh(inplace=True)
self.bn3 = nn.BatchNorm2d(256)
self.htanh3 = nn.Hardtanh(inplace=True)
self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)
self.bn4 = nn.BatchNorm2d(256)
self.htanh4 = nn.Hardtanh(inplace=True)
self.bn5 = nn.BatchNorm2d(512)
self.htanh5 = nn.Hardtanh(inplace=True)
self.pool6 = nn.MaxPool2d(kernel_size=2, stride=2)
self.bn6 = nn.BatchNorm2d(512)
self.htanh6 = nn.Hardtanh(inplace=True)
self.bnfc1 = nn.BatchNorm1d(10, affine=True)
self.logsoftmax = nn.LogSoftmax(dim=1)
self.regime = {
0: {'optimizer': 'Adam', 'betas': (0.9, 0.999),'lr': 5e-3},
40: {'lr': 1e-3},
80: {'lr': 5e-4},
100: {'lr': 1e-4},
120: {'lr': 5e-5},
140: {'lr': 1e-5}
}
def forward(self, x):
if self.full:
x = F.relu(self.conv1(x))
x = self.bn1(x)
x = F.relu(self.conv2(x))
x = self.pool2(x)
x = self.bn2(x)
x = F.relu(self.conv3(x))
x = self.bn3(x)
x = F.relu(self.conv4(x))
x = self.pool4(x)
x = self.bn4(x)
x = F.relu(self.conv5(x))
x = self.bn5(x)
x = F.relu(self.conv6(x))
x = self.pool6(x)
x = self.bn6(x)
x = x.view(-1, 512*4*4)
x = F.relu(self.fc1(x))
self.fc1_result = x.data.clone()
else:
x = F.pad(x, (1,1,1,1), value=self.pad)
x = self.conv1(x)
x = self.bn1(x)
x = self.htanh1(x)
x = F.pad(x, (1,1,1,1), value=self.pad)
if self.binary:
x = self.conv2(x)
else:
x = self.conv2(x, self.pruned)
x = self.pool2(x)
x = self.bn2(x)
x = self.htanh2(x)
x = F.pad(x, (1,1,1,1), value=self.pad)
if self.binary:
x = self.conv3(x)
else:
x = self.conv3(x, self.pruned)
x = self.bn3(x)
x = self.htanh3(x)
x = F.pad(x, (1,1,1,1), value=self.pad)
if self.binary:
x = self.conv4(x)
else:
x = self.conv4(x, self.pruned)
x = self.pool4(x)
x = self.bn4(x)
x = self.htanh4(x)
x = F.pad(x, (1,1,1,1), value=self.pad)
if self.binary:
x = self.conv5(x)
else:
x = self.conv5(x, self.pruned)
x = self.bn5(x)
x = self.htanh5(x)
x = F.pad(x, (1,1,1,1), value=self.pad)
if self.binary:
x = self.conv6(x)
else:
x = self.conv6(x, self.pruned)
x = self.pool6(x)
x = self.bn6(x)
x = self.htanh6(x)
if self.binary:
x = self.conv7(x)
else:
x = self.conv7(x, self.pruned)
x = x.view(-1, 10)
x = self.bnfc1(x)
return self.logsoftmax(x)
class CNN_large(nn.Module):
def __init__(self, full=False, binary=True, conv_thres=0.7, fc_thres=0.9, align=False, pad=0):
super(CNN_large, self).__init__()
self.pruned = False
self.full = full
self.binary = binary
self.pad = pad
self.conv1 = binarized_modules.BinarizeConv2d(3, 128, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(128)
self.htanh1 = nn.Hardtanh(inplace=True)
if full:
self.conv1 = nn.Conv2d(3, 128, kernel_size=3, stride=1, padding=1, bias=False)
self.conv2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=False)
self.conv3 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1, bias=False)
self.conv4 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False)
self.conv5 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1, bias=False)
self.conv6 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False)
self.fc1 = nn.Linear(512*4*4, 1024, bias=False)
self.fc2 = nn.Linear(1024, 1024, bias=False)
self.fc3 = nn.Linear(1024, 10, bias=False)
elif binary:
self.conv1 = binarized_modules.BinarizeConv2d(3, 128, kernel_size=3, stride=1, padding=0, bias=False)
self.conv2 = binarized_modules.BinarizeConv2d(128, 128, kernel_size=3, stride=1, padding=0, bias=False)
self.conv3 = binarized_modules.BinarizeConv2d(128, 256, kernel_size=3, stride=1, padding=0, bias=False)
self.conv4 = binarized_modules.BinarizeConv2d(256, 256, kernel_size=3, stride=1, padding=0, bias=False)
self.conv5 = binarized_modules.BinarizeConv2d(256, 512, kernel_size=3, stride=1, padding=0, bias=False)
self.conv6 = binarized_modules.BinarizeConv2d(512, 512, kernel_size=3, stride=1, padding=0, bias=False)
self.conv7 = binarized_modules.BinarizeConv2d(512, 1024, kernel_size=4, padding=0, bias=False)
self.fc1 = binarized_modules.BinarizeLinear(1024, 1024, bias=False)
self.fc2 = binarized_modules.BinarizeLinear(1024, 10, bias=False)
else:
self.conv1 = binarized_modules.BinarizeConv2d(3, 128, kernel_size=3, stride=1, padding=0, bias=False)
self.conv2 = binarized_modules.TernarizeConv2d(conv_thres, 128, 128, kernel_size=3, padding=0, bias=False, align=align)
self.conv3 = binarized_modules.TernarizeConv2d(conv_thres, 128, 256, kernel_size=3, padding=0, bias=False, align=align)
self.conv4 = binarized_modules.TernarizeConv2d(conv_thres, 256, 256, kernel_size=3, padding=0, bias=False, align=align)
self.conv5 = binarized_modules.TernarizeConv2d(conv_thres, 256, 512, kernel_size=3, padding=0, bias=False, align=align)
self.conv6 = binarized_modules.TernarizeConv2d(conv_thres, 512, 512, kernel_size=3, padding=0, bias=False, align=align)
self.conv7 = binarized_modules.TernarizeConv2d(fc_thres, 512, 1024, kernel_size=4, padding=0, bias=False, align=align)
self.fc1 = binarized_modules.TernarizeLinear(fc_thres, 1024, 1024, bias=False, align=align)
self.fc2 = binarized_modules.TernarizeLinear(0.49, 1024, 10, bias=False)
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.bn2 = nn.BatchNorm2d(128)
self.htanh2 = nn.Hardtanh(inplace=True)
self.bn3 = nn.BatchNorm2d(256)
self.htanh3 = nn.Hardtanh(inplace=True)
self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)
self.bn4 = nn.BatchNorm2d(256)
self.htanh4 = nn.Hardtanh(inplace=True)
self.bn5 = nn.BatchNorm2d(512)
self.htanh5 = nn.Hardtanh(inplace=True)
self.pool6 = nn.MaxPool2d(kernel_size=2, stride=2)
self.bn6 = nn.BatchNorm2d(512)
self.htanh6 = nn.Hardtanh(inplace=True)
self.bnfc1 = nn.BatchNorm1d(1024)
self.htanhfc1 = nn.Hardtanh(inplace=True)
self.bnfc2 = nn.BatchNorm1d(1024)
self.htanhfc2 = nn.Hardtanh(inplace=True)
self.bnfc3 = nn.BatchNorm1d(10, affine=True)
self.logsoftmax = nn.LogSoftmax(dim=1)
self.regime = {
0: {'optimizer': 'Adam', 'betas': (0.9, 0.999),'lr': 5e-3},
40: {'lr': 1e-3},
80: {'lr': 5e-4},
100: {'lr': 1e-4},
120: {'lr': 5e-5},
140: {'lr': 1e-5}
}
def forward(self, x):
if self.full:
x = F.relu(self.conv1(x))
x = self.bn1(x)
x = F.relu(self.conv2(x))
x = self.pool2(x)
x = self.bn2(x)
x = F.relu(self.conv3(x))
x = self.bn3(x)
x = F.relu(self.conv4(x))
x = self.pool4(x)
x = self.bn4(x)
x = F.relu(self.conv5(x))
x = self.bn5(x)
x = F.relu(self.conv6(x))
x = self.pool6(x)
x = self.bn6(x)
x = x.view(-1, 512*4*4)
x = F.relu(self.fc1(x))
x = self.bnfc1(x)
x = F.relu(self.fc2(x))
x = self.bnfc2(x)
x = F.relu(self.fc3(x))
self.fc3_result = x.data.clone()
else:
x = F.pad(x, (1,1,1,1), value=self.pad)
x = self.conv1(x)
x = self.bn1(x)
x = self.htanh1(x)
x = F.pad(x, (1,1,1,1), value=self.pad)
if self.binary:
x = self.conv2(x)
else:
x = self.conv2(x, self.pruned)
x = self.pool2(x)
x = self.bn2(x)
x = self.htanh2(x)
x = F.pad(x, (1,1,1,1), value=self.pad)
if self.binary:
x = self.conv3(x)
else:
x = self.conv3(x, self.pruned)
x = self.bn3(x)
x = self.htanh3(x)
x = F.pad(x, (1,1,1,1), value=self.pad)
if self.binary:
x = self.conv4(x)
else:
x = self.conv4(x, self.pruned)
x = self.pool4(x)
x = self.bn4(x)
x = self.htanh4(x)
x = F.pad(x, (1,1,1,1), value=self.pad)
if self.binary:
x = self.conv5(x)
else:
x = self.conv5(x, self.pruned)
x = self.bn5(x)
x = self.htanh5(x)
x = F.pad(x, (1,1,1,1), value=self.pad)
if self.binary:
x = self.conv6(x)
else:
x = self.conv6(x, self.pruned)
x = self.pool6(x)
x = self.bn6(x)
x = self.htanh6(x)
if self.binary:
x = self.conv7(x)
else:
x = self.conv7(x, self.pruned)
x = x.view(-1, 1024)
x = self.bnfc1(x)
x = self.htanhfc1(x)
if self.binary:
x = self.fc1(x)
else:
x = self.fc1(x, self.pruned)
x = self.bnfc2(x)
x = self.htanhfc2(x)
if self.binary:
x = self.fc2(x)
else:
x = self.fc2(x, self.pruned)
x = self.bnfc3(x)
return self.logsoftmax(x)
class CNN_tiny(nn.Module):
def __init__(self, full=False, binary=True, conv_thres=0.7, fc_thres=0.9, align=False):
super(CNN_tiny, self).__init__()
self.pruned = False
self.full = full
self.binary = binary
if full:
self.conv1 = nn.Conv2d(1, 32, kernel_size=5, stride=1, padding=0, bias=False)
self.conv2 = nn.Conv2d(32, 32, kernel_size=5, stride=1, padding=0, bias=False)
self.fc1 = nn.Linear(32*4*4, 10, bias=False)
elif binary:
self.conv1 = binarized_modules.BinarizeConv2d(1, 32, kernel_size=5, stride=1, padding=0, bias=False)
self.conv2 = binarized_modules.BinarizeConv2d(32, 32, kernel_size=5, stride=1, padding=0, bias=False)
self.fc1 = binarized_modules.BinarizeConv2d(32, 10, kernel_size=4, padding=0, bias=False)
else:
self.conv1 = binarized_modules.BinarizeConv2d(1, 32, kernel_size=5, stride=1, padding=0, bias=False)
self.conv2 = binarized_modules.TernarizeConv2d(conv_thres, 32, 32, kernel_size=5, stride=1, padding=0, bias=False, align=align)
self.fc1 = binarized_modules.TernarizeConv2d(0.49, 32, 10, kernel_size=4, padding=0, bias=False, align=align)
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.bn1 = nn.BatchNorm2d(32)
self.htanh1 = nn.Hardtanh(inplace=True)
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.bn2 = nn.BatchNorm2d(32)
self.htanh2 = nn.Hardtanh(inplace=True)
self.bnfc1 = nn.BatchNorm1d(10, affine=True)
self.logsoftmax = nn.LogSoftmax(dim=1)
def forward(self, x):
if self.full:
x = F.relu(self.conv1(x))
x = self.pool1(x)
x = self.bn1(x)
x = F.relu(self.conv2(x))
x = self.pool2(x)
x = self.bn2(x)
x = x.view(-1, 32*4*4)
x = F.relu(self.fc1(x))
else:
x = self.conv1(x)
x = self.pool1(x)
x = self.bn1(x)
x = self.htanh1(x)
if self.binary:
x = self.conv2(x)
else:
x = self.conv2(x, self.pruned)
x = self.pool2(x)
x = self.bn2(x)
x = self.htanh2(x)
if self.binary:
x = self.fc1(x)
else:
x = self.fc1(x, self.pruned)
x = x.view(-1, 10)
x = self.bnfc1(x)
return self.logsoftmax(x)
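# --- added usage sketch (not part of the original file) -----------------------
# Assuming MNIST-sized input, the tiny model above can be exercised with:
#   net = CNN_tiny(full=False, binary=True)
#   out = net(torch.randn(8, 1, 28, 28))  # -> shape (8, 10), log-probabilities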
``` |
{
"source": "jianshitansuantong233/Riptide",
"score": 3
} |
#### File: anneal/models/squeezenet.py
```python
import tensorflow as tf
import tensorflow.keras.layers as nn
from riptide.anneal.anneal_funcs import *
from tensorflow.keras.regularizers import l2
class SqueezeNet(tf.keras.models.Model):
def __init__(self, classes=1000):
super(SqueezeNet, self).__init__()
self.classes = classes
l2_reg = 5e-6
self.resize = nn.Lambda(lambda image: tf.image.resize(image, [224, 224]))
self.c0 = nn.Conv2D(kernel_size=7, strides=2, filters=96, padding='same', activation='relu', kernel_regularizer=l2(l2_reg))
self.mp0 = nn.MaxPooling2D(pool_size=2)
self.b0 = nn.BatchNormalization()
self.p0 = PACT()
# Fire 1
self.f1c1 = SAWBConv2D(filters=32, kernel_size=1, padding='same', kernel_regularizer=l2(l2_reg))
self.f1b1 = nn.BatchNormalization()
self.f1p1 = PACT()
self.f1c2 = SAWBConv2D(filters=64, kernel_size=1, padding='same', kernel_regularizer=l2(l2_reg))
self.f1b2 = nn.BatchNormalization()
self.f1p2 = PACT()
self.f1c3 = SAWBConv2D(filters=64, kernel_size=3, padding='same', kernel_regularizer=l2(l2_reg))
self.f1b3 = nn.BatchNormalization()
self.f1p3 = PACT()
self.f1concat = nn.Concatenate(axis=-1)
# Fire 2
self.f2c1 = SAWBConv2D(filters=32, kernel_size=1, padding='same', kernel_regularizer=l2(l2_reg))
self.f2b1 = nn.BatchNormalization()
self.f2p1 = PACT()
self.f2c2 = SAWBConv2D(filters=64, kernel_size=1, padding='same', kernel_regularizer=l2(l2_reg))
self.f2b2 = nn.BatchNormalization()
self.f2p2 = PACT()
self.f2c3 = SAWBConv2D(filters=64, kernel_size=3, padding='same', kernel_regularizer=l2(l2_reg))
self.f2b3 = nn.BatchNormalization()
self.f2p3 = PACT()
self.f2concat = nn.Concatenate(axis=-1)
# Fire 3
self.f3c1 = SAWBConv2D(filters=32, kernel_size=1, padding='same', kernel_regularizer=l2(l2_reg))
self.f3b1 = nn.BatchNormalization()
self.f3p1 = PACT()
self.f3c2 = SAWBConv2D(filters=128, kernel_size=1, padding='same', kernel_regularizer=l2(l2_reg))
self.f3b2 = nn.BatchNormalization()
self.f3p2 = PACT()
self.f3c3 = SAWBConv2D(filters=128, kernel_size=3, padding='same', kernel_regularizer=l2(l2_reg))
self.f3b3 = nn.BatchNormalization()
self.f3p3 = PACT()
self.f3concat = nn.Concatenate(axis=-1)
self.mp3 = nn.MaxPooling2D(pool_size=2)
# Fire 4
self.f4c1 = SAWBConv2D(filters=32, kernel_size=1, padding='same', kernel_regularizer=l2(l2_reg))
self.f4b1 = nn.BatchNormalization()
self.f4p1 = PACT()
self.f4c2 = SAWBConv2D(filters=128, kernel_size=1, padding='same', kernel_regularizer=l2(l2_reg))
self.f4b2 = nn.BatchNormalization()
self.f4p2 = PACT()
self.f4c3 = SAWBConv2D(filters=128, kernel_size=3, padding='same', kernel_regularizer=l2(l2_reg))
self.f4b3 = nn.BatchNormalization()
self.f4p3 = PACT()
self.f4concat = nn.Concatenate(axis=-1)
# Fire 5
self.f5c1 = SAWBConv2D(filters=64, kernel_size=1, padding='same', kernel_regularizer=l2(l2_reg))
self.f5b1 = nn.BatchNormalization()
self.f5p1 = PACT()
self.f5c2 = SAWBConv2D(filters=192, kernel_size=1, padding='same', kernel_regularizer=l2(l2_reg))
self.f5b2 = nn.BatchNormalization()
self.f5p2 = PACT()
self.f5c3 = SAWBConv2D(filters=192, kernel_size=3, padding='same', kernel_regularizer=l2(l2_reg))
self.f5b3 = nn.BatchNormalization()
self.f5p3 = PACT()
self.f5concat = nn.Concatenate(axis=-1)
# Fire 6
self.f6c1 = SAWBConv2D(filters=64, kernel_size=1, padding='same', kernel_regularizer=l2(l2_reg))
self.f6b1 = nn.BatchNormalization()
self.f6p1 = PACT()
self.f6c2 = SAWBConv2D(filters=192, kernel_size=1, padding='same', kernel_regularizer=l2(l2_reg))
self.f6b2 = nn.BatchNormalization()
self.f6p2 = PACT()
self.f6c3 = SAWBConv2D(filters=192, kernel_size=3, padding='same', kernel_regularizer=l2(l2_reg))
self.f6b3 = nn.BatchNormalization()
self.f6p3 = PACT()
self.f6concat = nn.Concatenate(axis=-1)
# Fire 7
self.f7c1 = SAWBConv2D(filters=64, kernel_size=1, padding='same', kernel_regularizer=l2(l2_reg))
self.f7b1 = nn.BatchNormalization()
self.f7p1 = PACT()
self.f7c2 = SAWBConv2D(filters=256, kernel_size=1, padding='same', kernel_regularizer=l2(l2_reg))
self.f7b2 = nn.BatchNormalization()
self.f7p2 = PACT()
self.f7c3 = SAWBConv2D(filters=256, kernel_size=3, padding='same', kernel_regularizer=l2(l2_reg))
self.f7b3 = nn.BatchNormalization()
self.f7p3 = PACT()
self.f7concat = nn.Concatenate(axis=-1)
self.mp7 = nn.MaxPooling2D(pool_size=2)
# Fire 8
self.f8c1 = SAWBConv2D(filters=64, kernel_size=1, padding='same', kernel_regularizer=l2(l2_reg))
self.f8b1 = nn.BatchNormalization()
self.f8p1 = PACT()
self.f8c2 = SAWBConv2D(filters=256, kernel_size=1, padding='same', kernel_regularizer=l2(l2_reg))
self.f8b2 = nn.BatchNormalization()
self.f8p2 = PACT()
self.f8c3 = SAWBConv2D(filters=256, kernel_size=3, padding='same', kernel_regularizer=l2(l2_reg))
self.f8b3 = nn.BatchNormalization()
self.f8p3 = PACT()
self.f8concat = nn.Concatenate(axis=-1)
# Output
self.avgpool = nn.GlobalAveragePooling2D()
self.classifier = nn.Dense(self.classes, activation='softmax')
def call(self, x, training=None):
y = self.resize(x)
y = self.c0(y)
y = self.mp0(y)
y = self.b0(y, training=training)
y = self.p0(y)
# Fire 1
y = self.f1c1(y)
y = self.f1b1(y, training=training)
y = self.f1p1(y)
y1x = self.f1c2(y)
y1x = self.f1b2(y1x, training=training)
y1x = self.f1p2(y1x)
y3x = self.f1c3(y)
y3x = self.f1b3(y3x, training=training)
y3x = self.f1p3(y3x)
y = self.f1concat([y1x, y3x])
# Fire 2
y = self.f2c1(y)
y = self.f2b1(y, training=training)
y = self.f2p1(y)
y1x = self.f2c2(y)
y1x = self.f2b2(y1x, training=training)
y1x = self.f2p2(y1x)
y3x = self.f2c3(y)
y3x = self.f2b3(y3x, training=training)
y3x = self.f2p3(y3x)
y = self.f2concat([y1x, y3x])
# Fire 3
y = self.f3c1(y)
y = self.f3b1(y, training=training)
y = self.f3p1(y)
y1x = self.f3c2(y)
y1x = self.f3b2(y1x, training=training)
y1x = self.f3p2(y1x)
y3x = self.f3c3(y)
y3x = self.f3b3(y3x, training=training)
y3x = self.f3p3(y3x)
y = self.f3concat([y1x, y3x])
y = self.mp3(y)
# Fire 4
y = self.f4c1(y)
y = self.f4b1(y, training=training)
y = self.f4p1(y)
y1x = self.f4c2(y)
y1x = self.f4b2(y1x, training=training)
y1x = self.f4p2(y1x)
y3x = self.f4c3(y)
y3x = self.f4b3(y3x, training=training)
y3x = self.f4p3(y3x)
y = self.f4concat([y1x, y3x])
# Fire 5
y = self.f5c1(y)
y = self.f5b1(y, training=training)
y = self.f5p1(y)
y1x = self.f5c2(y)
y1x = self.f5b2(y1x, training=training)
y1x = self.f5p2(y1x)
y3x = self.f5c3(y)
y3x = self.f5b3(y3x, training=training)
y3x = self.f5p3(y3x)
y = self.f5concat([y1x, y3x])
# Fire 6
y = self.f6c1(y)
y = self.f6b1(y, training=training)
y = self.f6p1(y)
y1x = self.f6c2(y)
y1x = self.f6b2(y1x, training=training)
y1x = self.f6p2(y1x)
y3x = self.f6c3(y)
y3x = self.f6b3(y3x, training=training)
y3x = self.f6p3(y3x)
y = self.f6concat([y1x, y3x])
# Fire 7
y = self.f7c1(y)
y = self.f7b1(y, training=training)
y = self.f7p1(y)
y1x = self.f7c2(y)
y1x = self.f7b2(y1x, training=training)
y1x = self.f7p2(y1x)
y3x = self.f7c3(y)
y3x = self.f7b3(y3x, training=training)
y3x = self.f7p3(y3x)
y = self.f7concat([y1x, y3x])
y = self.mp7(y)
# Fire 8
y = self.f8c1(y)
y = self.f8b1(y, training=training)
y = self.f8p1(y)
y1x = self.f8c2(y)
y1x = self.f8b2(y1x, training=training)
y1x = self.f8p2(y1x)
y3x = self.f8c3(y)
y3x = self.f8b3(y3x, training=training)
y3x = self.f8p3(y3x)
y = self.f8concat([y1x, y3x])
y = self.avgpool(y)
y = self.classifier(y)
tf.compat.v1.summary.histogram('output', y)
return y
```
#### File: riptide/binary/binary_funcs.py
```python
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
from .bit_approximations import load_clusters, load_bits
def log2(x):
return tf.math.log(x) / tf.math.log(2.0)
@tf.custom_gradient
def AP2(x):
#x = tf.clip_by_value(x, 1e-7, 1.0)
# Positive ap2 might be fine
y = 2**(tf.round(log2(tf.abs(x))))
def grad_fn(dy):
return [dy]
return y, grad_fn
def get_numpy(sess, x):
if not isinstance(x, list):
x = [x]
with sess.as_default():
output = sess.run(x)
if len(output) == 1:
output = output[0]
return output
def compute_quantized_shiftnorm(variance,
mean,
epsilon,
previous_weights,
extra_scale,
bits,
rescale=True):
# Compute number of bits to shift.
std_factor = (1.0 / (extra_scale * tf.sqrt(variance + epsilon)))
with tf.name_scope('AP2'):
approximate_std = AP2(std_factor)
# Now determine number of bits needed, the sum of weight scale
# bits and shift norm scale bits.
weight_scale_ap2, _ = get_quantize_bits(previous_weights)
weight_scale_bits = -log2(weight_scale_ap2)
weight_scale_bits = tf.reshape(weight_scale_bits, [-1])
total_shift_bits = weight_scale_bits + bits
# Quantizing the mean is a little tricky, start by determining
# the quantization scale.
mean_scale = 1.0 + ((1.0 / (2.0**bits - 1.0)) *
(1.0 - (1.0 / 2.0**weight_scale_bits)))
# Now quantize each channel of mean appropriately.
with tf.name_scope('FPQ'):
quantized_means = FixedPointQuantize(mean, mean_scale,
total_shift_bits, rescale)
return approximate_std, quantized_means
def get_shiftnorm_ap2(layer, previous_weights, rescale=False):
mean = layer.weights[0].value()
extra_scale = layer.extra_scale
epsilon = layer.epsilon
variance = layer.weights[1].value()
bits = layer.bits
approximate_std, quantized_means = compute_quantized_shiftnorm(
variance, mean, epsilon, previous_weights, extra_scale, bits, rescale)
return approximate_std, quantized_means
def get_quantize_bits(x):
if len(x.shape) > 2:
mean = tf.reduce_mean(tf.abs(tf.reshape(x, [-1, x.shape[-1]])), axis=0)
else:
mean = tf.reduce_mean(tf.abs(x))
# Fix dimensions of mean
for i in range(len(x.shape) - 1):
mean = tf.expand_dims(mean, axis=0)
bits = tf.cast(x >= 0, tf.float32)
bits = (2 * bits) - 1
with tf.name_scope("AP2"):
approximate_mean = AP2(mean)
return approximate_mean, bits
# Fixed point quantize operator that supports per_channel scales and bitwidth.
# Assumes min and max value are both scale.
@tf.custom_gradient
def FixedPointQuantize(inputs, scale, bits, rescale):
# Start by clipping values between specified range.
y = tf.clip_by_value(inputs, -scale, scale)
# Determine floating point value of each bit.
bit_value = scale / (2.0**bits - 1.0)
# Quantize tensor.
y = y / bit_value
y = tf.round(y)
# Readjust to floating point if specified.
y = tf.cond(rescale, true_fn=lambda: y * bit_value, false_fn=lambda: y)
def grad_fn(dy):
grad_mask = tf.cast(tf.abs(inputs) <= scale, tf.float32)
dx = grad_mask * dy
return [dx, None, None, None]
return y, grad_fn
@tf.custom_gradient
def XQuantize(x):
mean, bits = get_quantize_bits(x)
y = mean * bits
def grad_fn(dy):
# Use a larger gradient cutoff to allow weights to grow if needed.
        # This can affect the scales based on the means of kernels.
# Likely has no significant effect though.
gradient_cutoff = 10.0
grad_mask = tf.cast(tf.abs(x) <= gradient_cutoff, tf.float32)
# Allow weights to move off away from 1 if needed.
leaky_grad_mask = tf.cast(
tf.logical_or(
tf.logical_and(x > gradient_cutoff, dy > 0),
tf.logical_and(x < -gradient_cutoff, dy < 0)), tf.float32)
dx = grad_mask * dy + 0.1 * leaky_grad_mask * dy
return [dx]
return y, grad_fn
@tf.custom_gradient
def Quantize(x):
bits = tf.cast(x >= 0, tf.float32)
bits = (2 * bits) - 1
y = bits
def grad_fn(dy):
#grad_mask_greater = tf.cast(tf.abs(x) >= 1, tf.float32)
#grad_mask_lesser = tf.cast(tf.abs(x) <= 1, tf.float32)
# Let big values leak a little
#grad_mask = 0.1 * grad_mask_greater + grad_mask_lesser
grad_mask = tf.cast(tf.abs(x) <= 1, tf.float32)
dx = grad_mask * dy
return [dx]
return y, grad_fn
def get_HWGQ_bits(x, clusters):
# Computes HWG quantization and returns the integer binary value.
for i in range(len(x.shape)):
# need to reshape clusters properly.
clusters = tf.expand_dims(clusters, axis=0)
# Add new data axis for proper subtraction.
x = tf.expand_dims(x, axis=-1)
# Compute best fitting cluster for each value in data.
distance = tf.abs(x - clusters)
indices = tf.argmin(distance, axis=-1)
return indices
@tf.custom_gradient
def HWGQuantize(x, clusters):
indices = get_HWGQ_bits(x, clusters)
y = tf.gather(clusters, indices)
def grad_fn(dy):
max_cluster = tf.reduce_max(clusters)
min_cluster = tf.reduce_min(clusters)
grad_filter = tf.logical_and(min_cluster <= x, x <= max_cluster)
dx = dy * tf.cast(grad_filter, tf.float32)
return [dx, None]
return y, grad_fn
# Assumes input is clipped to [0, 1]
@tf.custom_gradient
def DQ(x, bits, bipolar):
# Use small adjustment to avoid rounding inconsistency.
# Adjust for bipolar if needed.
x = tf.cond(bipolar, lambda: (x + 1.0) / 2.0, lambda: x)
epsilon = 1e-5
# Round to nearest linear bin in [0, 1].
output = (1.0 /
(2.0**bits - 1.0)) * tf.round((2.0**bits - 1.0) * x + epsilon)
# Deconvert back to [-1, 1] if bipolar.
output = tf.cond(bipolar, lambda: (output - 0.5) * 2.0, lambda: output)
# Pass through gradient.
def grad_fn(dy):
return [dy, None, None]
return output, grad_fn
def DQuantize(x, bits, bipolar=False):
# Apply clipping in [0, 1] with associated gradient.
if bipolar:
x = tf.clip_by_value(x, -1, 1)
else:
x = tf.clip_by_value(x, 0, 1)
    # Quantize linearly.
return DQ(x, bits, bipolar)
def DQuantizeW(x, bits):
x = tf.tanh(x) / (2.0 * tf.reduce_max(tf.abs(tf.tanh(x)))) + 0.5
return (2. * DQuantize(x, bits)) - 1.0
def DQuantizeBits(x, bits, bipolar=False):
if bipolar:
x = tf.clip_by_value(x, -1, 1)
x = (x + 1.0) / 2.0
else:
x = tf.clip_by_value(x, 0, 1)
    # Small adjustment to avoid rounding inconsistency, as in DQ above.
    epsilon = 1e-5
    return tf.round(x * (2.0**bits - 1.0) + epsilon)
def DQuantizeBitsW(x, bits):
shifted_x = (tf.tanh(x) / (2.0 * tf.reduce_max(tf.abs(tf.tanh(x))))) + 0.5
    return DQuantizeBits(shifted_x, bits)
# Takes bit value x and converts it to floating point approximation.
def DBits2Value(x, bits):
return x / (2.0**bits - 1.0)
def DBits2ValueW(x, bits):
approx = DBits2Value(x, bits)
return 2.0 * (approx - 0.5)
```
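The quantizers above are plain `tf.custom_gradient` functions, so they can be exercised directly. A minimal usage sketch (not part of the original source; assumes an eager-enabled TensorFlow build, and the import path is taken from the file header above):
```python
import tensorflow as tf
from riptide.binary.binary_funcs import XQuantize, Quantize

x = tf.constant([[0.3, -1.2], [0.05, 2.0]], dtype=tf.float32)
with tf.GradientTape() as tape:
    tape.watch(x)
    y = XQuantize(x)   # mean |x|, rounded to a power of two, times the sign bits
    z = Quantize(x)    # pure +/-1 binarization
loss = tf.reduce_sum(y) + tf.reduce_sum(z)
# Gradients flow through the straight-through estimators defined in each grad_fn.
print(tape.gradient(loss, x).numpy())
```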
#### File: riptide/binary/bit_approximations.py
```python
import numpy as np
import os
# Uses least-squares approximation to compute the best true binary approximations
# for HWGQ binarization.
def get_binary_repr(value, bits):
output = np.zeros(shape=bits)
for bit in reversed(range(bits)):
bit_set = int(value / 2**bit) != 0
output[bit] = bit_set
if bit_set:
value -= 2**bit
output = np.flip(output)
return output
def approximate_bits(num_bits, values):
# Compose matrix A, the bits for each value, returns
# list with bias at index 0 then bit values.
A = []
for i in range(len(values)):
A.append(get_binary_repr(i, num_bits))
A = np.asarray(A)
    output, _, _, _ = np.linalg.lstsq(A, values, rcond=None)
output = np.flip(output)
return output
def compute_approximate_clusters(bits):
num_bits = len(bits)
output = []
for i in range(2**num_bits):
bit_rep = get_binary_repr(i, num_bits)
bit_rep = np.flip(bit_rep)
val_sum = 0
for j in range(num_bits):
val_sum += bits[j] * bit_rep[j]
output.append(val_sum)
return np.asarray(output)
def load_clusters(bits, path="/root/Riptide/riptide/binary/HWGQ_clusters"):
file_path = os.path.join(path, "lstsq_clusters_%d_bit.npy" % bits)
return np.load(file_path).astype(np.float32)
def load_bits(bits, path="/root/Riptide/riptide/binary/HWGQ_clusters"):
file_path = os.path.join(path, "lstsq_bit_values_%d_bit.npy" % bits)
return np.load(file_path).astype(np.float32)
# Example computation
# bits = 4
# clusters = load_clusters(bits)
# app_bits = approximate_bits(bits, clusters)
# compute_approximate_clusters(app_bits)
```
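Because `load_clusters` and `load_bits` read pre-computed `.npy` files from a hard-coded path, the least-squares machinery itself is easier to sanity-check on made-up values. An illustrative sketch (the target levels are arbitrary, not real HWGQ clusters; the import path follows the file header above):
```python
import numpy as np
from riptide.binary.bit_approximations import approximate_bits, compute_approximate_clusters

targets = np.array([0.0, 0.5, 1.2, 1.9])       # arbitrary example levels
bit_values = approximate_bits(2, targets)       # least-squares value of each bit position
levels = compute_approximate_clusters(bit_values)
print(bit_values)   # the two fitted bit values
print(levels)       # the 2**2 levels representable with those bit values
```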
#### File: binary/models/q_resnetv1b.py
```python
import os
import tensorflow as tf
from .. import binary_layers as nn
#from tensorflow.keras.models import Sequential
from riptide.utils.sequential import forward
class BasicBlockV1b(tf.keras.Model):
"""ResNetV1b BasicBlockV1b
"""
expansion = 1
def __init__(self,
planes,
strides=1,
dilation=1,
downsample=None,
previous_dilation=1,
norm_layer=None,
norm_kwargs={},
data_format='channels_last',
**kwargs):
super(BasicBlockV1b, self).__init__()
self.conv1 = nn.Conv2D(
filters=planes,
kernel_size=3,
strides=strides,
padding='same',
dilation_rate=dilation,
use_bias=False,
data_format=data_format)
self.conv2 = nn.Conv2D(
filters=planes,
kernel_size=3,
strides=1,
padding='same',
dilation_rate=previous_dilation,
use_bias=False,
data_format=data_format)
self.downsample = downsample
self.strides = strides
def call(self, x):
residual = x
out = forward(x, self.conv1)
out = forward(out, self.conv2)
if self.downsample is not None:
residual = forward(x, self.downsample)
out = out + residual
return out
class BottleneckV1b(tf.keras.Model):
"""ResNetV1b BottleneckV1b
"""
# pylint: disable=unused-argument
expansion = 4
def __init__(self,
planes,
strides=1,
dilation=1,
downsample=None,
previous_dilation=1,
norm_layer=None,
norm_kwargs={},
last_gamma=False,
data_format='channels_last',
**kwargs):
super(BottleneckV1b, self).__init__()
self.conv1 = nn.Conv2D(
filters=planes,
kernel_size=1,
use_bias=False,
data_format=data_format)
self.conv2 = nn.Conv2D(
filters=planes,
kernel_size=3,
strides=strides,
padding='same',
dilation_rate=dilation,
use_bias=False,
data_format=data_format)
self.conv3 = nn.Conv2D(
filters=planes * 4,
kernel_size=1,
use_bias=False,
data_format=data_format)
self.downsample = downsample
self.dilation = dilation
self.strides = strides
def call(self, x):
residual = x
out = forward(x, self.conv1)
out = forward(out, self.conv2)
out = forward(out, self.conv3)
if self.downsample is not None:
residual = forward(x, self.downsample)
out = out + residual
return out
class ResNetV1b(tf.keras.Model):
""" Pre-trained ResNetV1b Model, which preduces the strides of 8
featuremaps at conv5.
Parameters
----------
block : Block
Class for the residual block. Options are BasicBlockV1, BottleneckV1.
layers : list of int
Numbers of layers in each block
classes : int, default 1000
Number of classification classes.
dilated : bool, default False
Applying dilation strategy to pretrained ResNet yielding a stride-8 model,
typically used in Semantic Segmentation.
norm_layer : object
Normalization layer used in backbone network (default: :class:`mxnet.gluon.nn.BatchNorm`;
        for Synchronized Cross-GPU BatchNormalization).
last_gamma : bool, default False
Whether to initialize the gamma of the last BatchNorm layer in each bottleneck to zero.
deep_stem : bool, default False
Whether to replace the 7x7 conv1 with 3 3x3 convolution layers.
avg_down : bool, default False
Whether to use average pooling for projection skip connection between stages/downsample.
final_drop : float, default 0.0
Dropout ratio before the final classification layer.
Reference:
- <NAME>, et al. "Deep residual learning for image recognition."
Proceedings of the IEEE conference on computer vision and pattern recognition. 2016.
- <NAME>, and <NAME>. "Multi-scale context aggregation by dilated convolutions."
"""
# pylint: disable=unused-variable
def __init__(self,
block,
layers,
classes=1000,
data_format='channels_last',
dilated=False,
norm_layer=nn.BatchNormalization,
norm_kwargs={},
last_gamma=False,
deep_stem=False,
stem_width=32,
avg_down=False,
final_drop=0.0,
name_prefix='',
**kwargs):
self.inplanes = stem_width * 2 if deep_stem else 64
self.data_format = data_format
super(ResNetV1b, self).__init__(name=name_prefix)
self.norm_kwargs = norm_kwargs
with tf.name_scope(self.name):
if not deep_stem:
self.conv1 = nn.Conv2DBatchNorm(
filters=64,
kernel_size=7,
strides=2,
padding='same',
use_bias=False,
data_format=data_format)
else:
self.conv1 = ['conv1']
self.conv1.append(
nn.Conv2DBatchNorm(
filters=stem_width,
kernel_size=3,
strides=2,
padding='same',
use_bias=False,
data_format=data_format))
self.conv1.append(
nn.Conv2D(
filters=stem_width,
kernel_size=3,
strides=1,
padding='same',
use_bias=False,
data_format=data_format))
self.conv1.append(
nn.Conv2D(
filters=stem_width * 2,
kernel_size=3,
strides=1,
padding='same',
use_bias=False,
data_format=data_format))
self.maxpool = nn.MaxPool2D(
pool_size=3,
strides=2,
padding='same',
data_format=data_format)
self.layer1 = self._make_layer(
1,
block,
64,
layers[0],
avg_down=avg_down,
norm_layer=norm_layer,
last_gamma=last_gamma,
data_format=data_format)
self.layer2 = self._make_layer(
2,
block,
128,
layers[1],
strides=2,
avg_down=avg_down,
norm_layer=norm_layer,
last_gamma=last_gamma,
data_format=data_format)
if dilated:
self.layer3 = self._make_layer(
3,
block,
256,
layers[2],
strides=1,
dilation=2,
avg_down=avg_down,
norm_layer=norm_layer,
last_gamma=last_gamma,
data_format=data_format)
self.layer4 = self._make_layer(
4,
block,
512,
layers[3],
strides=1,
dilation=4,
avg_down=avg_down,
norm_layer=norm_layer,
last_gamma=last_gamma,
data_format=data_format)
else:
self.layer3 = self._make_layer(
3,
block,
256,
layers[2],
strides=2,
avg_down=avg_down,
norm_layer=norm_layer,
last_gamma=last_gamma,
data_format=data_format)
self.layer4 = self._make_layer(
4,
block,
512,
layers[3],
strides=2,
avg_down=avg_down,
norm_layer=norm_layer,
last_gamma=last_gamma,
data_format=data_format)
self.avgpool = nn.GlobalAveragePooling2D(data_format=data_format)
self.flat = nn.Flatten()
self.drop = None
if final_drop > 0.0:
self.drop = nn.Dropout(final_drop)
self.fc = []
self.fc.append(nn.Dense(units=classes, use_bias=False))
self.fc.append(nn.Scalu())
def _make_layer(self,
stage_index,
block,
planes,
blocks,
strides=1,
dilation=1,
avg_down=False,
norm_layer=None,
last_gamma=False,
data_format='channels_last'):
downsample = None
if strides != 1 or self.inplanes != planes * block.expansion:
downsample = ['down%d' % stage_index]
if avg_down:
if dilation == 1:
downsample.append(
nn.AveragePooling2D(
pool_size=strides,
strides=strides,
padding='same',
data_format=data_format))
else:
downsample.append(
nn.AveragePooling2D(
pool_size=1,
strides=1,
padding='same',
data_format=data_format))
downsample.append(
nn.Conv2D(
filters=planes * block.expansion,
kernel_size=1,
strides=1,
use_bias=False,
data_format=data_format))
else:
downsample.append(
nn.Conv2D(
filters=planes * block.expansion,
kernel_size=1,
strides=strides,
use_bias=False,
data_format=data_format))
layers = ['layers%d' % stage_index]
if dilation in (1, 2):
layers.append(
block(
planes,
strides,
dilation=1,
downsample=downsample,
previous_dilation=dilation,
norm_layer=norm_layer,
norm_kwargs=self.norm_kwargs,
last_gamma=last_gamma,
data_format=data_format))
elif dilation == 4:
layers.append(
block(
planes,
strides,
dilation=2,
downsample=downsample,
previous_dilation=dilation,
norm_layer=norm_layer,
norm_kwargs=self.norm_kwargs,
last_gamma=last_gamma,
data_format=data_format))
else:
raise RuntimeError("=> unknown dilation size: {}".format(dilation))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
planes,
dilation=dilation,
previous_dilation=dilation,
norm_layer=norm_layer,
norm_kwargs=self.norm_kwargs,
last_gamma=last_gamma,
data_format=data_format))
return layers
def call(self, x):
if self.data_format == 'channels_first':
x = tf.transpose(x, [0, 3, 1, 2])
x = forward(x, self.conv1)
x = forward(x, self.maxpool)
x = forward(x, self.layer1)
x = forward(x, self.layer2)
x = forward(x, self.layer3)
x = forward(x, self.layer4)
x = forward(x, self.avgpool)
x = forward(x, self.flat)
if self.drop is not None:
x = forward(x, self.drop)
x = forward(x, self.fc)
return x
def resnet18_v1b(**kwargs):
"""Constructs a ResNetV1b-18 model.
Parameters
----------
dilated: bool, default False
        Whether to apply dilation strategy to ResNetV1b, yielding a stride-8 model.
norm_layer : object
Normalization layer used in backbone network (default: :class:`mxnet.gluon.BatchNorm`;
        for Synchronized Cross-GPU BatchNormalization).
last_gamma : bool, default False
Whether to initialize the gamma of the last BatchNorm layer in each bottleneck to zero.
"""
model = ResNetV1b(
BasicBlockV1b, [2, 2, 2, 2], name_prefix='resnetv1b', **kwargs)
return model
def resnet34_v1b(**kwargs):
"""Constructs a ResNetV1b-34 model.
Parameters
----------
dilated: bool, default False
        Whether to apply dilation strategy to ResNetV1b, yielding a stride-8 model.
norm_layer : object
Normalization layer used in backbone network (default: :class:`mxnet.gluon.BatchNorm`;
last_gamma : bool, default False
Whether to initialize the gamma of the last BatchNorm layer in each bottleneck to zero.
"""
model = ResNetV1b(
BasicBlockV1b, [3, 4, 6, 3], name_prefix='resnetv1b', **kwargs)
return model
def resnet50_v1b(**kwargs):
"""Constructs a ResNetV1b-50 model.
Parameters
----------
dilated: bool, default False
        Whether to apply dilation strategy to ResNetV1b, yielding a stride-8 model.
norm_layer : object
Normalization layer used in backbone network (default: :class:`mxnet.gluon.BatchNorm`;
last_gamma : bool, default False
Whether to initialize the gamma of the last BatchNorm layer in each bottleneck to zero.
"""
model = ResNetV1b(
BottleneckV1b, [3, 4, 6, 3], name_prefix='resnetv1b', **kwargs)
return model
def resnet101_v1b(**kwargs):
"""Constructs a ResNetV1b-101 model.
Parameters
----------
dilated: bool, default False
        Whether to apply dilation strategy to ResNetV1b, yielding a stride-8 model.
norm_layer : object
Normalization layer used in backbone network (default: :class:`mxnet.gluon.BatchNorm`;
last_gamma : bool, default False
Whether to initialize the gamma of the last BatchNorm layer in each bottleneck to zero.
"""
model = ResNetV1b(
BottleneckV1b, [3, 4, 23, 3], name_prefix='resnetv1b', **kwargs)
return model
def resnet152_v1b(**kwargs):
"""Constructs a ResNetV1b-152 model.
Parameters
----------
dilated: bool, default False
        Whether to apply dilation strategy to ResNetV1b, yielding a stride-8 model.
norm_layer : object
Normalization layer used in backbone network (default: :class:`mxnet.gluon.BatchNorm`;
last_gamma : bool, default False
Whether to initialize the gamma of the last BatchNorm layer in each bottleneck to zero.
"""
model = ResNetV1b(
BottleneckV1b, [3, 8, 36, 3], name_prefix='resnetv1b', **kwargs)
return model
def resnet50_v1c(**kwargs):
"""Constructs a ResNetV1c-50 model.
Parameters
----------
dilated: bool, default False
        Whether to apply dilation strategy to ResNetV1b, yielding a stride-8 model.
norm_layer : object
Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`;
"""
model = ResNetV1b(
BottleneckV1b, [3, 4, 6, 3],
deep_stem=True,
name_prefix='resnetv1c_',
**kwargs)
return model
def resnet101_v1c(**kwargs):
"""Constructs a ResNetV1c-101 model.
Parameters
----------
dilated: bool, default False
        Whether to apply dilation strategy to ResNetV1b, yielding a stride-8 model.
norm_layer : object
Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`;
"""
model = ResNetV1b(
BottleneckV1b, [3, 4, 23, 3],
deep_stem=True,
name_prefix='resnetv1c_',
**kwargs)
return model
def resnet152_v1c(**kwargs):
"""Constructs a ResNetV1b-152 model.
Parameters
----------
dilated: bool, default False
        Whether to apply dilation strategy to ResNetV1b, yielding a stride-8 model.
norm_layer : object
Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`;
"""
model = ResNetV1b(
BottleneckV1b, [3, 8, 36, 3],
deep_stem=True,
name_prefix='resnetv1c_',
**kwargs)
return model
def resnet50_v1d(**kwargs):
"""Constructs a ResNetV1d-50 model.
Parameters
----------
dilated: bool, default False
        Whether to apply dilation strategy to ResNetV1b, yielding a stride-8 model.
norm_layer : object
Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`;
"""
model = ResNetV1b(
BottleneckV1b, [3, 4, 6, 3],
deep_stem=True,
avg_down=True,
name_prefix='resnetv1d_',
**kwargs)
return model
def resnet101_v1d(**kwargs):
"""Constructs a ResNetV1d-50 model.
Parameters
----------
dilated: bool, default False
        Whether to apply dilation strategy to ResNetV1b, yielding a stride-8 model.
norm_layer : object
Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`;
"""
model = ResNetV1b(
BottleneckV1b, [3, 4, 23, 3],
deep_stem=True,
avg_down=True,
name_prefix='resnetv1d_',
**kwargs)
return model
def resnet152_v1d(**kwargs):
"""Constructs a ResNetV1d-50 model.
Parameters
----------
dilated: bool, default False
        Whether to apply dilation strategy to ResNetV1b, yielding a stride-8 model.
norm_layer : object
Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`;
"""
model = ResNetV1b(
BottleneckV1b, [3, 8, 36, 3],
deep_stem=True,
avg_down=True,
name_prefix='resnetv1d_',
**kwargs)
return model
def resnet50_v1e(**kwargs):
"""Constructs a ResNetV1e-50 model.
Parameters
----------
dilated: bool, default False
        Whether to apply dilation strategy to ResNetV1b, yielding a stride-8 model.
norm_layer : object
Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`;
"""
model = ResNetV1b(
BottleneckV1b, [3, 4, 6, 3],
deep_stem=True,
avg_down=True,
stem_width=64,
name_prefix='resnetv1e_',
**kwargs)
return model
def resnet101_v1e(**kwargs):
"""Constructs a ResNetV1e-50 model.
Parameters
----------
dilated: bool, default False
        Whether to apply dilation strategy to ResNetV1b, yielding a stride-8 model.
norm_layer : object
Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`;
"""
model = ResNetV1b(
BottleneckV1b, [3, 4, 23, 3],
deep_stem=True,
avg_down=True,
stem_width=64,
name_prefix='resnetv1e_',
**kwargs)
return model
def resnet152_v1e(**kwargs):
"""Constructs a ResNetV1e-50 model.
Parameters
----------
dilated: bool, default False
        Whether to apply dilation strategy to ResNetV1b, yielding a stride-8 model.
norm_layer : object
Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`;
"""
model = ResNetV1b(
BottleneckV1b, [3, 8, 36, 3],
deep_stem=True,
avg_down=True,
stem_width=64,
name_prefix='resnetv1e_',
**kwargs)
return model
def resnet50_v1s(**kwargs):
"""Constructs a ResNetV1s-50 model.
Parameters
----------
dilated: bool, default False
        Whether to apply dilation strategy to ResNetV1b, yielding a stride-8 model.
norm_layer : object
Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`;
"""
model = ResNetV1b(
BottleneckV1b, [3, 4, 6, 3],
deep_stem=True,
stem_width=64,
name_prefix='resnetv1s_',
**kwargs)
return model
def resnet101_v1s(**kwargs):
"""Constructs a ResNetV1s-101 model.
Parameters
----------
dilated: bool, default False
        Whether to apply dilation strategy to ResNetV1b, yielding a stride-8 model.
norm_layer : object
Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`;
"""
model = ResNetV1b(
BottleneckV1b, [3, 4, 23, 3],
deep_stem=True,
stem_width=64,
name_prefix='resnetv1s_',
**kwargs)
return model
def resnet152_v1s(**kwargs):
"""Constructs a ResNetV1s-152 model.
Parameters
----------
dilated: bool, default False
        Whether to apply dilation strategy to ResNetV1b, yielding a stride-8 model.
norm_layer : object
Normalization layer used in backbone network (default: :class:`mxnet.gluon.norm_layer`;
"""
model = ResNetV1b(
BottleneckV1b, [3, 8, 36, 3],
deep_stem=True,
stem_width=64,
name_prefix='resnetv1s_',
**kwargs)
return model
```
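The constructors above behave like ordinary subclassed Keras models. A construction sketch (assuming the `riptide` binary-layer dependencies resolve; `resnet18_v1b` from the file above is taken to be in scope, and the input size is illustrative only):
```python
import tensorflow as tf

# Smallest variant; `classes` and `dilated` mirror the docstrings above.
model = resnet18_v1b(classes=1000, dilated=False)
# Weights are created lazily on the first forward pass, e.g.:
# _ = model(tf.zeros([1, 224, 224, 3]))  # 224x224x3 is an assumed input size
```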
#### File: riptide/models/cifarnet.py
```python
import os
import tensorflow as tf
from riptide.binary import binary_layers as nn
#from tensorflow.keras.models import Sequential
from riptide.utils.sequential import forward_layer_list
class CifarNet(tf.keras.Model):
def __init__(self):
super(CifarNet, self).__init__()
self.conv1 = nn.NormalConv2D(
filters=32,
kernel_size=3,
strides=2,
padding='same',
activation='relu',
use_bias=False)
self.bn1 = nn.BatchNormalization()
self.conv2 = nn.Conv2D(
filters=16,
kernel_size=1,
strides=1,
padding='same',
activation='relu',
use_bias=False)
self.bn2 = nn.BatchNormalization()
self.conv3 = nn.Conv2D(
filters=64,
kernel_size=3,
strides=2,
padding='same',
activation='relu',
use_bias=False)
self.bn3 = nn.BatchNormalization()
self.conv4 = nn.Conv2D(
filters=32,
kernel_size=1,
strides=1,
padding='same',
activation='relu',
use_bias=False)
self.bn4 = nn.BatchNormalization()
self.conv5 = nn.Conv2D(
filters=128,
kernel_size=3,
strides=1,
padding='same',
activation='relu',
use_bias=False)
self.bn5 = nn.BatchNormalization()
self.conv6 = nn.Conv2D(
filters=64,
kernel_size=1,
strides=1,
padding='same',
activation='relu',
use_bias=False)
self.bn6 = nn.BatchNormalization()
self.conv7 = nn.Conv2D(
filters=32,
kernel_size=3,
strides=1,
padding='same',
activation='relu',
use_bias=False)
self.bn7 = nn.BatchNormalization()
self.global_pooling = nn.GlobalAveragePooling2D()
self.dense = nn.NormalDense(10, use_bias=False)
def call(self, inputs, training=None):
with tf.name_scope('normal'):
            x = self.conv1(inputs)
x = self.bn1(x, training=training)
x = self.conv2(x)
x = self.bn2(x, training=training)
x = self.conv3(x)
x = self.bn3(x, training=training)
x = self.conv4(x)
x = self.bn4(x, training=training)
x = self.conv5(x)
x = self.bn5(x, training=training)
x = self.conv6(x)
x = self.bn6(x, training=training)
x = self.conv7(x)
        x = self.bn7(x, training=training)
x = self.global_pooling(x)
with tf.name_scope('normal'):
x = self.dense(x)
return x
```
#### File: riptide/models/darknet.py
```python
import os
import tensorflow as tf
from riptide.binary import binary_layers as nn
#from tensorflow.keras.models import Sequential
from riptide.utils.sequential import forward_layer_list
class DarkNet(tf.keras.Model):
def __init__(self):
super(DarkNet, self).__init__()
self.conv1 = nn.NormalConv2D(
filters=16,
kernel_size=3,
strides=1,
padding='same',
activation='relu',
use_bias=False)
self.bn1 = nn.BatchNormalization()
self.mxp1 = nn.MaxPool2D(pool_size=2, strides=2)
self.conv2 = nn.Conv2D(
filters=32,
kernel_size=3,
strides=1,
padding='same',
activation='relu',
use_bias=False)
self.bn2 = nn.BatchNormalization()
self.mxp2 = nn.MaxPool2D(pool_size=2, strides=2)
self.conv3 = nn.Conv2D(
filters=64,
kernel_size=3,
strides=1,
padding='same',
activation='relu',
use_bias=False)
self.bn3 = nn.BatchNormalization()
self.mxp3 = nn.MaxPool2D(pool_size=2, strides=2)
self.conv4 = nn.Conv2D(
filters=128,
kernel_size=3,
strides=1,
padding='same',
activation='relu',
use_bias=False)
self.bn4 = nn.BatchNormalization()
self.mxp4 = nn.MaxPool2D(pool_size=2, strides=2)
self.conv5 = nn.Conv2D(
filters=256,
kernel_size=3,
strides=1,
padding='same',
activation='relu',
use_bias=False)
self.bn5 = nn.BatchNormalization()
self.mxp5 = nn.MaxPool2D(pool_size=2, strides=2)
self.conv6 = nn.Conv2D(
filters=512,
kernel_size=3,
strides=1,
padding='same',
activation='relu',
use_bias=False)
self.bn6 = nn.BatchNormalization()
self.mxp6 = nn.MaxPool2D(pool_size=2, strides=2)
self.conv7 = nn.Conv2D(
filters=1024,
kernel_size=3,
strides=1,
padding='same',
activation='relu',
use_bias=False)
self.bn7 = nn.BatchNormalization()
self.avgpool = nn.GlobalAveragePooling2D()
self.output_layer = nn.NormalDense(1000, use_bias=False)
def call(self, inputs, training=None):
with tf.name_scope('unbinarized'):
x = self.conv1(inputs)
x = self.bn1(x, training=training)
x = self.mxp1(x)
x = self.conv2(x)
x = self.bn2(x, training=training)
x = self.mxp2(x)
x = self.conv3(x)
x = self.bn3(x, training=training)
x = self.mxp3(x)
x = self.conv4(x)
x = self.bn4(x, training=training)
x = self.mxp4(x)
x = self.conv5(x)
x = self.bn5(x, training=training)
x = self.mxp5(x)
x = self.conv6(x)
x = self.bn6(x, training=training)
x = self.mxp6(x)
x = self.conv7(x)
x = self.bn7(x, training=training)
x = self.avgpool(x)
with tf.name_scope('unbinarized'):
x = self.output_layer(x)
return x
```
#### File: riptide/models/squeezenet_normal.py
```python
import tensorflow as tf
import tensorflow.keras.layers as nn
bnmomemtum=0.9
class SqueezeNet(tf.keras.Model):
def __init__(self, classes=1000):
super(SqueezeNet, self).__init__()
self.classes = classes
self.c0 = tf.keras.layers.Conv2D(kernel_size=7, strides=2, filters=96, padding='same', use_bias=True, activation='relu')
self.b0 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum)
self.mp0 = tf.keras.layers.MaxPooling2D(pool_size=2)
# Fire 1
self.f1c1 = tf.keras.layers.Conv2D(filters=32, kernel_size=1, activation='relu', padding='same')
self.f1b1 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum)
self.f1c2 = tf.keras.layers.Conv2D(filters=64, kernel_size=1, activation='relu', padding='same')
self.f1b2 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum)
self.f1c3 = tf.keras.layers.Conv2D(filters=64, kernel_size=3, activation='relu', padding='same')
self.f1b3 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum)
self.f1concat = tf.keras.layers.Concatenate(axis=-1)
# Fire 2
self.f2c1 = tf.keras.layers.Conv2D(filters=32, kernel_size=1, activation='relu', padding='same')
self.f2b1 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum)
self.f2c2 = tf.keras.layers.Conv2D(filters=64, kernel_size=1, activation='relu', padding='same')
self.f2b2 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum)
self.f2c3 = tf.keras.layers.Conv2D(filters=64, kernel_size=3, activation='relu', padding='same')
self.f2b3 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum)
self.f2concat = tf.keras.layers.Concatenate(axis=-1)
# Fire 3
self.f3c1 = tf.keras.layers.Conv2D(filters=32, kernel_size=1, activation='relu', padding='same')
self.f3b1 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum)
self.f3c2 = tf.keras.layers.Conv2D(filters=128, kernel_size=1, activation='relu', padding='same')
self.f3b2 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum)
self.f3c3 = tf.keras.layers.Conv2D(filters=128, kernel_size=3, activation='relu', padding='same')
self.f3b3 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum)
self.f3concat = tf.keras.layers.Concatenate(axis=-1)
self.mp3 = tf.keras.layers.MaxPooling2D(pool_size=2)
# Fire 4
self.f4c1 = tf.keras.layers.Conv2D(filters=32, kernel_size=1, activation='relu', padding='same')
self.f4b1 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum)
self.f4c2 = tf.keras.layers.Conv2D(filters=128, kernel_size=1, activation='relu', padding='same')
self.f4b2 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum)
self.f4c3 = tf.keras.layers.Conv2D(filters=128, kernel_size=3, activation='relu', padding='same')
self.f4b3 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum)
self.f4concat = tf.keras.layers.Concatenate(axis=-1)
# Fire 5
self.f5c1 = tf.keras.layers.Conv2D(filters=64, kernel_size=1, activation='relu', padding='same')
self.f5b1 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum)
self.f5c2 = tf.keras.layers.Conv2D(filters=192, kernel_size=1, activation='relu', padding='same')
self.f5b2 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum)
self.f5c3 = tf.keras.layers.Conv2D(filters=192, kernel_size=3, activation='relu', padding='same')
self.f5b3 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum)
self.f5concat = tf.keras.layers.Concatenate(axis=-1)
# Fire 6
self.f6c1 = tf.keras.layers.Conv2D(filters=64, kernel_size=1, activation='relu', padding='same')
self.f6b1 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum)
self.f6c2 = tf.keras.layers.Conv2D(filters=192, kernel_size=1, activation='relu', padding='same')
self.f6b2 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum)
self.f6c3 = tf.keras.layers.Conv2D(filters=192, kernel_size=3, activation='relu', padding='same')
self.f6b3 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum)
self.f6concat = tf.keras.layers.Concatenate(axis=-1)
# Fire 7
self.f7c1 = tf.keras.layers.Conv2D(filters=64, kernel_size=1, activation='relu', padding='same')
self.f7b1 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum)
self.f7c2 = tf.keras.layers.Conv2D(filters=256, kernel_size=1, activation='relu', padding='same')
self.f7b2 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum)
self.f7c3 = tf.keras.layers.Conv2D(filters=256, kernel_size=3, activation='relu', padding='same')
self.f7b3 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum)
self.f7concat = tf.keras.layers.Concatenate(axis=-1)
self.mp7 = tf.keras.layers.MaxPooling2D(pool_size=2)
# Fire 8
self.f8c1 = tf.keras.layers.Conv2D(filters=64, kernel_size=1, activation='relu', padding='same')
self.f8b1 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum)
self.f8c2 = tf.keras.layers.Conv2D(filters=256, kernel_size=1, activation='relu', padding='same')
self.f8b2 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum)
self.f8c3 = tf.keras.layers.Conv2D(filters=256, kernel_size=3, activation='relu', padding='same')
self.f8b3 = tf.keras.layers.BatchNormalization(momentum=bnmomemtum)
self.f8concat = tf.keras.layers.Concatenate(axis=-1)
# Output
self.avgpool = tf.keras.layers.GlobalAveragePooling2D()
        self.classifier = tf.keras.layers.Dense(self.classes, activation='softmax')
def call(self, x, training=None):
y = self.c0(x)
y = self.b0(y, training=training)
y = self.mp0(y)
# Fire 1
y = self.f1c1(y)
y = self.f1b1(y, training=training)
y1x = self.f1c2(y)
y1x = self.f1b2(y1x, training=training)
y3x = self.f1c3(y)
y3x = self.f1b3(y3x, training=training)
y = self.f1concat([y1x, y3x])
# Fire 2
y = self.f2c1(y)
y = self.f2b1(y, training=training)
y1x = self.f2c2(y)
y1x = self.f2b2(y1x, training=training)
y3x = self.f2c3(y)
y3x = self.f2b3(y3x, training=training)
y = self.f2concat([y1x, y3x])
# Fire 3
y = self.f3c1(y)
y = self.f3b1(y, training=training)
y1x = self.f3c2(y)
y1x = self.f3b2(y1x, training=training)
y3x = self.f3c3(y)
y3x = self.f3b3(y3x, training=training)
y = self.f3concat([y1x, y3x])
y = self.mp3(y)
# Fire 4
y = self.f4c1(y)
y = self.f4b1(y, training=training)
y1x = self.f4c2(y)
y1x = self.f4b2(y1x, training=training)
y3x = self.f4c3(y)
y3x = self.f4b3(y3x, training=training)
y = self.f4concat([y1x, y3x])
# Fire 5
y = self.f5c1(y)
y = self.f5b1(y, training=training)
y1x = self.f5c2(y)
y1x = self.f5b2(y1x, training=training)
y3x = self.f5c3(y)
y3x = self.f5b3(y3x, training=training)
y = self.f5concat([y1x, y3x])
# Fire 6
y = self.f6c1(y)
y = self.f6b1(y, training=training)
y1x = self.f6c2(y)
y1x = self.f6b2(y1x, training=training)
y3x = self.f6c3(y)
y3x = self.f6b3(y3x, training=training)
y = self.f6concat([y1x, y3x])
# Fire 7
y = self.f7c1(y)
y = self.f7b1(y, training=training)
y1x = self.f7c2(y)
y1x = self.f7b2(y1x, training=training)
y3x = self.f7c3(y)
y3x = self.f7b3(y3x, training=training)
y = self.f7concat([y1x, y3x])
y = self.mp7(y)
# Fire 8
y = self.f8c1(y)
y = self.f8b1(y, training=training)
y1x = self.f8c2(y)
y1x = self.f8b2(y1x, training=training)
y3x = self.f8c3(y)
y3x = self.f8b3(y3x, training=training)
y = self.f8concat([y1x, y3x])
y = self.avgpool(y)
y = self.classifier(y)
return y
```
#### File: riptide/models/vgg11.py
```python
import os
import tensorflow as tf
from riptide.binary import binary_layers as nn
#from tensorflow.keras.models import Sequential
from riptide.utils.sequential import forward_layer_list
class vgg11(tf.keras.Model):
def __init__(self, classes=1000):
super(vgg11, self).__init__()
# Set up configurable maxpool or stride dimension reduction.
self.scope = nn.Config.current
use_maxpool = self.scope.use_maxpool
if use_maxpool:
reduce_stride = 1
else:
reduce_stride = 2
self.conv1 = nn.NormalConv2D(
filters=64,
kernel_size=3,
strides=1,
padding='same',
activation='relu')
self.bn1 = nn.NormalBatchNormalization()
self.pool1 = nn.NormalMaxPool2D(pool_size=2, strides=2)
self.quantize = nn.EnterInteger(1.0)
self.conv2 = nn.BinaryConv2D(
filters=128,
kernel_size=3,
strides=reduce_stride,
padding='same',
activation='relu',
use_bias=False)
self.bn2 = nn.BatchNormalization()
self.pool2 = nn.MaxPool2D(pool_size=2, strides=2)
self.conv3 = nn.BinaryConv2D(
filters=256,
kernel_size=3,
strides=1,
padding='same',
activation='relu',
use_bias=False)
self.bn3 = nn.BatchNormalization()
self.conv4 = nn.BinaryConv2D(
filters=256,
kernel_size=3,
strides=reduce_stride,
padding='same',
activation='relu',
use_bias=False)
self.bn4 = nn.BatchNormalization()
self.pool3 = nn.MaxPool2D(pool_size=2, strides=2)
self.conv5 = nn.BinaryConv2D(
filters=512,
kernel_size=3,
strides=1,
padding='same',
activation='relu',
use_bias=False)
self.bn5 = nn.BatchNormalization()
self.conv6 = nn.BinaryConv2D(
filters=512,
kernel_size=3,
strides=reduce_stride,
padding='same',
activation='relu',
use_bias=False)
self.bn6 = nn.BatchNormalization()
self.pool4 = nn.MaxPool2D(pool_size=2, strides=2)
self.conv7 = nn.BinaryConv2D(
filters=512,
kernel_size=3,
strides=1,
padding='same',
activation='relu',
use_bias=False)
self.bn7 = nn.BatchNormalization()
self.conv8 = nn.BinaryConv2D(
filters=512,
kernel_size=3,
strides=reduce_stride,
padding='same',
activation='relu',
use_bias=False)
self.bn8 = nn.BatchNormalization()
self.pool5 = nn.MaxPool2D(pool_size=2, strides=2)
self.avgpool = nn.GlobalAveragePooling2D()
self.classifier = nn.BinaryDense(classes, use_bias=False)
self.scalu = nn.Scalu()
self.softmax = nn.Activation('softmax')
def call(self, inputs, training=None, debug=False):
layers = []
with tf.name_scope('unbinarized'):
x = self.conv1(inputs)
layers.append(x)
x = self.bn1(x, training=training)
layers.append(x)
x = self.pool1(x)
layers.append(x)
# When running in binary, need to reduce spread of normal distribution
x = self.quantize(x)
layers.append(x)
# Continue with binary layers.
x = self.conv2(x)
layers.append(x)
x = self.bn2(
x, conv_weights=self.conv2.weights[0].value(), training=training)
layers.append(x)
x = self.pool2(x)
if self.scope.use_maxpool:
layers.append(x)
x = self.conv3(x)
layers.append(x)
x = self.bn3(
x, conv_weights=self.conv3.weights[0].value(), training=training)
layers.append(x)
x = self.conv4(x)
layers.append(x)
x = self.bn4(
x, conv_weights=self.conv4.weights[0].value(), training=training)
layers.append(x)
x = self.pool3(x)
if self.scope.use_maxpool:
layers.append(x)
x = self.conv5(x)
layers.append(x)
x = self.bn5(
x, conv_weights=self.conv5.weights[0].value(), training=training)
layers.append(x)
x = self.conv6(x)
layers.append(x)
x = self.bn6(
x, conv_weights=self.conv6.weights[0].value(), training=training)
layers.append(x)
x = self.pool4(x)
if self.scope.use_maxpool:
layers.append(x)
x = self.conv7(x)
layers.append(x)
x = self.bn7(
x, conv_weights=self.conv7.weights[0].value(), training=training)
layers.append(x)
x = self.conv8(x)
layers.append(x)
x = self.bn8(
x, conv_weights=self.conv8.weights[0].value(), training=training)
layers.append(x)
x = self.pool5(x)
if self.scope.use_maxpool:
layers.append(x)
x = self.avgpool(x)
layers.append(x)
#with tf.name_scope('unbinarized'):
x = self.classifier(x)
layers.append(x)
x = self.scalu(x)
layers.append(x)
x = self.softmax(x)
layers.append(x)
if debug:
return layers
else:
return x
```
#### File: riptide/testing/test_correctness.py
```python
import numpy as np
from riptide.binary.binary_layers import Config
from riptide.binary.float_to_binary import convert_model
from riptide.binary.binary_funcs import DQuantize, XQuantize, get_numpy
from end2end import verify_nnvm_vgg
from end2end.verify_nnvm_vgg import *
class CorrectnessTest(tf.test.TestCase):
def test_model(self):
# Verify script sets up model so just import from there.
# Run test input through network and get each layers output.
with graph.as_default():
test_input = np.ones(shape=[1, 224, 224, 3], dtype=np.float32)
test_tensor = tf.convert_to_tensor(test_input)
layers = model(test_tensor, training=False, debug=True)
# Convert layers to integer representation for comparison to
# fast implementation.
converted_layers = convert_model(model, layers)
# Check each layer versus the fast implementation TODO
for i, layer in enumerate(converted_layers):
if model.layers[i].name == 'conv2d':
nnvm_output = verify_nnvm_vgg.run(
test_input, stop_layer=model.layers[i].name)
layer_np = get_numpy(sess, layer)
correct = np.allclose(layer_np, nnvm_output, rtol=1e-3)
if not correct:
print("Mismatch on layer %d: %s" %
(i, model.layers[i].name))
elif 'shift_normalization' in model.layers[i].name:
nnvm_output = verify_nnvm_vgg.run(
test_input, stop_layer=model.layers[i].name)
layer_np = get_numpy(sess, converted_layers[i])
correct = np.allclose(layer_np, nnvm_output, rtol=1e-3)
if not correct:
print("Mismatch on layer %d: %s" %
(i, model.layers[i].name))
if __name__ == '__main__':
tf.test.main()
```
#### File: riptide/utils/learning_rate.py
```python
import tensorflow as tf
def learning_rate_with_smooth_decay(batch_size,
batch_denom,
decay_epochs,
decay_rate,
base_lr=0.1,
warmup=False,
staircase=False,
warmup_epochs=5,
num_images=1281167):
""" Get a learning rate the smoothly decays as training progresses.
Args:
batch_size: Number of samples processed per batch.
batch_denom: Base batch_size, used to scale down learning rate for smaller batches or scale up for large batches.
decay_epochs: Number of epochs to decay the learning rate by a factor of decay_rate.
decay_rate: Amount to decay learning rate each decay_epochs.
base_lr: Starting learning rate.
warmup: Run a 5 epoch warmup to the initial lr.
staircase: If True, learning decay is not smooth.
warmup_epochs: Number of epochs to increase the lr to the base_lr.
num_images: Number of images in the dataset.
"""
initial_learning_rate = base_lr * batch_size / batch_denom
steps_per_epoch = num_images / batch_size
def learning_rate_fn(global_step):
if warmup:
warmup_steps = int(steps_per_epoch * warmup_epochs)
start_step = global_step - warmup_steps
else:
start_step = global_step
lr = tf.train.exponential_decay(
initial_learning_rate,
start_step,
steps_per_epoch * decay_epochs,
decay_rate,
staircase=staircase)
if warmup:
warmup_lr = (initial_learning_rate * tf.cast(
global_step, tf.float32) / tf.cast(warmup_steps, tf.float32))
return tf.cond(global_step < warmup_steps, lambda: warmup_lr,
lambda: lr)
return lr
return learning_rate_fn
``` |
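A graph-mode usage sketch for the function above (illustrative hyperparameters; a TF 1.x-style API is assumed, since the implementation relies on `tf.train.exponential_decay`):
```python
import tensorflow as tf

lr_fn = learning_rate_with_smooth_decay(
    batch_size=256, batch_denom=256,
    decay_epochs=30, decay_rate=0.1,
    base_lr=0.1, warmup=True)

global_step = tf.train.get_or_create_global_step()
learning_rate = lr_fn(global_step)
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)
```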
{
"source": "jianshu93/parasail-sys-arm",
"score": 2
} |
#### File: parasail_c/util/codegen.py
```python
import copy
import os
import re
import sys
from isa import sse2
from isa import sse41
from isa import avx2
from isa import altivec
from isa import neon
keys = list(sse2.keys())
# gather templates
template_dir = "templates/"
template_filenames = [
"nw_diag.c",
"nw_scan.c",
"nw_striped.c",
"sg_diag.c",
"sg_scan.c",
"sg_striped.c",
"sw_diag.c",
"sw_scan.c",
"sw_striped.c",
"nw_stats_diag.c",
"nw_stats_scan.c",
"nw_stats_striped.c",
"sg_stats_diag.c",
"sg_stats_scan.c",
"sg_stats_striped.c",
"sw_stats_diag.c",
"sw_stats_scan.c",
"sw_stats_striped.c",
"nw_trace_diag.c",
"nw_trace_scan.c",
"nw_trace_striped.c",
"sg_trace_diag.c",
"sg_trace_scan.c",
"sg_trace_striped.c",
"sw_trace_diag.c",
"sw_trace_scan.c",
"sw_trace_striped.c",
]
special_templates = [
#"sg_diag_8.c",
"sw_diag_8.c",
"sw_stats_diag_8.c",
#"sg_trace_diag_8.c",
"sw_trace_diag_8.c",
]
bias_templates = [
"sw_striped_bias.c",
"sw_stats_striped_bias.c",
]
output_dir = "generated/"
if not os.path.exists(output_dir):
os.makedirs(output_dir)
def generate_H(params):
text = ""
if "striped" in params["NAME"]:
params["PVH_VAR"] = "pvHStore"
else:
params["PVH_VAR"] = "pvH"
params["SG_TMP"] = "-open-gap*(segNum*segLen+i)"
if "sg" in params["NAME"]:
params["SG_TMP"] = "s1_beg ? 0 : (-open-gap*(segNum*segLen+i))"
if "neon" in params["ISA"]:
text = """ /* initialize H */
{
%(INDEX)s index = 0;
for (i=0; i<segLen; ++i) {
%(INDEX)s segNum = 0;
%(VTYPE)s h;
for (segNum=0; segNum<segWidth; ++segNum) {
int64_t tmp = %(SG_TMP)s;
h.i%(WIDTH)s[segNum] = tmp < INT%(WIDTH)s_MIN ? INT%(WIDTH)s_MIN : tmp;
}
%(VSTORE)s(&%(PVH_VAR)s[index], h);
++index;
}
}""" % params
else:
text = """ /* initialize H */
{
%(INDEX)s index = 0;
for (i=0; i<segLen; ++i) {
%(INDEX)s segNum = 0;
%(VTYPE)s_%(WIDTH)s_t h;
for (segNum=0; segNum<segWidth; ++segNum) {
int64_t tmp = %(SG_TMP)s;
h.v[segNum] = tmp < INT%(WIDTH)s_MIN ? INT%(WIDTH)s_MIN : tmp;
}
%(VSTORE)s(&%(PVH_VAR)s[index], h.m);
++index;
}
}""" % params
params["INIT_H"] = text
return params
def generate_H_and_E(params):
text = ""
if "striped" in params["NAME"]:
params["PVH_VAR"] = "pvHStore"
else:
params["PVH_VAR"] = "pvH"
params["PVEA_STORE"] = ""
params["SG_TMP"] = "-open-gap*(segNum*segLen+i)"
if "sg" in params["NAME"]:
params["SG_TMP"] = "s1_beg ? 0 : (-open-gap*(segNum*segLen+i))"
if "striped" in params["NAME"] and "trace" in params["NAME"]:
if "neon" in params["ISA"]:
params["E_M_VAR"] = "e"
else:
params["E_M_VAR"] = "e.m"
params["PVEA_STORE"] = """
%(VSTORE)s(&pvEaStore[index], %(E_M_VAR)s);""" % params
if "neon" in params["ISA"]:
text = """ /* initialize H and E */
{
%(INDEX)s index = 0;
for (i=0; i<segLen; ++i) {
%(INDEX)s segNum = 0;
%(VTYPE)s h;
%(VTYPE)s e;
for (segNum=0; segNum<segWidth; ++segNum) {
int64_t tmp = %(SG_TMP)s;
h.i%(WIDTH)s[segNum] = tmp < INT%(WIDTH)s_MIN ? INT%(WIDTH)s_MIN : tmp;
tmp = tmp - open;
e.i%(WIDTH)s[segNum] = tmp < INT%(WIDTH)s_MIN ? INT%(WIDTH)s_MIN : tmp;
}
%(VSTORE)s(&%(PVH_VAR)s[index], h);
%(VSTORE)s(&pvE[index], e);%(PVEA_STORE)s
++index;
}
}""" % params
else:
text = """ /* initialize H and E */
{
%(INDEX)s index = 0;
for (i=0; i<segLen; ++i) {
%(INDEX)s segNum = 0;
%(VTYPE)s_%(WIDTH)s_t h;
%(VTYPE)s_%(WIDTH)s_t e;
for (segNum=0; segNum<segWidth; ++segNum) {
int64_t tmp = %(SG_TMP)s;
h.v[segNum] = tmp < INT%(WIDTH)s_MIN ? INT%(WIDTH)s_MIN : tmp;
tmp = tmp - open;
e.v[segNum] = tmp < INT%(WIDTH)s_MIN ? INT%(WIDTH)s_MIN : tmp;
}
%(VSTORE)s(&%(PVH_VAR)s[index], h.m);
%(VSTORE)s(&pvE[index], e.m);%(PVEA_STORE)s
++index;
}
}""" % params
params["INIT_H_AND_E"] = text
return params
def generate_printer(params):
text = ""
trace = ""
bias = ""
rowcol = ""
bias_rowcol = ""
if "striped" in params["NAME"] or "scan" in params["NAME"]:
for lane in range(params["LANES"]):
params["LANE"] = lane
if params["LANES"] / 10:
text += " array[1LL*(%(LANE)2d*seglen+t)*dlen + d] = (%(INT)s)%(VEXTRACT)s(vH, %(LANE)2d);\n" % params
trace += " array[1LL*(%(LANE)2d*seglen+t)*dlen + d] = (int8_t)%(VEXTRACT)s(vH, %(LANE)2d);\n" % params
else:
text += " array[1LL*(%(LANE)s*seglen+t)*dlen + d] = (%(INT)s)%(VEXTRACT)s(vH, %(LANE)s);\n" % params
trace += " array[1LL*(%(LANE)s*seglen+t)*dlen + d] = (int8_t)%(VEXTRACT)s(vH, %(LANE)s);\n" % params
for lane in range(params["LANES"]):
params["LANE"] = lane
if params["LANES"] / 10:
rowcol += " col[%(LANE)2d*seglen+t] = (%(INT)s)%(VEXTRACT)s(vH, %(LANE)2d);\n" % params
else:
rowcol += " col[%(LANE)s*seglen+t] = (%(INT)s)%(VEXTRACT)s(vH, %(LANE)s);\n" % params
for lane in range(params["LANES"]):
params["LANE"] = lane
if params["LANES"] / 10:
bias += " array[1LL*(%(LANE)2d*seglen+t)*dlen + d] = (%(INT)s)%(VEXTRACT)s(vH, %(LANE)2d) - bias;\n" % params
else:
bias += " array[1LL*(%(LANE)s*seglen+t)*dlen + d] = (%(INT)s)%(VEXTRACT)s(vH, %(LANE)s) - bias;\n" % params
for lane in range(params["LANES"]):
params["LANE"] = lane
if params["LANES"] / 10:
bias_rowcol += " col[%(LANE)2d*seglen+t] = (%(INT)s)%(VEXTRACT)s(vH, %(LANE)2d) - bias;\n" % params
else:
bias_rowcol += " col[%(LANE)s*seglen+t] = (%(INT)s)%(VEXTRACT)s(vH, %(LANE)s) - bias;\n" % params
elif "diag" in params["NAME"]:
for lane in range(params["LANES"]):
params["LANE"] = lane
params["LANE_END"] = params["LANES"]-lane-1
text += """
if (0 <= i+%(LANE)s && i+%(LANE)s < s1Len && 0 <= j-%(LANE)s && j-%(LANE)s < s2Len) {
array[1LL*(i+%(LANE)s)*s2Len + (j-%(LANE)s)] = (%(INT)s)%(VEXTRACT)s(vWH, %(LANE_END)s);
}\n"""[1:] % params
trace += """
if (0 <= i+%(LANE)s && i+%(LANE)s < s1Len && 0 <= j-%(LANE)s && j-%(LANE)s < s2Len) {
array[1LL*(i+%(LANE)s)*s2Len + (j-%(LANE)s)] = (int8_t)%(VEXTRACT)s(vWH, %(LANE_END)s);
}\n"""[1:] % params
for lane in range(params["LANES"]):
params["LANE"] = lane
params["LANE_END"] = params["LANES"]-lane-1
rowcol += """
if (i+%(LANE)s == s1Len-1 && 0 <= j-%(LANE)s && j-%(LANE)s < s2Len) {
row[j-%(LANE)s] = (%(INT)s)%(VEXTRACT)s(vWH, %(LANE_END)s);
}\n"""[1:] % params
rowcol += """
if (j-%(LANE)s == s2Len-1 && 0 <= i+%(LANE)s && i+%(LANE)s < s1Len) {
col[(i+%(LANE)s)] = (%(INT)s)%(VEXTRACT)s(vWH, %(LANE_END)s);
}\n"""[1:] % params
else:
print("bad printer name")
sys.exit(1)
params["PRINTER"] = text[:-1] # remove last newline
params["PRINTER_TRACE"] = trace[:-1] # remove last newline
params["PRINTER_BIAS"] = bias[:-1] # remove last newline
params["PRINTER_ROWCOL"] = rowcol[:-1] # remove last newline
params["PRINTER_BIAS_ROWCOL"] = bias_rowcol[:-1] # remove last newline
return params
def generate_saturation_check(params):
width = params["WIDTH"]
# by commenting this out, all bit widths get sat checks
#if width == 8:
if True:
params["SATURATION_CHECK_DECL"] = """
%(INT)s NEG_LIMIT = 0;
%(INT)s POS_LIMIT = 0;
%(INT)s score = 0;
%(VTYPE)s vNegLimit;
%(VTYPE)s vPosLimit;
%(VTYPE)s vSaturationCheckMin;
%(VTYPE)s vSaturationCheckMax;""".strip() % params
params["SATURATION_CHECK_INIT"] = """
NEG_LIMIT = (-open < matrix->min ? INT%(WIDTH)s_MIN + open : INT%(WIDTH)s_MIN - matrix->min) + 1;
POS_LIMIT = INT%(WIDTH)s_MAX - matrix->max - 1;
score = NEG_LIMIT;
vNegLimit = %(VSET1)s(NEG_LIMIT);
vPosLimit = %(VSET1)s(POS_LIMIT);
vSaturationCheckMin = vPosLimit;
vSaturationCheckMax = vNegLimit;""".strip() % params
if "diag" in params["NAME"]:
params["SATURATION_CHECK_MID"] = """
/* check for saturation */
{
vSaturationCheckMax = %(VMAX)s(vSaturationCheckMax, vWH);
vSaturationCheckMin = %(VMIN)s(vSaturationCheckMin, vWH);
}""".strip() % params
elif "scan" in params["NAME"]:
params["SATURATION_CHECK_MID"] = """
/* check for saturation */
{
vSaturationCheckMax = %(VMAX)s(vSaturationCheckMax, vH);
vSaturationCheckMin = %(VMIN)s(vSaturationCheckMin, vH);
}""".strip() % params
elif "striped" in params["NAME"]:
params["SATURATION_CHECK_MID"] = """
/* check for saturation */
{
vSaturationCheckMax = %(VMAX)s(vSaturationCheckMax, vH);
vSaturationCheckMin = %(VMIN)s(vSaturationCheckMin, vH);
vSaturationCheckMin = %(VMIN)s(vSaturationCheckMin, vE);
vSaturationCheckMin = %(VMIN)s(vSaturationCheckMin, vF);
}""".strip() % params
else:
params["SATURATION_CHECK_MID"] = """
/* check for saturation */
{
vSaturationCheckMax = %(VMAX)s(vSaturationCheckMax, vH);
vSaturationCheckMin = %(VMIN)s(vSaturationCheckMin, vH);
}""".strip() % params
params["SATURATION_CHECK_FINAL"] = """
if (%(VMOVEMASK)s(%(VOR)s(
%(VCMPLT)s(vSaturationCheckMin, vNegLimit),
%(VCMPGT)s(vSaturationCheckMax, vPosLimit)))) {
result->flag |= PARASAIL_FLAG_SATURATED;
score = 0;
end_query = 0;
end_ref = 0;
}""".strip() % params
params["STATS_SATURATION_CHECK_DECL"] = """
%(INT)s NEG_LIMIT = 0;
%(INT)s POS_LIMIT = 0;
%(INT)s score = 0;
%(VTYPE)s vNegLimit;
%(VTYPE)s vPosLimit;
%(VTYPE)s vSaturationCheckMin;
%(VTYPE)s vSaturationCheckMax;""".strip() % params
params["STATS_SATURATION_CHECK_INIT"] = """
NEG_LIMIT = (-open < matrix->min ? INT%(WIDTH)s_MIN + open : INT%(WIDTH)s_MIN - matrix->min) + 1;
POS_LIMIT = INT%(WIDTH)s_MAX - matrix->max - 1;
score = NEG_LIMIT;
vNegLimit = %(VSET1)s(NEG_LIMIT);
vPosLimit = %(VSET1)s(POS_LIMIT);
vSaturationCheckMin = vPosLimit;
vSaturationCheckMax = vNegLimit;""".strip() % params
if "diag" in params["NAME"]:
params["STATS_SATURATION_CHECK_MID"] = """
/* check for saturation */
{
vSaturationCheckMax = %(VMAX)s(vSaturationCheckMax, vWH);
vSaturationCheckMin = %(VMIN)s(vSaturationCheckMin, vWH);
vSaturationCheckMax = %(VMAX)s(vSaturationCheckMax, vWM);
vSaturationCheckMax = %(VMAX)s(vSaturationCheckMax, vWS);
vSaturationCheckMax = %(VMAX)s(vSaturationCheckMax, vWL);
}""".strip() % params
elif "scan" in params["NAME"]:
params["STATS_SATURATION_CHECK_MID1"] = """
/* check for saturation */
{
vSaturationCheckMax = %(VMAX)s(vSaturationCheckMax, vH);
vSaturationCheckMin = %(VMIN)s(vSaturationCheckMin, vH);
}""".strip() % params
params["STATS_SATURATION_CHECK_MID2"] = """
/* check for saturation */
{
vSaturationCheckMax = %(VMAX)s(vSaturationCheckMax, vM);
vSaturationCheckMax = %(VMAX)s(vSaturationCheckMax, vS);
vSaturationCheckMax = %(VMAX)s(vSaturationCheckMax, vL);
}""".strip() % params
else:
params["STATS_SATURATION_CHECK_MID"] = """
/* check for saturation */
{
vSaturationCheckMax = %(VMAX)s(vSaturationCheckMax, vH);
vSaturationCheckMin = %(VMIN)s(vSaturationCheckMin, vH);
vSaturationCheckMax = %(VMAX)s(vSaturationCheckMax, vHM);
vSaturationCheckMax = %(VMAX)s(vSaturationCheckMax, vHS);
vSaturationCheckMax = %(VMAX)s(vSaturationCheckMax, vHL);
}""".strip() % params
params["STATS_SATURATION_CHECK_FINAL"] = """
if (%(VMOVEMASK)s(%(VOR)s(
%(VCMPLT)s(vSaturationCheckMin, vNegLimit),
%(VCMPGT)s(vSaturationCheckMax, vPosLimit)))) {
result->flag |= PARASAIL_FLAG_SATURATED;
score = 0;
matches = 0;
similar = 0;
length = 0;
end_query = 0;
end_ref = 0;
}""".strip() % params
if width == 8:
params["NEG_INF"] = "INT8_MIN"
params["VADD"] = params["VADDSx8"]
params["VSUB"] = params["VSUBSx8"]
elif width == 16:
params["NEG_INF"] = "INT16_MIN"
params["VADD"] = params["VADDSx16"]
params["VSUB"] = params["VSUBSx16"]
if "sw" in params["NAME"] and "striped" in params["NAME"]:
pass
else:
for p in ["VMAX", "VMIN", "VCMPLT", "VCMPGT"]:
if (params[p].endswith("_rpl")
and params[p] not in params["FIXES"]):
params["FIXES"] += params[params[p]]
else:
params["SATURATION_CHECK_DECL"] = ""
params["SATURATION_CHECK_INIT"] = ""
params["SATURATION_CHECK_MID"] = ""
params["SATURATION_CHECK_FINAL"] = ""
params["STATS_SATURATION_CHECK_DECL"] = ""
params["STATS_SATURATION_CHECK_INIT"] = ""
params["STATS_SATURATION_CHECK_MID"] = ""
params["STATS_SATURATION_CHECK_MID1"] = ""
params["STATS_SATURATION_CHECK_MID2"] = ""
params["STATS_SATURATION_CHECK_FINAL"] = ""
return params
def generated_params_diag(params):
lanes = params["LANES"]
params["DIAG_I"] = ",".join(["%d"%i for i in range(lanes)])
params["DIAG_ILO"] = ",".join(["%d"%i for i in range(lanes//2,lanes)])
params["DIAG_IHI"] = ",".join(["%d"%i for i in range(lanes//2)])
params["DIAG_J"] = ",".join(["%d"%-i for i in range(lanes)])
params["DIAG_JLO"] = ",".join(["%d"%-i for i in range(lanes//2,lanes)])
params["DIAG_JHI"] = ",".join(["%d"%-i for i in range(lanes//2)])
params["DIAG_IBoundary"] = " ".join(
["-open-%d*gap,\n"%(i)
for i in range(lanes)])[:-2]
params["DIAG_VS1"] = " ".join(
["s1[i+%d],\n"%i
for i in range(lanes)])[:-2]
params["DIAG_MATROW_DECL"] = " ".join(
["const int * const restrict matrow%d = &matrix->matrix[matrix->size * ((matrix->type == PARASAIL_MATRIX_TYPE_SQUARE) ? s1[i+%d] : ((i+%d >= s1Len) ? s1Len-1 : i+%d))];\n"%(i,i,i,i)
for i in range(lanes)])[:-1]
params["DIAG_MATROW_USE"] = " ".join(
["matrow%d[s2[j-%d]],\n"%(i,i)
for i in range(lanes)])[:-2]
return params
def generated_params_striped(params):
params["STRIPED_INSERT_MASK"] = "0,"*(params["LANES"]-1)+"1"
params["POSITION_MASK"] = ",".join([str(i) for i in range(params["LANES"])])
return params
def generated_params_scan(params):
lanes = params["LANES"]
if params["LANES"] / 10:
params["STATS_SCAN_VFT"] = (" "*3).join(
["tmp.v[%2d] = MAX(tmp.v[%2d]-segLen*gap, tmp.v[%2d]);\n"%(i,i-1,i)
for i in range(1,lanes)])[:-1]
params["STATS_SCAN_UMP"] = (" "*4).join(
["uMp.v[%2d] = uC.v[%2d] ? uMp.v[%2d] : uMp.v[%2d];\n"%(i,i,i-1,i)
for i in range(1,lanes)])[:-1]
params["STATS_SCAN_USP"] = (" "*4).join(
["uSp.v[%2d] = uC.v[%2d] ? uSp.v[%2d] : uSp.v[%2d];\n"%(i,i,i-1,i)
for i in range(1,lanes)])[:-1]
params["STATS_SCAN_ULP"] = (" "*4).join(
["uLp.v[%2d] = uC.v[%2d] ? uLp.v[%2d] + uLp.v[%2d] : uLp.v[%2d];\n"%(
i,i,i,i-1,i)
for i in range(1,lanes)])[:-1]
else:
params["STATS_SCAN_VFT"] = (" "*3).join(
["tmp.v[%d] = MAX(tmp.v[%d]-segLen*gap, tmp.v[%d]);\n"%(i,i-1,i)
for i in range(1,lanes)])[:-1]
params["STATS_SCAN_UMP"] = (" "*4).join(
["uMp.v[%d] = uC.v[%d] ? uMp.v[%d] : uMp.v[%d];\n"%(i,i,i-1,i)
for i in range(1,lanes)])[:-1]
params["STATS_SCAN_USP"] = (" "*4).join(
["uSp.v[%d] = uC.v[%d] ? uSp.v[%d] : uSp.v[%d];\n"%(i,i,i-1,i)
for i in range(1,lanes)])[:-1]
params["STATS_SCAN_ULP"] = (" "*4).join(
["uLp.v[%d] = uC.v[%d] ? uLp.v[%d] + uLp.v[%d] : uLp.v[%d];\n"%(
i,i,i,i-1,i)
for i in range(1,lanes)])[:-1]
params["STATS_SCAN_INSERT_MASK"] = "0,"*(params["LANES"]-1)+"1"
params["SCAN_INSERT_MASK"] = "1"+",0"*(params["LANES"]-1)
params["SCAN_NEG_INF_FRONT"] = "0,"*(params["LANES"]-1)+"NEG_LIMIT"
params["POSITION_MASK"] = ",".join([str(i) for i in range(params["LANES"])])
if "avx" in params["ISA"]:
params["SCAN_AVX2_BLENDV_FIX"] = """
/* clang optimization broke blendv in this code */
#if defined(__clang__) && defined(__OPTIMIZE__)
#define _mm256_blendv_epi8 _mm256_blendv_epi8_rpl
static inline __m256i _mm256_blendv_epi8_rpl(__m256i a, __m256i b, __m256i mask) {
a = _mm256_andnot_si256(mask, a);
a = _mm256_or_si256(a, _mm256_and_si256(mask, b));
return a;
}
#endif
"""
else:
params["SCAN_AVX2_BLENDV_FIX"] = ""
return params
def generated_params(template, params):
# some params are generated from given params
bits = params["BITS"]
width = params["WIDTH"]
params["INDEX"] = "int32_t"
params["ALIGNMENT"] = bits//8
params["BYTES"] = width//8
params["LANES"] = bits//width
params["LAST_POS"] = params["LANES"]-1
params["INT"] = "int%(WIDTH)s_t" % params
params["NEG_INF"] = "(INT%(WIDTH)s_MIN/(%(INT)s)(2))" % params
if "diag" in params["NAME"]:
params = generated_params_diag(params)
elif "striped" in params["NAME"]:
params = generated_params_striped(params)
elif "scan" in params["NAME"]:
params = generated_params_scan(params)
# select appropriate vector functions for given width
suffix = "x%s" % width
for key in keys:
if key.endswith(suffix):
new_key = key.split('x')[0]
params[new_key] = params[key]
fixes = ""
template_params = re.findall(r'%\([A-Za-z0-9]+\)s', template)
for param in params:
wrapped_param = r'%%(%s)s'%param
if (wrapped_param in template_params
and str(params[param]).endswith("_rpl")):
fixes += params[params[param]]
if ("trace" in params["NAME"]
and ("scan" in params["NAME"] or "striped" in params["NAME"])
and "nw" not in params["NAME"]
and "sg" not in params["NAME"]):
if params["VEXTRACT"].endswith("_rpl"):
fixes = params[params["VEXTRACT"]] + fixes
params["FIXES"] = fixes
params = generate_printer(params)
params = generate_saturation_check(params)
params = generate_H(params)
params = generate_H_and_E(params)
return params
for template_filename in template_filenames:
template = open(template_dir+template_filename).read()
for width in [64,32,16,8]:
for isa in [sse2,sse41,avx2,altivec,neon]:
params = copy.deepcopy(isa)
params["WIDTH"] = width
prefix = template_filename[:-2]
prefix_prof = prefix + "_profile"
parts = prefix.split('_')
table_prefix = ""
rowcol_prefix = ""
trace_prefix = ""
suffix_prefix = ""
suffix_prefix_prof = ""
if 'sg' in parts[0]:
parts[0] = parts[0].replace('sg', 'sg_flags')
prefix = prefix.replace('sg', 'sg_flags')
prefix_prof = prefix_prof.replace('sg', 'sg_flags')
if len(parts) == 2:
table_prefix = "%s_table_%s" % (parts[0], parts[1])
rowcol_prefix = "%s_rowcol_%s" % (parts[0], parts[1])
trace_prefix = "%s_trace_%s" % (parts[0], parts[1])
suffix_prefix = parts[1]
if len(parts) == 3:
table_prefix = "%s_%s_table_%s" % (parts[0], parts[1], parts[2])
rowcol_prefix = "%s_%s_rowcol_%s" % (parts[0], parts[1], parts[2])
trace_prefix = "%s_trace_%s" % (parts[0], parts[2])
suffix_prefix = parts[2]
table_prefix_prof = table_prefix + "_profile"
rowcol_prefix_prof = rowcol_prefix + "_profile"
trace_prefix_prof = trace_prefix + "_profile"
suffix_prefix_prof = suffix_prefix + "_profile"
suffix = "_%s_%s%s_%s_%s" % (suffix_prefix,
isa["ISA"], isa["ISA_VERSION"], isa["BITS"], width)
suffix_prof = "_%s_%s%s_%s_%s" % (suffix_prefix_prof,
isa["ISA"], isa["ISA_VERSION"], isa["BITS"], width)
function_name = "%s_%s%s_%s_%s" % (prefix,
isa["ISA"], isa["ISA_VERSION"], isa["BITS"], width)
function_table_name = "%s_%s%s_%s_%s" % (table_prefix,
isa["ISA"], isa["ISA_VERSION"], isa["BITS"], width)
function_rowcol_name = "%s_%s%s_%s_%s" % (rowcol_prefix,
isa["ISA"], isa["ISA_VERSION"], isa["BITS"], width)
function_trace_name = "%s_%s%s_%s_%s" % (trace_prefix,
isa["ISA"], isa["ISA_VERSION"], isa["BITS"], width)
function_pname = "%s_%s%s_%s_%s" % (prefix_prof,
isa["ISA"], isa["ISA_VERSION"], isa["BITS"], width)
function_table_pname = "%s_%s%s_%s_%s" % (table_prefix_prof,
isa["ISA"], isa["ISA_VERSION"], isa["BITS"], width)
function_rowcol_pname = "%s_%s%s_%s_%s" % (rowcol_prefix_prof,
isa["ISA"], isa["ISA_VERSION"], isa["BITS"], width)
function_trace_pname = "%s_%s%s_%s_%s" % (trace_prefix_prof,
isa["ISA"], isa["ISA_VERSION"], isa["BITS"], width)
params["SUFFIX"] = suffix
params["SUFFIX_PROF"] = suffix_prof
params["NAME"] = "parasail_"+function_name
params["NAME_BASE"] = params["NAME"].replace("_stats", "")
params["NAME_TABLE"] = "parasail_"+function_table_name
params["NAME_ROWCOL"] = "parasail_"+function_rowcol_name
params["NAME_TRACE"] = "parasail_"+function_trace_name
params["PNAME"] = "parasail_"+function_pname
params["PNAME_BASE"] = params["PNAME"].replace("_stats", "")
params["PNAME_TABLE"] = "parasail_"+function_table_pname
params["PNAME_ROWCOL"] = "parasail_"+function_rowcol_pname
params["PNAME_TRACE"] = "parasail_"+function_trace_pname
params = generated_params(template, params)
if 'flags' in function_name:
function_name = function_name.replace('_flags', '')
output_filename = "%s%s.c" % (output_dir, function_name)
result = template % params
writer = open(output_filename, "w")
writer.write(template % params)
writer.write("\n")
writer.close()
# some templates have specializations for certain bit widths, e.g., 8
for template_filename in special_templates:
template = open(template_dir+template_filename).read()
prefix = template_filename[:-2]
parts = prefix.split('_')
width = int(parts[-1])
parts = parts[:-1]
if 'sg' in parts[0]:
parts[0] = parts[0].replace('sg', 'sg_flags')
prefix = "_".join(parts)
table_prefix = ""
rowcol_prefix = ""
trace_prefix = ""
suffix_prefix = ""
if len(parts) == 2:
table_prefix = "%s_table_%s" % (parts[0], parts[1])
rowcol_prefix = "%s_rowcol_%s" % (parts[0], parts[1])
trace_prefix = "%s_trace_%s" % (parts[0], parts[1])
suffix_prefix = parts[1]
if len(parts) == 3:
table_prefix = "%s_%s_table_%s" % (parts[0], parts[1], parts[2])
rowcol_prefix = "%s_%s_rowcol_%s" % (parts[0], parts[1], parts[2])
trace_prefix = "%s_%s_%s" % (parts[0], parts[1], parts[2])
suffix_prefix = parts[2]
for isa in [sse2,sse41,avx2,altivec,neon]:
params = copy.deepcopy(isa)
params["WIDTH"] = width
suffix = "_%s_%s%s_%s_%s" % (suffix_prefix,
isa["ISA"], isa["ISA_VERSION"], isa["BITS"], width)
function_name = "%s_%s%s_%s_%s" % (prefix,
isa["ISA"], isa["ISA_VERSION"], isa["BITS"], width)
function_table_name = "%s_%s%s_%s_%s" % (table_prefix,
isa["ISA"], isa["ISA_VERSION"], isa["BITS"], width)
function_rowcol_name = "%s_%s%s_%s_%s" % (rowcol_prefix,
isa["ISA"], isa["ISA_VERSION"], isa["BITS"], width)
function_trace_name = "%s_%s%s_%s_%s" % (trace_prefix,
isa["ISA"], isa["ISA_VERSION"], isa["BITS"], width)
params["SUFFIX"] = suffix
params["NAME"] = "parasail_"+function_name
params["NAME_TABLE"] = "parasail_"+function_table_name
params["NAME_ROWCOL"] = "parasail_"+function_rowcol_name
params["NAME_TRACE"] = "parasail_"+function_trace_name
params = generated_params(template, params)
if 'flags' in function_name:
function_name = function_name.replace('_flags', '')
output_filename = "%s%s.c" % (output_dir, function_name)
result = template % params
writer = open(output_filename, "w")
writer.write(template % params)
writer.write("\n")
writer.close()
# some templates have specializations for using bias
for template_filename in bias_templates:
template = open(template_dir+template_filename).read()
prefix = template_filename[:-2]
parts = prefix.split('_')
parts = parts[:-1]
if 'sg' in parts[0]:
parts[0] = parts[0].replace('sg', 'sg_flags')
prefix = "_".join(parts)
prefix_prof = prefix + "_profile"
table_prefix = ""
rowcol_prefix = ""
trace_prefix = ""
suffix_prefix = ""
if len(parts) == 2:
table_prefix = "%s_table_%s" % (parts[0], parts[1])
rowcol_prefix = "%s_rowcol_%s" % (parts[0], parts[1])
trace_prefix = "%s_trace_%s" % (parts[0], parts[1])
suffix_prefix = parts[1]
if len(parts) == 3:
table_prefix = "%s_%s_table_%s" % (parts[0], parts[1], parts[2])
rowcol_prefix = "%s_%s_rowcol_%s" % (parts[0], parts[1], parts[2])
trace_prefix = "%s_%s_trace_%s" % (parts[0], parts[1], parts[2])
suffix_prefix = parts[2]
table_prefix_prof = table_prefix + "_profile"
rowcol_prefix_prof = rowcol_prefix + "_profile"
trace_prefix_prof = trace_prefix + "_profile"
suffix_prefix_prof = suffix_prefix + "_profile"
for width in [16,8]:
for isa in [sse2,sse41,avx2,altivec,neon]:
params = copy.deepcopy(isa)
params["WIDTH"] = width
suffix = "_%s_%s%s_%s_%s" % (suffix_prefix,
isa["ISA"], isa["ISA_VERSION"], isa["BITS"], width)
suffix_prof = "_%s_%s%s_%s_%s" % (suffix_prefix_prof,
isa["ISA"], isa["ISA_VERSION"], isa["BITS"], width)
function_name = "%s_%s%s_%s_%s" % (prefix,
isa["ISA"], isa["ISA_VERSION"], isa["BITS"], width)
function_table_name = "%s_%s%s_%s_%s" % (table_prefix,
isa["ISA"], isa["ISA_VERSION"], isa["BITS"], width)
function_rowcol_name = "%s_%s%s_%s_%s" % (rowcol_prefix,
isa["ISA"], isa["ISA_VERSION"], isa["BITS"], width)
function_trace_name = "%s_%s%s_%s_%s" % (trace_prefix,
isa["ISA"], isa["ISA_VERSION"], isa["BITS"], width)
function_pname = "%s_%s%s_%s_%s" % (prefix_prof,
isa["ISA"], isa["ISA_VERSION"], isa["BITS"], width)
function_table_pname = "%s_%s%s_%s_%s" % (table_prefix_prof,
isa["ISA"], isa["ISA_VERSION"], isa["BITS"], width)
function_rowcol_pname = "%s_%s%s_%s_%s" % (rowcol_prefix_prof,
isa["ISA"], isa["ISA_VERSION"], isa["BITS"], width)
function_trace_pname = "%s_%s%s_%s_%s" % (trace_prefix_prof,
isa["ISA"], isa["ISA_VERSION"], isa["BITS"], width)
params["SUFFIX"] = suffix
params["SUFFIX_PROF"] = suffix_prof
params["NAME"] = "parasail_"+function_name
params["NAME_BASE"] = params["NAME"].replace("_stats", "")
params["NAME_TABLE"] = "parasail_"+function_table_name
params["NAME_ROWCOL"] = "parasail_"+function_rowcol_name
params["NAME_TRACE"] = "parasail_"+function_trace_name
params["PNAME"] = "parasail_"+function_pname
params["PNAME_BASE"] = params["PNAME"].replace("_stats", "")
params["PNAME_TABLE"] = "parasail_"+function_table_pname
params["PNAME_ROWCOL"] = "parasail_"+function_rowcol_pname
params["PNAME_TRACE"] = "parasail_"+function_trace_pname
params = generated_params(template, params)
params["VADD"] = params["VADDSx%d"%width]
params["VSUB"] = params["VSUBSx%d"%width]
output_filename = "%s%s.c" % (output_dir, function_name)
result = template % params
writer = open(output_filename, "w")
writer.write(template % params)
writer.write("\n")
writer.close()
```
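The loop above fills each C template purely through `%`-style substitution: every `%(KEY)s` placeholder in the template text is replaced from the params dict built per ISA and width. A tiny sketch of that mechanism, with a made-up template line and value:

```python
# Minimal illustration of the `template % params` substitution used above.
template = "parasail_result_t* %(NAME)s(const char * const restrict s1, const int s1Len);\n"
params = {"NAME": "parasail_sw_striped_sse41_128_16"}  # illustrative function name
print(template % params)
```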
#### File: parasail_c/util/func_group_traces.py
```python
print """/**
* @file
*
* @author <EMAIL>
*
* Copyright (c) 2015 Battelle Memorial Institute.
*/
#ifndef _PARASAIL_FUNCTION_GROUP_TRACE_H_
#define _PARASAIL_FUNCTION_GROUP_TRACE_H_
#include "parasail.h"
typedef struct parasail_function_group {
const char * name;
parasail_function_info_t *fs;
} parasail_function_group_t;
"""
def print_fmt(*args):
fmt = '{%-36s %-38s %5s %10s %-8s %6s %5s %3s %1s 0, 1, %1s %1s},'
new_args = [arg for arg in args]
new_args[0] = '%s,' % new_args[0]
new_args[1] = '"%s",' % new_args[1]
new_args[2] = '"%s",' % new_args[2]
new_args[3] = '"%s",' % new_args[3]
new_args[4] = '"%s",' % new_args[4]
new_args[5] = '"%s",' % new_args[5]
new_args[6] = '"%s",' % new_args[6]
new_args[7] = '%d,' % new_args[7]
new_args[8] = '%d,' % new_args[8]
new_args[9] = '%d,' % new_args[9]
new_args[10]= '%d' % new_args[10]
print fmt % tuple(new_args)
def print_null():
fmt = '{%s, "%s", "%s", "%s", "%s", "%s", "%s", %d, %d, 0, 1, %d, %d},'
print fmt[:-1] % ("NULL", "NULL", "NULL", "NULL", "NULL", "NULL", "NULL", 0, 0, 0, 0)
isa_to_bits = {
"sse2" : 128,
"sse41" : 128,
"avx2" : 256,
"altivec" : 128,
"neon" : 128,
}
for table in ["_trace"]:
for stats in [""]:
for alg in ["nw", "sg", "sw", "sg_qb", "sg_qe", "sg_qx", "sg_db", "sg_de", "sg_dx", "sg_qb_de", "sg_qe_db"]:
is_table = 0
is_stats = 0
is_trace = 1
if stats:
is_stats = 1
pre = "parasail_"+alg+stats+table
for isa in ["sse2", "sse41", "avx2", "altivec", "neon"]:
print "#if HAVE_%s" % isa.upper()
print "static parasail_function_info_t %s_%s_functions[] = {" % (pre, isa)
print_fmt(pre, pre, alg+stats, "orig", "NA", "32", "32", 1, is_table, is_stats, 1)
print_fmt(pre+"_scan", pre+"_scan", alg+stats, "scan", "NA", "32", "32", 1, is_table, is_stats, 0)
bits = isa_to_bits[isa]
for par in ["scan", "striped", "diag"]:
widths = [64, 32, 16, 8]
for width in widths:
name = "%s_%s_%s_%s_%s" % (pre, par, isa, bits, width)
print_fmt(name, name, alg+stats, par, isa, bits, width, bits/width, is_table, is_stats, 0)
print_null()
print "};"
print 'static parasail_function_group_t %s_%s = {"%s_%s", %s_%s_functions};' % ((pre, isa)*3)
print "#endif"
# non-isa-specific functions
isa = "disp"
print "static parasail_function_info_t %s_%s_functions[] = {" % (pre, isa)
print_fmt(pre, pre, alg+stats, "orig", "NA", "32", "32", 1, is_table, is_stats, 1)
print_fmt(pre+"_scan", pre+"_scan", alg+stats, "scan", "NA", "32", "32", 1, is_table, is_stats, 0)
# also print the dispatcher function
for par in ["scan", "striped", "diag"]:
for width in [64, 32, 16, 8]:
name = "%s_%s_%s" % (pre, par, width)
print_fmt(name, name, alg+stats, par, "disp", "NA", width, -
1, is_table, is_stats, 0)
# also print the saturation check function
for par in ["scan", "striped", "diag"]:
name = "%s_%s_sat" % (pre, par)
print_fmt(name, name, alg+stats, par, "sat", "NA", 8, -1, is_table, is_stats, 0)
print_null()
print "};"
print 'static parasail_function_group_t %s_%s = {"%s_%s", %s_%s_functions};' % ((pre, isa)*3)
print """
#endif /* _PARASAIL_FUNCTION_GROUP_TRACE_H_ */
"""
``` |
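The script above emits the whole C header on stdout (hence the Python 2 `print` statements), so its output is normally captured into a file. A hedged driver sketch; the interpreter name and the output path are assumptions, not part of the original build:

```python
import subprocess

# Roughly: python2 util/func_group_traces.py > function_group_trace.h
with open("function_group_trace.h", "w") as header:  # illustrative output path
    subprocess.check_call(["python2", "util/func_group_traces.py"], stdout=header)
```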
{
"source": "Jianshu-Hu/pddm",
"score": 2
} |
#### File: pddm/envs/robot.py
```python
import numpy as np
from collections import deque
from collections import namedtuple
# obervations structure
observation = namedtuple(
'observation',
['time', 'qpos_robot', 'qvel_robot', 'qpos_object', 'qvel_object'])
class Robot(object):
def __init__(self, n_jnt, n_obj, n_dofs, pos_bounds=None, vel_bounds=None, **kwargs):
self.n_jnt = n_jnt
self.n_obj = n_obj
self.n_dofs = n_dofs
self.has_obj = False
if self.n_obj>0:
self.has_obj = True
# Cache that gets updated
self.observation_cache_maxsize = 5
self.observation_cache = deque([], maxlen=self.observation_cache_maxsize)
# Pos and vel bounds
self.pos_bounds = None
if pos_bounds is not None:
pos_bounds = np.array(pos_bounds, dtype=np.float32)
assert pos_bounds.shape == (self.n_dofs, 2)
for low, high in pos_bounds:
assert low < high
self.pos_bounds = pos_bounds
self.vel_bounds = None
if vel_bounds is not None:
vel_bounds = np.array(vel_bounds, dtype=np.float32)
assert vel_bounds.shape == (self.n_dofs, 2)
for low, high in vel_bounds:
assert low < high
self.vel_bounds = vel_bounds
# refresh the observation cache
def _observation_cache_refresh(self, env):
for _ in range(self.observation_cache_maxsize):
self.get_obs(env, robot_noise_ratio=0, object_noise_ratio=0)
# get past observation
def get_obs_from_cache(self, env, index=-1):
assert (index>=0 and index<self.observation_cache_maxsize) or \
(index<0 and index>=-self.observation_cache_maxsize), \
"cache index out of bound. (cache size is %2d)"%self.observation_cache_maxsize
obs = self.observation_cache[index]
if self.has_obj:
return obs.time, obs.qpos_robot, obs.qvel_robot, obs.qpos_object, obs.qvel_object
else:
return obs.time, obs.qpos_robot, obs.qvel_robot
# get observation
def get_obs(self, env, robot_noise_ratio=0.05, object_noise_ratio=0.05):
qp = env.sim.data.qpos[:self.n_jnt].ravel()
qv = env.sim.data.qvel[:self.n_jnt].ravel()
if self.has_obj:
qp_obj = env.sim.data.qpos[-self.n_obj:].ravel()
qv_obj = env.sim.data.qvel[-self.n_obj:].ravel()
else:
qp_obj = None
qv_obj = None
self.time = env.sim.data.time
# Simulate observation noise
if not env.initializing:
noise_amp = robot_noise_ratio*(env.model.jnt_range[:self.n_jnt,1]-env.model.jnt_range[:self.n_jnt,0])
qp += noise_amp*env.np_random.uniform(low=-.5, high=.5, size=self.n_jnt)
qv += noise_amp*env.np_random.uniform(low=-.5, high=.5, size=self.n_jnt)
if self.has_obj:
noise_amp = object_noise_ratio*(env.model.jnt_range[-self.n_obj:,1]-env.model.jnt_range[-self.n_obj:,0])
qp_obj += noise_amp*env.np_random.uniform(low=-.5, high=.5, size=self.n_obj)
qv_obj += noise_amp*env.np_random.uniform(low=-.5, high=.5, size=self.n_obj)
# cache observations
obs = observation(
time=self.time,
qpos_robot=qp,
qvel_robot=qv,
qpos_object=qp_obj,
qvel_object=qv_obj)
self.observation_cache.append(obs)
if self.has_obj:
return obs.time, obs.qpos_robot, obs.qvel_robot, obs.qpos_object, obs.qvel_object
else:
return obs.time, obs.qpos_robot, obs.qvel_robot
# clip only joint position limits
# since we can only control those anyway
def ctrl_position_limits(self, ctrl_position):
ctrl_feasible_position = np.clip(ctrl_position,
self.pos_bounds[:self.n_jnt, 0],
self.pos_bounds[:self.n_jnt, 1])
return ctrl_feasible_position
# enforce velocity limits.
def enforce_velocity_limits(self, ctrl_position, step_duration):
last_obs = self.observation_cache[-1]
desired_vel = (ctrl_position[:self.n_jnt] - last_obs.qpos_robot[:self.n_jnt])/step_duration
feasible_vel = np.clip(desired_vel, self.vel_bounds[:self.n_jnt, 0], self.vel_bounds[:self.n_jnt, 1])
feasible_position = last_obs.qpos_robot + feasible_vel*step_duration
return feasible_position
# step the robot env
def step(self, env, ctrl_desired, step_duration):
# Populate observation cache during startup
if env.initializing:
self._observation_cache_refresh(env)
# enforce velocity limits
ctrl_feasible = self.enforce_velocity_limits(ctrl_desired, step_duration)
# enforce position limits
ctrl_feasible = self.ctrl_position_limits(ctrl_feasible)
# Send controls to the robot
env.do_simulation(ctrl_feasible, int(step_duration/env.sim.model.opt.timestep)) # render is folded in here
return 1
# clip the whole thing
def clip_positions(self, positions):
assert len(positions) == self.n_jnt or len(positions) == self.n_dofs
pos_bounds = self.pos_bounds[:len(positions)]
return np.clip(positions, pos_bounds[:, 0], pos_bounds[:, 1])
def reset(self, env, reset_pose, reset_vel):
reset_pose = self.clip_positions(reset_pose)
# env.sim.reset()
env.sim.data.qpos[:self.n_jnt] = reset_pose[:self.n_jnt].copy()
env.sim.data.qvel[:self.n_jnt] = reset_vel[:self.n_jnt].copy()
if self.has_obj:
env.sim.data.qpos[-self.n_obj:] = reset_pose[-self.n_obj:].copy()
env.sim.data.qvel[-self.n_obj:] = reset_vel[-self.n_obj:].copy()
env.sim.forward()
# refresh observation cache before exit
self._observation_cache_refresh(env)
```
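The `enforce_velocity_limits`/`ctrl_position_limits` pair above turns a desired position command into a feasible one by clamping the implied joint velocity and then the position itself. A standalone sketch of the velocity clamp, with made-up bounds and numbers:

```python
import numpy as np

qpos = np.array([0.0, 0.2])                    # current joint positions (illustrative)
ctrl_desired = np.array([0.5, -0.4])           # desired position command
vel_bounds = np.array([[-1.0, 1.0],
                       [-1.0, 1.0]])           # [low, high] velocity bound per joint
step_duration = 0.1

desired_vel = (ctrl_desired - qpos) / step_duration                  # implied velocity
feasible_vel = np.clip(desired_vel, vel_bounds[:, 0], vel_bounds[:, 1])
ctrl_feasible = qpos + feasible_vel * step_duration                  # clamped command
print(ctrl_feasible)  # [0.1 0.1]: each joint moves at most 1.0 * 0.1 per step
```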
#### File: pddm/policies/policy_random.py
```python
import numpy as np
class Policy_Random(object):
def __init__(self, env):
#vars
self.env = env
self.low_val = -1 * np.ones(self.env.action_space.low.shape)
self.high_val = np.ones(self.env.action_space.high.shape)
self.shape = self.env.action_space.shape
self.counter = 0
self.rand_ac = np.random.uniform(self.low_val, self.high_val, self.shape)
def get_action(self, observation, prev_action, random_sampling_params, hold_action_overrideToOne=False):
# params for random sampling
sample_velocities = random_sampling_params['sample_velocities']
vel_min = random_sampling_params['vel_min']
vel_max = random_sampling_params['vel_max']
hold_action = random_sampling_params['hold_action']
if hold_action_overrideToOne:
hold_action = 1
### for a position-controlled robot,
# sample random velocities
# instead of random actions
# (for smoother exploration)
if sample_velocities:
if prev_action is None:
# generate random action for right now
self.rand_ac = np.random.uniform(self.low_val, self.high_val, self.shape)
action = self.rand_ac
# generate velocity, to be used if next steps might hold_action
self.vel_sample = np.random.uniform(vel_min, vel_max, self.env.action_space.low.shape)
self.direction_num = np.random.randint(0, 2, self.env.action_space.low.shape)
self.vel_sample[self.direction_num==0] = -self.vel_sample[self.direction_num==0]
else:
if (self.counter%hold_action)==0:
self.vel_sample = np.random.uniform(vel_min, vel_max, self.env.action_space.low.shape)
self.direction_num = np.random.randint(0, 2, self.env.action_space.low.shape)
self.vel_sample[self.direction_num==0] = -self.vel_sample[self.direction_num==0]
#go opposite direction if you hit limit
self.vel_sample[prev_action<=self.low_val] = np.abs(self.vel_sample)[prev_action<=self.low_val] #need to do larger action
self.vel_sample[prev_action>=self.high_val] = -np.abs(self.vel_sample)[prev_action>=self.high_val]
#new action
action = prev_action + self.vel_sample
### else, for a torque-controlled robot,
# just uniformly sample random actions
else:
if (self.counter%hold_action)==0:
self.rand_ac = np.random.uniform(self.low_val, self.high_val, self.shape)
action = self.rand_ac
self.counter +=1
return action, 0
```
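For position-controlled robots, `Policy_Random` effectively random-walks in action space: it holds a sampled velocity for `hold_action` steps and reflects it off the action limits. A self-contained sketch of that sampling scheme, using made-up bounds instead of a gym-style env:

```python
import numpy as np

low, high = -np.ones(3), np.ones(3)          # illustrative action bounds
vel_min, vel_max, hold_action = 0.05, 0.2, 5

action = np.random.uniform(low, high)
vel = np.zeros_like(action)
for step in range(20):
    if step % hold_action == 0:
        vel = np.random.uniform(vel_min, vel_max, low.shape)
        flip = np.random.randint(0, 2, low.shape) == 0
        vel[flip] = -vel[flip]                             # random per-dimension direction
    vel[action <= low] = np.abs(vel)[action <= low]        # bounce off the lower limit
    vel[action >= high] = -np.abs(vel)[action >= high]     # bounce off the upper limit
    action = action + vel
```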
#### File: pddm/scripts/visualize_iteration.py
```python
import numpy as np
import pickle
import sys
import os
import argparse
import traceback
#my imports
from pddm.utils.helper_funcs import visualize_rendering
from pddm.utils.helper_funcs import create_env
import pddm.envs
def vis_iter(args, load_dir):
##########################
## load in data
##########################
#params
paramfile = open(load_dir + '/params.pkl', 'rb')
params = pickle.load(paramfile)
env_name = params.env_name
#data to visualize
if args.eval:
with open(load_dir + '/saved_rollouts/rollouts_eval.pickle',
'rb') as handle:
rollouts_info = pickle.load(handle)
else:
with open(
load_dir + '/saved_rollouts/rollouts_info_' + str(args.iter_num) +
'.pickle', 'rb') as handle:
rollouts_info = pickle.load(handle)
##########################
## visualize
##########################
#create env
use_env, dt_from_xml = create_env(env_name)
rewards = []
scores = []
for vis_index in range(len(rollouts_info)):
print("\n\nROLLOUT NUMBER ", vis_index, " .... num steps loaded: ", rollouts_info[vis_index]['actions'].shape[0])
#visualize rollouts from this iteration
_, rewards_for_rollout, scores_for_rollout = visualize_rendering(
rollouts_info[vis_index],
use_env,
params,
visualize=True,
visualize_mpes=args.view_live_mpe_plot)
rewards.append(np.sum(rewards_for_rollout))
scores.append(np.mean(scores_for_rollout[-5:])) # rollout_meanFinalScore
print("\n\n########################\nREWARDS across rollouts from this training iteration.... mean: ",
np.mean(rewards), ", std: ", np.std(rewards))
print("SCORES across rollouts from this training iteration.... mean: ", np.mean(scores), ", std: ",
np.std(scores), "\n\n")
def main():
##########################
## vars to specify
##########################
parser = argparse.ArgumentParser()
parser.add_argument('--job_path', type=str) #address this path WRT your working directory
parser.add_argument('--iter_num', type=int, default=1) #if eval is False, visualize rollouts from this iteration
parser.add_argument('--eval', action="store_true") #if this is True, visualize rollouts from rollouts_eval.pickle
parser.add_argument('--view_live_mpe_plot', action="store_true")
args = parser.parse_args()
##########################
## do visualization
##########################
#directory to load from
load_dir = os.path.abspath(args.job_path)
print("LOADING FROM: ", load_dir)
assert os.path.isdir(load_dir)
try:
vis_iter(args, load_dir)
except (KeyboardInterrupt, SystemExit):
print('Terminating...')
sys.exit(0)
except Exception as e:
print('ERROR: Exception occured while running a job....')
traceback.print_exc()
if __name__ == '__main__':
main()
``` |
{
"source": "JIANSHULI/Douyin_Auto_iOS",
"score": 2
} |
#### File: JIANSHULI/Douyin_Auto_iOS/DouYin_wechat_jump_auto_iOS.py
```python
import os
import shutil
import time
import math
import random
import json
from PIL import Image, ImageDraw
import wda
# import wechat_jump_game.common as common
try:
from wechat_jump_game.common import apiutil
from wechat_jump_game.common.compression import resize_image
print('Load from wechat_jump_game.')
except:
from common import debug, config, screenshot, UnicodeStreamFilter
# from common.auto_adb import auto_adb
from common import apiutil
from common.compression import resize_image
print('Load from Douyin-Bot/')
import sys
################################################
def _random_bias(num):
"""
random bias
:param num:
:return:
"""
print('num = ', num)
return random.randint(-num, num)
def pull_screenshot(Use_App='Wechat_Jump', FACE_PATH = '', id=0):
if 'Wechat_Jump' in Use_App:
c.screenshot('1.png')
elif 'DouYin' in Use_App:
c.screenshot(FACE_PATH + 'autojump.png')
def jump(distance):
press_time = distance * time_coefficient / 1000
print('press time: {}'.format(press_time))
s.tap_hold(random.uniform(0, 320), random.uniform(64, 320), press_time)
def backup_screenshot(ts):
"""
    Keep a copy of the screenshot so failures are easier to debug
"""
if not os.path.isdir(screenshot_backup_dir):
os.mkdir(screenshot_backup_dir)
shutil.copy('1.png', '{}{}.png'.format(screenshot_backup_dir, ts))
def save_debug_creenshot(ts, im, piece_x, piece_y, board_x, board_y):
draw = ImageDraw.Draw(im)
    # Add detailed annotations to the debug image
draw.line((piece_x, piece_y) + (board_x, board_y), fill=2, width=3)
draw.line((piece_x, 0, piece_x, im.size[1]), fill=(255, 0, 0))
draw.line((0, piece_y, im.size[0], piece_y), fill=(255, 0, 0))
draw.line((board_x, 0, board_x, im.size[1]), fill=(0, 0, 255))
draw.line((0, board_y, im.size[0], board_y), fill=(0, 0, 255))
draw.ellipse(
(piece_x - 10, piece_y - 10, piece_x + 10, piece_y + 10),
fill=(255, 0, 0))
draw.ellipse(
(board_x - 10, board_y - 10, board_x + 10, board_y + 10),
fill=(0, 0, 255))
del draw
im.save('{}{}_d.png'.format(screenshot_backup_dir, ts))
def set_button_position(im):
"""
    Set swipe to the position of the `再来一局` (Play Again) button
"""
global swipe_x1, swipe_y1, swipe_x2, swipe_y2
w, h = im.size
left = w / 2
top = 1003 * (h / 1280.0) + 10
swipe_x1, swipe_y1, swipe_x2, swipe_y2 = left, top, left, top
def find_piece_and_board(im):
w, h = im.size
print("size: {}, {}".format(w, h))
piece_x_sum = piece_x_c = piece_y_max = 0
board_x = board_y = 0
    scan_x_border = int(w / 8) # left and right borders when scanning for the piece
    scan_start_y = 0 # starting y coordinate of the scan
im_pixel = im.load()
    # Probe for scan_start_y in steps of 50 px
for i in range(under_game_score_y, h, 50):
last_pixel = im_pixel[0, i]
for j in range(1, w):
pixel = im_pixel[j, i]
            # Not a solid-color line, so record scan_start_y and prepare to break out of the loop
if pixel != last_pixel:
scan_start_y = i - 50
break
if scan_start_y:
break
print("scan_start_y: ", scan_start_y)
    # Scan downward from scan_start_y; the piece should be in the upper part of the screen, assumed here to be within the top 2/3
for i in range(scan_start_y, int(h * 2 / 3)):
        # Also narrow the horizontal range to cut down the scanning cost
for j in range(scan_x_border, w - scan_x_border):
pixel = im_pixel[j, i]
            # Judge by the color of the piece's bottom row and average those points;
            # this color range should be OK, so it is left inline for now
if (50 < pixel[0] < 60) \
and (53 < pixel[1] < 63) \
and (95 < pixel[2] < 110):
piece_x_sum += j
piece_x_c += 1
piece_y_max = max(i, piece_y_max)
if not all((piece_x_sum, piece_x_c)):
return 0, 0, 0, 0
piece_x = piece_x_sum / piece_x_c
    piece_y = piece_y_max - piece_base_height_1_2 # move up by half the height of the piece's base
for i in range(int(h / 3), int(h * 2 / 3)):
last_pixel = im_pixel[0, i]
if board_x or board_y:
break
board_x_sum = 0
board_x_c = 0
for j in range(w):
pixel = im_pixel[j, i]
            # Fix the bug where the piece's head is taller than the next platform
if abs(j - piece_x) < piece_body_width:
continue
            # Fix a small bug caused by a single line at the top of domes; this color check should be OK, so it is left inline for now
if abs(pixel[0] - last_pixel[0]) \
+ abs(pixel[1] - last_pixel[1]) \
+ abs(pixel[2] - last_pixel[2]) > 10:
board_x_sum += j
board_x_c += 1
if board_x_sum:
board_x = board_x_sum / board_x_c
            # Find a coordinate close to the center of the next board using the actual viewing angle;
            # the angle should be 30 degrees, so the value is tan 30°, i.e. math.sqrt(3) / 3
board_y = piece_y - abs(board_x - piece_x) * math.sqrt(3) / 3
if not all((board_x, board_y)):
return 0, 0, 0, 0
return piece_x, piece_y, board_x, board_y
######### Which App to Use ##########
App_List = ['DouYin', 'Wechat_Jump']
Use_App = 'DouYin'
c = wda.Client(url='http://172.16.58.3:8100')
s = c.session()
if len(sys.argv) == 1:
try:
w = s.window_size()[0]
h = s.window_size()[1]
Follow_Sign_x = w/1080 * 1050
Follow_Sign_y = h/1920 * 920
except:
w = 750 / 2
h = 1334 / 2
Follow_Sign_x = 730 / 2
Follow_Sign_y = 640 / 2
else:
w = int(sys.argv[1])
h = int(sys.argv[2])
Follow_Sign_x = w / 1080 * 990
Follow_Sign_y = h / 1920 * 950
print('Follow_Sign_x: %s; Follow_Sign_y: %s'%(Follow_Sign_x, Follow_Sign_y))
def main():
if 'Wechat_Jump' in Use_App:
####################################################################
######################## Wechat_Jump ###############################
with open('config.json', 'r') as f:
config = json.load(f)
        # Magic number; the script may not work without it. Set these values as needed, top to bottom, based on your own screenshots.
under_game_score_y = config['under_game_score_y']
        # Coefficient for the long-press duration; tune it to your own device.
press_coefficient = config['press_coefficient']
        # Half the height of the piece's base; may need tuning.
piece_base_height_1_2 = config['piece_base_height_1_2']
        # Width of the piece; slightly larger than measured on the screenshot is safer, may need tuning.
piece_body_width = config['piece_body_width']
time_coefficient = config['press_coefficient']
        # Start coordinates of the simulated press; to replay automatically, set this to the position of the 再来一局 (Play Again) button.
swipe = config.get('swipe', {
"x1": 320,
"y1": 410,
"x2": 320,
"y2": 410
})
VERSION = "1.1.4"
screenshot_backup_dir = 'screenshot_backups/'
if not os.path.isdir(screenshot_backup_dir):
os.mkdir(screenshot_backup_dir)
while True:
pull_screenshot()
im = Image.open("./1.png")
            # Get the positions of the piece and the target board
piece_x, piece_y, board_x, board_y = find_piece_and_board(im)
ts = int(time.time())
print(ts, piece_x, piece_y, board_x, board_y)
if piece_x == 0:
return
set_button_position(im)
distance = math.sqrt(
(board_x - piece_x) ** 2 + (board_y - piece_y) ** 2)
jump(distance)
save_debug_creenshot(ts, im, piece_x, piece_y, board_x, board_y)
backup_screenshot(ts)
            # Wait a bit longer so the piece has settled before the next screenshot; use a random delay to reduce the risk of a ban
time.sleep(random.uniform(1, 1.1))
elif 'DouYin' in Use_App:
#####################################################################
########################### DouYin ##################################
        # Apply for credentials at http://ai.qq.com
AppID = '1106858595'
AppKey = '<KEY>'
FACE_PATH = 'face/'
Max_Try = 10
Girls = True
Follow_Her = False
Like_Her = True
        # Beauty threshold
BEAUTY_THRESHOLD = 80
Likes_max = 1
Save_Origin = True
Save_Whole = True
Save_Face = True
for i in range(Max_Try):
c = wda.Client(url='http://172.16.58.3:8100') # Please replace this by your own url from WebDriverAgent output.
s = c.session()
# s.swipe_up_pro()
time.sleep(3)
pull_screenshot(Use_App=Use_App, FACE_PATH=FACE_PATH)
if Save_Origin:
im = Image.open(FACE_PATH + 'autojump.png')
im.save(FACE_PATH + 'autojump_%s.png'%(i))
try:
resize_image(FACE_PATH + 'autojump.png', FACE_PATH + 'optimized.png', 1024 * 1024)
with open(FACE_PATH + 'optimized.png', 'rb') as bin_data:
image_data = bin_data.read()
except:
with open(FACE_PATH + 'autojump.png', 'rb') as bin_data:
image_data = bin_data.read()
ai_obj = apiutil.AiPlat(AppID, AppKey)
rsp = ai_obj.face_detectface(image_data, 0)
if rsp['ret'] == 0:
beauty = 0
for face in rsp['data']['face_list']:
print(face)
face_area = (face['x'], face['y'], face['x'] + face['width'], face['y'] + face['height'])
print(face_area)
img = Image.open(FACE_PATH + "optimized.png")
if Save_Whole:
img.save(FACE_PATH + face['face_id'] + '_Whole.png')
if Save_Face:
cropped_img = img.crop(face_area).convert('RGB')
cropped_img.save(FACE_PATH + face['face_id'] + '.png')
                    # Gender check
if Girls:
if face['beauty'] > beauty and face['gender'] < 50:
beauty = face['beauty']
else:
if face['beauty'] > beauty and face['gender'] > 50:
beauty = face['beauty']
                # She's a beauty! Follow and like her.
                if beauty > BEAUTY_THRESHOLD:
                    print('Found a pretty girl!!!')
                    print('Beauty score: %s' % beauty)
if Like_Her:
for i in range(int((beauty - BEAUTY_THRESHOLD)/((100 - BEAUTY_THRESHOLD)/Likes_max) + 1)):
s.double_tap(x=w/2, y=h/2)
print('Heart!')
# time.sleep(0.11)
if Follow_Her:
s.tap(x=Follow_Sign_x, y=Follow_Sign_y)
print('Follow!')
# time.sleep(0.2)
time.sleep(3)
else:
                    print('Beauty score: %s' % beauty)
try:
s.swipe_up_pro()
except:
time.sleep(10)
c = wda.Client(url='http://172.16.58.3:8100')
s = c.session()
try:
s.swipe_up_pro()
except:
pass
time.sleep(1)
if __name__ == '__main__':
main()
``` |
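The number of `double_tap` likes sent per detected face comes from the linear expression in the loop above: one like at the threshold, rising to `Likes_max + 1` likes at a score of 100. A small worked sketch of that formula with the script's defaults:

```python
BEAUTY_THRESHOLD = 80
Likes_max = 1

def likes_for(beauty):
    # Same expression as in the script's like loop.
    return int((beauty - BEAUTY_THRESHOLD) / ((100 - BEAUTY_THRESHOLD) / Likes_max) + 1)

for beauty in (81, 90, 100):
    print(beauty, likes_for(beauty))  # 81 -> 1, 90 -> 1, 100 -> 2 with Likes_max = 1
```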
{
"source": "Jiansion/reptile",
"score": 3
} |
#### File: reptile/chapter3/1_find_a.py
```python
import re
from urllib.request import urlopen
from bs4 import BeautifulSoup
# html = urlopen('http://www.qianjia.com/').read()
# bsObj = BeautifulSoup(html, 'html.parser')
# aList = bsObj.findAll('a', href=re.compile('^http'))
# # Store visited links; use a set() to de-duplicate them
# pages = set()
# for link in aList:
# href = link['href']
# if href not in pages:
# pages.add(href)
#
pages = set()
def getLink(pageUrl):
global pages
try:
html = urlopen(pageUrl).read()
bsObj = BeautifulSoup(html, 'html.parser')
aList = bsObj.findAll('a', href=re.compile('^http'))
for link in aList:
href = link['href']
if href not in pages:
pages.add(href)
print(href)
getLink(href)
except Exception as e:
print(e)
getLink('http://www.qianjia.com/')
``` |
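`getLink` recurses once per newly found link, so a link-rich site can exhaust Python's recursion limit. An iterative variant with an explicit queue visits the same pages without deep recursion; a sketch, assuming the same start URL:

```python
import re
from collections import deque
from urllib.request import urlopen
from bs4 import BeautifulSoup

def crawl(start_url):
    pages = set()
    queue = deque([start_url])
    while queue:
        url = queue.popleft()
        try:
            html = urlopen(url).read()
        except Exception as e:
            print(e)
            continue
        bsObj = BeautifulSoup(html, 'html.parser')
        for link in bsObj.findAll('a', href=re.compile('^http')):
            href = link['href']
            if href not in pages:
                pages.add(href)
                print(href)
                queue.append(href)

# crawl('http://www.qianjia.com/')
```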
{
"source": "Jiansiyu/Calib",
"score": 2
} |
#### File: SpectroCalib/dataLoader/dataLoader.py
```python
import os
class dataloader(object):
def __init__(self):
pass
def LoadCSV(self,fname):
raise NotImplementedError
``` |
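`dataloader.LoadCSV` is left as an interface stub, so concrete loaders are expected to subclass it. A minimal sketch of one possible subclass; the parsing here is illustrative and not part of the original calibration code:

```python
import csv

class csvdataloader(dataloader):
    def LoadCSV(self, fname):
        # Return the file as a list of rows; real code would map columns to calibration fields.
        with open(fname, newline='') as f:
            return list(csv.reader(f))

# rows = csvdataloader().LoadCSV('spectro_run_001.csv')  # illustrative file name
```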
{
"source": "jiansoung/flask-todos-api",
"score": 2
} |
#### File: app/controllers/items_controller.py
```python
from flask import g, Blueprint, request, jsonify
from app.models import Todo, Item
from .concerns import only_allow, dict_copy, authorize_request
from app.exceptions import exceptions
from app.lib import Message
__all__ = []
blueprint = Blueprint('items_controller', __name__)
@blueprint.route("/todos/<int:todo_id>/items")
def index(todo_id):
todo = request.todo
items = [item.to_dict() for item in todo.items]
return jsonify(items)
@blueprint.route("/todos/<int:todo_id>/items", methods=["POST"])
def create(todo_id):
todo = request.todo
item_params = request.item_params
item = todo.create_item(item_params)
return jsonify(item.to_dict()), 201
@blueprint.route("/todos/<int:todo_id>/items/<int:item_id>")
def show(todo_id, item_id):
item = request.item
return jsonify(item.to_dict())
@blueprint.route("/todos/<int:todo_id>/items/<int:item_id>", methods=["PUT"])
def update(todo_id, item_id):
item = request.item
item_params = request.item_params
item.update(item_params)
return jsonify(), 204
@blueprint.route("/todos/<int:todo_id>/items/<int:item_id>", methods=['DELETE'])
def destroy(todo_id, item_id):
item = request.item
item.destroy()
return jsonify(), 204
@blueprint.before_request
def need_authorize_request():
authorize_request()
@blueprint.before_request
def set_todo():
if 'current_user' not in g:
authorize_request()
view_args = request.view_args
if 'todo_id' in view_args:
todo_id = view_args['todo_id']
todo = Todo.query.filter_by(id=todo_id, user=g.current_user).first()
if todo is None:
raise exceptions.RecordNotFound(Message.not_found())
request.todo = todo
@blueprint.before_request
@only_allow([show, update, destroy], blueprint)
def set_item():
if not hasattr(request, 'todo'):
set_todo()
view_args = request.view_args
if 'item_id' in view_args:
todo = request.todo
item_id = view_args['item_id']
item = Item.query.filter_by(id=item_id, todo=todo).first()
if item is None:
raise exceptions.RecordNotFound(Message.not_found())
request.item = item
@blueprint.before_request
@only_allow([create, update], blueprint)
def set_item_params():
keys = ['name', 'done', 'todo_id']
params = { **request.view_args, **request.json }
request.item_params = dict_copy(params, keys)
```
#### File: flask-todos-api/app/__init__.py
```python
from flask import Flask
from app import models, controllers
from app.extensions import db, bcrypt, migrate
def create_app():
app = Flask(__name__)
app.config.from_pyfile("config.py")
bcrypt.init_app(app)
db.init_app(app)
migrate.init_app(app, db)
controllers.init_app(app)
return app
``` |
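With `create_app` assembled as above, the nested `/todos/<id>/items` routes can be exercised through Flask's test client. A hedged sketch; the authorization header format and payload fields are assumptions based on the controller code, not verified against the project's auth helpers:

```python
from app import create_app

app = create_app()
client = app.test_client()

headers = {"Authorization": "Bearer <token>"}  # placeholder token for an existing user
resp = client.post("/todos/1/items",
                   json={"name": "buy milk", "done": False},
                   headers=headers)
print(resp.status_code)                                   # expected 201 on success
print(client.get("/todos/1/items", headers=headers).get_json())
```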
{
"source": "jiansowa/Paddle",
"score": 2
} |
#### File: dygraph/dygraph_to_static/ifelse_transformer.py
```python
from __future__ import print_function
import six
import copy
from collections import defaultdict
# gast is a generic AST to represent Python2 and Python3's Abstract Syntax Tree(AST).
# It provides a compatibility layer between the AST of various Python versions,
# as produced by ast.parse from the standard ast module.
# See details in https://github.com/serge-sans-paille/gast/
import gast
from paddle.fluid import unique_name
from paddle.fluid.dygraph.dygraph_to_static.utils import create_funcDef_node, ast_to_source_code
from paddle.fluid.dygraph.dygraph_to_static.utils import create_assign_node
from paddle.fluid.dygraph.dygraph_to_static.static_analysis import StaticAnalysisVisitor
from paddle.fluid.dygraph.dygraph_to_static.static_analysis import AstNodeWrapper
from paddle.fluid.dygraph.dygraph_to_static.variable_trans_func import create_static_variable_gast_node
TRUE_FUNC_PREFIX = 'true_fn'
FALSE_FUNC_PREFIX = 'false_fn'
class IfElseTransformer(gast.NodeTransformer):
"""
Transform if/else statement of Dygraph into Static Graph.
"""
def __init__(self, wrapper_root):
assert isinstance(
wrapper_root, AstNodeWrapper
), "Type of input node should be AstNodeWrapper, but received %s ." % type(
wrapper_root)
self.root = wrapper_root.node
self.static_analysis_visitor = StaticAnalysisVisitor(self.root)
def transform(self):
"""
Main function to transform AST.
"""
self.visit(self.root)
def visit_If(self, node):
self.generic_visit(node)
new_vars_stmts, true_func_node, false_func_node, return_name_ids = transform_if_else(
node, self.root)
new_node = create_convert_ifelse_node(return_name_ids, node.test,
true_func_node, false_func_node)
return new_vars_stmts + [true_func_node, false_func_node] + [new_node]
def visit_Call(self, node):
# Remove `numpy()` statement, like `Tensor.numpy()[i]` -> `Tensor[i]`
if isinstance(node.func, gast.Attribute):
attribute = node.func
if attribute.attr == 'numpy':
node = attribute.value
self.generic_visit(node)
return node
def visit_IfExp(self, node):
"""
Transformation with `true_fn(x) if Tensor > 0 else false_fn(x)`
"""
self.generic_visit(node)
new_node = create_convert_ifelse_node(None, node.test, node.body,
node.orelse, True)
# Note: A blank line will be added separately if transform gast.Expr
# into source code. Using gast.Expr.value instead to avoid syntax error
# in python.
if isinstance(new_node, gast.Expr):
new_node = new_node.value
return new_node
class NameVisitor(gast.NodeVisitor):
def __init__(self, end_node=None):
# The terminate node of the visitor.
self.end_node = end_node
# Dict to store the names and ctxs of vars.
self.name_ids = defaultdict(list)
# List of current visited nodes
self.ancestor_nodes = []
# Available only when end_node is set.
self._is_finished = False
self._candidate_ctxs = (gast.Store, gast.Load, gast.Param)
self._def_func_names = set()
def visit(self, node):
"""Visit a node."""
if node == self.end_node or self._is_finished:
self._is_finished = True
return
self.ancestor_nodes.append(node)
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
ret = visitor(node)
self.ancestor_nodes.pop()
return ret
def visit_If(self, node):
"""
For nested `if/else`, the created vars are not always visible for parent node.
In addition, the vars created in `if.body` are not visible for `if.orelse`.
Case 1:
x = 1
if m > 1:
res = new_tensor
res = res + 1 # Error, `res` is not visible here.
Case 2:
if x_tensor > 0:
res = new_tensor
else:
res = res + 1 # Error, `res` is not visible here.
In above two cases, we should consider to manage the scope of vars to parsing
the arguments and returned vars correctly.
"""
if not self.end_node:
self.generic_visit(node)
else:
before_if_name_ids = copy.deepcopy(self.name_ids)
body_name_ids = self._visit_child(node.body)
# If traversal process stops early in `if.body`, return the currently seen name_ids.
if self._is_finished:
self._update_name_ids(before_if_name_ids)
else:
else_name_ids = self._visit_child(node.orelse)
# If traversal process stops early in `if.orelse`, return the currently seen name_ids.
if self._is_finished:
self._update_name_ids(before_if_name_ids)
else:
# Blocks the vars in `if.body` and only inserts the vars both created in 'if/else' branch
# into name_ids.
new_name_ids = self._find_new_name_ids(body_name_ids,
else_name_ids)
for new_name_id in new_name_ids:
before_if_name_ids[new_name_id].append(gast.Store())
self.name_ids = before_if_name_ids
def visit_Attribute(self, node):
if not self._is_call_func_name_node(node):
self.generic_visit(node)
def visit_Name(self, node):
blacklist = {'True', 'False', 'None'}
if node.id in blacklist: return
if node.id in self._def_func_names:
return
if not self._is_call_func_name_node(node):
if isinstance(node.ctx, self._candidate_ctxs):
self.name_ids[node.id].append(node.ctx)
def visit_Assign(self, node):
# Visit `value` firstly.
node._fields = ('value', 'targets')
self.generic_visit(node)
def visit_FunctionDef(self, node):
self._def_func_names.add(node.name)
if not self.end_node:
self.generic_visit(node)
else:
before_name_ids = copy.deepcopy(self.name_ids)
self.name_ids = defaultdict(list)
self.generic_visit(node)
if self._is_finished:
self._update_name_ids(before_name_ids)
else:
self.name_ids = before_name_ids
def _visit_child(self, node):
self.name_ids = defaultdict(list)
if isinstance(node, list):
for item in node:
if isinstance(item, gast.AST):
self.visit(item)
elif isinstance(node, gast.AST):
self.visit(node)
return copy.deepcopy(self.name_ids)
def _find_new_name_ids(self, body_name_ids, else_name_ids):
def is_required_ctx(ctxs, required_ctx):
for ctx in ctxs:
if isinstance(ctx, required_ctx):
return True
return False
candidate_name_ids = set(body_name_ids.keys()) & set(else_name_ids.keys(
))
store_ctx = gast.Store
new_name_ids = set()
for name_id in candidate_name_ids:
if is_required_ctx(body_name_ids[name_id],
store_ctx) and is_required_ctx(
else_name_ids[name_id], store_ctx):
new_name_ids.add(name_id)
return new_name_ids
def _is_call_func_name_node(self, node):
if len(self.ancestor_nodes) > 1:
assert self.ancestor_nodes[-1] == node
parent_node = self.ancestor_nodes[-2]
if isinstance(parent_node, gast.Call) and parent_node.func == node:
return True
return False
def _update_name_ids(self, new_name_ids):
for name_id, ctxs in six.iteritems(new_name_ids):
self.name_ids[name_id] = ctxs + self.name_ids[name_id]
def get_name_ids(nodes, end_node=None):
"""
Return all ast.Name.id of python variable in nodes.
"""
name_visitor = NameVisitor(end_node)
for node in nodes:
name_visitor.visit(node)
return name_visitor.name_ids
def parse_cond_args(var_ids_dict, return_ids=None, ctx=gast.Load):
"""
Find out the ast.Name.id list of input by analyzing node's AST information.
"""
name_ids = [
var_id for var_id, var_ctx in six.iteritems(var_ids_dict)
if isinstance(var_ctx[0], ctx)
]
if return_ids:
new_args = set(return_ids) - set(name_ids)
name_ids.extend(list(new_args))
name_ids.sort()
args = [
gast.Name(
id=name_id, ctx=gast.Load(), annotation=None, type_comment=None)
for name_id in name_ids
]
arguments = gast.arguments(
args=args,
posonlyargs=[],
vararg=None,
kwonlyargs=[],
kw_defaults=None,
kwarg=None,
defaults=[])
return arguments
def parse_cond_return(parent_vars_dict, if_vars_dict, else_vars_dict,
after_ifelse_vars_dict):
"""
Find out the ast.Name list of output by analyzing node's AST information.
One of the following conditions should be satisfied while determining whether a variable is a return value:
1. the var in parent scope is modified in If.body or If.orelse node.
2. new var is both created in If.body and If.orelse node.
3. new var is created only in one of If.body or If.orelse node, and it used as gast.Load firstly after gast.If node.
For example:
x, y = 5, 10
if x > 4:
x = x+1
z = x*x
q = 10
else:
y = y - 1
z = y*y
m = 20
n = 20
print(q)
n = 30
print(n)
The return_ids are (x, y, z, q) for `If.body` and `If.orelse`node, because
1. x is modified in If.body node,
2. y is modified in If.body node,
3. z is both created in If.body and If.orelse node,
4. q is created only in If.body, and it is used by `print(q)` as gast.Load.
Note:
After transformed, q and z are created in parent scope. For example,
x, y = 5, 10
q = paddle.jit.dy2static.data_layer_not_check(name='q', shape=[-1], dtype='float32')
z = paddle.jit.dy2static.data_layer_not_check(name='z', shape=[-1], dtype='float32')
def true_func(x, y, q):
x = x+1
z = x*x
q = 10
return x,y,z,q
def false_func(x, y, q):
y = y - 1
z = y*y
m = 20
n = 20
return x,y,z,q
x,y,z,q = fluid.layers.cond(x>4, lambda: true_func(x, y), lambda: false_func(x, y, q))
m and n are not in return_ids, because
5. m is created only in If.orelse, but it is not used after gast.If node.
6. n is created only in If.orelse, and it is used by `n = 30` and `print(n)`, but it is not used as gast.Load firstly but gast.Store .
"""
def _is_return_var(ctxs):
for ctx in ctxs:
if isinstance(ctx, (gast.Store, gast.Param)):
return True
return False
def _vars_with_store(ids_dict):
vars = []
for k, ctxs in six.iteritems(ids_dict):
if _is_return_var(ctxs):
vars.append(k)
return vars
def _modified_vars(child_dict, parent_dict):
return set([
var for var in _vars_with_store(child_dict) if var in parent_dict
])
def _vars_loaded_before_store(ids_dict):
new_dict = defaultdict(list)
for k, ctxs in six.iteritems(ids_dict):
for ctx in ctxs:
if isinstance(ctx, gast.Load):
new_dict[k].append(ctx)
elif isinstance(ctx, gast.Store):
break
return new_dict
# modified vars
body_modified_vars = _modified_vars(if_vars_dict, parent_vars_dict)
orelse_modified_vars = _modified_vars(else_vars_dict, parent_vars_dict)
modified_vars = body_modified_vars | orelse_modified_vars
# new vars
body_new_vars = set([
var for var in _vars_with_store(if_vars_dict)
if var not in parent_vars_dict
])
orelse_new_vars = set([
var for var in _vars_with_store(else_vars_dict)
if var not in parent_vars_dict
])
new_vars_in_body_or_orelse = body_new_vars | orelse_new_vars
new_vars_in_one_of_body_or_orelse = body_new_vars ^ orelse_new_vars
# 1. the var in parent scope is modified in If.body or If.orelse node.
modified_vars_from_parent = modified_vars - new_vars_in_body_or_orelse
# 2. new var is both created in If.body and If.orelse node.
new_vars_in_body_and_orelse = body_new_vars & orelse_new_vars
# 3. new var is created only in one of If.body or If.orelse node, and it used as gast.Load firstly after gast.If node.
used_vars_after_ifelse = set(
[var for var in _vars_loaded_before_store(after_ifelse_vars_dict)])
new_vars_to_create = new_vars_in_one_of_body_or_orelse & used_vars_after_ifelse | new_vars_in_body_and_orelse
# 4. generate return_ids of if/else node.
return_ids = list(modified_vars_from_parent | new_vars_in_body_and_orelse |
new_vars_to_create)
return_ids.sort()
return return_ids, modified_vars_from_parent, new_vars_to_create
def transform_if_else(node, root):
"""
Transform ast.If into control flow statement of Paddle static graph.
"""
# TODO(liym27): Consider variable like `self.a` modified in if/else node.
parent_name_ids = get_name_ids([root], end_node=node)
body_name_ids = get_name_ids(node.body)
orelse_name_ids = get_name_ids(node.orelse)
# Get after_ifelse_name_ids, which means used var names after If.body and If.orelse node.
after_ifelse_name_ids = defaultdict(list)
all_name_ids = get_name_ids([root])
for name in all_name_ids:
before_var_names_ids = parent_name_ids.get(name, []) + \
body_name_ids.get(name, []) + orelse_name_ids.get(name, [])
# Note: context of node.Name like gast.Load is a concrete object which has unique id different from other gast.Load
# E.g. ctx of `x` can be [<gast.Load object at 0x142a33c90>, <gast.Load object at 0x142a51950>, <gast.Param object at 0x1407d8250>]
after_var_names_ids = [
ctx for ctx in all_name_ids[name] if ctx not in before_var_names_ids
]
if after_var_names_ids:
after_ifelse_name_ids[name] = after_var_names_ids
return_name_ids, modified_name_ids_from_parent, new_vars_to_create = parse_cond_return(
parent_name_ids, body_name_ids, orelse_name_ids, after_ifelse_name_ids)
# NOTE: Python can create variable only in if body or only in else body, and use it out of if/else.
# E.g.
#
# if x > 5:
# a = 10
# print(a)
#
# Create static variable for those variables
create_new_vars_in_parent_stmts = []
for name in new_vars_to_create:
# NOTE: Consider variable like `self.a` modified in if/else node.
if "." not in name:
create_new_vars_in_parent_stmts.append(
create_static_variable_gast_node(name))
modified_name_ids = modified_name_ids_from_parent | new_vars_to_create
true_func_node = create_funcDef_node(
node.body,
name=unique_name.generate(TRUE_FUNC_PREFIX),
input_args=parse_cond_args(body_name_ids, modified_name_ids),
return_name_ids=return_name_ids)
false_func_node = create_funcDef_node(
node.orelse,
name=unique_name.generate(FALSE_FUNC_PREFIX),
input_args=parse_cond_args(orelse_name_ids, modified_name_ids),
return_name_ids=return_name_ids)
return create_new_vars_in_parent_stmts, true_func_node, false_func_node, return_name_ids
def create_convert_ifelse_node(return_name_ids,
pred,
true_func,
false_func,
is_if_expr=False):
"""
Create `paddle.jit.dy2static.convert_ifelse(
pred, true_fn, false_fn, true_args, false_args, return_vars)`
to replace original `python if/else` statement.
"""
def create_name_nodes(name_ids):
if not name_ids:
return gast.Tuple(elts=[], ctx=gast.Load())
gast_names = [
gast.Name(
id=name_id, ctx=gast.Load(), annotation=None, type_comment=None)
for name_id in name_ids
]
name_node = gast.Tuple(elts=gast_names, ctx=gast.Load())
return name_node
if is_if_expr:
true_args = gast.Tuple(elts=[], ctx=gast.Load())
false_args = gast.Tuple(elts=[], ctx=gast.Load())
true_func_source = "lambda : {}".format(ast_to_source_code(true_func))
false_func_source = "lambda : {}".format(ast_to_source_code(false_func))
else:
true_args = gast.Tuple(elts=true_func.args.args, ctx=gast.Load())
false_args = gast.Tuple(elts=false_func.args.args, ctx=gast.Load())
true_func_source = true_func.name
false_func_source = false_func.name
return_vars = create_name_nodes(return_name_ids)
convert_ifelse_layer = gast.parse(
'paddle.jit.dy2static.convert_ifelse('
'{pred}, {true_fn}, {false_fn}, {true_args}, {false_args}, {return_vars})'.
format(
pred=ast_to_source_code(pred),
true_fn=true_func_source,
false_fn=false_func_source,
true_args=ast_to_source_code(true_args),
false_args=ast_to_source_code(false_args),
return_vars=ast_to_source_code(return_vars))).body[0].value
if return_name_ids:
_, cond_node = create_assign_node(return_name_ids, convert_ifelse_layer)
else: # No variables can be returned if no assign statement in if.body.
cond_node = gast.Expr(value=convert_ifelse_layer)
return cond_node
```
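This transformer runs as part of `@paddle.jit.to_static`: user code keeps ordinary Python `if`/`else` on Tensors, and the rewrite into `convert_ifelse` with generated `true_fn`/`false_fn` functions happens automatically. A hedged end-to-end sketch of the kind of function it handles:

```python
import paddle

@paddle.jit.to_static
def shift(x):
    # Tensor-dependent control flow: rewritten by IfElseTransformer into a convert_ifelse call.
    if paddle.mean(x) > 0:
        y = x + 1
    else:
        y = x - 1
    return y

x = paddle.to_tensor([0.5, -0.2])
print(shift(x).numpy())  # mean(x) = 0.15 > 0, so the true branch runs: [1.5, 0.8]
```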
#### File: unittests/dygraph_to_static/test_lstm.py
```python
import numpy as np
import paddle
import unittest
from paddle import nn
class Net(nn.Layer):
def __init__(self, in_channels, hidden_size):
super(Net, self).__init__()
self.lstm = nn.LSTM(
in_channels, hidden_size, direction='bidirectional', num_layers=2)
@paddle.jit.to_static
def forward(self, x):
x, _ = self.lstm(x)
return x
class TestLstm(unittest.TestCase):
def run_lstm(self, to_static):
paddle.jit.ProgramTranslator().enable(to_static)
paddle.disable_static()
paddle.static.default_main_program().random_seed = 1001
paddle.static.default_startup_program().random_seed = 1001
net = Net(12, 2)
x = paddle.zeros((2, 10, 12))
y = net(paddle.to_tensor(x))
return y.numpy()
def test_lstm_to_static(self):
dygraph_out = self.run_lstm(to_static=False)
static_out = self.run_lstm(to_static=True)
self.assertTrue(
np.allclose(dygraph_out, static_out),
msg='dygraph_out is {}\n static_out is \n{}'.format(dygraph_out,
static_out))
if __name__ == "__main__":
unittest.main()
```
#### File: unittests/rnn/test_wrappers.py
```python
import paddle
paddle.set_default_dtype("float64")
from paddle.fluid.layers import sequence_mask
import numpy as np
import unittest
from convert import convert_params_for_cell
from rnn_numpy import GRUCell, RNN, BiRNN
class TestRNNWrapper(unittest.TestCase):
def __init__(self, time_major=True, direction="forward", place="cpu"):
super(TestRNNWrapper, self).__init__("runTest")
self.time_major = time_major
self.direction = direction
self.place = paddle.CPUPlace() if place == "cpu" \
else paddle.CUDAPlace(0)
def setUp(self):
paddle.disable_static(self.place)
cell1 = GRUCell(16, 32)
cell2 = paddle.nn.GRUCell(16, 32)
convert_params_for_cell(cell1, cell2)
rnn1 = RNN(cell1,
is_reverse=self.direction == "backward",
time_major=self.time_major)
rnn2 = paddle.nn.RNN(cell2,
is_reverse=self.direction == "backward",
time_major=self.time_major)
self.rnn1 = rnn1
self.rnn2 = rnn2
def test_with_initial_state(self):
rnn1 = self.rnn1
rnn2 = self.rnn2
x = np.random.randn(12, 4, 16)
if not self.time_major:
x = np.transpose(x, [1, 0, 2])
prev_h = np.random.randn(4, 32)
y1, h1 = rnn1(x, prev_h)
y2, h2 = rnn2(paddle.to_tensor(x), paddle.to_tensor(prev_h))
np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)
def test_with_zero_state(self):
rnn1 = self.rnn1
rnn2 = self.rnn2
x = np.random.randn(12, 4, 16)
if not self.time_major:
x = np.transpose(x, [1, 0, 2])
y1, h1 = rnn1(x)
y2, h2 = rnn2(paddle.to_tensor(x))
np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)
def test_with_input_lengths(self):
rnn1 = self.rnn1
rnn2 = self.rnn2
x = np.random.randn(12, 4, 16)
if not self.time_major:
x = np.transpose(x, [1, 0, 2])
sequence_length = np.array([12, 10, 9, 8], dtype=np.int64)
y1, h1 = rnn1(x, sequence_length=sequence_length)
seq_len = paddle.to_tensor(sequence_length)
mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype())
if self.time_major:
mask = paddle.transpose(mask, [1, 0])
y2, h2 = rnn2(paddle.to_tensor(x), sequence_length=seq_len)
y2 = paddle.multiply(y2, mask, axis=0)
np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)
def runTest(self):
self.test_with_initial_state()
self.test_with_zero_state()
self.test_with_input_lengths()
class TestBiRNNWrapper(unittest.TestCase):
def __init__(self, time_major=True, place="cpu"):
super(TestBiRNNWrapper, self).__init__("runTest")
self.time_major = time_major
self.place = paddle.CPUPlace() if place == "cpu" \
else paddle.CUDAPlace(0)
def setUp(self):
paddle.disable_static(self.place)
fw_cell1 = GRUCell(16, 32)
bw_cell1 = GRUCell(16, 32)
fw_cell2 = paddle.nn.GRUCell(16, 32)
bw_cell2 = paddle.nn.GRUCell(16, 32)
convert_params_for_cell(fw_cell1, fw_cell2)
convert_params_for_cell(bw_cell1, bw_cell2)
rnn1 = BiRNN(fw_cell1, bw_cell1, time_major=self.time_major)
rnn2 = paddle.nn.BiRNN(fw_cell2, bw_cell2, time_major=self.time_major)
self.rnn1 = rnn1
self.rnn2 = rnn2
def test_with_initial_state(self):
rnn1 = self.rnn1
rnn2 = self.rnn2
x = np.random.randn(12, 4, 16)
if not self.time_major:
x = np.transpose(x, [1, 0, 2])
fw_prev_h = np.random.randn(4, 32)
bw_prev_h = np.random.randn(4, 32)
y1, (fw_h1, bw_h1) = rnn1(x, (fw_prev_h, bw_prev_h))
y2, (fw_h2, bw_h2) = rnn2(
paddle.to_tensor(x),
(paddle.to_tensor(fw_prev_h), paddle.to_tensor(bw_prev_h)))
np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
np.testing.assert_allclose(fw_h1, fw_h2.numpy(), atol=1e-8, rtol=1e-5)
np.testing.assert_allclose(bw_h1, bw_h2.numpy(), atol=1e-8, rtol=1e-5)
def test_with_zero_state(self):
rnn1 = self.rnn1
rnn2 = self.rnn2
x = np.random.randn(12, 4, 16)
if not self.time_major:
x = np.transpose(x, [1, 0, 2])
y1, (fw_h1, bw_h1) = rnn1(x)
y2, (fw_h2, bw_h2) = rnn2(paddle.to_tensor(x))
np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
np.testing.assert_allclose(fw_h1, fw_h2.numpy(), atol=1e-8, rtol=1e-5)
np.testing.assert_allclose(bw_h1, bw_h2.numpy(), atol=1e-8, rtol=1e-5)
def test_with_input_lengths(self):
rnn1 = self.rnn1
rnn2 = self.rnn2
x = np.random.randn(12, 4, 16)
if not self.time_major:
x = np.transpose(x, [1, 0, 2])
sequence_length = np.array([12, 10, 9, 8], dtype=np.int64)
y1, (fw_h1, bw_h1) = rnn1(x, sequence_length=sequence_length)
seq_len = paddle.to_tensor(sequence_length)
mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype())
if self.time_major:
mask = paddle.transpose(mask, [1, 0])
y2, (fw_h2, bw_h2) = rnn2(paddle.to_tensor(x), sequence_length=seq_len)
y2 = paddle.multiply(y2, mask, axis=0)
np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
np.testing.assert_allclose(fw_h1, fw_h2.numpy(), atol=1e-8, rtol=1e-5)
np.testing.assert_allclose(bw_h1, bw_h2.numpy(), atol=1e-8, rtol=1e-5)
def runTest(self):
self.test_with_initial_state()
self.test_with_zero_state()
self.test_with_input_lengths()
def load_tests(loader, tests, pattern):
suite = unittest.TestSuite()
devices = ["cpu", "gpu"] if paddle.fluid.is_compiled_with_cuda() \
else ["cpu"]
for direction in ["forward", "backward"]:
for device in devices:
for time_major in [False]:
suite.addTest(TestRNNWrapper(time_major, direction, device))
suite.addTest(TestBiRNNWrapper(time_major, device))
return suite
```
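The `sequence_mask` step in these tests zeroes out RNN outputs beyond each sequence's true length so padded steps do not affect the comparison with the numpy reference. A tiny standalone sketch of that masking trick:

```python
import paddle
from paddle.fluid.layers import sequence_mask

paddle.disable_static()
seq_len = paddle.to_tensor([3, 1])                          # true lengths of two sequences
out = paddle.ones([2, 4, 5], dtype='float64')               # [batch, time, hidden] outputs
mask = sequence_mask(seq_len, maxlen=4, dtype='float64')    # [batch, time], ones up to each length
masked = paddle.multiply(out, mask, axis=0)                 # padded time steps become zero
print(masked.numpy()[1, 1:].sum())                          # 0.0: steps past length 1 are masked
```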
#### File: tests/unittests/test_multiply.py
```python
from __future__ import print_function
import paddle
import paddle.tensor as tensor
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
import numpy as np
import unittest
class TestMultiplyAPI(unittest.TestCase):
"""TestMultiplyAPI."""
def __run_static_graph_case(self, x_data, y_data, axis=-1):
with program_guard(Program(), Program()):
x = paddle.static.data(
name='x', shape=x_data.shape, dtype=x_data.dtype)
y = paddle.static.data(
name='y', shape=y_data.shape, dtype=y_data.dtype)
res = tensor.multiply(x, y, axis=axis)
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
exe = fluid.Executor(place)
outs = exe.run(fluid.default_main_program(),
feed={'x': x_data,
'y': y_data},
fetch_list=[res])
res = outs[0]
return res
def __run_dynamic_graph_case(self, x_data, y_data, axis=-1):
paddle.disable_static()
x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data)
res = paddle.multiply(x, y, axis=axis)
return res.numpy()
def test_multiply(self):
"""test_multiply."""
np.random.seed(7)
# test static computation graph: 1-d array
x_data = np.random.rand(200)
y_data = np.random.rand(200)
res = self.__run_static_graph_case(x_data, y_data)
self.assertTrue(np.allclose(res, np.multiply(x_data, y_data)))
# test static computation graph: 2-d array
x_data = np.random.rand(2, 500)
y_data = np.random.rand(2, 500)
res = self.__run_static_graph_case(x_data, y_data)
self.assertTrue(np.allclose(res, np.multiply(x_data, y_data)))
# test static computation graph: broadcast
x_data = np.random.rand(2, 500)
y_data = np.random.rand(500)
res = self.__run_static_graph_case(x_data, y_data)
self.assertTrue(np.allclose(res, np.multiply(x_data, y_data)))
# test static computation graph: broadcast with axis
x_data = np.random.rand(2, 300, 40)
y_data = np.random.rand(300)
res = self.__run_static_graph_case(x_data, y_data, axis=1)
expected = np.multiply(x_data, y_data[..., np.newaxis])
self.assertTrue(np.allclose(res, expected))
# test dynamic computation graph: 1-d array
x_data = np.random.rand(200)
y_data = np.random.rand(200)
res = self.__run_dynamic_graph_case(x_data, y_data)
self.assertTrue(np.allclose(res, np.multiply(x_data, y_data)))
# test dynamic computation graph: 2-d array
x_data = np.random.rand(20, 50)
y_data = np.random.rand(20, 50)
res = self.__run_dynamic_graph_case(x_data, y_data)
self.assertTrue(np.allclose(res, np.multiply(x_data, y_data)))
# test dynamic computation graph: broadcast
x_data = np.random.rand(2, 500)
y_data = np.random.rand(500)
res = self.__run_dynamic_graph_case(x_data, y_data)
self.assertTrue(np.allclose(res, np.multiply(x_data, y_data)))
# test dynamic computation graph: broadcast with axis
x_data = np.random.rand(2, 300, 40)
y_data = np.random.rand(300)
res = self.__run_dynamic_graph_case(x_data, y_data, axis=1)
expected = np.multiply(x_data, y_data[..., np.newaxis])
self.assertTrue(np.allclose(res, expected))
class TestMultiplyError(unittest.TestCase):
"""TestMultiplyError."""
def test_errors(self):
"""test_errors."""
# test static computation graph: dtype can not be int8
paddle.enable_static()
with program_guard(Program(), Program()):
x = paddle.static.data(name='x', shape=[100], dtype=np.int8)
y = paddle.static.data(name='y', shape=[100], dtype=np.int8)
self.assertRaises(TypeError, tensor.multiply, x, y)
# test static computation graph: inputs must be broadcastable
with program_guard(Program(), Program()):
x = paddle.static.data(name='x', shape=[20, 50], dtype=np.float64)
y = paddle.static.data(name='y', shape=[20], dtype=np.float64)
self.assertRaises(fluid.core.EnforceNotMet, tensor.multiply, x, y)
np.random.seed(7)
# test dynamic computation graph: dtype can not be int8
paddle.disable_static()
x_data = np.random.randn(200).astype(np.int8)
y_data = np.random.randn(200).astype(np.int8)
x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data)
self.assertRaises(fluid.core.EnforceNotMet, paddle.multiply, x, y)
# test dynamic computation graph: inputs must be broadcastable
x_data = np.random.rand(200, 5)
y_data = np.random.rand(200)
x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data)
self.assertRaises(fluid.core.EnforceNotMet, paddle.multiply, x, y)
# test dynamic computation graph: inputs must be broadcastable(python)
x_data = np.random.rand(200, 5)
y_data = np.random.rand(200)
x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data)
self.assertRaises(fluid.core.EnforceNotMet, paddle.multiply, x, y)
# test dynamic computation graph: dtype must be same
x_data = np.random.randn(200).astype(np.int64)
y_data = np.random.randn(200).astype(np.float64)
x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data)
self.assertRaises(TypeError, paddle.multiply, x, y)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/unittests/test_parallel_executor_profiler.py
```python
from __future__ import print_function
import unittest
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.tests.unittests.test_profiler import TestProfiler
class TestPEProfiler(TestProfiler):
def test_cpu_profiler(self):
exe = fluid.Executor(fluid.CPUPlace())
self.net_profiler(exe, 'CPU', "Default", use_parallel_executor=True)
@unittest.skipIf(not core.is_compiled_with_cuda(),
"profiler is enabled only with GPU")
def test_cuda_profiler(self):
exe = fluid.Executor(fluid.CUDAPlace(0))
self.net_profiler(exe, 'GPU', "OpDetail", use_parallel_executor=True)
@unittest.skipIf(not core.is_compiled_with_cuda(),
"profiler is enabled only with GPU")
def test_all_profiler(self):
exe = fluid.Executor(fluid.CUDAPlace(0))
self.net_profiler(exe, 'All', "AllOpDetail", use_parallel_executor=True)
if __name__ == '__main__':
unittest.main()
```
#### File: nn/functional/input.py
```python
from __future__ import print_function
import warnings
from ...fluid.framework import Variable, in_dygraph_mode
from ...fluid.layer_helper import LayerHelper
from ...fluid.layers import core
from ...fluid.data_feeder import check_variable_and_dtype, check_dtype
__all__ = ['one_hot', 'embedding']
def one_hot(x, num_classes, name=None):
"""
    The operator converts each id in the input 'x' to a one-hot vector of length
    num_classes. The value in the vector dimension corresponding to the id is 1,
    and the values in the remaining dimensions are 0.
The shape of output Tensor is generated by appending num_classes dimension
behind the last dimension of the 'x' shape.
.. code-block:: text
Example 1:
input:
x.shape = [4]
x.data = [1, 1, 3, 0]
num_classes = 4
output:
Out.shape = [4, 4]
Out.data = [[0., 1., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 0., 1.],
[1., 0., 0., 0.]]
Example 2:
input:
x.shape = [4]
x.data = [1, 1, 5, 0]
num_classes = 4
        output: Throws an exception for the illegal value
            The value 5 in x is greater than or equal to num_classes (4),
            so an exception is thrown.
Args:
x(Tensor): Tensor with shape :math:`[N_1, N_2, ..., N_k]` ,
which contains at least one dimension. The data type is int32 or int64.
num_classes(int): An integer defining the num_classes of the one hot dimension. If input 'x'
is word id, num_classes is generally the dictionary size.
Returns:
Tensor: The one-hot representations of 'x'. A Tensor with type float32.
Examples:
.. code-block:: python
import paddle
            # Corresponds to the first example above: label.shape is [4] and one_hot_label.shape is [4, 4].
            label = paddle.data(name="label", shape=[4], dtype="int64")
# label.shape = [4]
# label.data = [1, 1, 3, 0]
one_hot_label = paddle.nn.functional.one_hot(x=label, num_classes=4)
# one_hot_label.shape = [4, 4]
# one_hot_label.data = [[0., 1., 0., 0.],
# [0., 1., 0., 0.],
# [0., 0., 0., 1.],
# [1., 0., 0., 0.]]
"""
if in_dygraph_mode():
return core.ops.one_hot_v2(x, 'depth', num_classes,
'allow_out_of_range', False)
else:
check_variable_and_dtype(x, 'input', ['int32', 'int64'], 'one_hot_v2')
helper = LayerHelper("one_hot_v2", **locals())
one_hot_out = helper.create_variable_for_type_inference(dtype='float32')
if not isinstance(num_classes, Variable):
# user attribute
inputs = {'X': x}
attrs = {'depth': num_classes, 'allow_out_of_range': False}
else:
num_classes.stop_gradient = True
inputs = {'X': x, 'depth_tensor': num_classes}
attrs = {'allow_out_of_range': False}
helper.append_op(
type="one_hot_v2",
inputs=inputs,
attrs=attrs,
outputs={'Out': one_hot_out},
stop_gradient=True)
return one_hot_out
def embedding(x, weight, padding_idx=None, sparse=False, name=None):
"""
    The operator is used to look up the embedding vectors of the ids provided by :attr:`x` .
    The shape of the output Tensor is generated by appending the embedding size to the
    shape of the input Tensor.
    **Note:** The id in :attr:`x` must satisfy :math:`0 <= id < weight.shape[0]` ,
otherwise the program will throw an exception and exit.
.. code-block:: text
Case 1:
x is a Tensor.
padding_idx = -1
x.data = [[1, 3], [2, 4], [4, 127]]
x.shape = [3, 2]
weight.shape = [128, 16]
output is a Tensor:
out.shape = [3, 2, 16]
out.data = [[[0.129435295, 0.244512452, ..., 0.436322452],
[0.345421456, 0.524563927, ..., 0.144534654]],
[[0.345249859, 0.124939536, ..., 0.194353745],
[0.945345345, 0.435394634, ..., 0.435345365]],
[[0.945345345, 0.435394634, ..., 0.435345365],
[0.0, 0.0, ..., 0.0 ]]] # padding data
        Since the input padding_idx is less than 0, it is automatically converted to padding_idx = -1 + 128 = 127.
It will pad all-zero data when id is 127.
Args:
x(Tensor): A Tensor with type int32/int64, which contains the id information. The value of the input id should
satisfy :math:`0<= id < weight.shape[0]` .
weight (Tensor): The weight. A Tensor with shape of lookup table parameter. It should have two elements which
indicates the size of the dictionary of embeddings and the size of each embedding vector respectively.
sparse(bool): The flag indicating whether to use sparse update. This parameter only
affects the performance of the backwards gradient update. It is recommended to set
            True because sparse update is faster. But some optimizers do not support sparse update,
such as :ref:`api_optimizer_AdadeltaOptimizer` , :ref:`api_optimizer_AdamaxOptimizer` ,
:ref:`api_optimizer_DecayedAdagradOptimizer` , :ref:`api_optimizer_FtrlOptimizer` ,
:ref:`api_optimizer_LambOptimizer` and :ref:`api_optimizer_LarsMomentumOptimizer` .
In these cases, sparse must be False. Default: False.
padding_idx(int|long|None): padding_idx needs to be in the interval [-weight.shape[0], weight.shape[0]).
If :math:`padding\_idx < 0`, the :math:`padding\_idx` will automatically be converted
to :math:`weight.shape[0] + padding\_idx` . It will output all-zero padding data whenever lookup
encounters :math:`padding\_idx` in id. And the padding data will not be updated while training.
            If set to None, it has no effect on the output. Default: None.
name(str|None): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually this does not need to be set and is
            None by default.
Returns:
Tensor: Embedding Tensor mapped by x. The data type is the same as :attr:`weight`.
Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
            from paddle.fluid.initializer import Constant
            prog = paddle.static.Program()
            weight = prog.global_block().create_parameter(
                (128, 100), dtype="float32", default_initializer=Constant(1.0))
            label = paddle.data(
                name="label",
                shape=[4],
                dtype="int64")
            emb = nn.functional.embedding(
                x=label, weight=weight, sparse=True, name="embedding")
"""
if in_dygraph_mode():
return core.ops.lookup_table_v2(
weight, x, 'is_sparse', sparse, 'is_distributed', False,
'remote_prefetch', False, 'padding_idx', padding_idx)
else:
helper = LayerHelper('embedding', **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(x, 'input', ['int32', 'int64'], 'embedding')
is_distributed = False
remote_prefetch = sparse and (not is_distributed)
tmp = helper.create_variable_for_type_inference(dtype)
padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else (
weight.shape[0] + padding_idx)
if padding_idx >= weight.shape[0] or padding_idx < -weight.shape[0]:
raise ValueError("padding_idx must be within [-{}, {})".format(
weight.shape[0], weight.shape[0]))
helper.append_op(
type='lookup_table_v2',
inputs={'Ids': x,
'W': weight},
outputs={'Out': tmp},
attrs={
'is_sparse': sparse,
'is_distributed': is_distributed,
'remote_prefetch': remote_prefetch,
'padding_idx': padding_idx
})
return tmp
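# Illustrative dygraph usage of the embedding() defined above (a sketch only; the
# shapes and values are examples, not part of the original module):
#   import paddle
#   import numpy as np
#   paddle.disable_static()
#   x = paddle.to_tensor(np.array([[1, 3], [2, 4]], dtype='int64'))
#   w = paddle.to_tensor(np.random.rand(128, 16).astype('float32'))
#   out = embedding(x, w)   # out.shape == [2, 2, 16]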
```
#### File: paddle/tests/test_transforms.py
```python
import unittest
import os
import tempfile
import cv2
import shutil
import numpy as np
from paddle.vision.datasets import DatasetFolder
from paddle.vision.transforms import transforms
import paddle.vision.transforms.functional as F
class TestTransforms(unittest.TestCase):
def setUp(self):
self.data_dir = tempfile.mkdtemp()
for i in range(2):
sub_dir = os.path.join(self.data_dir, 'class_' + str(i))
if not os.path.exists(sub_dir):
os.makedirs(sub_dir)
for j in range(2):
if j == 0:
fake_img = (np.random.random(
(280, 350, 3)) * 255).astype('uint8')
else:
fake_img = (np.random.random(
(400, 300, 3)) * 255).astype('uint8')
cv2.imwrite(os.path.join(sub_dir, str(j) + '.jpg'), fake_img)
def tearDown(self):
shutil.rmtree(self.data_dir)
def do_transform(self, trans):
dataset_folder = DatasetFolder(self.data_dir, transform=trans)
for _ in dataset_folder:
pass
def test_trans_all(self):
normalize = transforms.Normalize(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.120, 57.375])
trans = transforms.Compose([
transforms.RandomResizedCrop(224), transforms.GaussianNoise(),
transforms.ColorJitter(
brightness=0.4, contrast=0.4, saturation=0.4,
hue=0.4), transforms.RandomHorizontalFlip(),
transforms.Permute(mode='CHW'), normalize
])
self.do_transform(trans)
def test_normalize(self):
normalize = transforms.Normalize(mean=0.5, std=0.5)
trans = transforms.Compose([transforms.Permute(mode='CHW'), normalize])
self.do_transform(trans)
def test_trans_resize(self):
trans = transforms.Compose([
transforms.Resize(300, [0, 1]),
transforms.RandomResizedCrop((280, 280)),
transforms.Resize(280, [0, 1]),
transforms.Resize((256, 200)),
transforms.Resize((180, 160)),
transforms.CenterCrop(128),
transforms.CenterCrop((128, 128)),
])
self.do_transform(trans)
def test_trans_centerCrop(self):
trans = transforms.Compose([
transforms.CenterCropResize(224),
transforms.CenterCropResize(128, 160),
])
self.do_transform(trans)
def test_flip(self):
trans = transforms.Compose([
transforms.RandomHorizontalFlip(1.0),
transforms.RandomHorizontalFlip(0.0),
transforms.RandomVerticalFlip(0.0),
transforms.RandomVerticalFlip(1.0),
])
self.do_transform(trans)
def test_color_jitter(self):
trans = transforms.BatchCompose([
transforms.BrightnessTransform(0.0),
transforms.HueTransform(0.0),
transforms.SaturationTransform(0.0),
transforms.ContrastTransform(0.0),
])
self.do_transform(trans)
def test_rotate(self):
trans = transforms.Compose([
transforms.RandomRotate(90),
transforms.RandomRotate([-10, 10]),
transforms.RandomRotate(
45, expand=True),
transforms.RandomRotate(
10, expand=True, center=(60, 80)),
])
self.do_transform(trans)
def test_pad(self):
trans = transforms.Compose([transforms.Pad(2)])
self.do_transform(trans)
fake_img = np.random.rand(200, 150, 3).astype('float32')
trans_pad = transforms.Pad(10)
fake_img_padded = trans_pad(fake_img)
np.testing.assert_equal(fake_img_padded.shape, (220, 170, 3))
trans_pad1 = transforms.Pad([1, 2])
trans_pad2 = transforms.Pad([1, 2, 3, 4])
img = trans_pad1(fake_img)
img = trans_pad2(img)
def test_erase(self):
trans = transforms.Compose(
[transforms.RandomErasing(), transforms.RandomErasing(value=0.0)])
self.do_transform(trans)
def test_random_crop(self):
trans = transforms.Compose([
transforms.RandomCrop(200),
transforms.RandomCrop((140, 160)),
])
self.do_transform(trans)
trans_random_crop1 = transforms.RandomCrop(224)
trans_random_crop2 = transforms.RandomCrop((140, 160))
fake_img = np.random.rand(500, 400, 3).astype('float32')
fake_img_crop1 = trans_random_crop1(fake_img)
fake_img_crop2 = trans_random_crop2(fake_img_crop1)
np.testing.assert_equal(fake_img_crop1.shape, (224, 224, 3))
np.testing.assert_equal(fake_img_crop2.shape, (140, 160, 3))
trans_random_crop_same = transforms.RandomCrop((140, 160))
img = trans_random_crop_same(fake_img_crop2)
trans_random_crop_bigger = transforms.RandomCrop((180, 200))
img = trans_random_crop_bigger(img)
trans_random_crop_pad = transforms.RandomCrop((224, 256), 2, True)
img = trans_random_crop_pad(img)
def test_grayscale(self):
trans = transforms.Compose([transforms.Grayscale()])
self.do_transform(trans)
trans_gray = transforms.Grayscale()
fake_img = np.random.rand(500, 400, 3).astype('float32')
fake_img_gray = trans_gray(fake_img)
np.testing.assert_equal(len(fake_img_gray.shape), 3)
np.testing.assert_equal(fake_img_gray.shape[0], 500)
np.testing.assert_equal(fake_img_gray.shape[1], 400)
trans_gray3 = transforms.Grayscale(3)
fake_img = np.random.rand(500, 400, 3).astype('float32')
fake_img_gray = trans_gray3(fake_img)
def test_exception(self):
trans = transforms.Compose([transforms.Resize(-1)])
trans_batch = transforms.BatchCompose([transforms.Resize(-1)])
with self.assertRaises(Exception):
self.do_transform(trans)
with self.assertRaises(Exception):
self.do_transform(trans_batch)
with self.assertRaises(ValueError):
transforms.ContrastTransform(-1.0)
with self.assertRaises(ValueError):
transforms.SaturationTransform(-1.0),
with self.assertRaises(ValueError):
transforms.HueTransform(-1.0)
with self.assertRaises(ValueError):
transforms.BrightnessTransform(-1.0)
with self.assertRaises(ValueError):
transforms.Pad([1.0, 2.0, 3.0])
with self.assertRaises(TypeError):
fake_img = np.random.rand(100, 120, 3).astype('float32')
F.pad(fake_img, '1')
with self.assertRaises(TypeError):
fake_img = np.random.rand(100, 120, 3).astype('float32')
F.pad(fake_img, 1, {})
with self.assertRaises(TypeError):
fake_img = np.random.rand(100, 120, 3).astype('float32')
F.pad(fake_img, 1, padding_mode=-1)
with self.assertRaises(ValueError):
fake_img = np.random.rand(100, 120, 3).astype('float32')
F.pad(fake_img, [1.0, 2.0, 3.0])
with self.assertRaises(ValueError):
transforms.RandomRotate(-2)
with self.assertRaises(ValueError):
transforms.RandomRotate([1, 2, 3])
with self.assertRaises(ValueError):
trans_gray = transforms.Grayscale(5)
fake_img = np.random.rand(100, 120, 3).astype('float32')
trans_gray(fake_img)
def test_info(self):
str(transforms.Compose([transforms.Resize((224, 224))]))
str(transforms.BatchCompose([transforms.Resize((224, 224))]))
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jiansowa/PaddleSlim",
"score": 2
} |
#### File: quant/pact_quant_aware/pact.py
```python
import sys
import paddle
import paddle.fluid as fluid
from paddleslim.quant import quant_aware, convert
import numpy as np
from paddle.fluid.layer_helper import LayerHelper
def pact(x, name=None):
helper = LayerHelper("pact", **locals())
dtype = 'float32'
init_thres = 20
u_param_attr = fluid.ParamAttr(
name=x.name + '_pact',
initializer=fluid.initializer.ConstantInitializer(value=init_thres),
regularizer=fluid.regularizer.L2Decay(0.0001),
learning_rate=1)
u_param = helper.create_parameter(
attr=u_param_attr, shape=[1], dtype=dtype)
x = fluid.layers.elementwise_sub(
x, fluid.layers.relu(fluid.layers.elementwise_sub(x, u_param)))
x = fluid.layers.elementwise_add(
x, fluid.layers.relu(fluid.layers.elementwise_sub(-u_param, x)))
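    # Together, the two element-wise steps above implement a learnable clip of the
    # activation to [-u_param, u_param] before quantization (the PACT-style bound):
    #   x - relu(x - u)       caps values above  u at  u
    #   ... + relu(-u - x)    lifts values below -u to -u
    # e.g. with u = 20: x = 25 -> 25 - relu(5) + relu(-45) = 20.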
return x
def get_optimizer():
return fluid.optimizer.MomentumOptimizer(0.0001, 0.9)
```
#### File: quant/quant_post/export_model.py
```python
import os
import sys
import logging
import paddle
import argparse
import functools
import math
import time
import numpy as np
import paddle.fluid as fluid
sys.path[0] = os.path.join(
os.path.dirname("__file__"), os.path.pardir, os.path.pardir)
from paddleslim.common import get_logger
import models
from utility import add_arguments, print_arguments
_logger = get_logger(__name__, level=logging.INFO)
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('use_gpu', bool, True, "Whether to use GPU or not.")
add_arg('model', str, "MobileNet", "The target model.")
add_arg('pretrained_model', str, "../pretrained_model/MobileNetV1_pretained", "Whether to use pretrained model.")
add_arg('data', str, "mnist", "Which data to use. 'mnist' or 'imagenet'")
add_arg('test_period', int, 10, "Test period in epoches.")
# yapf: enable
model_list = [m for m in dir(models) if "__" not in m]
def export_model(args):
if args.data == "mnist":
import paddle.dataset.mnist as reader
train_reader = reader.train()
val_reader = reader.test()
class_dim = 10
image_shape = "1,28,28"
elif args.data == "imagenet":
import imagenet_reader as reader
train_reader = reader.train()
val_reader = reader.val()
class_dim = 1000
image_shape = "3,224,224"
else:
raise ValueError("{} is not supported.".format(args.data))
image_shape = [int(m) for m in image_shape.split(",")]
image = fluid.data(
name='image', shape=[None] + image_shape, dtype='float32')
assert args.model in model_list, "{} is not in lists: {}".format(
args.model, model_list)
# model definition
model = models.__dict__[args.model]()
out = model.net(input=image, class_dim=class_dim)
val_program = fluid.default_main_program().clone(for_test=True)
place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
if args.pretrained_model:
def if_exist(var):
return os.path.exists(
os.path.join(args.pretrained_model, var.name))
fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist)
else:
assert False, "args.pretrained_model must set"
fluid.io.save_inference_model(
'./inference_model/' + args.model,
feeded_var_names=[image.name],
target_vars=[out],
executor=exe,
main_program=val_program,
model_filename='model',
params_filename='weights')
def main():
args = parser.parse_args()
print_arguments(args)
export_model(args)
if __name__ == '__main__':
main()
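# Example invocation (model name and path are illustrative, not prescriptive):
#   python export_model.py --model MobileNet --data mnist \
#       --pretrained_model ../pretrained_model/MobileNetV1_pretrained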
```
#### File: paddleslim/common/controller.py
```python
import copy
import math
import numpy as np
import paddle.fluid as fluid
__all__ = ['EvolutionaryController', 'RLBaseController']
class EvolutionaryController(object):
"""Abstract controller for all evolutionary searching method.
"""
def update(self, tokens, reward):
"""Update the status of controller according current tokens and reward.
Args:
tokens(list<int>): A solution of searching task.
reward(list<int>): The reward of tokens.
"""
raise NotImplementedError('Abstract method.')
def reset(self, range_table, constrain_func=None):
"""Reset the controller.
Args:
range_table(list<int>): It is used to define the searching space of controller.
The tokens[i] generated by controller should be in [0, range_table[i]).
constrain_func(function): It is used to check whether tokens meet the constraint.
None means there is no constraint. Default: None.
"""
raise NotImplementedError('Abstract method.')
def next_tokens(self):
"""Generate new tokens.
Returns:
list<list>: The next searched tokens.
"""
raise NotImplementedError('Abstract method.')
class RLBaseController(object):
""" Base Controller for reforcement learning"""
def next_tokens(self, *args, **kwargs):
raise NotImplementedError('Abstract method.')
def update(self, *args, **kwargs):
raise NotImplementedError('Abstract method.')
def save_controller(self, program, output_dir):
fluid.save(program, output_dir)
def load_controller(self, program, load_dir):
fluid.load(program, load_dir)
def get_params(self, program):
var_dict = {}
for var in program.global_block().all_parameters():
var_dict[var.name] = np.array(fluid.global_scope().find_var(
var.name).get_tensor())
return var_dict
def set_params(self, program, params_dict, place):
for var in program.global_block().all_parameters():
fluid.global_scope().find_var(var.name).get_tensor().set(
params_dict[var.name], place)
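# Illustrative sketch (not part of the original module): a minimal
# EvolutionaryController subclass showing how the abstract interface above is
# meant to be used. The class name and mutation strategy are hypothetical.
#   import random
#   class RandomMutationController(EvolutionaryController):
#       def reset(self, range_table, constrain_func=None):
#           self.range_table = range_table
#           self.tokens = [0] * len(range_table)
#       def update(self, tokens, reward):
#           self.tokens = list(tokens)  # keep the latest rewarded solution
#       def next_tokens(self):
#           cand = list(self.tokens)
#           i = random.randrange(len(cand))
#           cand[i] = random.randrange(self.range_table[i])
#           return [cand]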
```
#### File: rl_controller/lstm/lstm_controller.py
```python
import math
import logging
import numpy as np
import paddle.fluid as fluid
from paddle.fluid import ParamAttr
from paddle.fluid.layers import RNNCell, LSTMCell, rnn
from paddle.fluid.contrib.layers import basic_lstm
from ...controller import RLBaseController
from ...log_helper import get_logger
from ..utils import RLCONTROLLER
_logger = get_logger(__name__, level=logging.INFO)
uniform_initializer = lambda x: fluid.initializer.UniformInitializer(low=-x, high=x)
class lstm_cell(RNNCell):
def __init__(self, num_layers, hidden_size):
self.num_layers = num_layers
self.hidden_size = hidden_size
self.lstm_cells = []
param_attr = ParamAttr(initializer=uniform_initializer(
1.0 / math.sqrt(hidden_size)))
bias_attr = ParamAttr(initializer=uniform_initializer(
1.0 / math.sqrt(hidden_size)))
for i in range(num_layers):
self.lstm_cells.append(
LSTMCell(hidden_size, param_attr, bias_attr))
def call(self, inputs, states):
new_states = []
for i in range(self.num_layers):
out, new_state = self.lstm_cells[i](inputs, states[i])
new_states.append(new_state)
return out, new_states
@property
def state_shape(self):
return [cell.state_shape for cell in self.lstm_cells]
@RLCONTROLLER.register
class LSTM(RLBaseController):
def __init__(self, range_tables, use_gpu=False, **kwargs):
self.use_gpu = use_gpu
self.range_tables = range_tables
self.lstm_num_layers = kwargs.get('lstm_num_layers') or 1
self.hidden_size = kwargs.get('hidden_size') or 100
self.temperature = kwargs.get('temperature') or None
self.controller_lr = kwargs.get('controller_lr') or 1e-4
self.decay_steps = kwargs.get('controller_decay_steps') or None
self.decay_rate = kwargs.get('controller_decay_rate') or None
self.tanh_constant = kwargs.get('tanh_constant') or None
self.decay = kwargs.get('decay') or 0.99
self.weight_entropy = kwargs.get('weight_entropy') or None
self.controller_batch_size = kwargs.get('controller_batch_size') or 1
self.max_range_table = max(self.range_tables) + 1
self._create_parameter()
self._build_program()
self.place = fluid.CUDAPlace(0) if self.use_gpu else fluid.CPUPlace()
self.exe = fluid.Executor(self.place)
self.exe.run(fluid.default_startup_program())
self.param_dict = self.get_params(self.learn_program)
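    # Overview of the two fluid programs built above (descriptive note):
    #   pred_program : samples a batch of architecture tokens from the LSTM
    #                  policy; used by next_tokens().
    #   learn_program: re-runs the policy on previously sampled tokens and
    #                  applies a REINFORCE-style update of log-prob * advantage,
    #                  with an exponential-moving-average baseline (self.baseline)
    #                  to reduce the variance of the gradient estimate.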
def _lstm(self, inputs, hidden, cell, token_idx):
cells = lstm_cell(self.lstm_num_layers, self.hidden_size)
output, new_states = cells.call(inputs, states=([[hidden, cell]]))
logits = fluid.layers.fc(new_states[0], self.range_tables[token_idx])
if self.temperature is not None:
logits = logits / self.temperature
if self.tanh_constant is not None:
logits = self.tanh_constant * fluid.layers.tanh(logits)
return logits, output, new_states
def _create_parameter(self):
self.g_emb = fluid.layers.create_parameter(
name='emb_g',
shape=(self.controller_batch_size, self.hidden_size),
dtype='float32',
default_initializer=uniform_initializer(1.0))
self.baseline = fluid.layers.create_global_var(
shape=[1],
value=0.0,
dtype='float32',
persistable=True,
name='baseline')
self.baseline.stop_gradient = True
def _network(self, hidden, cell, init_actions=None, is_inference=False):
actions = []
entropies = []
sample_log_probs = []
with fluid.unique_name.guard('Controller'):
self._create_parameter()
inputs = self.g_emb
for idx in range(len(self.range_tables)):
logits, output, states = self._lstm(
inputs, hidden, cell, token_idx=idx)
hidden, cell = np.squeeze(states)
probs = fluid.layers.softmax(logits, axis=1)
if is_inference:
action = fluid.layers.argmax(probs, axis=1)
else:
if init_actions:
action = fluid.layers.slice(
init_actions,
axes=[1],
starts=[idx],
ends=[idx + 1])
action = fluid.layers.squeeze(action, axes=[1])
action.stop_gradient = True
else:
action = fluid.layers.sampling_id(probs)
actions.append(action)
log_prob = fluid.layers.softmax_with_cross_entropy(
logits,
fluid.layers.reshape(
action, shape=[fluid.layers.shape(action), 1]),
axis=1)
sample_log_probs.append(log_prob)
entropy = log_prob * fluid.layers.exp(-1 * log_prob)
entropy.stop_gradient = True
entropies.append(entropy)
action_emb = fluid.layers.cast(action, dtype=np.int64)
inputs = fluid.embedding(
action_emb,
size=(self.max_range_table, self.hidden_size),
param_attr=fluid.ParamAttr(
name='emb_w', initializer=uniform_initializer(1.0)))
self.sample_log_probs = fluid.layers.concat(
sample_log_probs, axis=0)
entropies = fluid.layers.stack(entropies)
self.sample_entropies = fluid.layers.reduce_sum(entropies)
return actions
def _build_program(self, is_inference=False):
self.pred_program = fluid.Program()
self.learn_program = fluid.Program()
with fluid.program_guard(self.pred_program):
self.g_emb = fluid.layers.create_parameter(
name='emb_g',
shape=(self.controller_batch_size, self.hidden_size),
dtype='float32',
default_initializer=uniform_initializer(1.0))
fluid.layers.assign(
fluid.layers.uniform_random(shape=self.g_emb.shape),
self.g_emb)
hidden = fluid.data(name='hidden', shape=[None, self.hidden_size])
cell = fluid.data(name='cell', shape=[None, self.hidden_size])
self.tokens = self._network(
hidden, cell, is_inference=is_inference)
with fluid.program_guard(self.learn_program):
hidden = fluid.data(name='hidden', shape=[None, self.hidden_size])
cell = fluid.data(name='cell', shape=[None, self.hidden_size])
init_actions = fluid.data(
name='init_actions',
shape=[None, len(self.range_tables)],
dtype='int64')
self._network(hidden, cell, init_actions=init_actions)
rewards = fluid.data(name='rewards', shape=[None])
self.rewards = fluid.layers.reduce_mean(rewards)
if self.weight_entropy is not None:
self.rewards += self.weight_entropy * self.sample_entropies
self.sample_log_probs = fluid.layers.reduce_sum(
self.sample_log_probs)
fluid.layers.assign(self.baseline - (1.0 - self.decay) *
(self.baseline - self.rewards), self.baseline)
self.loss = self.sample_log_probs * (self.rewards - self.baseline)
clip = fluid.clip.GradientClipByNorm(clip_norm=5.0)
if self.decay_steps is not None:
lr = fluid.layers.exponential_decay(
self.controller_lr,
decay_steps=self.decay_steps,
decay_rate=self.decay_rate)
else:
lr = self.controller_lr
optimizer = fluid.optimizer.Adam(learning_rate=lr, grad_clip=clip)
optimizer.minimize(self.loss)
def _create_input(self, is_test=True, actual_rewards=None):
feed_dict = dict()
np_init_hidden = np.zeros(
(self.controller_batch_size, self.hidden_size)).astype('float32')
np_init_cell = np.zeros(
(self.controller_batch_size, self.hidden_size)).astype('float32')
feed_dict["hidden"] = np_init_hidden
feed_dict["cell"] = np_init_cell
if is_test == False:
if isinstance(actual_rewards, np.float32):
                assert actual_rewards is not None, "if you want to update the controller, you must input a reward"
actual_rewards = np.expand_dims(actual_rewards, axis=0)
elif isinstance(actual_rewards, np.float) or isinstance(
actual_rewards, np.float64):
actual_rewards = np.float32(actual_rewards)
                assert actual_rewards is not None, "if you want to update the controller, you must input a reward"
actual_rewards = np.expand_dims(actual_rewards, axis=0)
else:
                assert actual_rewards is not None, "if you want to update the controller, you must input a reward"
actual_rewards = actual_rewards.astype(np.float32)
feed_dict['rewards'] = actual_rewards
feed_dict['init_actions'] = np.array(self.init_tokens).astype(
'int64')
return feed_dict
def next_tokens(self, num_archs=1, params_dict=None, is_inference=False):
""" sample next tokens according current parameter and inputs"""
self.num_archs = num_archs
self.set_params(self.pred_program, params_dict, self.place)
batch_tokens = []
feed_dict = self._create_input()
for _ in range(
int(np.ceil(float(num_archs) / self.controller_batch_size))):
if is_inference:
self._build_program(is_inference=True)
actions = self.exe.run(self.pred_program,
feed=feed_dict,
fetch_list=self.tokens)
for idx in range(self.controller_batch_size):
each_token = {}
for i, action in enumerate(actions):
token = action[idx]
if idx in each_token:
each_token[idx].append(int(token))
else:
each_token[idx] = [int(token)]
batch_tokens.append(each_token[idx])
self.init_tokens = batch_tokens
mod_token = (self.controller_batch_size -
(num_archs % self.controller_batch_size)
) % self.controller_batch_size
if mod_token != 0:
return batch_tokens[:-mod_token]
else:
return batch_tokens
def update(self, rewards, params_dict=None):
"""train controller according reward"""
self.set_params(self.learn_program, params_dict, self.place)
feed_dict = self._create_input(is_test=False, actual_rewards=rewards)
loss = self.exe.run(self.learn_program,
feed=feed_dict,
fetch_list=[self.loss])
_logger.info("Controller: current reward is {}, loss is {}".format(
rewards, loss))
params_dict = self.get_params(self.learn_program)
return params_dict
```
#### File: paddleslim/prune/prune_io.py
```python
import os
import paddle.fluid as fluid
from paddle.fluid import Program
from ..core import GraphWrapper
from ..common import get_logger
import json
import logging
__all__ = ["save_model", "load_model"]
_logger = get_logger(__name__, level=logging.INFO)
_PARAMS_FILE = "__params__"
_SHAPES_FILE = "__shapes__"
def save_model(exe, graph, dirname):
"""
Save weights of model and information of shapes into filesystem.
Args:
exe(paddle.fluid.Executor): The executor used to save model.
graph(Program|Graph): The graph to be saved.
dirname(str): The directory that the model saved into.
"""
assert graph is not None and dirname is not None
graph = GraphWrapper(graph) if isinstance(graph, Program) else graph
fluid.io.save_params(
executor=exe,
dirname=dirname,
main_program=graph.program,
filename=_PARAMS_FILE)
weights_file = os.path.join(dirname, _PARAMS_FILE)
_logger.info("Save model weights into {}".format(weights_file))
shapes = {}
for var in graph.all_parameters():
shapes[var.name()] = var.shape()
SHAPES_FILE = os.path.join(dirname, _SHAPES_FILE)
with open(SHAPES_FILE, "w") as f:
json.dump(shapes, f)
_logger.info("Save shapes of weights into {}".format(SHAPES_FILE))
def load_model(exe, graph, dirname):
"""
Load weights of model and information of shapes from filesystem.
Args:
        exe(paddle.fluid.Executor): The executor used to load the model.
        graph(Program|Graph): The graph to be updated by the loaded information.
        dirname(str): The directory from which the model will be loaded.
"""
assert graph is not None and dirname is not None
graph = GraphWrapper(graph) if isinstance(graph, Program) else graph
SHAPES_FILE = os.path.join(dirname, _SHAPES_FILE)
_logger.info("Load shapes of weights from {}".format(SHAPES_FILE))
with open(SHAPES_FILE, "r") as f:
shapes = json.load(f)
for param, shape in shapes.items():
graph.var(param).set_shape(shape)
_logger.info("Load shapes of weights from {}".format(SHAPES_FILE))
fluid.io.load_params(
executor=exe,
dirname=dirname,
main_program=graph.program,
filename=_PARAMS_FILE)
graph.update_groups_of_conv()
graph.infer_shape()
_logger.info("Load weights from {}".format(
os.path.join(dirname, _PARAMS_FILE)))
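# Illustrative usage (a sketch; program variables and directory name are hypothetical):
#   exe = fluid.Executor(fluid.CPUPlace())
#   save_model(exe, pruned_program, "./pruned_model")
#   # ... later, with a program of the same structure:
#   load_model(exe, eval_program, "./pruned_model")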
```
#### File: PaddleSlim/tests/layers.py
```python
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
def conv_bn_layer(input,
num_filters,
filter_size,
name,
stride=1,
groups=1,
act=None):
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=(filter_size - 1) // 2,
groups=groups,
act=None,
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False,
name=name + "_out")
bn_name = name + "_bn"
return fluid.layers.batch_norm(
input=conv,
act=act,
name=bn_name + '_output',
param_attr=ParamAttr(name=bn_name + '_scale'),
bias_attr=ParamAttr(bn_name + '_offset'),
moving_mean_name=bn_name + '_mean',
moving_variance_name=bn_name + '_variance', )
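# Illustrative usage (shapes and names are examples only):
#   image = fluid.data(name='image', shape=[None, 3, 16, 16], dtype='float32')
#   conv1 = conv_bn_layer(image, num_filters=8, filter_size=3, name="conv1", act='relu')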
```
#### File: PaddleSlim/tests/test_quant_embedding.py
```python
import sys
sys.path.append("../")
import paddle.fluid as fluid
import paddleslim.quant as quant
import unittest
class TestQuantEmbedding(unittest.TestCase):
def test_quant_embedding(self):
train_program = fluid.Program()
with fluid.program_guard(train_program):
input_word = fluid.data(
name="input_word", shape=[None, 1], dtype='int64')
input_emb = fluid.embedding(
input=input_word,
is_sparse=False,
size=[100, 128],
param_attr=fluid.ParamAttr(
name='emb',
initializer=fluid.initializer.Uniform(-0.005, 0.005)))
infer_program = train_program.clone(for_test=True)
use_gpu = True
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
quant_program = quant.quant_embedding(infer_program, place)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jianTaoLiu-SWJTU2012/taolib",
"score": 2
} |
#### File: CoreLib/WWW/Search.py
```python
import sys
import re
from taolib.CoreLib.DB import dbRawChIP, dbMotif
# ------------------------------------
# constants
# ------------------------------------
# ------------------------------------
# Misc functions
# ------------------------------------
def cgi_search_DBRawChIP (dbfile, query):
db = dbRawChIP.DBRawChIP(file=dbfile)
where_clause = ""
if query:
terms = parse_query_text(query)
where_clause = "where "+" and ".join( map(lambda x:str(x[0])+" like \"%%"+str(x[1])+"%%\"", terms) )
c = db.execute('select id,factor,organism,cell,condition,platform,lab,pmid from chips %s order by id' % where_clause)
return c
def parse_query_text ( text ):
"""Return a dictionary with keywords and values paires.
"""
keywds = re.findall('\[\w+\]',text)
fields = re.split('\[\w+\]',text)
keywds.append('[any]')
return zip(keywds,fields)
# ------------------------------------
# Classes
# ------------------------------------
# ------------------------------------
# Main function
# ------------------------------------
def test():
#test function
pass
if __name__ == '__main__':
test()
```
#### File: taolib/Scripts/check_map.py
```python
import os
import sys
import re
from optparse import OptionParser
# ------------------------------------
# constants
# ------------------------------------
MIN_DIST = 50
MAX_DIST = 500
# ------------------------------------
# Misc functions
# ------------------------------------
# ------------------------------------
# Classes
# ------------------------------------
class Pos:
def __init__ (self,chr,s):
self.chr = chr
self.start =s
class Pair:
def __init__ (self,n):
self.n = n
self.left = []
self.right = []
def addleft (self,pos):
self.left.append(pos)
def addright (self,pos):
self.right.append(pos)
def pair(self):
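        # Count left/right tag combinations that map to the same chromosome
        # with the right tag MIN_DIST-MAX_DIST (50-500 bp) downstream of the
        # left tag, i.e. a plausible paired-end insert size.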
n = 0
for rp in self.right:
for lp in self.left:
if lp.chr == rp.chr:
dist = rp.start - lp.start
if dist<MAX_DIST and dist>MIN_DIST:
n+=1
return n
# ------------------------------------
# Main function
# ------------------------------------
def main():
usage = "usage: %prog [options]"
description = "Analyze the mapping result from xMAN, report the ratio for unique mapping\nIt's the step #3 after sample_seq.py (#1) and xMAN(#2) of the whole pipeline"
optparser = OptionParser(version="%prog 0.1",description=description,usage=usage,add_help_option=False)
optparser.add_option("-h","--help",action="help",help="Show this help message and exit.")
optparser.add_option("-i","--ifile",dest="ifile",type="string",
help="input xMAN mapping file")
optparser.add_option("-o","--ofile",dest="ofile",type="string",
help="output file")
optparser.add_option("-p","--pair",dest="pair",action="store_true",
help="Whether or not to parse the pair-end mapping result")
(options,args) = optparser.parse_args()
    # ... you have a large list of positions
    if not options.ifile or not options.ofile:
optparser.print_help()
sys.exit(1)
ifhd = open(options.ifile,"r")
ofhd = open(options.ofile,"w")
col_tagno = 4
col_chr = 2
col_start = 3
pairs = {}
for l in ifhd.readlines():
if l.startswith("#"): continue
fields = l.split("\t")
#print fields
chr = fields[col_chr]
start = int(fields[col_start])
tagno = int(fields[col_tagno])
right = False
if tagno % 2 ==0:
tagno-=1
right = True
if not pairs.has_key(tagno):
pairs[tagno]=Pair(tagno)
if chr == "Nomatch":
continue
if right:
pairs[tagno].addright(Pos(chr,start))
else:
pairs[tagno].addleft(Pos(chr,start))
ns = pairs.keys()
ns.sort()
total_unique_pairs = 0
total_pairs = len(ns)
for n in ns:
p = pairs[n].pair()
ofhd.write( "%d\t%d\n" % (n,p))
if p == 1:
total_unique_pairs += 1
ofhd.write( "total: %d\nmapped: %d\nratio: %.2f%%\n" % (total_pairs,total_unique_pairs,float(total_unique_pairs)/total_pairs*100) )
ofhd.close()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
        sys.stderr.write("User interrupt me! ;-) See you!\n")
sys.exit(0)
```
#### File: taolib/Scripts/convert_gene_ids.py
```python
import sys
from optparse import OptionParser
from Bio import Entrez
# ------------------------------------
# constants
# ------------------------------------
# *Always* tell NCBI who you are
Entrez.email = "your email here"
# ------------------------------------
# Misc functions
# ------------------------------------
def search_genes(id_list,search_field):
"""Use ESearch to convert RefSeq or Gene symbols to standard
Entrez IDs.
A request to esearch.cgi is like:
http://www.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=gene&term=ID_LIST[SEARCH_FIELD]
Return a list of Entrez IDs.
"""
term = " OR ".join(map(lambda x:x+"["+search_field+"]",id_list))
esearch_result = Entrez.esearch(db="gene",term=term,retmod="xml")
parsed_result = Entrez.read(esearch_result)
return parsed_result['IdList']
def fetch_genes(id_list):
"""Fetch Entrez Gene records using Bio.Entrez, in particular epost
(to submit the data to NCBI) and efetch to retrieve the
information, then use Entrez.read to parse the data.
Returns a list of parsed gene records.
"""
request = Entrez.epost("gene",id=",".join(id_list))
try:
result = Entrez.read(request)
except RuntimeError as e:
        #FIXME: How to generate NAs instead of causing an error with invalid IDs?
print "An error occurred while retrieving the annotations."
print "The error returned was %s" % e
sys.exit(-1)
webEnv = result["WebEnv"]
queryKey = result["QueryKey"]
efetch_result = Entrez.efetch(db="gene", webenv=webEnv, query_key = queryKey, retmode="xml")
genes = Entrez.read(efetch_result)
#print "Retrieved %d records for %d genes" % (len(genes),len(id_list))
return genes
def parse_genes(genes):
"""Parse various gene information including:
1. Species name (taxonomy name)
2. Entrez gene ID
3. Official symbol
4. RefSeq IDs
5. Offical full name
Basically, just to go through the parsed xml data.... A big headache to figure it out...
Return a list of dictionary.
"""
gene_info_list = []
for gene_data in genes:
gene_info = {}
# get entrez ID
try:
gene_info["entrez_id"] = gene_data["Entrezgene_track-info"]["Gene-track"]["Gene-track_geneid"]
except KeyError:
gene_info["entrez_id"] = ""
continue
gene_info["refseq_ids"] = []
for comment in gene_data.get("Entrezgene_comments",[]):
# look for refSeq annotation
if comment.get("Gene-commentary_heading",None) == "NCBI Reference Sequences (RefSeq)":
# get sub-comments
for subcomment in comment.get("Gene-commentary_comment",[]):
for product in subcomment.get("Gene-commentary_products",[]):
if product.get("Gene-commentary_heading",None) == "mRNA Sequence":
gene_info["refseq_ids"].append(product.get("Gene-commentary_accession",""))
# get properties
gene_info["official_symbol"] = "" # optional
gene_info["official_full_name"] = "" # optional
for gene_property in gene_data.get("Entrezgene_properties",[]):
if gene_property.get("Gene-commentary_label",None) == "Nomenclature":
for sub_property in gene_property["Gene-commentary_properties"]:
if sub_property.get("Gene-commentary_label",None) == "Official Symbol":
gene_info["official_symbol"] = sub_property.get("Gene-commentary_text","")
if sub_property.get("Gene-commentary_label",None) == "Official Full Name":
gene_info["official_full_name"] = sub_property.get("Gene-commentary_text","")
# get taxname
try:
gene_info["taxname"] = gene_data["Entrezgene_source"]["BioSource"]["BioSource_org"]["Org-ref"]["Org-ref_taxname"]
except KeyError:
gene_info["taxname"] = ""
continue
gene_info_list.append(gene_info)
return gene_info_list
def print_genes (gene_info_list):
"""Print out parsed entrez gene information in tab-delimited way.
"""
# header
print "%s\t%s\t%s\t%s\t%s" % ("TaxonomyName","EntrezID","OfficialSymbol","RefSeqIDs","OfficialFullName")
for g in gene_info_list:
print "%s\t%s\t%s\t%s\t%s" % (g["taxname"],g["entrez_id"],g["official_symbol"],",".join(g["refseq_ids"]),g["official_full_name"])
# ------------------------------------
# Classes
# ------------------------------------
# ------------------------------------
# Main function
# ------------------------------------
def main():
usage = "usage: %prog [options]"
description = "Use NCBI web API to convert gene ids between different identifier types."
optparser = OptionParser(version="%prog 0.1",description=description,usage=usage,add_help_option=False)
optparser.add_option("-h","--help",action="help",help="Show this help message and exit.")
optparser.add_option("-i","--id",dest="ids",type="string",action="append",
help="Gene id, according to identifier setting of input, can be Entrez, RefSeq, or Gene symbol. Multiple ids are allowed.")
optparser.add_option("-a","--itype",dest="itype",default="entrez",
help="Identifier type of your input ids. Can be 'entrez', 'refseq', or 'symbol'. Default: 'entrez'.")
(options,args) = optparser.parse_args()
if not options.ids:
optparser.print_help()
sys.exit(-1)
input_id_list = options.ids
if options.itype == "refseq":
entrez_id_list = search_genes(input_id_list,"ACCN")
elif options.itype == "symbol":
entrez_id_list = search_genes(input_id_list,"GENE")
elif options.itype == "entrez":
entrez_id_list = input_id_list
entrez_id_genes = fetch_genes(entrez_id_list)
parsed_genes = parse_genes(entrez_id_genes)
print_genes(parsed_genes)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
sys.stderr.write("User interrupt me! ;-) See you!\n")
sys.exit(0)
```
#### File: taolib/Scripts/count_probes_in_peaks.py
```python
import os
import sys
import re
import logging
from random import sample
from optparse import OptionParser
from Cistrome.CoreLib.Parser import XLSIO,WiggleIO
from Cistrome.CoreLib.BasicStat.Func import *
# ------------------------------------
# constants
# ------------------------------------
logging.basicConfig(level=20,
format='%(levelname)-5s @ %(asctime)s: %(message)s ',
datefmt='%a, %d %b %Y %H:%M:%S',
stream=sys.stderr,
filemode="w"
)
# ------------------------------------
# Misc functions
# ------------------------------------
error = logging.critical # function alias
warn = logging.warning
debug = logging.debug
info = logging.info
# ------------------------------------
# Classes
# ------------------------------------
# ------------------------------------
# Main function
# ------------------------------------
def main():
usage = "usage: %prog [options]"
description = "Summarize ChIP-chip experiment. Calculate how many probes are included in peak regions."
optparser = OptionParser(version="%prog 0.1",description=description,usage=usage,add_help_option=False)
optparser.add_option("-h","--help",action="help",help="Show this help message and exit.")
optparser.add_option("-p","--peak1",dest="peak1",type="string",
help="peak file in xls format for #1 replicate")
optparser.add_option("-x","--wig1",dest="wig1",type="string",
help="wiggle file for #1 replicate")
optparser.add_option("-o","--ofile",dest="ofile",
help="output file")
optparser.add_option("-f","--format",dest="format",type="string",
help="ma2c, mat or macs, default: ma2c",default="ma2c")
(options,args) = optparser.parse_args()
if not options.peak1 or not options.wig1 or not options.ofile:
optparser.print_help()
sys.exit(1)
format = options.format.lower()
if format == 'ma2c':
xlsparser = XLSIO.parse_pMA2C_xls
elif format == 'mat':
xlsparser = XLSIO.parse_MAT_xls
elif format == 'macs':
xlsparser = XLSIO.parse_MACS_xls
else:
print "unrecognized format: %s" % (format)
sys.exit(1)
ofhd = open(options.ofile,"w")
info("#1 read peaks from first replicate")
    peaks1 = xlsparser(open(options.peak1,"r"))
info("#1 read wiggle track from peak file")
wigtrack1 = WiggleIO.WiggleIO(options.wig1).build_wigtrack()
info("#1 finish reading wiggle files")
info("#2 count probes in peaks")
counts = peaks1.extract_wiggle_values_by_chrom(wigtrack1,func=len)
info("#3 output")
chroms = wigtrack1.get_chr_names()
ofhd.write("chr\ttotal\tin_peak\tpercentage\n")
for chrom in chroms:
total_probe_chr = len(wigtrack1.get_data_by_chr(chrom)[0])
if counts.has_key(chrom):
peak_probe_chr = sum(counts[chrom])
else:
peak_probe_chr = 0
info(" chromosome %s: total %d, in peak %d" % (chrom,total_probe_chr,peak_probe_chr))
ofhd.write("%s\t%d\t%d\t%.2f\n" % (chrom,total_probe_chr,
peak_probe_chr,
100.0*peak_probe_chr/total_probe_chr))
ofhd.close()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
sys.stderr.write("User interrupt me! ;-) See you!\n")
sys.exit(0)
```
#### File: taolib/Scripts/kmeans2image.py
```python
import os
import sys
import re
from PIL import Image, ImageDraw
# ------------------------------------
# Main function
# ------------------------------------
help_message = """
Draw the K-means clustering result.
need 6 parameter: %s <kmeans_file> <lim> <x_points> <y_points> <x_ext> <y_ext>
kmeans_file : tab-delimited plain text file. First column is cluster number by k-means, and following columns are data columns.
lim : data value limit
x_points : number of data value columns
y_points : number of rows
x_ext : pixels extended in x-axis
y_ext : pixels extended in y-axis
""" % sys.argv[0]
def main():
if len(sys.argv) < 7:
sys.stderr.write(help_message)
sys.exit(1)
fhd = open (sys.argv[1])
lim = int(sys.argv[2])
x_points = int(sys.argv[3])
y_points = int(sys.argv[4])
x_ext = int(sys.argv[5])
y_ext = int(sys.argv[6])
a = Image.new("RGB",(x_points*x_ext,y_points*y_ext),"white")
d = ImageDraw.Draw(a)
y = 0
for i in fhd:
y += 1
i.strip()
if not re.search("^\d+",i):
continue
values = map(float,i.split())
x = 0
cl = values[0]
for v in values[1:]:
x += 1
c = "hsl(%d,100%%,%d%%)" % (cl*70,min(1,v/lim)*90.0)
d.rectangle([(int(x*x_ext),int(y*y_ext)),(int((x+1)*x_ext),int((y+1)*y_ext))],outline=c,fill=c)
a.save(sys.argv[1]+".png")
print "check %s!" % (sys.argv[1]+".png")
if __name__ == '__main__':
main()
```
#### File: taolib/Scripts/motif_enrich.py
```python
import os
import sys
import logging
from optparse import OptionParser
from glob import glob
import Cistrome
from Cistrome.CoreLib.Parser.MotifScan import read_motif2
from Cistrome.CoreLib.Parser.BedIO import parse_BED
from Cistrome.CoreLib.BasicStat import Prob
import time
# ------------------------------------
# constants
# ------------------------------------
GENOME_SIZE = {"mm8":2644077689L,
"hg18":3080419480L}
logging.basicConfig(level=20,
format='%(levelname)-5s @ %(asctime)s: %(message)s ',
datefmt='%a, %d %b %Y %H:%M:%S',
stream=sys.stderr,
filemode="w"
)
# ------------------------------------
# Misc functions
# ------------------------------------
error = logging.critical # function alias
warn = logging.warning
debug = logging.debug
info = logging.info
def log( msg ):
sys.stderr.write( msg )
def fact (n):
return reduce(lambda a,b:a*(b+1),range(n),1)
# ------------------------------------
# Classes
# ------------------------------------
# ------------------------------------
# Main function
# ------------------------------------
def main():
usage = "usage: %prog [options]"
description = "Calculate the motif enrichment measured in foldchange and p-value for one BED file and one reference BED file"
optparser = OptionParser(version="%prog 0.1",description=description,usage=usage,add_help_option=False)
optparser.add_option("-h","--help",action="help",help="Show this help message and exit.")
optparser.add_option("-s","--species",dest="species",type="string",
help="species, must be \"mm8\" or \"hg18\"")
optparser.add_option("-i","--ifile",dest="ifile",type="string",
help="input BED file, e.g. the ChIP regions")
optparser.add_option("-r","--rfile",dest="rfile",type="string",
help="input reference BED file, e.g. the tiled regions. [optional] if not set, use the whole genome as reference")
optparser.add_option("-c","--cutoff",dest="cutoff",type="int",default=0,
help="cutoff for the motif scan score")
optparser.add_option("-d","--idir",dest="idir",type="string",
help="input directory for binary Motif Scan Results files")
optparser.add_option("-a","--all",dest="showall",action="store_true",
help="If set, also show the depleted motifs. Default, not show")
optparser.add_option("--minfc",dest="minfc",type="float",default=0,
help="minimum foldchange for motif enrichment, default:0")
optparser.add_option("--maxp",dest="maxp",type="float",default=1,
help="maximum p-value for motif enrichment, default:1")
optparser.add_option("--minpercent",dest="minpercent",type="int",default=5,
help="minimum percentage of input BED regions containing motif, default:5")
optparser.add_option("--maxpercent",dest="maxpercent",type="int",default=300,
help="maximum percentage of input BED regions containing motif, default:300")
optparser.add_option("--verbose",dest="verbose",type="string",
help="Name of a directory. if set, save verbose information in the direcotry.", default=None)
optparser.add_option("-o","--ofile",dest="ofile",
help="output file")
(options,args) = optparser.parse_args()
if not options.ifile or not options.idir or not options.ofile:
optparser.print_help()
sys.exit(1)
if options.species != "mm8" and options.species != "hg18":
error ("Species must be \"mm8\" for mouse or \"hg18\" for human!")
sys.exit(1)
if options.verbose:
if os.path.exists(options.verbose):
warn ("Path exists!: %s" % (options.verbose))
#sys.exit(1)
else:
os.mkdir(options.verbose)
ofhd = file(options.ofile,"w")
ofhd.write("""#Motif Enrichment Report:
# Generated at %s
# Species: %s
# Motif cutoff: %d
# Input BED: %s
""" % (time.asctime(),options.species,options.cutoff,options.ifile))
if options.rfile:
ofhd.write("# Reference: %s\n" % (options.rfile))
else:
ofhd.write("# Reference: Whole Genome Background\n")
if not options.showall:
ofhd.write("""# Minimum foldchange: %.2f
# Maximum p-value: %.5e
# Minimum percentage of coverage: %d%%
# Maximum percentage of coverage: %d%%
""" % (options.minfc,options.maxp,options.minpercent,options.maxpercent))
ofhd.write("#\n# motif\tnum_detected\tnum_in_reference_region\tpercent\tfc\tpvalue\n")
info("#1. Read BED file: %s" % (options.ifile))
track = parse_BED(file(options.ifile,'r'))
track.sort()
track.merge_overlap()
track_len = track.length()
if options.rfile:
info("#2. Read reference BED file: %s" % (options.rfile))
# add tags
refer_track = parse_BED(file(options.rfile,'r'))
refer_track.sort()
refer_track.merge_overlap()
refer_len = refer_track.length()
whole_genome = False
else:
info("#2. Use whole genome as reference!")
refer_len = GENOME_SIZE[options.species]
whole_genome = True
# ofhd.write("""# Length of Input BED: %d
# # Length of Reference: %d
# """ % (track_len,refer_len))
info("#3. Read Motif files in %s" % (options.idir))
mfiles = glob(options.idir+"/*.dat")
info(" * Altogether %d motif to be processed." %(len(mfiles)))
for mf in mfiles:
mbase = os.path.basename(mf)
info(" motif: read %s..." % (mbase))
if options.verbose:
vfhd = open(options.verbose+"/"+mbase+".txt","w")
mfhd = open(mf,"r")
mtrack = read_motif2(mfhd,options.species,options.cutoff) # a FWTrackI object
mtrack.fw=5
info(" calculating...")
(num,v) = track.overlap_with_FWTrackI(mtrack,verbose=True)
percent = float(len(v))/track.total()*100
if options.verbose:
for eachline in v:
vfhd.write(eachline+"\n")
v=None
vfhd.close()
if whole_genome:
refer_num = mtrack.total
else:
refer_num = refer_track.overlap_with_FWTrackI(mtrack,verbose=False)
if refer_num == 0:
pvalue = "NA"
fc = "NA"
else:
lam = float(track_len)*refer_num/refer_len
fc = "%.4f" % (float(num)/refer_num*refer_len/track_len) # foldchange
if lam == 0:
pvalue = "NA"
elif num == 0:
pvalue = "NA"
else:
pvalue = Prob.poisson_cdf(num,lam,lower=False)
pvalue = "%.3e" % (pvalue)
info("%s\t%d\t%d\t%.2f\t%s\t%s" % (mbase.rstrip(".dat"),num,refer_num,percent,fc,pvalue))
if not options.showall:
if (fc == "NA" or float(fc) < options.minfc or (pvalue != "NA" and float(pvalue) > options.maxp) or float(num)/track.total() < 0.05 or percent < options.minpercent or percent > options.maxpercent):
info( " skipped")
continue
ofhd.write("%s\t%d\t%d\t%.2f\t%s\t%s\n" % (mbase.rstrip(".dat"),num,refer_num,percent,fc,pvalue))
mtrack = None
mfhd.close()
ofhd.close()
info("Over!")
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
warn("User interrupt me! ;-) See you!")
sys.exit(0)
```
#### File: jianTaoLiu-SWJTU2012/taolib/setup.py
```python
import os
import sys
from distutils.core import setup, Extension
def main():
if not float(sys.version[:3])>=2.4:
sys.stderr.write("CRITICAL: Python version must be greater than or equal to 2.4! python 2.6.2 is recommended!\n")
sys.exit(1)
setup(name="taolib",
version="1.0",
description="Tao's libraries",
author='<NAME>',
author_email='<EMAIL>',
url='http://vladimirliu.com/~taoliu/',
package_dir={'taolib' : '.'},
packages=['taolib','taolib.CoreLib',
'taolib.CoreLib.DB','taolib.CoreLib.FeatIO',
'taolib.CoreLib.BasicStat','taolib.CoreLib.WWW',
'taolib.CoreLib.Parser','taolib.CoreLib.SeqIO',
'taolib.CoreLib.BinKeeper','taolib.CoreLib.Algorithm',
'taolib.Assoc',
'taolib.ExtApp',
'taolib.Motif',
# 'taolib.IntegrativeBioinformatics',
# 'taolib.IntegrativeBioinformatics.elements',
# 'taolib.IntegrativeBioinformatics.networks',
# 'taolib.IntegrativeBioinformatics.algos',
# 'taolib.IntegrativeBioinformatics.features',
# 'taolib.IntegrativeBioinformatics.links',
# 'taolib.IntegrativeBioinformatics.apache',
],
scripts=[
# 'Scripts/motif_enrich.py',
# 'Scripts/qc_chIP_peak.py',
# 'Scripts/qc_chIP_whole.py',
# 'Scripts/count_probes_in_peaks.py',
# 'Scripts/count_probes_in_ranges.py',
'Scripts/xyz2image.py',
# 'Scripts/refine_peak.py',
# 'Scripts/fq2fa.py',
# 'Scripts/wiggle_reformat.py',
# 'Scripts/wig_correlation.py',
# 'Scripts/wig_correlation_in_bed_file.py',
# 'Scripts/conservation_plot.py',
# 'Scripts/wig_extract_chrom.py',
# 'Scripts/wig_split.py',
# 'Scripts/wig_call_peaks.py',
# 'Scripts/wig_call_peaks2.py',
# 'Scripts/naive_call_peaks.py',
# 'Scripts/wig2bedGraphBins.py',
# 'Scripts/bed_correlation.py',
# 'Scripts/ce_histone_matrix.py',
'Scripts/rand_pos.py',
# 'Scripts/draw_BED.py',
# 'Scripts/norm.py',
# 'Scripts/cutoff.py',
# 'Scripts/ChIP-seq_Pipeline1.py',
# 'Scripts/convert_gene_ids.py',
# 'Scripts/ma2cWigToBedGraph',
# 'Scripts/hmm_conception.py',
'Scripts/bin_chromosome',
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: Artistic License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Database',
],
requires=['MySQL_python','PIL']
)
if __name__ == '__main__':
main()
```
#### File: test/CoreLib/test_callpeak.py
```python
from taolib.CoreLib.Parser import *
import unittest
class TestCallPeak(unittest.TestCase):
def setUp(self):
self.fhd = open("chr22.score.wig")
def test_callpeak (self):
wio = WiggleIO.WiggleIO(self.fhd)
wtrack = wio.build_wigtrack()
wpeaks = wtrack.call_peaks(cutoff=10,min_window=300,max_gap=50)
print wpeaks.tobed()
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jianwang0212/Dash_eth",
"score": 3
} |
#### File: Dash_eth/Data_fetcher/fetcher_bal_orderbook.py
```python
import requests
import pandas as pd
from datetime import datetime
import ccxt
import sqlite3
import config
import settings
now = datetime.utcnow()
def get_fx(currency_f):
fx = requests.get(
'https://apilayer.net/api/live?access_key=a1d6d82a3df7cf7882c9dd2b35146d6e&source=USD&format=1').json()
return fx['quotes']['USD' + currency_f.upper()]
def get_basic(time, ex, currency_f):
now_ts = datetime.timestamp(time)
fx_name = 'USD/' + currency_f.upper()
fx_rate = get_fx(currency_f)
    basics = {'time': time.strftime("%y-%m-%d %H:%M:%S"),
'utc': [now_ts],
'exchange': [ex],
'pair': ['ETH/' + currency_f.upper()],
'fx': [float(fx_rate)]}
basics = pd.DataFrame(basics)
return basics
def get_ob(ex, pair):
    ex_instance = getattr(ccxt, ex)()  # look up the exchange class by name
ob = ex_instance.fetch_order_book(pair)
bid_px = dict(zip([str(i) + '_bid_px' for i in range(1, 6)],
[float(ob['bids'][i][0]) for i in range(5)]))
ask_px = dict(zip([str(i) + '_ask_px' for i in range(1, 6)],
[float(ob['asks'][i][0]) for i in range(5)]))
bid_sz = dict(zip([str(i) + '_bid_sz' for i in range(1, 6)],
[float(ob['bids'][i][1]) for i in range(5)]))
ask_sz = dict(zip([str(i) + '_ask_sz' for i in range(1, 6)],
[float(ob['asks'][i][1]) for i in range(5)]))
elements = {}
elements.update(bid_px)
elements.update(ask_px)
elements.update(bid_sz)
elements.update(ask_sz)
df = pd.DataFrame({k: [v] for k, v in elements.items()})
return df
def get_bal(ex, currency_f):
apiInstance = settings.exchanges[ex]['init']
bal = apiInstance.fetch_balance()
bal_fiat_free = bal[currency_f.upper()]['free']
bal_eth_free = bal['ETH']['free']
bal_fiat_used = bal[currency_f.upper()]['used']
bal_eth_used = bal['ETH']['used']
bal_fiat_total = bal[currency_f.upper()]['total']
bal_eth_total = bal['ETH']['total']
bl = {}
for i in ('bal_fiat_free', 'bal_fiat_used', 'bal_fiat_total', 'bal_eth_free', 'bal_eth_used', 'bal_eth_total'):
bl[i] = locals()[i]
df = pd.DataFrame({k: [v] for k, v in bl.items()})
return df
def get_open_order(ex, currency_f):
apiInstance = settings.exchanges[ex]['init']
open_orders = apiInstance.fetch_open_orders()
df = pd.DataFrame(open_orders)
    df = df.drop(['clientOrderId', 'info'], axis=1)  # drop() returns a new frame, so keep the result
return df
def fetcher(exchange, now):
ex = exchange['name']
currency_c = 'eth'
currency_f = exchange['currency']
now_ts = now.timestamp()
pair = currency_c.upper() + '/' + currency_f.upper()
apiInstance = settings.exchanges[ex]['init']
basics = get_basic(now, ex, currency_f)
bl = get_bal(ex, currency_f)
ob = get_ob(ex, pair)
df = pd.concat([basics, bl, ob], axis=1)
df_openOrder = get_open_order(ex, currency_f)
num_rows = df_openOrder.shape[0] - 1
basics_repeated = basics.append([basics] * num_rows)
basics_repeated.reset_index(drop=True, inplace=True)
df_openOrder = pd.concat([basics_repeated, df_openOrder], axis=1)
select = ['time', 'utc', 'exchange', 'pair', 'fx', 'id', 'timestamp', 'lastTradeTimestamp',
'symbol', 'type', 'side', 'price', 'cost', 'average', 'amount',
'filled', 'remaining', 'status', 'fee', 'trades']
df_openOrder = df_openOrder[select]
name = ['time', 'utc', 'exchange', 'pair', 'fx', 'OpenOrder_id', 'OpenOrder_timestamp', 'OpenOrder_lastTradeTimestamp',
'OpenOrder_symbol', 'OpenOrder_type', 'OpenOrder_side', 'OpenOrder_price', 'OpenOrder_cost', 'OpenOrder_average', 'OpenOrder_amount',
'OpenOrder_filled', 'OpenOrder_remaining', 'OpenOrder_status', 'OpenOrder_fee', 'OpenOrder_trades']
df_openOrder.columns = name
return df, df_openOrder
def save_to_sql(ex, df, df_openOrder):
conn = sqlite3.connect('/Users/Zi/Projects/Dash_eth/test.db')
table_name = ex + "_bal_orderbook"
df.to_sql(table_name, conn, if_exists='append')
table_name = ex + "_open_order"
df_openOrder.to_sql(table_name, conn, if_exists='append')
conn.close()
exchanges = settings.exchanges
for k, v in exchanges.items():
exchange_name = k
exchange = v
df, df_openOrder = fetcher(exchange, now)
save_to_sql(exchange_name, df, df_openOrder)
```
#### File: jianwang0212/Dash_eth/myapp.py
```python
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import dash_table
import plotly.graph_objs as go
from plotly.subplots import make_subplots
import colorlover as cl
import datetime as dt
import flask
import os
import pandas as pd
import time
import sqlite3
import numpy as np
import math
app = dash.Dash()
# data
exchange_name = 'bitbay'
conn = sqlite3.connect(
'/Users/Zi/Projects/Dash_eth/test.db', check_same_thread=False)
df1 = pd.read_sql_query(
"SELECT * from {}_merge_td_bal_ods".format(exchange_name), conn)
df2 = pd.read_sql_query(
"SELECT * from {}_open_order".format(exchange_name), conn)
df3 = pd.read_sql_query("SELECT * from {}_trades".format(exchange_name), conn)
conn.close()
def fig1_producer(df1, df2, df3):
# setup
rgba_bid = ['rgba(132, 132, 239, .9)',
'rgba(28, 28, 221, .7)',
'rgba(28, 28, 221, .5)',
'rgba(28, 28, 221, .4)',
'rgba(28, 28, 221, .2)']
rgba_ask = ['rgba(221, 28, 86, .9)',
'rgba(221, 28, 86, .7)',
'rgba(221, 28, 86, .5)',
'rgba(221, 28, 86, .4)',
'rgba(221, 28, 86, .2)']
rgba_bal = ['rgba(10, 10, 162, .9)',
'rgba(10, 10, 162, .4)',
'rgba(235, 64, 52, .9)',
'rgba(235, 64, 52, .4)']
# Open orders: create buy & sell column
for side in ['buy', 'sell']:
df2[side + '_OpenOrder_price'] = df2['OpenOrder_price']
condition = df2['OpenOrder_side'] != side
df2.loc[condition, side + '_OpenOrder_price'] = None
# Trades: create buy & sell column
for side in ['buy', 'sell']:
df1[side + '_executed'] = df1['price']
condition = df1['side'] != side
df1.loc[condition, side + '_executed'] = None
# Graph - setup traces
# Market orderbook
for i in [1]:
trace_bid = go.Scatter(x=df1['time_x'],
y=df1[str(i) + '_bid_px'],
name='bid_' + str(i),
line={"shape": 'hv'},
marker_color=rgba_bid[i - 1],
customdata=df1[str(i) + '_bid_sz'],
hovertemplate="mkt_px:" + "%{y}; "
"sz:" + "%{customdata:.3f}<br>"
)
trace_ask = go.Scatter(x=df1['time_x'],
y=df1[str(i) + '_ask_px'],
name='ask_' + str(i),
line={"shape": 'hv'},
marker_color=rgba_ask[i - 1],
customdata=df1[str(i) + '_ask_sz'],
hovertemplate="mkt_px:" + "%{y}; "
"sz:" + "%{customdata:.3f}<br>"
)
# My open orders
trace_open_orders_buy = go.Scatter(x=df2['time'],
y=df2['buy_OpenOrder_price'],
mode='markers',
name='my_bid',
opacity=0.8,
marker=dict(color='Yellow',
size=10,
opacity=0.6,
symbol='line-ew',
line=dict(
color='LightSkyBlue',
width=4)),
hovertemplate="my_bid:" + "%{y}"
)
trace_open_orders_sell = go.Scatter(x=df2['time'],
y=df2['sell_OpenOrder_price'],
mode='markers',
name='my_ask',
opacity=0.8,
marker=dict(color='gold',
size=10,
opacity=0.6,
symbol='line-ew',
line=dict(
color='violet',
width=4)),
hovertemplate="my_ask:" + "%{y}"
)
# My trades
# my trades - marker setup: create a size var for marker
sz = df1['amount'].tolist()
sz1 = [0 if pd.isnull(x) else math.log10(float(x) + 10) * 5 for x in sz]
trace_trades_buy = go.Scatter(x=df1['time_x'],
y=df1['buy_executed'],
name='bid_executed',
mode='markers',
marker=dict(color='LightSkyBlue',
size=sz1,
line=dict(
color='blue',
width=2)
),
marker_symbol='triangle-up',
customdata=df1['amount'],
hovertemplate="buy_px:" + "%{y}; "
"sz:" + "%{customdata}<br>"
)
trace_trades_sell = go.Scatter(x=df1['time_x'],
y=df1['sell_executed'],
name='ask_executed',
mode='markers',
marker=dict(color='LightSkyBlue',
size=sz1,
line=dict(
color='red',
width=2)
),
marker_symbol='triangle-down',
customdata=df1['amount'],
hovertemplate="sell_px:" + "%{y}; "
"sz:" + "%{customdata}<br>"
)
# balance
trace_bal_ff = go.Bar(x=df1['time_x'],
y=df1['bal_fiat_free'] / df1['1_bid_px'],
name='bal_fiat_free',
marker_color=rgba_bal[0])
trace_bal_fu = go.Bar(x=df1['time_x'],
y=df1['bal_fiat_used'] / df1['1_bid_px'],
name='bal_fiat_used',
marker_color=rgba_bal[1])
trace_bal_ef = go.Bar(x=df1['time_x'],
y=df1['bal_eth_free'],
name='bal_eth_free',
marker_color=rgba_bal[2])
trace_bal_eu = go.Bar(x=df1['time_x'],
y=df1['bal_eth_used'],
name='bal_eth_used',
marker_color=rgba_bal[3])
trace_bal_all = go.Scatter(x=df1['time_x'],
y=(df1['bal_eth_total'] * df1['1_bid_px'] +
df1['bal_fiat_total']) / df1['fx_x'],
name='bal_all($)')
# Group traces to fig
fig1_sub1_traces = [trace_bid, trace_ask, trace_open_orders_buy, trace_open_orders_sell,
trace_trades_buy, trace_trades_sell]
fig1_sub2_traces = [trace_bal_ff, trace_bal_fu,
trace_bal_ef, trace_bal_eu]
fig1 = make_subplots(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.1,
specs=[[{"secondary_y": True}],
[{"secondary_y": True}]],
subplot_titles=("Market orderbook & my orders/trades (Local currency)", "My total balance & holding position (USD)"))
for i in range(len(fig1_sub1_traces)):
fig1.add_trace(fig1_sub1_traces[i],
row=1, col=1)
for i in range(len(fig1_sub2_traces)):
fig1.add_trace(fig1_sub2_traces[i],
row=2, col=1)
fig1.add_trace(trace_bal_all,
row=2, col=1, secondary_y=True)
fig1.update_layout(barmode='stack')
return fig1
fig1 = fig1_producer(df1, df2, df3)
def fig2_producer(df1, df2, df3):
# Fig 2 Current dash board
# Bal pie chart
pie_labels = ['fiat_free', 'fiat_used', 'eth_free', 'eth_used']
pie_values = [df1['bal_fiat_free'].iloc[-1] / df1['1_bid_px'].iloc[-1], df1['bal_fiat_used'].iloc[-1] / df1['1_bid_px'].iloc[-1],
df1['bal_eth_free'].iloc[-1], df1['bal_eth_used'].iloc[-1]]
trace_pie_bal = go.Pie(
labels=pie_labels, values=pie_values, title='Balance in ETH', hole=0.4,
textinfo='label+percent')
# order book info
boop = df2['buy_OpenOrder_price']
c_boop = boop.loc[~boop.isnull()].iloc[-1]
soop = df2['sell_OpenOrder_price']
c_soop = soop.loc[~soop.isnull()].iloc[-1]
ob_y_bid = [df1['1_bid_px'].iloc[-1], df1['2_bid_px'].iloc[-1],
df1['3_bid_px'].iloc[-1], df1['4_bid_px'].iloc[-1], df1['5_bid_px'].iloc[-1]]
ob_y_ask = [df1['1_ask_px'].iloc[-1], df1['2_ask_px'].iloc[-1],
df1['3_ask_px'].iloc[-1], df1['4_ask_px'].iloc[-1], df1['5_ask_px'].iloc[-1]]
trace_c_ords_bid = go.Scatter(
x=[1] * len(ob_y_bid), y=ob_y_bid, mode="markers", marker_symbol='line-ew',
marker_line_color="midnightblue", marker_color="lightskyblue",
marker_line_width=3, marker_size=150, name='mkt_bid')
trace_c_ords_ask = go.Scatter(
x=[1] * len(ob_y_ask), y=ob_y_ask, mode="markers", marker_symbol='line-ew',
marker_line_color="red", marker_color="red",
marker_line_width=3, marker_size=150, name='mkt_ask')
trace_c_ords_my_bid = go.Scatter(
x=[1], y=[c_boop], mode="markers", marker_symbol='diamond-wide',
marker_line_color="gold", marker_color="blue",
marker_line_width=2, marker_size=15, name='my_bid')
trace_c_ords_my_ask = go.Scatter(
x=[1], y=[c_soop], mode="markers", marker_symbol='diamond-wide',
marker_line_color="gold", marker_color="red",
marker_line_width=2, marker_size=15, name='my_ask')
fig2 = make_subplots(rows=1, cols=2,
specs=[[{"type": "domain"}, {"type": "xy"}]],
subplot_titles=("Holdings", "Order book position"))
fig2.add_trace(trace_pie_bal, row=1, col=1)
fig2.add_trace(trace_c_ords_bid, row=1, col=2)
fig2.add_trace(trace_c_ords_ask, row=1, col=2)
fig2.add_trace(trace_c_ords_my_bid, row=1, col=2)
fig2.add_trace(trace_c_ords_my_ask, row=1, col=2)
fig2.update_layout(title='Current orders and positions')
return fig2
fig2 = fig2_producer(df1, df2, df3)
# Tables
# tbl_my_trades = df3[['time', 'type', 'type', 'takerOrMaker', 'side', 'amount', 'price',
# 'fee_fiat', 'fee_pct']]
# tbl_my_orders = df2[['time', 'OpenOrder_side', 'OpenOrder_price',
# 'OpenOrder_amount', 'OpenOrder_filled', 'OpenOrder_remaining',
# 'OpenOrder_fee']]
# tbl_my_trades = tbl_my_trades.tail().iloc[::-1]
# tbl_my_orders = tbl_my_orders.tail().iloc[::-1]
# plot
app.layout = html.Div(children=[
html.H1('Trading Dashboard'),
dcc.Graph(id='g1',
figure=fig1),
dcc.Graph(id='g2',
figure=fig2),
html.H2('My latest trades'),
dash_table.DataTable(id='table_trades',
# columns=[{"name": i, "id": i}
# for i in tbl_my_trades.columns],
# data=tbl_my_trades.to_dict('records'),
style_as_list_view=True,
style_cell={'padding': '5px'},
style_header={
'backgroundColor': 'white',
'fontWeight': 'bold'
}),
html.H2('My latest open orders'),
dash_table.DataTable(id='table_openorders',
# columns=[{"name": i, "id": i}
# for i in tbl_my_orders.columns],
# data=tbl_my_orders.to_dict('records'),
style_as_list_view=True,
style_cell={'padding': '5px'},
style_header={
'backgroundColor': 'white',
'fontWeight': 'bold'
}),
dcc.Interval(
id='interval-component',
interval=70 * 1000, # in milliseconds
n_intervals=0
)
]
)
@app.callback(
[Output('table_openorders', 'data'),
Output('table_trades', 'data'),
Output('table_openorders', 'columns'),
Output('table_trades', 'columns'),
Output('g1', 'figure'),
Output('g2', 'figure')],
[Input('interval-component', 'n_intervals')])
def update_tables_graphs(n):
conn = sqlite3.connect(
'/Users/Zi/Projects/Dash_eth/test.db', check_same_thread=False)
df1 = pd.read_sql_query(
"SELECT * from {}_merge_td_bal_ods".format(exchange_name), conn)
df2 = pd.read_sql_query(
"SELECT * from {}_open_order".format(exchange_name), conn)
df3 = pd.read_sql_query(
"SELECT * from {}_trades".format(exchange_name), conn)
conn.close()
# tables
## tables - data
tbl_my_trades = df3[['time', 'type', 'takerOrMaker', 'side', 'amount', 'price',
'fee_fiat', 'fee_pct']]
tbl_my_orders = df2[['time', 'OpenOrder_side', 'OpenOrder_price',
'OpenOrder_amount', 'OpenOrder_filled', 'OpenOrder_remaining',
'OpenOrder_fee']]
tbl_my_trades = tbl_my_trades.tail().iloc[::-1]
tbl_my_orders = tbl_my_orders.tail().iloc[::-1]
# tables - column
columns_trades = [{"name": i, "id": i} for i in tbl_my_trades.columns]
columns_orders = [{"name": i, "id": i} for i in tbl_my_orders.columns]
# graphs
fig1 = fig1_producer(df1, df2, df3)
fig2 = fig2_producer(df1, df2, df3)
return tbl_my_orders.to_dict('records'), tbl_my_trades.to_dict('records'), columns_orders, columns_trades, fig1, fig2
if __name__ == '__main__':
app.run_server(debug=True)
``` |
{
"source": "jianwang0212/napoli_gang",
"score": 3
} |
#### File: jianwang0212/napoli_gang/risk_server.py
```python
import os
import ccxt
from dataclasses import dataclass
import dataclasses
import typing
import datetime
import numpy as np
from math import floor
import time
@dataclass
class SymbolRisk:
qty: int
start_price: int
@dataclass
class RiskTolerance:
max_risk_per_trade: int
max_net_risk: typing.Dict[str, int]
risk_aversion: typing.Dict[str, float]
def initialise_risk_tolerances(pos, price) -> RiskTolerance:
current_risk = {'ETH': SymbolRisk(
pos, price)}
max_risk_per_trade = 100000 # jpy
max_net_risk = {'ETH': 3000000}
risk_aversion = {sym: np.log(
max_risk_per_trade / current_risk[sym].start_price / max_net_risk[sym]) / max_net_risk[sym] for sym in current_risk.keys()} # -12
return RiskTolerance(max_risk_per_trade=max_risk_per_trade,
max_net_risk=max_net_risk,
risk_aversion=risk_aversion)
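# Illustrative note (not part of the original source): the argument of np.log
# above, max_risk_per_trade / price / max_net_risk, is far below 1, so
# risk_aversion comes out negative. With a purely hypothetical price of
# 30000 JPY: ln(100000 / 30000 / 3000000) / 3000000 ~ -13.7 / 3e6 ~ -4.6e-6
# per JPY of open risk, which later shrinks quote sizes via np.exp(...).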
@dataclass
class RiskServer:
risk_tolerance: RiskTolerance
current_risk: typing.Dict[str, SymbolRisk]
current_fiat: float
def get_net_risk(self) -> float:
sym_sum = sum(
self.current_risk[sym].qty * self.current_risk[sym].start_price for sym in self.current_risk.keys())
total_sum = self.current_fiat + sym_sum
return total_sum
def get_trading_risk(self, sym: str) -> float:
return self.current_risk[sym].qty * self.current_risk[sym].start_price
def get_bal_ratio(self, sym: str) -> float:
return float(self.get_trading_risk(sym) / self.get_net_risk())
def get_quantity(self, sym: str) -> typing.Tuple[float, float]:
risk = self.get_trading_risk(sym) # if pos > 0 , risk < 0
q_0 = self.risk_tolerance.max_risk_per_trade / \
self.current_risk[sym].start_price # base quantiy
bal_c = sum(
self.current_risk[sym].qty for sym in self.current_risk.keys())
bal_f = self.current_fiat / self.current_risk[sym].start_price
bal_ratio = bal_c / bal_f
qty_multiply = 0.3
if bal_ratio >= 1: # if eth > fiat proportion
# less than q_0 -> bid less
bid_qty = qty_multiply * bal_f * \
np.exp(self.risk_tolerance.risk_aversion[sym] * abs(
risk)) # this number is 0.3, the higher the risk, the lower the number
ask_qty = qty_multiply * bal_c
else:
bid_qty = qty_multiply * bal_f
ask_qty = qty_multiply * bal_c * \
np.exp(self.risk_tolerance.risk_aversion[sym] * abs(risk))
return max(bid_qty, 0.01), max(ask_qty, 0.01)
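    # Sketch of the quoting logic above (commentary, not original code): when
    # the ETH share of the book dominates (bal_ratio >= 1) the bid size is
    # damped by exp(risk_aversion * |risk|) while the ask keeps the plain
    # 0.3 * balance size, and the roles swap when fiat dominates, so quotes
    # always lean toward rebalancing the inventory.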
def update_risk_server(session, risk_tolerance: RiskTolerance) -> RiskServer:
symbols = ['ETH']
fiat = 'JPY'
try:
raw_data = session.fetch_balance()
except:
print("race condition")
time.sleep(0.5)
raw_data = session.fetch_balance()
position = {symbol: raw_data['total'][symbol] for symbol in symbols}
try:
price = session.fetch_ticker('ETH/JPY')['last']
except:
print("price error")
time.sleep(0.5)
price = session.fetch_ticker('ETH/JPY')['last']
current_risk = {'ETH': SymbolRisk(position['ETH'], price)}
current_fiat = raw_data['total'][fiat]
return RiskServer(risk_tolerance, current_risk, current_fiat)
``` |
{
"source": "jianwang0212/wp4",
"score": 3
} |
#### File: jianwang0212/wp4/frequencyFunction.py
```python
import pandas as pd
pd.options.mode.chained_assignment = None
df = pd.read_excel('oxfordshire_lep_with_text.xlsx',
sheet_name='Construction 2019')
def unicode_to_list(element):
return eval(element.replace("' '", "','"))
def element_to_list(element):
return str(element).replace(";", "&").replace('"', "").split("&")
def frequency_count_skill(df):
df = df[['JobID', 'JobText', 'SICCode',
'CanonSkillClusters', 'SOCCode', 'CanonSkills']]
df['CanonSkills'] = df['CanonSkills'].apply(unicode_to_list)
s = df.apply(lambda x: pd.Series(x['CanonSkills']), axis=1).stack(
).reset_index(level=1, drop=True)
s.name = 'CanonSkills'
s = df.drop('CanonSkills', axis=1).join(s)
df2 = pd.DataFrame(s['CanonSkills'].value_counts())
common = df2.loc[df2['CanonSkills'].idxmax()].name
df_common = s[s['CanonSkills'] == "Plumbing"]
return df2, df_common
def frequency_count_skill_cluster(df):
df = df[['JobID', 'JobText', 'SICCode',
'CanonSkillClusters', 'SOCCode']]
df['CanonSkillClusters'] = df['CanonSkillClusters'].apply(element_to_list)
s = df.apply(lambda x: pd.Series(x['CanonSkillClusters']), axis=1).stack(
).reset_index(level=1, drop=True)
s.name = 'CanonSkillClusters'
s = df.drop('CanonSkillClusters', axis=1).join(s)
df2 = pd.DataFrame(s['CanonSkillClusters'].value_counts())
common = df2.loc[df2['CanonSkillClusters'].idxmax()].name
df_common = s[s['CanonSkillClusters'] == "Plumbing"]
return df2, df_common
df2, df_common = frequency_count_skill_cluster(df)
print(df2.head)
print(df_common.head)
```
#### File: wp4/others/annotator.py
```python
import pandas as pd
from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktParameters
tokenizer = PunktSentenceTokenizer()
df = pd.read_csv('batch3_no_eduction_no_health_1.csv')
df = df.dropna(subset=['JobText'])
ads = df.JobText.tolist()
token = 0
while True:
print('>>>>>>>>>>>>>>>>>>>>>> START <<<<<<<<<<<<<<<<<<<<<<<<')
import textwrap
wrapper = textwrap.TextWrapper(replace_whitespace=False)
for l in ads.pop().splitlines():
# print(len(l))
tokenizer.train(l)
if len(l)<67:
# print(token, len(l.split()))
if len(l.split()) == 0 and token == 0:
token = 1
print((textwrap.dedent(l)))
elif len(l.split()) == 0 and token == 1:
continue
else:
token = 0
print((textwrap.dedent(l)))
else:
lines = wrapper.wrap(l)
for line in lines:
print((textwrap.dedent(line)))
print('>>>>>>>>>>>>>>>>>>>>>>> END <<<<<<<<<<<<<<<<<<<<<<<<<')
input("Press Enter to continue...")
import textwrap
def test():
# end first line with \ to avoid the empty line!
s = '''\
hello
world
'''
print((s)) # prints ' hello\n world\n '
print((textwrap.dedent(s))) # prints 'hello\n world\n'
``` |
{
"source": "JianweiSun007/athena",
"score": 2
} |
#### File: athena/models/mtl_seq2seq.py
```python
import tensorflow as tf
from tensorflow.keras.layers import Dense
from .base import BaseModel
from ..loss import CTCLoss
from ..metrics import CTCAccuracy
from .speech_transformer import SpeechTransformer, SpeechTransformer2, SpeechTransformer3, SpeechMemoryTransformer
from ..utils.hparam import register_and_parse_hparams
from ..tools.beam_search import BeamSearchDecoder
from ..tools.ctc_scorer import CTCPrefixScorer
from ..tools.lm_scorer import NGramScorer, RNNScorer
class MtlTransformerCtc(BaseModel):
""" In speech recognition, adding CTC loss to Attention-based seq-to-seq model is known to
help convergence. It usually gives better results than using attention alone.
"""
SUPPORTED_MODEL = {
"speech_transformer": SpeechTransformer,
"speech_transformer2": SpeechTransformer2,
"speech_transformer3": SpeechTransformer3,
"speech_memory_transformer": SpeechMemoryTransformer,
}
default_config = {
"model": "speech_transformer",
"model_config": {"return_encoder_output": True},
"mtl_weight": 0.5
}
def __init__(self, data_descriptions, config=None):
super().__init__()
self.num_class = data_descriptions.num_class + 1
self.sos = self.num_class - 1
self.eos = self.num_class - 1
self.hparams = register_and_parse_hparams(self.default_config, config, cls=self.__class__)
self.loss_function = CTCLoss(blank_index=-1)
self.metric = CTCAccuracy()
self.model = self.SUPPORTED_MODEL[self.hparams.model](
data_descriptions, self.hparams.model_config
)
self.decoder = Dense(self.num_class)
self.ctc_logits = None
def call(self, samples, training=None):
""" call function in keras layers """
output, encoder_output = self.model(samples, training=training)
self.ctc_logits = self.decoder(encoder_output, training=training)
return output
def get_loss(self, logits, samples, training=None):
""" get loss used for training """
logit_length = self.compute_logit_length(samples)
extra_loss = self.loss_function(self.ctc_logits, samples, logit_length)
self.metric(self.ctc_logits, samples, logit_length)
main_loss, metrics = self.model.get_loss(logits, samples, training=training)
mtl_weight = self.hparams.mtl_weight
loss = mtl_weight * main_loss + (1.0 - mtl_weight) * extra_loss
metrics[self.metric.name] = self.metric.result()
return loss, metrics
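    # Note (commentary): with the default mtl_weight of 0.5 the objective is an
    # even interpolation, loss = 0.5 * attention_loss + 0.5 * ctc_loss, and
    # pushing mtl_weight toward 1.0 recovers the attention-only model.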
def compute_logit_length(self, samples):
""" compute the logit length """
return self.model.compute_logit_length(samples)
def reset_metrics(self):
""" reset the metrics """
self.metric.reset_states()
self.model.reset_metrics()
def restore_from_pretrained_model(self, pretrained_model, model_type=""):
""" A more general-purpose interface for pretrained model restoration
:param pretrained_model: checkpoint path of mpc model
:param model_type: the type of pretrained model to restore
"""
self.model.restore_from_pretrained_model(pretrained_model, model_type)
def decode(self, samples, hparams, lm_model=None):
""" beam search decoding """
encoder_output, input_mask = self.model.decode(samples, hparams, return_encoder=True)
# init op
last_predictions = tf.ones([1], dtype=tf.int32) * self.sos
history_predictions = tf.TensorArray(
tf.int32, size=1, dynamic_size=True, clear_after_read=False
)
history_predictions.write(0, last_predictions)
history_predictions = history_predictions.stack()
init_cand_states = [history_predictions]
step = 0
beam_size = 1 if not hparams.beam_search else hparams.beam_size
beam_search_decoder = BeamSearchDecoder(
self.num_class, self.sos, self.eos, beam_size=beam_size
)
beam_search_decoder.build(self.model.time_propagate)
if hparams.beam_search and hparams.ctc_weight != 0:
ctc_scorer = CTCPrefixScorer(
self.eos,
ctc_beam=hparams.beam_size*2,
num_classes=self.num_class,
ctc_weight=hparams.ctc_weight,
)
ctc_logits = self.decoder(encoder_output, training=False)
ctc_logits = tf.math.log(tf.nn.softmax(ctc_logits))
init_cand_states = ctc_scorer.initial_state(init_cand_states, ctc_logits)
beam_search_decoder.add_scorer(ctc_scorer)
if hparams.lm_weight != 0:
if hparams.lm_path is None:
raise ValueError("lm path should not be none")
if hparams.lm_type == "ngram":
lm_scorer = NGramScorer(
hparams.lm_path,
self.sos,
self.eos,
self.num_class,
lm_weight=hparams.lm_weight,
)
elif hparams.lm_type == "rnn":
lm_scorer = RNNScorer(
lm_model,
lm_weight=hparams.lm_weight)
beam_search_decoder.add_scorer(lm_scorer)
predictions = beam_search_decoder(
history_predictions, init_cand_states, step, (encoder_output, input_mask)
)
return predictions
``` |
{
"source": "jianweiSun/algorithms-in-python",
"score": 4
} |
#### File: algorithms-in-python/data_structures/queue.py
```python
class QueueNode(object):
def __init__(self, obj, next_node):
self.obj = obj
self.next_node = next_node
class Queue(object):
def __init__(self, iterable=None):
self.size = 0
self.front = None
self.last = None
def enqueue(self, item):
if self.size == 0:
self.front = QueueNode(item, None)
self.last = self.front
else:
self.last.next_node = QueueNode(item, None)
self.last = self.last.next_node
self.size += 1
def dequeue(self):
if self.size == 0:
raise Exception("is empty")
result = self.front.obj
self.front = self.front.next_node
        self.size -= 1
        if self.size == 0:
            self.last = None  # queue is empty again; drop the stale tail reference
        return result
def is_empty(self):
return self.size == 0
```
#### File: algorithms-in-python/recursives/convert_string_to_number_base.py
```python
def convert_string(number, base_string):
assert isinstance(number, int) and isinstance(base_string, str)
base = len(base_string)
result = str()
while number > 0:
result = base_string[number % base] + result
number = number // base
return result
def convert_string_recursive(number, base_string):
assert isinstance(number, int) and isinstance(base_string, str)
base = len(base_string)
if number < base:
return base_string[number]
else:
return convert_string_recursive(number // base, base_string) + base_string[number % base]
```
#### File: algorithms-in-python/recursives/hanoi.py
```python
def move_tower(height, from_pole, middle_pole, to_pole):
if height >= 1:
move_tower(height-1, from_pole, to_pole, middle_pole)
print "move disk from {} to {}".format(from_pole, to_pole)
move_tower(height-1, middle_pole, from_pole, to_pole)
```
#### File: algorithms-in-python/sorting_and_searching/quick_sort.py
```python
def quick_sort(list_, start, end):
if end > start:
split_position = partition(list_, start, end)
quick_sort(list_, start, split_position-1)
quick_sort(list_, split_position+1, end)
def partition(list_, start, end):
pivot = list_[start]
left_mark = start + 1
right_mark = end
done = False
while not done:
while left_mark <= right_mark and list_[left_mark] <= pivot:
left_mark += 1
while right_mark >= left_mark and list_[right_mark] >= pivot:
right_mark -= 1
if right_mark < left_mark:
done = True
else:
list_[left_mark], list_[right_mark] = list_[right_mark], list_[left_mark]
list_[right_mark], list_[start] = list_[start], list_[right_mark]
return right_mark
```
#### File: algorithms-in-python/sorting_and_searching/sequential_search.py
```python
def sequential_search(list_, item):
pos = 0
is_found = False
while pos < len(list_) and not is_found:
if list_[pos] == item:
is_found = True
else:
pos += 1
return is_found, pos
def ordered_sequential_search(list_, item):
for i in range(len(list_)):
if list_[i] == item:
return True, i
elif list_[i] > item:
return False
return False
```
#### File: algorithms-in-python/trees_and_tree_algorithms/binary_heap.py
```python
class BinaryMinHeap(object):
def __init__(self, l=None):
self.heap_list = [0]
self.size = 0
if l:
self.size = len(l)
i = self.size // 2
self.heap_list = self.heap_list + l
while i > 0:
self.perc_down(i)
i -= 1
def get_min(self):
return self.heap_list[1]
def del_min(self):
result = self.heap_list[1]
self.heap_list[1] = self.heap_list[self.size]
self.size -= 1
self.heap_list.pop()
self.perc_down(1)
return result
def is_empty(self):
return self.size == 0
def __len__(self):
return self.size
def get_min_child_index(self, index):
if index * 2 > self.size:
return None
elif index * 2 + 1 > self.size:
return index * 2
else:
if self.heap_list[index * 2] > self.heap_list[index * 2 + 1]:
return index * 2 + 1
else:
return index * 2
def perc_down(self, index):
        while index * 2 <= self.size:  # <= so a node whose only child is the last element still sifts down
min_child_index = self.get_min_child_index(index)
if self.heap_list[index] > self.heap_list[min_child_index]:
self.heap_list[min_child_index], self.heap_list[index] = self.heap_list[index], self.heap_list[min_child_index]
index = min_child_index
def perc_up(self, index):
while index // 2 > 0:
if self.heap_list[index] < self.heap_list[index // 2]:
self.heap_list[index], self.heap_list[index // 2] = self.heap_list[index // 2], self.heap_list[index]
index = index // 2
def insert(self, item):
self.heap_list.append(item)
self.size += 1
self.perc_up(self.size)
```
#### File: algorithms-in-python/trees_and_tree_algorithms/binary_search_tree.py
```python
class BinarySearchTree(object):
def __init__(self):
self.size = 0
self.root = None
def __len__(self):
return self.size
def __iter__(self):
return self.root.__iter__()
def __setitem__(self, key, value):
self.put(key, value)
def __getitem__(self, key):
return self.get(key)
def __delitem__(self, key):
self.delete(key)
def __contains__(self, key):
if self.get(key):
return True
else:
return False
def put(self, key, value):
if self.root:
self._put(key, value, self.root)
else:
self.root = TreeNode(key, value)
self.size += 1
def _put(self, key, value, current_node):
if key < current_node.key:
if current_node.left:
self._put(key, value, current_node.left)
else:
current_node.left = TreeNode(key, value, parent=current_node)
else:
if current_node.right:
self._put(key, value, current_node.right)
else:
current_node.right = TreeNode(key, value, parent=current_node)
def get(self, key):
if self.root:
match_node = self._get(key, self.root)
if match_node:
return match_node.value
else:
return None
else:
return None
def _get(self, key, current_node):
if not current_node:
return None
elif current_node.key == key:
return current_node
elif key < current_node.key:
return self._get(key, current_node.left)
else:
return self._get(key, current_node.right)
def delete(self, key):
if self.size > 1:
node_to_delete = self.get(key)
if node_to_delete:
self.remove(node_to_delete)
self.size -= 1
else:
raise KeyError("Error, Key not found.")
elif self.size == 1 and self.root.key == key:
self.root = None
self.size -= 1
else:
raise KeyError("Error, Key not found.")
def remove(self, node):
assert isinstance(node, TreeNode)
if node.is_leaf():
if node is node.parent.left:
node.parent.left = None
else:
node.parent.right = None
elif node.has_both_child():
successor = node.find_successor()
successor.spice_out()
node.key = successor.key
node.value = successor.value
else: # has only one child
if node.left:
if node.is_left_child():
node.left.parent = node.parent
node.parent.left = node.left
elif node.is_right_child():
node.left.parent = node.parent
node.parent.right = node.left
else:
node.replace_node_data(node.left.key,
node.left.value,
node.left.left,
node.left.right)
else:
if node.is_left_child():
node.right.parent = node.parent
node.parent.left = node.right
elif node.is_right_child():
node.right.parent = node.parent
node.parent.right = node.right
else:
node.replace_node_data(node.right.key,
node.right.value,
node.right.left,
node.right.right)
class TreeNode(object):
def __init__(self, key, value, left=None, right=None, parent=None):
self.key = key
self.value = value
self.left = left
self.right = right
self.parent = parent
def is_left_child(self):
return self.parent and self.parent.left == self
def is_right_child(self):
return self.parent and self.parent.right == self
def is_root(self):
return self.parent is None
def is_leaf(self):
return not (self.left or self.right)
def has_any_child(self):
return self.left or self.right
def has_both_child(self):
return self.left and self.right
def replace_node_data(self, key, value, left, right):
self.key = key
self.value = value
self.left = left
self.right = right
if self.left:
self.left.parent = self
if self.right:
self.right.parent = self
def find_successor(self):
succ = None
if self.right:
            succ = self.right.find_min() # BinarySearchTree delete will always have self.right
else:
if self.parent:
if self.is_left_child():
succ = self.parent
else:
self.parent.right = None
succ = self.parent.find_successor()
self.parent.right = self
return succ
def find_min(self):
current = self
while current.left:
current = current.left
return current
def spice_out(self):
if self.is_leaf():
if self.is_left_child():
self.parent.left = None
else:
self.parent.right = None
elif self.has_any_child():
if self.left:
if self.is_left_child():
self.parent.left = self.left
else:
self.parent.right = self.left
self.left.parent = self.parent
else:
if self.is_left_child():
self.parent.left = self.right
else:
self.parent.right = self.right
self.right.parent = self.parent
def __iter__(self):
if self:
if self.left:
for elem in self.left:
yield elem
yield self.key
if self.right:
for elem in self.right:
yield elem
``` |
{
"source": "Jianwei-Wang/python2.7_lib",
"score": 2
} |
#### File: dist-packages/axi/__init__.py
```python
import os
import os.path
import sys
import re
# Setup configuration
PLUGINDIR = os.environ.get("AXI_PLUGIN_DIR", "/usr/share/apt-xapian-index/plugins")
XAPIANDBPATH = os.environ.get("AXI_DB_PATH", "/var/lib/apt-xapian-index")
XAPIANDBSTAMP = os.path.join(XAPIANDBPATH, "update-timestamp")
XAPIANDBLOCK = os.path.join(XAPIANDBPATH, "update-lock")
XAPIANDBUPDATESOCK = os.path.join(XAPIANDBPATH, "update-socket")
XAPIANDBVALUES = os.path.join(XAPIANDBPATH, "values")
XAPIANDBPREFIXES = os.path.join(XAPIANDBPATH, "prefixes")
XAPIANDBDOC = os.path.join(XAPIANDBPATH, "README")
XAPIANINDEX = os.path.join(XAPIANDBPATH, "index")
XAPIANCACHEPATH = os.environ.get("AXI_CACHE_PATH", "/var/cache/apt-xapian-index")
# Default value database in case one cannot be read
DEFAULT_VALUES = dict(version=0, installedsize=1, packagesize=2)
DEFAULT_VALUE_DESCS = dict(
version="package version",
installedsize="installed size",
packagesize="package size"
)
def readValueDB(pathname=XAPIANDBVALUES, quiet=False):
"""
Read the "/etc/services"-style database of value indices
"""
try:
re_empty = re.compile("^\s*(?:#.*)?$")
re_value = re.compile("^(\S+)\s+(\d+)(?:\s*#\s*(.+))?$")
values = {}
descs = {}
for idx, line in enumerate(open(pathname)):
# Skip empty lines and comments
if re_empty.match(line): continue
            # Parse the rest
mo = re_value.match(line)
if not mo:
if not quiet:
print >>sys.stderr, "%s:%d: line is not `name value [# description]': ignored" % (pathname, idx+1)
continue
# Parse the number
name = mo.group(1)
number = int(mo.group(2))
desc = mo.group(3) or ""
values[name] = number
descs[name] = desc
except (OSError, IOError), e:
# If we can't read the database, fallback to defaults
if not quiet:
print >>sys.stderr, "%s: %s. Falling back on a default value database" % (pathname, e)
values = DEFAULT_VALUES
descs = DEFAULT_VALUE_DESCS
return values, descs
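# Illustrative values file in the format parsed above (example only, mirrors
# the DEFAULT_VALUES fallback rather than any shipped data):
#
#   version        0    # package version
#   installedsize  1    # installed size
#   packagesize    2    # package size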
```
#### File: python2.7_lib/dist-packages/CDBashApplet.py
```python
import os.path
import subprocess
from CDApplet import CDApplet
####################
### Applet class ###
####################
class CDBashApplet(CDApplet):
def __init__(self):
# call high-level init
self.app_folder = os.path.abspath(".")
CDApplet.__init__(self)
##### private methods #####
def call(self,action):
subprocess.call("cd " + self.app_folder + " && ./" + self.cAppletName + ".sh " + self.cAppletName + " " + self.cBusPath + " " + self.cConfFile + " " + self.cParentAppName + " " + action, shell=True)
##### applet definition #####
def get_config(self,keyfile):
self.call("get_config")
def end(self):
self.call("end")
def begin(self):
self.call("begin")
def reload(self):
self.call("reload")
##### callbacks #####
def on_click(self,iState):
self.call("on_click "+str(iState))
def on_middle_click(self):
self.call("on_middle_click")
def on_build_menu(self):
self.call("on_build_menu")
def on_menu_select(self,iNumEntry):
self.call("on_menu_select "+str(iNumEntry))
def on_scroll(self,bScrollUp):
self.call("on_scroll "+str(bScrollUp))
def on_drop_data(self,cReceivedData):
self.call("on_drop_data '"+cReceivedData+"'")
def on_answer_dialog(self, button, answer):
self.call("on_answer_dialog "+str(button)+" '"+str(answer)+"'")
def on_shortkey(self,key):
self.call("on_shortkey '"+key+"'")
def on_change_focus(self,bIsActive):
self.call("on_change_focus '"+str(bIsActive)+"'")
def on_click_sub_icon(self, iState, cIconID):
self.call("on_click_sub_icon '"+str(iState)+"' '"+cIconID+"'")
```
#### File: dist-packages/CommandNotFound/util.py
```python
from __future__ import print_function
import gettext
import locale
import sys
if sys.version >= "3":
_gettext_method = "gettext"
else:
_gettext_method = "ugettext"
_ = getattr(gettext.translation("command-not-found", fallback=True), _gettext_method)
def crash_guard(callback, bug_report_url, version):
""" Calls callback and catches all exceptions.
When something bad happens prints a long error message
with bug report information and exits the program"""
try:
try:
callback()
except Exception as ex:
print(_("Sorry, command-not-found has crashed! Please file a bug report at:"), file=sys.stderr)
print(bug_report_url, file=sys.stderr)
print(_("Please include the following information with the report:"), file=sys.stderr)
print(file=sys.stderr)
print(_("command-not-found version: %s") % version, file=sys.stderr)
print(_("Python version: %d.%d.%d %s %d") % sys.version_info, file=sys.stderr)
try:
import subprocess
subprocess.call(["lsb_release", "-i", "-d", "-r", "-c"], stdout=sys.stderr)
except (ImportError, OSError):
pass
print(_("Exception information:"), file=sys.stderr)
print(file=sys.stderr)
print(ex, file=sys.stderr)
try:
import traceback
traceback.print_exc()
except ImportError:
pass
finally:
sys.exit(127)
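# Usage sketch (illustrative; URL and version string are invented):
#   crash_guard(lambda: main(sys.argv[1:]),
#               "https://bugs.example.org/command-not-found", "0.3")
# runs the callback and, on any unhandled exception, prints the bug-report
# banner above and exits with status 127.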
__all__ = ["gettext_wrapper", "crash_guard"]
```
#### File: dist-packages/cupshelpers/__init__.py
```python
__all__ = ['set_debugprint_fn',
'Device', 'Printer', 'activateNewPrinter',
'copyPPDOptions', 'getDevices', 'getPrinters',
'missingPackagesAndExecutables', 'missingExecutables',
'parseDeviceID',
'setPPDPageSize',
'ppds',
'openprinting']
def _no_debug (x):
return
_debugprint_fn = _no_debug
def _debugprint (x):
_debugprint_fn (x)
def set_debugprint_fn (debugprint):
"""
Set debugging hook.
@param debugprint: function to print debug output
@type debugprint: fn (str) -> None
"""
global _debugprint_fn
_debugprint_fn = debugprint
from cupshelpers import \
Device, \
Printer, \
activateNewPrinter, \
copyPPDOptions, \
getDevices, \
getPrinters, \
missingPackagesAndExecutables, \
missingExecutables, \
parseDeviceID, \
setPPDPageSize
import ppds
import openprinting
```
#### File: dist-packages/dbus/_expat_introspect_parser.py
```python
from xml.parsers.expat import ParserCreate
from dbus.exceptions import IntrospectionParserException
class _Parser(object):
__slots__ = ('map', 'in_iface', 'in_method', 'sig')
def __init__(self):
self.map = {}
self.in_iface = ''
self.in_method = ''
self.sig = ''
def parse(self, data):
parser = ParserCreate('UTF-8', ' ')
parser.buffer_text = True
parser.StartElementHandler = self.StartElementHandler
parser.EndElementHandler = self.EndElementHandler
parser.Parse(data)
return self.map
def StartElementHandler(self, name, attributes):
if not self.in_iface:
if (not self.in_method and name == 'interface'):
self.in_iface = attributes['name']
else:
if (not self.in_method and name == 'method'):
self.in_method = attributes['name']
elif (self.in_method and name == 'arg'):
if attributes.get('direction', 'in') == 'in':
self.sig += attributes['type']
def EndElementHandler(self, name):
if self.in_iface:
if (not self.in_method and name == 'interface'):
self.in_iface = ''
elif (self.in_method and name == 'method'):
self.map[self.in_iface + '.' + self.in_method] = self.sig
self.in_method = ''
self.sig = ''
def process_introspection_data(data):
"""Return a dict mapping ``interface.method`` strings to the
concatenation of all their 'in' parameters, and mapping
``interface.signal`` strings to the concatenation of all their
parameters.
Example output::
{
'com.example.SignalEmitter.OneString': 's',
'com.example.MethodImplementor.OneInt32Argument': 'i',
}
:Parameters:
`data` : str
The introspection XML. Must be an 8-bit string of UTF-8.
"""
try:
return _Parser().parse(data)
except Exception as e:
raise IntrospectionParserException('%s: %s' % (e.__class__, e))
```
#### File: dist-packages/debtagshw/detectors.py
```python
from __future__ import absolute_import
import logging
import os
import subprocess
LOG=logging.getLogger(__name__)
try:
from gi.repository import GUdev
HAVE_GUDEV = True
except ImportError:
HAVE_GUDEV = False
from .enums import HardwareSupported
from . import opengl
class Detector(object):
""" Base detector class """
# helper functions for tags have this prefix, so the code can find them
# via introspecton, e.g.
# hardware::video:opengl -> _run_check_hardware__video_opengl
CHECK_FUNCTION_PREFIX = "_run_check_"
def is_supported(self, tag):
""" check if the given tag is supported, returns a
HardwareSupported class
"""
f = self._get_func_for_tag(tag)
if f:
return f()
return HardwareSupported.UNKNOWN
def generate_tag_expressions(self):
""" Generate debtags expressions for the given HW """
for tag in self.get_supported_tags():
res = self.is_supported(tag)
if res == HardwareSupported.UNKNOWN:
continue
yield res, [tag]
def get_supported_tags(self):
""" return list of supported tags by this detector """
supported = []
for name in dir(self):
tag = self._get_tag_for_func(name)
if tag:
supported.append(tag)
return supported
# private helpers
def _has_func_for_tag(self, tag):
return hasattr(self, "%s%s" % (
self.CHECK_FUNCTION_PREFIX, tag.replace(":", "_")))
def _get_func_for_tag(self, tag):
return getattr(self, "%s%s" % (
self.CHECK_FUNCTION_PREFIX, tag.replace(":", "_")), None)
def _get_tag_for_func(self, func_name):
if not func_name.startswith("%shardware" % self.CHECK_FUNCTION_PREFIX):
return None
tag = func_name[len(self.CHECK_FUNCTION_PREFIX):].replace("_",":")
return tag
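    # Illustrative round trip (commentary): _get_func_for_tag("hardware::video:opengl")
    # resolves to _run_check_hardware__video_opengl, and _get_tag_for_func maps that
    # method name back to the tag, which is what get_supported_tags() relies on.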
class DetectorUdev(Detector):
""" detect hardware based on udev """
DEBTAG_TO_UDEV_PROPERTY = {
# storage
"hardware::storage:cd" : "ID_CDROM",
"hardware::storage:cd-writer" : "ID_CDROM_CD_R",
"hardware::storage:dvd" : "ID_CDROM_DVD",
"hardware::storage:dvd-writer" : "ID_CDROM_DVD_R",
# input
"hardware::input:touchscreen" : "ID_INPUT_TOUCH",
"hardware::input:mouse" : "ID_INPUT_MOUSE",
"hardware::input:keyboard" : "ID_INPUT_KEYBOARD",
"hardware::input:joystick" : "ID_INPUT_JOYSTICK",
# digicam
"hardware::digicam" : "ID_GPHOTO2",
}
DEBTAG_TO_ID_TYPE = {
# webcam
'hardware::webcam' : 'video',
# floppy
'hardware::floppy' : 'floppy',
}
# all tags this class knows about
SUPPORTED_TAGS = list(DEBTAG_TO_UDEV_PROPERTY.keys()) + \
list(DEBTAG_TO_ID_TYPE.keys())
def __init__(self):
if HAVE_GUDEV:
self._uc = GUdev.Client()
else:
self._uc = None
def is_supported(self, tag):
LOG.debug("DetectorUdev.is_supported: '%s'" % tag)
if self._uc is None:
return HardwareSupported.UNKNOWN
for device in self._uc.query_by_subsystem(None):
#print device.get_property_keys(), device.get_property("DEVPATH")
# supported a (theoretical at this point) udev property that
# sets the debtag tag directly
if device.has_property("HW_DEBTAGS"):
return tag in device.get_property("HW_DEBTAGS")
# use our own device detection magic
prop = self.DEBTAG_TO_UDEV_PROPERTY.get(tag)
if prop and device.has_property(prop):
#print device.get_property(prop)
if bool(device.get_property(prop)):
return HardwareSupported.YES
else:
return HardwareSupported.NO
# use ID_TYPE
if device.has_property("ID_TYPE"):
id_type = device.get_property("ID_TYPE")
if (tag in self.DEBTAG_TO_ID_TYPE and
id_type == self.DEBTAG_TO_ID_TYPE[tag]):
return HardwareSupported.YES
# if we know about the tag and did not find it, return NO
# (LP: #1020057)
if tag in self.SUPPORTED_TAGS:
return HardwareSupported.NO
# otherwise its UNKNOWN
return HardwareSupported.UNKNOWN
def get_supported_tags(self):
return self.SUPPORTED_TAGS
class DetectorCmdline(Detector):
""" detect hardware using cmdline helpers """
LAPTOP_DETECT = "/usr/sbin/laptop-detect"
SCANIMAGE = ["scanimage", "-L"]
# hardware::laptop
def _run_check_hardware__laptop(self):
if os.path.exists(self.LAPTOP_DETECT):
if subprocess.call([self.LAPTOP_DETECT]) == 0:
return HardwareSupported.YES
else:
return HardwareSupported.NO
else:
LOG.warn(
"No laptop-detect '%s' helper found" % self.LAPTOP_DETECT)
            return HardwareSupported.UNKNOWN
# hardware::scanner
def _run_check_hardware__scanner(self):
# note that this is slow to run (1-2s)
#ver = c_int()
#devices = c_long()
#sane = cdll.LoadLibrary("libsane.so.1")
#res = sane.sane_init(byref(ver), None)
#print res, ver
#if not res == SANE_STATUS_GOOD:
# return False
#print res
#sane.sane_get_devices(byref(devices), False)
# device is SANE_device** device_list how to get data?
#
# Note: you can use multiprocessing.Pool.map to run all checks in
# parallel
try:
output = subprocess.check_output(self.SCANIMAGE,
universal_newlines=True)
if output.startswith("device"):
return HardwareSupported.YES
else:
return HardwareSupported.NO
except Exception:
LOG.warn("error running '%s'" % self.SCANIMAGE)
return HardwareSupported.UNKNOWN
class DetectorCtypes(Detector):
""" detect hardware using ctypes c calls """
def __init__(self):
self.TAG_TO_FUNC = {
'hardware::video:opengl' : self._is_supported,
}
def _is_supported(self):
return opengl.run_check()
def is_supported(self, tag):
if tag in self.TAG_TO_FUNC:
func = self.TAG_TO_FUNC[tag]
res = func()
if res is True:
return HardwareSupported.YES
elif res is False:
return HardwareSupported.NO
return HardwareSupported.UNKNOWN
def get_supported_tags(self):
return list(self.TAG_TO_FUNC.keys())
class DetectorPython(Detector):
""" detect hadware using python imports """
# hardware::printer
def _run_check_hardware__printer(self):
try:
# alternative use lpstat -p
import cups
c = cups.Connection()
if len(c.getPrinters()) > 0:
return HardwareSupported.YES
else:
return HardwareSupported.NO
except ImportError:
LOG.warn("No python-cups installed")
except:
LOG.exception("_run_cups_check")
return HardwareSupported.UNKNOWN
def get_detectors():
""" hepler that returns a list of all lowlevel detector classes """
# introspect the detectors modules to load all availalbe detectors
detectors = []
for name, klass in globals().items():
if name.startswith("Detector"):
detectors.append(klass())
return detectors
```
#### File: dist-packages/deepin_utils/font.py
```python
import pangocairo
def font_name_just_contain_english(font_name):
for font_char in font_name.decode('utf-8'):
if not (
            # Is it an English char?
(font_char >= u'\u0041' and font_char <=u'\u005a') or (font_char >= u'\u0061' and font_char <=u'\u007a')
            # Is it a space char?
or font_char == ' '
            # Is it a number char?
or (font_char >= u'\u0030' and font_char <=u'\u0039')):
return False
return True
def get_font_families(filter_terminal_font=False):
'''Get all font families in system.'''
fontmap = pangocairo.cairo_font_map_get_default()
font_families = fontmap.list_families()
if filter_terminal_font:
font_families = filter(lambda f:
f.is_monospace()
or f.get_name() == "文泉驿等宽微米黑",
filter(lambda f:
not f.get_name() in ["Droid Sans Japanese", "MT Extra", "Monospace"],
font_families))
return sorted(map(lambda f: f.get_name(), font_families))
```
#### File: dist-packages/deepin_utils/multithread.py
```python
import threading
def create_thread(target, args=()):
thread = threading.Thread(target=target, args=args)
thread.setDaemon(True)
return thread
```
#### File: dist-packages/deepin_utils/net.py
```python
import time
import socket
import commands
import traceback
import sys
import dbus
def is_network_connected_by_nm():
sys_bus = dbus.SystemBus()
proxy = sys_bus.get_object("org.freedesktop.NetworkManager","/org/freedesktop/NetworkManager")
interface = dbus.Interface(proxy, "org.freedesktop.NetworkManager")
return interface.state() == 70
def is_network_connected_by_route():
return len(commands.getoutput("route -nNvee").split("\n")) > 2
def is_network_connected():
'''
    First try the NetworkManager D-Bus API; if that fails, fall back to `route -nNvee`.
    The network is treated as disconnected when `route -nNvee` prints no routes.
    @return: Return True if the network is connected, or if the fallback check itself fails.
'''
try:
return is_network_connected_by_nm()
except:
print "get network state by dbus failed, then try route"
traceback.print_exc(file=sys.stdout)
try:
return is_network_connected_by_route()
except Exception, e:
print "function is_network_connected got error: %s" % e
traceback.print_exc(file=sys.stdout)
return True
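# Note (commentary): state() == 70 corresponds to NM_STATE_CONNECTED_GLOBAL in
# NetworkManager 0.9+, so only a fully routed connection counts as connected;
# the `route -nNvee` parsing is purely a fallback when the D-Bus query fails.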
def get_unused_port(address="localhost"):
s = socket.socket()
s.bind((address, 0))
return s.getsockname()[1]
def check_connect_by_port(port, retry_times=6, sleep_time=0.5):
"""
    Check whether a connection can be made to the given port on localhost.
@param port: Test port.
@param retry_times: Retry times.
@param sleep_time: Sleep time between retry, in seconds.
@return: Return True if given port is active.
"""
ret_val = False
test_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
retry_time = 0
while (True):
try:
test_socket.connect(("localhost", port))
ret_val = True
break
except socket.error:
time.sleep(sleep_time)
retry_time += 1
if retry_time >= retry_times:
break
else:
continue
return ret_val
```
#### File: dist-packages/defer/utils.py
```python
__author__ = "<NAME> <<EMAIL>>"
__all__ = ("dbus_deferred_method", "deferable")
from functools import wraps
import inspect
import dbus
from . import defer, Deferred, DeferredException
def dbus_deferred_method(*args, **kwargs):
"""Export the decorated method on the D-Bus and handle a maybe
returned Deferred.
This decorator can be applied to methods in the same way as the
@dbus.service.method method, but it correctly handles the case where
the method returns a Deferred.
This decorator was kindly taken from James Henstridge blog post and
adopted:
http://blogs.gnome.org/jamesh/2009/07/06/watching-iview-with-rygel/
"""
def decorator(function):
function = dbus.service.method(*args, **kwargs)(function)
@wraps(function)
def wrapper(*args, **kwargs):
def ignore_none_callback(*cb_args):
# The deferred method at least returns an tuple containing
# only None. Ignore this case.
if cb_args == (None,):
dbus_callback()
else:
dbus_callback(*cb_args)
dbus_callback = kwargs.pop('_dbus_callback')
dbus_errback = kwargs.pop('_dbus_errback')
deferred = defer(function, *args, **kwargs)
deferred.add_callback(ignore_none_callback)
deferred.add_errback(lambda error: dbus_errback(error.value))
# The @wraps decorator has copied over the attributes added by
# the @dbus.service.method decorator, but we need to manually
# set the async callback attributes.
wrapper._dbus_async_callbacks = ('_dbus_callback', '_dbus_errback')
return wrapper
return decorator
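# Usage sketch (hypothetical interface and method names):
#   @dbus_deferred_method("org.example.Lookup", in_signature="s", out_signature="s")
#   def Lookup(self, key):
#       return defer(self._slow_lookup, key)   # may return a Deferred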
def deferable(func):
"""Add a defer attribute to the decorated function and return a Deferred
object. The callback of the Deferred will be passed as reply_handler
argument and the errback as the error_handler argument to the decorated
function.
This decorator allows to easily make use of Deferreds in a DBus client.
"""
@wraps(func)
def _deferable(*args, **kwargs):
def on_error(error, deferred):
# Make sure that we return a deferred exception
if isinstance(error, DeferredException):
deferred.errback(error)
else:
deferred.errback(DeferredException(error))
try:
# Check if the defer argument was specified
to_defer = kwargs.pop("defer")
except KeyError:
# Check if this function was called from an inline_callbacks
# decorated method
stack = inspect.stack()
try:
to_defer = stack[2][3] == "_inline_callbacks"
except IndexError:
to_defer = False
if to_defer:
deferred = Deferred()
kwargs["reply_handler"] = deferred.callback
kwargs["error_handler"] = lambda err: on_error(err, deferred)
func(*args, **kwargs)
return deferred
return func(*args, **kwargs)
return _deferable
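# Usage sketch (hypothetical names): given a D-Bus proxy method that accepts
# reply_handler/error_handler keyword arguments,
#   @deferable
#   def list_shares(reply_handler=None, error_handler=None):
#       proxy.ListShares(reply_handler=reply_handler, error_handler=error_handler)
#   d = list_shares(defer=True)    # returns a Deferred instead of None
#   d.add_callback(print_shares)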
# vim:tw=4:sw=4:et
```
#### File: dist-packages/dirspec/utils.py
```python
from __future__ import unicode_literals, print_function
import errno
import os
import sys
__all__ = ['user_home',
'default_cache_home',
'default_config_home',
'default_config_path',
'default_data_home',
'default_data_path',
'get_env_path',
'get_program_path',
'unicode_path',
]
def _get_exe_path_frozen_win32(exe_name):
"""Get path to the helper .exe on packaged windows."""
# all the .exes are in the same place on windows:
cur_exec_path = os.path.abspath(sys.executable)
exe_dir = os.path.dirname(cur_exec_path)
return os.path.join(exe_dir, exe_name + ".exe")
def _get_exe_path_frozen_darwin(exe_name, app_names):
"""Get path to the sub-app executable on packaged darwin."""
sub_app_name = app_names[exe_name]
main_app_dir = "".join(__file__.partition(".app")[:-1])
main_app_resources_dir = os.path.join(main_app_dir,
"Contents",
"Resources")
exe_bin = os.path.join(main_app_resources_dir,
sub_app_name,
"Contents", "MacOS",
exe_name)
return exe_bin
def get_program_path(program_name, *args, **kwargs):
"""Given a program name, returns the path to run that program.
Raises OSError if the program is not found.
:param program_name: The name of the program to find. For darwin and win32
platforms, the behavior is changed slightly, when sys.frozen is set,
to look in the packaged program locations for the program.
:param search_dirs: A list of directories to look for the program in. This
is only available as a keyword argument.
:param app_names: A dict of program names mapped to sub-app names. Used
for discovering paths in embedded .app bundles on the darwin platform.
This is only available as a keyword argument.
:return: The path to the discovered program.
"""
search_dirs = kwargs.get('fallback_dirs', None)
app_names = kwargs.get('app_names', None)
if getattr(sys, "frozen", None) is not None:
if sys.platform == 'win32':
program_path = _get_exe_path_frozen_win32(program_name)
elif sys.platform == 'darwin':
program_path = _get_exe_path_frozen_darwin(program_name,
app_names)
else:
raise Exception("Unsupported platform for frozen execution: %r" %
sys.platform)
else:
if search_dirs is not None:
for dirname in search_dirs:
program_path = os.path.join(dirname, program_name)
if os.path.exists(program_path):
return program_path
else:
# Check in normal system $PATH, if no fallback dirs specified
from distutils.spawn import find_executable
program_path = find_executable(program_name)
if program_path is None or not os.path.exists(program_path):
raise OSError(errno.ENOENT,
"Could not find executable %r" % program_name)
return program_path
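# Usage sketch (program name and fallback directory are invented):
#   path = get_program_path("my-helper", fallback_dirs=["/opt/myapp/bin"])
# raises OSError(errno.ENOENT, ...) when no matching executable is found.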
def get_env_path(key, default):
"""Get a UTF-8 encoded path from an environment variable."""
if key in os.environ:
# on windows, environment variables are mbcs bytes
# so we must turn them into utf-8 Syncdaemon paths
try:
path = os.environb.get(key.encode('utf-8'))
except AttributeError:
path = os.environ[key]
return path.decode(sys.getfilesystemencoding()).encode('utf-8')
else:
if not isinstance(default, bytes):
return default.encode('utf-8')
return default
def unicode_path(utf8path):
"""Turn an utf8 path into a unicode path."""
if isinstance(utf8path, bytes):
return utf8path.decode("utf-8")
return utf8path
def get_special_folders():
""" Routine to grab all the Windows Special Folders locations.
If successful, returns dictionary
of shell folder locations indexed on Windows keyword for each;
otherwise, returns an empty dictionary.
"""
# pylint: disable=W0621, F0401, E0611
special_folders = {}
if sys.platform == 'win32':
from win32com.shell import shell, shellcon
# CSIDL_LOCAL_APPDATA = C:\Users\<username>\AppData\Local
# CSIDL_PROFILE = C:\Users\<username>
# CSIDL_COMMON_APPDATA = C:\ProgramData
# More information on these constants at
# http://msdn.microsoft.com/en-us/library/bb762494
# per http://msdn.microsoft.com/en-us/library/windows/desktop/bb762181,
# SHGetFolderPath is deprecated, replaced by SHGetKnownFolderPath
# (http://msdn.microsoft.com/en-us/library/windows/desktop/bb762188)
get_path = lambda name: shell.SHGetFolderPath(
0, getattr(shellcon, name), None, 0).encode('utf8')
special_folders['Personal'] = get_path("CSIDL_PROFILE")
special_folders['Local AppData'] = get_path("CSIDL_LOCAL_APPDATA")
special_folders['AppData'] = os.path.dirname(
special_folders['Local AppData'])
special_folders['Common AppData'] = get_path("CSIDL_COMMON_APPDATA")
return special_folders
# pylint: disable=C0103
if sys.platform == 'win32':
special_folders = get_special_folders()
user_home = special_folders['Personal']
default_config_path = special_folders['Common AppData']
default_config_home = special_folders['Local AppData']
default_data_path = os.path.join(default_config_path, b'xdg')
default_data_home = os.path.join(default_config_home, b'xdg')
default_cache_home = os.path.join(default_data_home, b'cache')
elif sys.platform == 'darwin':
user_home = os.path.expanduser(b'~')
default_cache_home = os.path.join(user_home, b'Library', b'Caches')
default_config_path = b'/Library/Preferences:/etc/xdg'
default_config_home = os.path.join(user_home, b'Library', b'Preferences')
default_data_path = b':'.join([b'/Library/Application Support',
b'/usr/local/share',
b'/usr/share'])
default_data_home = os.path.join(user_home, b'Library',
b'Application Support')
else:
user_home = os.path.expanduser(b'~')
default_cache_home = os.path.join(user_home,
b'.cache')
default_config_path = b'/etc/xdg'
default_config_home = os.path.join(user_home,
b'.config')
default_data_path = b'/usr/local/share:/usr/share'
default_data_home = os.path.join(user_home,
b'.local', b'share')
```
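A minimal usage sketch for the path helpers above. The module alias `path_utils`, the program name, the fallback directories, and the environment variable are hypothetical placeholders; on a frozen win32/darwin build, `get_program_path` instead resolves against the packaged locations as documented.
```python
# Hypothetical import: assumes the module above is importable as `path_utils`.
import path_utils

# Look up a helper binary in two fallback directories; when no fallback
# directories are given, the system $PATH is searched instead.
try:
    helper = path_utils.get_program_path(
        "my-helper",
        fallback_dirs=["/opt/myapp/bin", "/usr/local/libexec/myapp"])
except OSError:
    helper = None  # not installed; the caller decides how to degrade

# Read an override path from the environment, falling back to the platform
# default computed at module import time; the result is UTF-8 encoded bytes.
config_dir = path_utils.get_env_path("MYAPP_CONFIG_DIR",
                                     path_utils.default_config_home)

# Decode the UTF-8 byte path into unicode before handing it to a UI layer.
print(path_utils.unicode_path(config_dir))
```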
#### File: dtk/ui/button.py
```python
from cache_pixbuf import CachePixbuf
from constant import DEFAULT_FONT_SIZE
from draw import draw_vlinear, draw_pixbuf, draw_line, draw_text
from keymap import get_keyevent_name
from label import Label
from theme import ui_theme
from utils import is_in_rect
import gobject
import gtk
import pango
from deepin_utils.process import run_command
from utils import (get_content_size, color_hex_to_cairo, propagate_expose, set_clickable_cursor,
window_is_max, get_same_level_widgets, widget_fix_cycle_destroy_bug,
get_widget_root_coordinate, WIDGET_POS_BOTTOM_LEFT)
__all__ = ["Button", "ImageButton", "ThemeButton",
"MenuButton", "MinButton", "CloseButton",
"MaxButton", "ToggleButton", "ActionButton",
"CheckButton", "RadioButton", "DisableButton",
"LinkButton", "ComboButton", "SwitchButton"]
class Button(gtk.Button):
'''
Button with Deepin UI style.
@undocumented: key_press_button
@undocumented: expose_button
'''
def __init__(self,
label="",
font_size=DEFAULT_FONT_SIZE):
'''
Initialize Button class.
@param label: Button label.
@param font_size: Button label font size.
'''
gtk.Button.__init__(self)
self.font_size = font_size
self.min_width = 69
self.min_height = 22
self.padding_x = 15
self.padding_y = 3
self.set_label(label)
self.connect("expose-event", self.expose_button)
self.connect("key-press-event", self.key_press_button)
self.keymap = {
"Return" : self.clicked
}
def set_label(self, label, font_size=DEFAULT_FONT_SIZE):
'''
Set label of Button.
@param label: Button label.
@param font_size: Button label font size.
'''
self.label = label
(self.label_width, self.label_height) = get_content_size(label, self.font_size)
self.set_size_request(max(self.label_width + self.padding_x * 2, self.min_width),
max(self.label_height + self.padding_y * 2, self.min_height))
self.queue_draw()
def key_press_button(self, widget, event):
'''
        Callback for `key-press-event` signal.
        @param widget: Button widget.
        @param event: Key press event.
'''
key_name = get_keyevent_name(event)
if self.keymap.has_key(key_name):
self.keymap[key_name]()
def expose_button(self, widget, event):
'''
Callback for `expose-event` signal.
@param widget: Button widget.
        @param event: Expose event.
'''
# Init.
cr = widget.window.cairo_create()
rect = widget.allocation
x, y, w, h = rect.x, rect.y, rect.width, rect.height
# Get color info.
if widget.state == gtk.STATE_NORMAL:
text_color = ui_theme.get_color("button_font").get_color()
border_color = ui_theme.get_color("button_border_normal").get_color()
background_color = ui_theme.get_shadow_color("button_background_normal").get_color_info()
elif widget.state == gtk.STATE_PRELIGHT:
text_color = ui_theme.get_color("button_font").get_color()
border_color = ui_theme.get_color("button_border_prelight").get_color()
background_color = ui_theme.get_shadow_color("button_background_prelight").get_color_info()
elif widget.state == gtk.STATE_ACTIVE:
text_color = ui_theme.get_color("button_font").get_color()
border_color = ui_theme.get_color("button_border_active").get_color()
background_color = ui_theme.get_shadow_color("button_background_active").get_color_info()
elif widget.state == gtk.STATE_INSENSITIVE:
text_color = ui_theme.get_color("disable_text").get_color()
border_color = ui_theme.get_color("disable_frame").get_color()
disable_background_color = ui_theme.get_color("disable_background").get_color()
background_color = [(0, (disable_background_color, 1.0)),
(1, (disable_background_color, 1.0))]
# Draw background.
draw_vlinear(
cr,
x + 1, y + 1, w - 2, h - 2,
background_color)
# Draw border.
cr.set_source_rgb(*color_hex_to_cairo(border_color))
draw_line(cr, x + 2, y + 1, x + w - 2, y + 1) # top
draw_line(cr, x + 2, y + h, x + w - 2, y + h) # bottom
draw_line(cr, x + 1, y + 2, x + 1, y + h - 2) # left
draw_line(cr, x + w, y + 2, x + w, y + h - 2) # right
# Draw four point.
if widget.state == gtk.STATE_INSENSITIVE:
top_left_point = ui_theme.get_pixbuf("button/disable_corner.png").get_pixbuf()
else:
top_left_point = ui_theme.get_pixbuf("button/corner.png").get_pixbuf()
top_right_point = top_left_point.rotate_simple(270)
bottom_right_point = top_left_point.rotate_simple(180)
bottom_left_point = top_left_point.rotate_simple(90)
draw_pixbuf(cr, top_left_point, x, y)
draw_pixbuf(cr, top_right_point, x + w - top_left_point.get_width(), y)
draw_pixbuf(cr, bottom_left_point, x, y + h - top_left_point.get_height())
draw_pixbuf(cr, bottom_right_point, x + w - top_left_point.get_width(), y + h - top_left_point.get_height())
# Draw font.
draw_text(cr, self.label, x, y, w, h, self.font_size, text_color,
alignment=pango.ALIGN_CENTER)
return True
gobject.type_register(Button)
class ImageButton(gtk.Button):
'''
ImageButton class.
'''
def __init__(self,
normal_dpixbuf,
hover_dpixbuf,
press_dpixbuf,
scale_x=False,
content=None,
insensitive_dpixbuf=None):
'''
Initialize ImageButton class.
@param normal_dpixbuf: DynamicPixbuf for button normal status.
@param hover_dpixbuf: DynamicPixbuf for button hover status.
@param press_dpixbuf: DynamicPixbuf for button press status.
        @param scale_x: Whether to scale horizontally, default is False.
@param content: Button label content.
        @param insensitive_dpixbuf: DynamicPixbuf for button insensitive status, default is None.
'''
gtk.Button.__init__(self)
cache_pixbuf = CachePixbuf()
draw_button(self,
cache_pixbuf,
normal_dpixbuf,
hover_dpixbuf,
press_dpixbuf,
scale_x,
content,
insensitive_dpixbuf=insensitive_dpixbuf)
def set_active(self, is_active):
'''
Set active status.
@param is_active: Set as True to make ImageButton active.
'''
if is_active:
self.set_state(gtk.STATE_PRELIGHT)
else:
self.set_state(gtk.STATE_NORMAL)
gobject.type_register(ImageButton)
class ThemeButton(gtk.Button):
'''
ThemeButton class.
'''
def __init__(self):
'''
Initialize ThemeButton class.
'''
gtk.Button.__init__(self)
self.cache_pixbuf = CachePixbuf()
draw_button(
self,
self.cache_pixbuf,
ui_theme.get_pixbuf("button/window_theme_normal.png"),
ui_theme.get_pixbuf("button/window_theme_hover.png"),
ui_theme.get_pixbuf("button/window_theme_press.png"))
gobject.type_register(ThemeButton)
class MenuButton(gtk.Button):
'''
MenuButton class.
'''
def __init__(self):
'''
Initialize MenuButton class.
'''
gtk.Button.__init__(self)
self.cache_pixbuf = CachePixbuf()
draw_button(
self,
self.cache_pixbuf,
ui_theme.get_pixbuf("button/window_menu_normal.png"),
ui_theme.get_pixbuf("button/window_menu_hover.png"),
ui_theme.get_pixbuf("button/window_menu_press.png"))
gobject.type_register(MenuButton)
class MinButton(gtk.Button):
'''
MinButton.
'''
def __init__(self):
'''
Initialize MinButton class.
'''
gtk.Button.__init__(self)
self.cache_pixbuf = CachePixbuf()
draw_button(
self,
self.cache_pixbuf,
ui_theme.get_pixbuf("button/window_min_normal.png"),
ui_theme.get_pixbuf("button/window_min_hover.png"),
ui_theme.get_pixbuf("button/window_min_press.png"))
gobject.type_register(MinButton)
class CloseButton(gtk.Button):
'''
CloseButton class.
'''
def __init__(self):
'''
Initialize CloseButton class.
'''
gtk.Button.__init__(self)
self.cache_pixbuf = CachePixbuf()
draw_button(
self,
self.cache_pixbuf,
ui_theme.get_pixbuf("button/window_close_normal.png"),
ui_theme.get_pixbuf("button/window_close_hover.png"),
ui_theme.get_pixbuf("button/window_close_press.png"))
gobject.type_register(CloseButton)
class MaxButton(gtk.Button):
'''
MaxButton class.
'''
def __init__(self,
sub_dir="button",
max_path_prefix="window_max",
unmax_path_prefix="window_unmax"):
'''
Initialize MaxButton class.
@param sub_dir: Subdirectory of button images.
@param max_path_prefix: Image path prefix for maximise status.
@param unmax_path_prefix: Image path prefix for un-maximise status.
'''
gtk.Button.__init__(self)
self.cache_pixbuf = CachePixbuf()
draw_max_button(self, self.cache_pixbuf, sub_dir, max_path_prefix, unmax_path_prefix)
gobject.type_register(MaxButton)
def draw_button(widget,
cache_pixbuf,
normal_dpixbuf,
hover_dpixbuf,
press_dpixbuf,
scale_x=False,
button_label=None,
font_size=DEFAULT_FONT_SIZE,
label_dcolor=ui_theme.get_color("button_default_font"),
insensitive_dpixbuf=None,
):
'''
Draw button.
@param widget: Gtk.Widget instance.
@param cache_pixbuf: CachePixbuf.
@param normal_dpixbuf: DynamicPixbuf of normal status.
@param hover_dpixbuf: DynamicPixbuf of hover status.
@param press_dpixbuf: DynamicPixbuf of press status.
    @param scale_x: Whether the button scales horizontally with its content.
@param button_label: Button label, default is None.
@param font_size: Button label font size, default is DEFAULT_FONT_SIZE.
@param label_dcolor: Button label color.
    @param insensitive_dpixbuf: DynamicPixbuf of insensitive status, default is None.
'''
# Init request size.
if scale_x:
request_width = get_content_size(button_label, font_size)[0]
else:
request_width = normal_dpixbuf.get_pixbuf().get_width()
request_height = normal_dpixbuf.get_pixbuf().get_height()
widget.set_size_request(request_width, request_height)
# Expose button.
widget.connect("expose-event", lambda w, e: expose_button(
w, e,
cache_pixbuf,
scale_x, False,
normal_dpixbuf, hover_dpixbuf, press_dpixbuf,
button_label, font_size, label_dcolor, insensitive_dpixbuf))
def expose_button(widget,
event,
cache_pixbuf,
scale_x,
scale_y,
normal_dpixbuf,
hover_dpixbuf,
press_dpixbuf,
button_label,
font_size,
label_dcolor,
insensitive_dpixbuf=None):
'''
Expose callback for L{ I{draw_button} <draw_button>}.
    @param widget: Gtk.Widget instance.
    @param event: Expose event.
    @param cache_pixbuf: CachePixbuf.
@param scale_x: Whether button scale width with content.
@param scale_y: Whether button scale height with content.
@param normal_dpixbuf: DynamicPixbuf of normal status.
@param hover_dpixbuf: DynamicPixbuf of hover status.
@param press_dpixbuf: DynamicPixbuf of press status.
@param button_label: Button label, default is None.
@param font_size: Button label font size, default is DEFAULT_FONT_SIZE.
@param label_dcolor: Button label color.
@param insensitive_dpixbuf: DynamicPixbuf of insensitive status.
'''
# Init.
rect = widget.allocation
image = None
    # Get pixbuf along with button's state.
if widget.state == gtk.STATE_NORMAL:
image = normal_dpixbuf.get_pixbuf()
elif widget.state == gtk.STATE_PRELIGHT:
image = hover_dpixbuf.get_pixbuf()
elif widget.state == gtk.STATE_ACTIVE:
image = press_dpixbuf.get_pixbuf()
elif widget.state == gtk.STATE_INSENSITIVE:
if insensitive_dpixbuf == None:
insensitive_dpixbuf = normal_dpixbuf
image = insensitive_dpixbuf.get_pixbuf()
# Init size.
if scale_x:
image_width = widget.allocation.width
else:
image_width = image.get_width()
if scale_y:
image_height = widget.allocation.height
else:
image_height = image.get_height()
# Draw button.
pixbuf = image
if pixbuf.get_width() != image_width or pixbuf.get_height() != image_height:
cache_pixbuf.scale(image, image_width, image_height)
pixbuf = cache_pixbuf.get_cache()
cr = widget.window.cairo_create()
draw_pixbuf(cr, pixbuf, widget.allocation.x, widget.allocation.y)
# Draw font.
if button_label:
draw_text(cr, button_label,
rect.x, rect.y, rect.width, rect.height,
font_size,
label_dcolor.get_color(),
alignment=pango.ALIGN_CENTER
)
# Propagate expose to children.
propagate_expose(widget, event)
return True
def draw_max_button(widget, cache_pixbuf, sub_dir, max_path_prefix, unmax_path_prefix):
'''
Draw maximum button.
@param widget: Gtk.Widget instance.
@param cache_pixbuf: CachePixbuf to avoid unnecessary pixbuf new operation.
@param sub_dir: Subdirectory of button.
@param max_path_prefix: Prefix of maximum image path.
@param unmax_path_prefix: Prefix of un-maximum image path.
'''
# Init request size.
pixbuf = ui_theme.get_pixbuf("%s/%s_normal.png" % (sub_dir, unmax_path_prefix)).get_pixbuf()
widget.set_size_request(pixbuf.get_width(), pixbuf.get_height())
# Redraw.
widget.connect("expose-event", lambda w, e:
expose_max_button(w, e,
cache_pixbuf,
sub_dir, max_path_prefix, unmax_path_prefix))
def expose_max_button(widget, event, cache_pixbuf, sub_dir, max_path_prefix, unmax_path_prefix):
'''
Expose callback for L{ I{draw_max_button} <draw_max_button>}.
@param widget: Gtk.Widget instance.
@param event: Expose event.
@param cache_pixbuf: CachePixbuf to avoid unnecessary new pixbuf operation.
@param sub_dir: Subdirectory for image path.
@param max_path_prefix: Prefix of maximum image path.
@param unmax_path_prefix: Prefix of un-maximum image path.
'''
# Get dynamic pixbuf.
if window_is_max(widget):
normal_dpixbuf = ui_theme.get_pixbuf("%s/%s_normal.png" % (sub_dir, unmax_path_prefix))
hover_dpixbuf = ui_theme.get_pixbuf("%s/%s_hover.png" % (sub_dir, unmax_path_prefix))
press_dpixbuf = ui_theme.get_pixbuf("%s/%s_press.png" % (sub_dir, unmax_path_prefix))
else:
normal_dpixbuf = ui_theme.get_pixbuf("%s/%s_normal.png" % (sub_dir, max_path_prefix))
hover_dpixbuf = ui_theme.get_pixbuf("%s/%s_hover.png" % (sub_dir, max_path_prefix))
press_dpixbuf = ui_theme.get_pixbuf("%s/%s_press.png" % (sub_dir, max_path_prefix))
    # Get pixbuf along with button's state.
if widget.state == gtk.STATE_NORMAL:
image = normal_dpixbuf.get_pixbuf()
elif widget.state == gtk.STATE_PRELIGHT:
image = hover_dpixbuf.get_pixbuf()
elif widget.state == gtk.STATE_ACTIVE:
image = press_dpixbuf.get_pixbuf()
# Init size.
image_width = image.get_width()
image_height = image.get_height()
# Draw button.
pixbuf = image
if pixbuf.get_width() != image_width or pixbuf.get_height() != image_height:
cache_pixbuf.scale(image, image_width, image_height)
pixbuf = cache_pixbuf.get_cache()
cr = widget.window.cairo_create()
draw_pixbuf(cr, pixbuf, widget.allocation.x, widget.allocation.y)
# Propagate expose to children.
propagate_expose(widget, event)
return True
class ToggleButton(gtk.ToggleButton):
'''
ToggleButton class.
@undocumented: press_toggle_button
@undocumented: release_toggle_button
@undocumented: expose_toggle_button
@undocumented: set_inactive_pixbuf_group
@undocumented: set_active_pixbuf_group
'''
def __init__(self,
inactive_normal_dpixbuf,
active_normal_dpixbuf,
inactive_hover_dpixbuf=None,
active_hover_dpixbuf=None,
inactive_press_dpixbuf=None,
active_press_dpixbuf=None,
inactive_disable_dpixbuf=None,
active_disable_dpixbuf=None,
button_label=None,
padding_x=0,
font_size=DEFAULT_FONT_SIZE):
'''
Initialize ToggleButton class.
@param inactive_normal_dpixbuf: DynamicPixbuf for inactive normal status.
@param active_normal_dpixbuf: DynamicPixbuf for active normal status.
@param inactive_hover_dpixbuf: DynamicPixbuf for inactive hover status, default is None.
@param active_hover_dpixbuf: DynamicPixbuf for active hover status, default is None.
@param inactive_press_dpixbuf: DynamicPixbuf for inactive press status, default is None.
@param active_press_dpixbuf: DynamicPixbuf for active press status, default is None.
@param inactive_disable_dpixbuf: DynamicPixbuf for inactive disable status, default is None.
@param active_disable_dpixbuf: DynamicPixbuf for active disable status, default is None.
@param button_label: Button label, default is None.
@param padding_x: Padding x, default is 0.
@param font_size: Font size, default is DEFAULT_FONT_SIZE.
'''
gtk.ToggleButton.__init__(self)
self.font_size = font_size
label_dcolor = ui_theme.get_color("button_default_font")
self.button_press_flag = False
self.inactive_pixbuf_group = (inactive_normal_dpixbuf,
inactive_hover_dpixbuf,
inactive_press_dpixbuf,
inactive_disable_dpixbuf)
self.active_pixbuf_group = (active_normal_dpixbuf,
active_hover_dpixbuf,
active_press_dpixbuf,
active_disable_dpixbuf)
# Init request size.
label_width = 0
button_width = inactive_normal_dpixbuf.get_pixbuf().get_width()
button_height = inactive_normal_dpixbuf.get_pixbuf().get_height()
if button_label:
label_width = get_content_size(button_label, self.font_size)[0]
self.set_size_request(button_width + label_width + padding_x * 2,
button_height)
self.connect("button-press-event", self.press_toggle_button)
self.connect("button-release-event", self.release_toggle_button)
# Expose button.
self.connect("expose-event", lambda w, e : self.expose_toggle_button(
w, e,
button_label, padding_x, self.font_size, label_dcolor))
def press_toggle_button(self, widget, event):
'''
Callback for `button-press-event` signal.
@param widget: ToggleButton widget.
@param event: Button press event.
'''
self.button_press_flag = True
self.queue_draw()
def release_toggle_button(self, widget, event):
'''
        Callback for `button-release-event` signal.
@param widget: ToggleButton widget.
@param event: Button release event.
'''
self.button_press_flag = False
self.queue_draw()
def expose_toggle_button(self, widget, event,
button_label, padding_x, font_size, label_dcolor):
'''
Callback for `expose-event` signal.
@param widget: ToggleButton widget.
@param event: Expose event.
@param button_label: Button label string.
        @param padding_x: Horizontal padding value.
@param font_size: Font size.
@param label_dcolor: Label DynamicColor.
'''
# Init.
inactive_normal_dpixbuf, inactive_hover_dpixbuf, inactive_press_dpixbuf, inactive_disable_dpixbuf = self.inactive_pixbuf_group
active_normal_dpixbuf, active_hover_dpixbuf, active_press_dpixbuf, active_disable_dpixbuf = self.active_pixbuf_group
rect = widget.allocation
image = inactive_normal_dpixbuf.get_pixbuf()
        # Get pixbuf along with button's state.
if widget.state == gtk.STATE_INSENSITIVE:
if widget.get_active():
image = active_disable_dpixbuf.get_pixbuf()
else:
image = inactive_disable_dpixbuf.get_pixbuf()
elif widget.state == gtk.STATE_NORMAL:
image = inactive_normal_dpixbuf.get_pixbuf()
elif widget.state == gtk.STATE_PRELIGHT:
if not inactive_hover_dpixbuf and not active_hover_dpixbuf:
if widget.get_active():
image = active_normal_dpixbuf.get_pixbuf()
else:
image = inactive_normal_dpixbuf.get_pixbuf()
else:
if inactive_hover_dpixbuf and active_hover_dpixbuf:
if widget.get_active():
image = active_hover_dpixbuf.get_pixbuf()
else:
image = inactive_hover_dpixbuf.get_pixbuf()
elif inactive_hover_dpixbuf:
image = inactive_hover_dpixbuf.get_pixbuf()
elif active_hover_dpixbuf:
image = active_hover_dpixbuf.get_pixbuf()
elif widget.state == gtk.STATE_ACTIVE:
if inactive_press_dpixbuf and active_press_dpixbuf:
if self.button_press_flag:
if widget.get_active():
image = active_press_dpixbuf.get_pixbuf()
else:
image = inactive_press_dpixbuf.get_pixbuf()
else:
image = active_normal_dpixbuf.get_pixbuf()
else:
image = active_normal_dpixbuf.get_pixbuf()
# Draw button.
cr = widget.window.cairo_create()
draw_pixbuf(cr, image, rect.x + padding_x, rect.y)
# Draw font.
if widget.state == gtk.STATE_INSENSITIVE:
label_color = ui_theme.get_color("disable_text").get_color()
else:
label_color = label_dcolor.get_color()
if button_label:
draw_text(cr, button_label,
rect.x + image.get_width() + padding_x * 2,
rect.y,
rect.width - image.get_width() - padding_x * 2,
rect.height,
font_size,
label_color,
alignment=pango.ALIGN_LEFT
)
# Propagate expose to children.
propagate_expose(widget, event)
return True
def set_inactive_pixbuf_group(self, new_group):
'''
Set inactive pixbuf group.
@param new_group: Inactive pixbuf group.
'''
self.inactive_pixbuf_group = new_group
def set_active_pixbuf_group(self, new_group):
'''
        Set active pixbuf group.
@param new_group: Active pixbuf group.
'''
self.active_pixbuf_group = new_group
class ActionButton(gtk.Button):
'''
ActionButton class.
@undocumented: expose_action_button
'''
def __init__(self, actions, index=0):
'''
Initialize for ActionButton class.
@param actions: Actions for button.
@param index: Action index, default is 0.
'''
gtk.Button.__init__(self)
self.actions = actions
self.index = index
pixbuf = self.actions[self.index][0][0].get_pixbuf()
self.set_size_request(pixbuf.get_width(), pixbuf.get_height())
self.connect("expose-event", self.expose_action_button)
self.connect("clicked", lambda w: self.update_action_index(w))
def update_action_index(self, widget):
'''
Update action index of ActionButton.
@param widget: ActionButton widget.
'''
# Call click callback.
self.actions[self.index][1](widget)
# Update index.
self.index += 1
if self.index >= len(self.actions):
self.index = 0
# Redraw.
self.queue_draw()
def expose_action_button(self, widget, event):
'''
Callback for `expose-event` signal.
@param widget: ActionButton widget.
@param event: Expose event.
@return: Always return True.
'''
# Init.
cr = widget.window.cairo_create()
rect = widget.allocation
if widget.state == gtk.STATE_NORMAL:
pixbuf = self.actions[self.index][0][0].get_pixbuf()
elif widget.state == gtk.STATE_PRELIGHT:
pixbuf = self.actions[self.index][0][1].get_pixbuf()
elif widget.state == gtk.STATE_ACTIVE:
pixbuf = self.actions[self.index][0][2].get_pixbuf()
draw_pixbuf(cr, pixbuf, rect.x, rect.y)
# Propagate expose to children.
propagate_expose(widget, event)
return True
gobject.type_register(ActionButton)
class CheckButton(ToggleButton):
'''
CheckButton class.
'''
def __init__(self,
label_text=None,
padding_x=2,
font_size=DEFAULT_FONT_SIZE):
'''
Initialize CheckButton class.
@param label_text: Label text.
        @param padding_x: Horizontal padding value, default is 2.
@param font_size: Font size, default is DEFAULT_FONT_SIZE.
'''
ToggleButton.__init__(
self,
ui_theme.get_pixbuf("button/check_button_inactive_normal.png"),
ui_theme.get_pixbuf("button/check_button_active_normal.png"),
ui_theme.get_pixbuf("button/check_button_inactive_hover.png"),
ui_theme.get_pixbuf("button/check_button_active_hover.png"),
ui_theme.get_pixbuf("button/check_button_inactive_press.png"),
ui_theme.get_pixbuf("button/check_button_active_press.png"),
ui_theme.get_pixbuf("button/check_button_inactive_disable.png"),
ui_theme.get_pixbuf("button/check_button_active_disable.png"),
label_text, padding_x, font_size
)
gobject.type_register(CheckButton)
class CheckAllButton(gtk.ToggleButton):
'''
CheckAllButton class.
@undocumented: handle_click_event
@undocumented: press_toggle_button
@undocumented: release_toggle_button
@undocumented: expose_toggle_button
'''
__gsignals__ = {
"active-changed" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
}
def __init__(self,
inactive_normal_dpixbuf=ui_theme.get_pixbuf("button/check_button_inactive_normal.png"),
active_normal_dpixbuf=ui_theme.get_pixbuf("button/check_button_active_normal.png"),
inactive_hover_dpixbuf=ui_theme.get_pixbuf("button/check_button_inactive_hover.png"),
active_hover_dpixbuf=ui_theme.get_pixbuf("button/check_button_active_hover.png"),
inactive_press_dpixbuf=ui_theme.get_pixbuf("button/check_button_inactive_press.png"),
active_press_dpixbuf=ui_theme.get_pixbuf("button/check_button_active_press.png"),
inactive_disable_dpixbuf=ui_theme.get_pixbuf("button/check_button_inactive_disable.png"),
active_disable_dpixbuf=ui_theme.get_pixbuf("button/check_button_active_disable.png"),
middle_disable_dpixbuf=ui_theme.get_pixbuf("button/check_button_middle_disable.png"),
middle_hover_dpixbuf=ui_theme.get_pixbuf("button/check_button_middle_hover.png"),
middle_normal_dpixbuf=ui_theme.get_pixbuf("button/check_button_middle_normal.png"),
middle_press_dpixbuf=ui_theme.get_pixbuf("button/check_button_middle_press.png"),
button_label=None,
padding_x=8,
font_size=DEFAULT_FONT_SIZE,
):
'''
Initialize for CheckAllButton class.
        @param inactive_normal_dpixbuf: DynamicPixbuf for button inactive normal status.
        @param active_normal_dpixbuf: DynamicPixbuf for button active normal status.
        @param inactive_hover_dpixbuf: DynamicPixbuf for button inactive hover status.
        @param active_hover_dpixbuf: DynamicPixbuf for button active hover status.
        @param inactive_press_dpixbuf: DynamicPixbuf for button inactive press status.
        @param active_press_dpixbuf: DynamicPixbuf for button active press status.
        @param inactive_disable_dpixbuf: DynamicPixbuf for button inactive disable status.
        @param active_disable_dpixbuf: DynamicPixbuf for button active disable status.
        @param middle_disable_dpixbuf: DynamicPixbuf for button middle (half-checked) disable status.
        @param middle_hover_dpixbuf: DynamicPixbuf for button middle (half-checked) hover status.
        @param middle_normal_dpixbuf: DynamicPixbuf for button middle (half-checked) normal status.
        @param middle_press_dpixbuf: DynamicPixbuf for button middle (half-checked) press status.
        All pixbuf parameters default to the corresponding check-button images from ui_theme.
@param button_label: Button label, default is None.
        @param padding_x: Padding x, default is 8.
@param font_size: Button label font size, default is DEFAULT_FONT_SIZE.
'''
gtk.ToggleButton.__init__(self)
self.font_size = font_size
label_dcolor = ui_theme.get_color("button_default_font")
self.button_press_flag = False
self.inactive_pixbuf_group = (inactive_normal_dpixbuf,
inactive_hover_dpixbuf,
inactive_press_dpixbuf,
inactive_disable_dpixbuf)
self.active_pixbuf_group = (active_normal_dpixbuf,
active_hover_dpixbuf,
active_press_dpixbuf,
active_disable_dpixbuf)
self.middle_pixbuf_group = (middle_normal_dpixbuf,
middle_hover_dpixbuf,
middle_press_dpixbuf,
middle_disable_dpixbuf,
)
self.in_half_status = False
# Init request size.
label_width = 0
button_width = inactive_normal_dpixbuf.get_pixbuf().get_width()
button_height = inactive_normal_dpixbuf.get_pixbuf().get_height()
if button_label:
label_width = get_content_size(button_label, self.font_size)[0]
self.set_size_request(button_width + label_width + padding_x * 2,
button_height)
self.connect("button-press-event", self.press_toggle_button)
self.connect("button-release-event", self.release_toggle_button)
# Expose button.
self.connect("expose-event", lambda w, e : self.expose_toggle_button(
w, e,
button_label, padding_x, self.font_size, label_dcolor))
self.connect("clicked", self.handle_click_event)
def update_status(self, actives):
'''
Update status of button.
        @param actives: A list of booleans with the active status of every tracked button; CheckAllButton switches to INACTIVE, ACTIVE or HALF-ACTIVE accordingly.
'''
if actives.count(True) == len(actives):
self.set_half_status(False)
self.set_active(True)
elif actives.count(False) == len(actives):
self.set_half_status(False)
self.set_active(False)
else:
self.set_active(True)
self.set_half_status(True)
self.queue_draw()
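        # Usage sketch (hypothetical names): feed the active flags of the
        # tracked CheckButtons to this method whenever one of them toggles,
        # e.g.  check_all.update_status([row.get_active() for row in rows])
        # All True -> fully checked, all False -> unchecked, mixed -> the
        # half-checked (middle) state via set_half_status(True).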
def set_half_status(self, half_status):
'''
        Set the half-checked (middle) status.
        @param half_status: True to display the middle state, False to clear it.
'''
self.in_half_status = half_status
def handle_click_event(self, widget):
'''
        Internal callback for `clicked` signal.
@param widget: The CheckAllButton widget.
'''
if self.in_half_status:
self.set_active(False)
self.in_half_status = False
self.emit("active-changed", self.get_active())
def press_toggle_button(self, widget, event):
'''
Callback for `button-press-event` signal.
@param widget: ToggleButton widget.
@param event: Button press event.
'''
self.button_press_flag = True
self.queue_draw()
def release_toggle_button(self, widget, event):
'''
        Callback for `button-release-event` signal.
@param widget: ToggleButton widget.
@param event: Button release event.
'''
self.button_press_flag = False
self.queue_draw()
def expose_toggle_button(self, widget, event,
button_label, padding_x, font_size, label_dcolor):
'''
Callback for `expose-event` signal.
@param widget: ToggleButton widget.
@param event: Expose event.
@param button_label: Button label string.
        @param padding_x: Horizontal padding value.
@param font_size: Font size.
@param label_dcolor: Label DynamicColor.
'''
# Init.
inactive_normal_dpixbuf, inactive_hover_dpixbuf, inactive_press_dpixbuf, inactive_disable_dpixbuf = self.inactive_pixbuf_group
active_normal_dpixbuf, active_hover_dpixbuf, active_press_dpixbuf, active_disable_dpixbuf = self.active_pixbuf_group
middle_normal_dpixbuf, middle_hover_dpixbuf, middle_press_dpixbuf, middle_disable_dpixbuf = self.middle_pixbuf_group
rect = widget.allocation
image = inactive_normal_dpixbuf.get_pixbuf()
        # Get pixbuf along with button's state.
if widget.state == gtk.STATE_INSENSITIVE:
if self.in_half_status:
image = middle_disable_dpixbuf.get_pixbuf()
else:
if widget.get_active():
image = active_disable_dpixbuf.get_pixbuf()
else:
image = inactive_disable_dpixbuf.get_pixbuf()
elif widget.state == gtk.STATE_NORMAL:
image = inactive_normal_dpixbuf.get_pixbuf()
elif widget.state == gtk.STATE_PRELIGHT:
if not inactive_hover_dpixbuf and not active_hover_dpixbuf:
if self.in_half_status:
image = middle_normal_dpixbuf.get_pixbuf()
else:
if widget.get_active():
image = active_normal_dpixbuf.get_pixbuf()
else:
image = inactive_normal_dpixbuf.get_pixbuf()
else:
if inactive_hover_dpixbuf and active_hover_dpixbuf:
if self.in_half_status:
image = middle_normal_dpixbuf.get_pixbuf()
else:
if widget.get_active():
image = active_hover_dpixbuf.get_pixbuf()
else:
image = inactive_hover_dpixbuf.get_pixbuf()
elif inactive_hover_dpixbuf:
image = inactive_hover_dpixbuf.get_pixbuf()
elif active_hover_dpixbuf:
if self.in_half_status:
image = middle_hover_dpixbuf.get_pixbuf()
else:
image = active_hover_dpixbuf.get_pixbuf()
elif widget.state == gtk.STATE_ACTIVE:
if inactive_press_dpixbuf and active_press_dpixbuf:
if self.button_press_flag:
if self.in_half_status:
image = middle_normal_dpixbuf.get_pixbuf()
else:
if widget.get_active():
image = active_press_dpixbuf.get_pixbuf()
else:
image = inactive_press_dpixbuf.get_pixbuf()
else:
if self.in_half_status:
image = middle_normal_dpixbuf.get_pixbuf()
else:
image = active_normal_dpixbuf.get_pixbuf()
else:
if self.in_half_status:
image = middle_normal_dpixbuf.get_pixbuf()
else:
image = active_normal_dpixbuf.get_pixbuf()
# Draw button.
cr = widget.window.cairo_create()
draw_pixbuf(cr, image, rect.x + padding_x, rect.y)
# Draw font.
if widget.state == gtk.STATE_INSENSITIVE:
label_color = ui_theme.get_color("disable_text").get_color()
else:
label_color = label_dcolor.get_color()
if button_label:
draw_text(cr, button_label,
rect.x + image.get_width() + padding_x * 2,
rect.y,
rect.width - image.get_width() - padding_x * 2,
rect.height,
font_size,
label_color,
alignment=pango.ALIGN_LEFT
)
# Propagate expose to children.
propagate_expose(widget, event)
return True
gobject.type_register(CheckAllButton)
class CheckButtonBuffer(gobject.GObject):
'''
CheckButtonBuffer class.
    Used to render a check button inside a TreeView widget.
@undocumented: render
'''
STATE_NORMAL = 1
STATE_PRELIGHT = 2
STATE_ACTIVE = 3
def __init__(self,
active=False,
render_padding_x=0,
render_padding_y=0,
):
'''
Initialize CheckButtonBuffer class.
@param active: Set True to active buffer status, default is False.
@param render_padding_x: Horizontal padding value, default is 0.
@param render_padding_y: Vertical padding value, default is 0.
'''
gobject.GObject.__init__(self)
self.inactive_normal_dpixbuf = ui_theme.get_pixbuf("button/check_button_inactive_normal.png")
self.active_normal_dpixbuf = ui_theme.get_pixbuf("button/check_button_active_normal.png")
self.inactive_hover_dpixbuf = ui_theme.get_pixbuf("button/check_button_inactive_hover.png")
self.active_hover_dpixbuf = ui_theme.get_pixbuf("button/check_button_active_hover.png")
self.inactive_press_dpixbuf = ui_theme.get_pixbuf("button/check_button_inactive_press.png")
self.active_press_dpixbuf = ui_theme.get_pixbuf("button/check_button_active_press.png")
self.render_padding_x = render_padding_x
self.render_padding_y = render_padding_y
pixbuf = self.inactive_normal_dpixbuf.get_pixbuf()
self.render_width = pixbuf.get_width()
self.render_height = pixbuf.get_height()
self.active = active
        self.button_state = self.STATE_NORMAL
        # Initialize the press flag so render() is safe before any press event.
        self.button_press_flag = False
def get_active(self):
'''
Get active status of check button buffer.
@return: Return True if buffer is in active status.
'''
return self.active
def is_in_button_area(self, x, y):
'''
        Helper function to detect whether a button event falls inside the button area.
        You can call this function from TreeItem callback functions such as:
- hover/unhover
- motion_notify
- button_press/button_release
- single_click/double_click
@param x: X coordinate of button event.
        @param y: Y coordinate of button event.
'''
return is_in_rect((x, y), (self.render_padding_x, self.render_padding_y, self.render_width, self.render_height))
def press_button(self, x, y):
'''
Helper function to handle button-press-event.
        You can call this function from TreeItem callback functions such as:
- hover/unhover
- motion_notify
- button_press/button_release
- single_click/double_click
@param x: X coordinate of button event.
        @param y: Y coordinate of button event.
'''
if self.is_in_button_area(x, y):
self.button_state = self.STATE_ACTIVE
self.button_press_flag = True
self.active = not self.active
return True
else:
return False
def release_button(self, x, y):
'''
Helper function to handle button-release-event.
        You can call this function from TreeItem callback functions such as:
- hover/unhover
- motion_notify
- button_press/button_release
- single_click/double_click
@param x: X coordinate of button event.
        @param y: Y coordinate of button event.
'''
if self.is_in_button_area(x, y):
self.button_state = self.STATE_ACTIVE
self.button_press_flag = False
return True
else:
return False
def motion_button(self, x, y):
'''
Helper function to handle motion-notify event.
        You can call this function from TreeItem callback functions such as:
- hover/unhover
- motion_notify
- button_press/button_release
- single_click/double_click
@param x: X coordinate of button event.
        @param y: Y coordinate of button event.
'''
if self.is_in_button_area(x, y):
if self.button_state != self.STATE_PRELIGHT:
self.button_state = self.STATE_PRELIGHT
return True
else:
return False
else:
if self.button_state != self.STATE_NORMAL:
self.button_state = self.STATE_NORMAL
return True
else:
return False
def render(self, cr, rect):
        # Get pixbuf along with button's state.
if self.button_state == self.STATE_NORMAL:
if self.active:
image = self.active_normal_dpixbuf.get_pixbuf()
else:
image = self.inactive_normal_dpixbuf.get_pixbuf()
elif self.button_state == self.STATE_PRELIGHT:
if self.active:
image = self.active_hover_dpixbuf.get_pixbuf()
else:
image = self.inactive_hover_dpixbuf.get_pixbuf()
elif self.button_state == self.STATE_ACTIVE:
if self.button_press_flag:
if self.active:
image = self.inactive_press_dpixbuf.get_pixbuf()
else:
image = self.active_press_dpixbuf.get_pixbuf()
else:
if self.active:
image = self.active_normal_dpixbuf.get_pixbuf()
else:
image = self.inactive_normal_dpixbuf.get_pixbuf()
# Draw button.
draw_pixbuf(
cr,
image,
rect.x + self.render_padding_x,
rect.y + self.render_padding_y)
gobject.type_register(CheckButtonBuffer)
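# Usage sketch (hypothetical TreeItem subclass): a tree item that owns a
# CheckButtonBuffer typically forwards its mouse callbacks to the buffer and
# asks for a redraw when the buffer reports a state change, e.g.
#
#     class CheckRow(TreeItem):
#         def __init__(self):
#             TreeItem.__init__(self)
#             self.check = CheckButtonBuffer(render_padding_x=4,
#                                            render_padding_y=4)
#
#         def render_check(self, cr, rect):
#             self.check.render(cr, rect)
#
#         def button_press(self, column, offset_x, offset_y):
#             if self.check.press_button(offset_x, offset_y) and \
#                     self.redraw_request_callback:
#                 self.redraw_request_callback(self)
#
# The exact TreeItem callback names depend on the treeview module and are
# assumptions here, not part of this file.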
class RadioButton(ToggleButton):
'''
RadioButton class.
@undocumented: click_radio_button
'''
def __init__(self,
label_text=None,
padding_x=2,
font_size=DEFAULT_FONT_SIZE,
):
'''
Initialize RadioButton class.
@param label_text: Label text.
        @param padding_x: Horizontal padding value, default is 2.
@param font_size: Font size, default is DEFAULT_FONT_SIZE.
'''
ToggleButton.__init__(
self,
ui_theme.get_pixbuf("button/radio_button_inactive_normal.png"),
ui_theme.get_pixbuf("button/radio_button_active_normal.png"),
ui_theme.get_pixbuf("button/radio_button_inactive_hover.png"),
ui_theme.get_pixbuf("button/radio_button_active_hover.png"),
ui_theme.get_pixbuf("button/radio_button_inactive_press.png"),
ui_theme.get_pixbuf("button/radio_button_active_press.png"),
ui_theme.get_pixbuf("button/radio_button_inactive_disable.png"),
ui_theme.get_pixbuf("button/radio_button_active_disable.png"),
label_text,
padding_x,
font_size
)
self.switch_lock = False
self.connect("clicked", self.click_radio_button)
def click_radio_button(self, widget):
'''
Callback for `clicked` signal.
@param widget: RadioButton widget.
'''
if not self.switch_lock:
for w in get_same_level_widgets(self):
w.switch_lock = True
w.set_active(w == self)
w.switch_lock = False
gobject.type_register(RadioButton)
class RadioButtonBuffer(gobject.GObject):
'''
    RadioButtonBuffer class.
    Used to render a RadioButton inside a TreeView widget.
@undocumented: render
'''
STATE_NORMAL = 1
STATE_PRELIGHT = 2
STATE_ACTIVE = 3
def __init__(self,
active=False,
render_padding_x=0,
render_padding_y=0,
):
'''
Initialize RadioButtonBuffer class.
@param active: Set True to active buffer status, default is False.
@param render_padding_x: Horizontal padding value, default is 0.
@param render_padding_y: Vertical padding value, default is 0.
'''
gobject.GObject.__init__(self)
self.inactive_normal_dpixbuf = ui_theme.get_pixbuf("button/radio_button_inactive_normal.png")
self.active_normal_dpixbuf = ui_theme.get_pixbuf("button/radio_button_active_normal.png")
self.inactive_hover_dpixbuf = ui_theme.get_pixbuf("button/radio_button_inactive_hover.png")
self.active_hover_dpixbuf = ui_theme.get_pixbuf("button/radio_button_active_hover.png")
self.inactive_press_dpixbuf = ui_theme.get_pixbuf("button/radio_button_inactive_press.png")
self.active_press_dpixbuf = ui_theme.get_pixbuf("button/radio_button_active_press.png")
self.render_padding_x = render_padding_x
self.render_padding_y = render_padding_y
pixbuf = self.inactive_normal_dpixbuf.get_pixbuf()
self.render_width = pixbuf.get_width()
self.render_height = pixbuf.get_height()
self.active = active
        self.button_state = self.STATE_NORMAL
        # Initialize the press flag so render() is safe before any press event.
        self.button_press_flag = False
def get_active(self):
'''
        Get active status of radio button buffer.
@return: Return True if buffer is in active status.
'''
return self.active
def set_active(self):
self.button_state = self.STATE_ACTIVE
self.button_press_flag = False
self.active = True
#self.queue_draw()
def is_in_button_area(self, x, y):
'''
        Helper function to detect whether a button event falls inside the button area.
        You can call this function from TreeItem callback functions such as:
- hover/unhover
- motion_notify
- button_press/button_release
- single_click/double_click
@param x: X coordinate of button event.
        @param y: Y coordinate of button event.
'''
return is_in_rect((x, y), (self.render_padding_x, self.render_padding_y, self.render_width, self.render_height))
def press_button(self, x, y):
'''
Helper function to handle button-press-event.
        You can call this function from TreeItem callback functions such as:
- hover/unhover
- motion_notify
- button_press/button_release
- single_click/double_click
@param x: X coordinate of button event.
        @param y: Y coordinate of button event.
'''
if self.is_in_button_area(x, y) and self.active == False:
self.button_state = self.STATE_ACTIVE
self.button_press_flag = True
self.active = True
return True
else:
return False
def release_button(self, x, y):
'''
Helper function to handle button-release-event.
        You can call this function from TreeItem callback functions such as:
- hover/unhover
- motion_notify
- button_press/button_release
- single_click/double_click
@param x: X coordinate of button event.
        @param y: Y coordinate of button event.
'''
if self.is_in_button_area(x, y):
self.button_state = self.STATE_ACTIVE
self.button_press_flag = False
return True
else:
return False
def motion_button(self, x, y):
'''
Helper function to handle motion-notify event.
        You can call this function from TreeItem callback functions such as:
- hover/unhover
- motion_notify
- button_press/button_release
- single_click/double_click
@param x: X coordinate of button event.
        @param y: Y coordinate of button event.
'''
if self.is_in_button_area(x, y):
if self.button_state != self.STATE_PRELIGHT:
self.button_state = self.STATE_PRELIGHT
return True
else:
return False
else:
if self.button_state != self.STATE_NORMAL:
self.button_state = self.STATE_NORMAL
return True
else:
return False
def render(self, cr, rect):
        # Get pixbuf along with button's state.
if self.button_state == self.STATE_NORMAL:
if self.active:
image = self.active_normal_dpixbuf.get_pixbuf()
else:
image = self.inactive_normal_dpixbuf.get_pixbuf()
elif self.button_state == self.STATE_PRELIGHT:
if self.active:
image = self.active_hover_dpixbuf.get_pixbuf()
else:
image = self.inactive_hover_dpixbuf.get_pixbuf()
elif self.button_state == self.STATE_ACTIVE:
if self.button_press_flag:
if self.active:
image = self.inactive_press_dpixbuf.get_pixbuf()
else:
image = self.active_press_dpixbuf.get_pixbuf()
else:
if self.active:
image = self.active_normal_dpixbuf.get_pixbuf()
else:
image = self.inactive_normal_dpixbuf.get_pixbuf()
# Draw button.
draw_pixbuf(
cr,
image,
rect.x + self.render_padding_x,
rect.y + self.render_padding_y)
gobject.type_register(RadioButtonBuffer)
class DisableButton(gtk.Button):
'''
DisableButton class.
@undocumented: expose_disable_button
'''
def __init__(self, dpixbufs):
'''
Initialize DisableButton class.
        @param dpixbufs: A tuple of DynamicPixbufs: (normal, hover, press, disable).
'''
gtk.Button.__init__(self)
pixbuf = dpixbufs[0].get_pixbuf()
self.set_size_request(pixbuf.get_width(), pixbuf.get_height())
widget_fix_cycle_destroy_bug(self)
self.connect("expose-event", lambda w, e: self.expose_disable_button(w, e, dpixbufs))
def expose_disable_button(self, widget, event, dpixbufs):
'''
Callback for `expose-event` signal.
@param widget: DisableButton widget.
@param event: Expose event.
@param dpixbufs: DynamicPixbufs.
'''
# Init.
cr = widget.window.cairo_create()
rect = widget.allocation
(normal_dpixbuf, hover_dpixbuf, press_dpixbuf, disable_dpixbuf) = dpixbufs
# Draw.
if widget.state == gtk.STATE_INSENSITIVE:
pixbuf = disable_dpixbuf.get_pixbuf()
elif widget.state == gtk.STATE_NORMAL:
pixbuf = normal_dpixbuf.get_pixbuf()
elif widget.state == gtk.STATE_PRELIGHT:
pixbuf = hover_dpixbuf.get_pixbuf()
elif widget.state == gtk.STATE_ACTIVE:
pixbuf = press_dpixbuf.get_pixbuf()
draw_pixbuf(cr, pixbuf, rect.x, rect.y)
# Propagate expose to children.
propagate_expose(widget, event)
return True
gobject.type_register(DisableButton)
class LinkButton(Label):
'''
LinkButton click to open browser.
'''
def __init__(self,
text,
link,
enable_gaussian=True,
text_color=ui_theme.get_color("link_text"),
):
'''
Initialize LinkButton class.
@param text: Link content.
@param link: Link address.
@param enable_gaussian: To enable gaussian effect on link, default is True.
        @param text_color: Link color, only used when enable_gaussian is False.
'''
Label.__init__(self, text, text_color, enable_gaussian=enable_gaussian, text_size=9,
gaussian_radious=1, border_radious=0)
self.connect("button-press-event", lambda w, e: run_command("xdg-open %s" % link))
set_clickable_cursor(self)
gobject.type_register(LinkButton)
class ComboButton(gtk.Button):
'''
ComboButton class.
@undocumented: expose_combo_button
@undocumented: button_press_combo_button
@undocumented: click_combo_button
'''
__gsignals__ = {
"button-clicked" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
"arrow-clicked" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (int, int, int, int)),
}
def __init__(self,
button_normal_dpixbuf,
button_hover_dpixbuf,
button_press_dpixbuf,
button_disable_dpixbuf,
arrow_normal_dpixbuf,
arrow_hover_dpixbuf,
arrow_press_dpixbuf,
arrow_disable_dpixbuf,
):
'''
Initialize ComboButton class.
        @param button_normal_dpixbuf: DynamicPixbuf of button normal status.
        @param button_hover_dpixbuf: DynamicPixbuf of button hover status.
        @param button_press_dpixbuf: DynamicPixbuf of button press status.
        @param button_disable_dpixbuf: DynamicPixbuf of button disable status.
        @param arrow_normal_dpixbuf: DynamicPixbuf of arrow normal status.
        @param arrow_hover_dpixbuf: DynamicPixbuf of arrow hover status.
        @param arrow_press_dpixbuf: DynamicPixbuf of arrow press status.
        @param arrow_disable_dpixbuf: DynamicPixbuf of arrow disable status.
'''
# Init.
gtk.Button.__init__(self)
self.button_normal_dpixbuf = button_normal_dpixbuf
self.button_hover_dpixbuf = button_hover_dpixbuf
self.button_press_dpixbuf = button_press_dpixbuf
self.button_disable_dpixbuf = button_disable_dpixbuf
self.arrow_normal_dpixbuf = arrow_normal_dpixbuf
self.arrow_hover_dpixbuf = arrow_hover_dpixbuf
self.arrow_press_dpixbuf = arrow_press_dpixbuf
self.arrow_disable_dpixbuf = arrow_disable_dpixbuf
button_pixbuf = button_normal_dpixbuf.get_pixbuf()
arrow_pixbuf = arrow_normal_dpixbuf.get_pixbuf()
self.button_width = button_pixbuf.get_width()
self.arrow_width = arrow_pixbuf.get_width()
self.height = button_pixbuf.get_height()
self.set_size_request(self.button_width + self.arrow_width, self.height)
self.in_button = True
self.connect("expose-event", self.expose_combo_button)
self.connect("button-press-event", self.button_press_combo_button)
self.connect("clicked", self.click_combo_button)
def expose_combo_button(self, widget, event):
# Init.
cr = widget.window.cairo_create()
rect = widget.allocation
x, y, w, h = rect.x, rect.y, rect.width, rect.height
# Get pixbuf info.
if widget.state == gtk.STATE_NORMAL:
button_pixbuf = self.button_normal_dpixbuf.get_pixbuf()
arrow_pixbuf = self.arrow_normal_dpixbuf.get_pixbuf()
elif widget.state == gtk.STATE_PRELIGHT:
button_pixbuf = self.button_hover_dpixbuf.get_pixbuf()
arrow_pixbuf = self.arrow_hover_dpixbuf.get_pixbuf()
elif widget.state == gtk.STATE_ACTIVE:
if self.in_button:
button_pixbuf = self.button_press_dpixbuf.get_pixbuf()
arrow_pixbuf = self.arrow_hover_dpixbuf.get_pixbuf()
else:
button_pixbuf = self.button_hover_dpixbuf.get_pixbuf()
arrow_pixbuf = self.arrow_press_dpixbuf.get_pixbuf()
elif widget.state == gtk.STATE_INSENSITIVE:
button_pixbuf = self.button_disable_dpixbuf.get_pixbuf()
arrow_pixbuf = self.arrow_disable_dpixbuf.get_pixbuf()
# Draw.
draw_pixbuf(cr, button_pixbuf, rect.x, rect.y)
draw_pixbuf(cr, arrow_pixbuf, rect.x + self.button_width, rect.y)
return True
def button_press_combo_button(self, widget, event):
self.in_button = event.x < self.button_width
def click_combo_button(self, widget):
if self.in_button:
self.emit("button-clicked")
else:
(button_x, button_y) = get_widget_root_coordinate(self, WIDGET_POS_BOTTOM_LEFT)
self.emit("arrow-clicked",
button_x + self.button_width,
button_y,
self.arrow_width,
self.height)
gobject.type_register(ComboButton)
class SwitchButton(ToggleButton):
'''
SwitchButton class.
'''
def __init__(self, active=False, inactive_disable_dpixbuf=None, active_disable_dpixbuf=None):
'''
Initialize SwitchButton class.
        @param active: Button active status, default is False.
        @param inactive_disable_dpixbuf: DynamicPixbuf for the inactive disabled status, default is None.
        @param active_disable_dpixbuf: DynamicPixbuf for the active disabled status, default is None.
'''
if inactive_disable_dpixbuf and active_disable_dpixbuf:
ToggleButton.__init__(self,
ui_theme.get_pixbuf("switchbutton/off.png"),
ui_theme.get_pixbuf("switchbutton/on.png"),
inactive_disable_dpixbuf = inactive_disable_dpixbuf,
active_disable_dpixbuf = active_disable_dpixbuf)
else:
ToggleButton.__init__(self,
ui_theme.get_pixbuf("switchbutton/off.png"),
ui_theme.get_pixbuf("switchbutton/on.png"))
self.set_active(active)
gobject.type_register(SwitchButton)
```
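A minimal, hedged usage sketch for the widgets above. It assumes a working pygtk environment with this module importable as `dtk.ui.button`; the window layout and labels are illustrative only, not part of the module.
```python
# Illustrative only: requires pygtk and the deepin-ui (dtk) packages.
import gtk
from dtk.ui.button import Button, CheckButton, SwitchButton

def on_ok_clicked(widget):
    print("ok clicked")

window = gtk.Window(gtk.WINDOW_TOPLEVEL)
window.connect("destroy", gtk.main_quit)

box = gtk.VBox(spacing=6)
ok_button = Button("OK")                 # themed push button
ok_button.connect("clicked", on_ok_clicked)
check = CheckButton("Remember me")       # ToggleButton-based check box
switch = SwitchButton(active=True)       # on/off pixbuf toggle

for child in (ok_button, check, switch):
    box.pack_start(child, False, False)

window.add(box)
window.show_all()
gtk.main()
```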
#### File: dtk/ui/file_treeview.py
```python
from treeview import TreeItem
import collections
from gio_utils import (get_file_icon_pixbuf, is_directory, get_dir_child_files,
get_gfile_modification_time, get_gfile_size,
                       get_gfile_type, get_file_type_dict,
get_gfile_name, get_gfile_content_type, sort_file_by_name)
from draw import draw_pixbuf, draw_text, draw_vlinear
from threads import post_gui
from theme import ui_theme
import pango
import gobject
import gio
import threading as td
from deepin_utils.file import format_file_size
from utils import cairo_disable_antialias, get_content_size
import traceback
import sys
ICON_SIZE = 24
ICON_PADDING_LEFT = ICON_PADDING_RIGHT = 4
INDICATOR_PADDING_LEFT = INDICATOR_PADDING_RIGHT = 4
ITEM_PADDING_Y = 2
ITEM_HEIGHT = ICON_SIZE + ITEM_PADDING_Y * 2
COLUMN_OFFSET = 32
MODIFICATION_TIME_PADDING_LEFT = 20
CONTENT_TYPE_PADDING_LEFT = 12
SIZE_PADDING_LEFT = 12
def sort_by_key(items, sort_reverse, sort_key):
if len(items) == 1 and (isinstance(items[0], EmptyItem) or isinstance(items[0], LoadingItem)):
return items
else:
# Init.
        item_order_dict = collections.OrderedDict(get_file_type_dict())
        # Split items by file type.
        for item in items:
            item_order_dict[item.type].append(item)
        # Get sorted item list.
        item_list = []
        for (file_type, type_items) in item_order_dict.items():
item_list += sorted(type_items, key=sort_key, reverse=sort_reverse)
return item_list
def sort_by_name(items, sort_reverse):
return sort_by_key(items, sort_reverse, lambda i: i.name)
def sort_by_size(items, sort_reverse):
return sort_by_key(items, sort_reverse, lambda i: i.size)
def sort_by_type(items, sort_reverse):
return sort_by_key(items, sort_reverse, lambda i: i.content_type)
def sort_by_mtime(items, sort_reverse):
return sort_by_key(items, sort_reverse, lambda i: i.modification_time)
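# Usage sketch: each sorter above takes (items, sort_reverse) and returns a
# new list, so a column-sortable view can simply do, for example,
#     items = sort_by_size(items, sort_reverse=True)
# and redraw with the returned order. Items are first grouped by file type
# (see get_file_type_dict) and each group is sorted independently, which
# keeps directories and regular files from being interleaved.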
def get_name_width(column_index, name):
expand_indicator_pixbuf = ui_theme.get_pixbuf("treeview/arrow_down.png").get_pixbuf()
return COLUMN_OFFSET * column_index + INDICATOR_PADDING_LEFT + expand_indicator_pixbuf.get_width() + INDICATOR_PADDING_RIGHT + ICON_PADDING_LEFT + ICON_SIZE + ICON_PADDING_RIGHT + get_content_size(name)[0]
def get_modification_time_width(time):
return get_content_size(time)[0] + MODIFICATION_TIME_PADDING_LEFT
def get_type_width(file_type):
return get_content_size(file_type)[0] + CONTENT_TYPE_PADDING_LEFT
def get_size_width(size):
return get_content_size(size)[0] + SIZE_PADDING_LEFT
def render_background(item, cr, rect):
if item.is_select:
draw_vlinear(cr, rect.x ,rect.y, rect.width, rect.height,
ui_theme.get_shadow_color("listview_select").get_color_info())
class LoadingThread(td.Thread):
def __init__(self, dir_item):
td.Thread.__init__(self)
self.setDaemon(True) # make thread exit when main program exit
self.dir_item = dir_item
def run(self):
try:
self.dir_item.load_status = self.dir_item.LOADING_START
self.items = get_dir_items(self.dir_item.gfile.get_path(), self.dir_item.column_index + 1)
if self.items == []:
self.items = [EmptyItem(self.dir_item.column_index + 1)]
for item in self.items:
item.parent_item = self.dir_item
self.dir_item.load_status = self.dir_item.LOADING_FINSIH
self.render_items()
except Exception, e:
print "class LoadingThread got error: %s" % (e)
traceback.print_exc(file=sys.stdout)
@post_gui
def render_items(self):
self.dir_item.delete_chlid_item()
self.dir_item.child_items = self.items
self.dir_item.add_child_item()
class DirItem(TreeItem):
'''
Directory item.
'''
LOADING_INIT = 0
LOADING_START = 1
LOADING_FINSIH = 2
def __init__(self, gfile, column_index=0):
'''
Initialize DirItem class.
'''
# Init.
TreeItem.__init__(self)
self.gfile = gfile
self.type = get_gfile_type(self.gfile)
self.name = get_gfile_name(self.gfile)
self.modification_time = get_gfile_modification_time(self.gfile)
self.content_type = get_gfile_content_type(self.gfile)
self.size = get_gfile_size(self.gfile)
self.size_name = "%s 项" % (self.size)
self.directory_path = gfile.get_path()
self.pixbuf = None
self.column_index = column_index
self.is_expand = False
self.load_status = self.LOADING_INIT
self.name_width = get_name_width(self.column_index, self.name)
self.modification_time_width = get_modification_time_width(self.modification_time)
self.content_type_width = get_type_width(self.content_type)
self.size_width = get_size_width(self.size_name)
def render_name(self, cr, rect):
'''
Render icon and name of DirItem.
'''
if self.pixbuf == None:
self.pixbuf = get_file_icon_pixbuf(self.directory_path, ICON_SIZE)
# Draw select background.
if self.is_select or self.is_highlight:
draw_vlinear(cr, rect.x ,rect.y, rect.width, rect.height,
ui_theme.get_shadow_color("listview_select").get_color_info())
# Draw directory arrow icon.
if self.is_expand:
expand_indicator_pixbuf = ui_theme.get_pixbuf("treeview/arrow_down.png").get_pixbuf()
else:
expand_indicator_pixbuf = ui_theme.get_pixbuf("treeview/arrow_right.png").get_pixbuf()
draw_pixbuf(cr, expand_indicator_pixbuf,
rect.x + COLUMN_OFFSET * self.column_index + INDICATOR_PADDING_LEFT,
rect.y + (rect.height - expand_indicator_pixbuf.get_height()) / 2,
)
# Draw directory icon.
draw_pixbuf(cr, self.pixbuf,
rect.x + COLUMN_OFFSET * self.column_index + INDICATOR_PADDING_LEFT + expand_indicator_pixbuf.get_width() + INDICATOR_PADDING_RIGHT + ICON_PADDING_LEFT,
rect.y + (rect.height - ICON_SIZE) / 2,
)
# Draw directory name.
draw_text(cr, self.name,
rect.x + COLUMN_OFFSET * self.column_index + INDICATOR_PADDING_LEFT + expand_indicator_pixbuf.get_width() + INDICATOR_PADDING_RIGHT + ICON_PADDING_LEFT + ICON_SIZE + ICON_PADDING_RIGHT,
rect.y,
rect.width, rect.height)
# Draw drag line.
if self.drag_line:
with cairo_disable_antialias(cr):
cr.set_line_width(1)
if self.drag_line_at_bottom:
cr.rectangle(rect.x, rect.y + rect.height - 1, rect.width, 1)
else:
cr.rectangle(rect.x, rect.y, rect.width, 1)
cr.fill()
def render_modification_time(self, cr, rect):
'''
        Render modification time of DirItem.
'''
# Draw select background.
if self.is_select or self.is_highlight:
draw_vlinear(cr, rect.x ,rect.y, rect.width, rect.height,
ui_theme.get_shadow_color("listview_select").get_color_info())
# Draw directory type.
draw_text(cr, self.modification_time,
rect.x + MODIFICATION_TIME_PADDING_LEFT,
rect.y,
rect.width, rect.height)
# Draw drag line.
if self.drag_line:
with cairo_disable_antialias(cr):
cr.set_line_width(1)
if self.drag_line_at_bottom:
cr.rectangle(rect.x, rect.y + rect.height - 1, rect.width, 1)
else:
cr.rectangle(rect.x, rect.y, rect.width, 1)
cr.fill()
def render_type(self, cr, rect):
'''
Render type of DirItem.
'''
# Draw select background.
if self.is_select or self.is_highlight:
draw_vlinear(cr, rect.x ,rect.y, rect.width, rect.height,
ui_theme.get_shadow_color("listview_select").get_color_info())
# Draw directory type.
draw_text(cr, self.content_type,
rect.x + CONTENT_TYPE_PADDING_LEFT,
rect.y,
rect.width, rect.height)
# Draw drag line.
if self.drag_line:
with cairo_disable_antialias(cr):
cr.set_line_width(1)
if self.drag_line_at_bottom:
cr.rectangle(rect.x, rect.y + rect.height - 1, rect.width, 1)
else:
cr.rectangle(rect.x, rect.y, rect.width, 1)
cr.fill()
def render_size(self, cr, rect):
'''
Render size of DirItem.
'''
# Draw select background.
if self.is_select or self.is_highlight:
draw_vlinear(cr, rect.x ,rect.y, rect.width, rect.height,
ui_theme.get_shadow_color("listview_select").get_color_info())
# Draw directory size.
draw_text(cr, self.size_name,
rect.x,
rect.y,
rect.width, rect.height,
alignment=pango.ALIGN_RIGHT
)
# Draw drag line.
if self.drag_line:
with cairo_disable_antialias(cr):
cr.set_line_width(1)
if self.drag_line_at_bottom:
cr.rectangle(rect.x, rect.y + rect.height - 1, rect.width, 1)
else:
cr.rectangle(rect.x, rect.y, rect.width, 1)
cr.fill()
def expand(self):
self.is_expand = True
if self.load_status == self.LOADING_INIT:
self.add_loading_item()
elif self.load_status == self.LOADING_FINSIH:
self.add_child_item()
if self.redraw_request_callback:
self.redraw_request_callback(self)
def unexpand(self):
self.is_expand = False
self.delete_chlid_item()
if self.redraw_request_callback:
self.redraw_request_callback(self)
def add_loading_item(self):
loading_item = LoadingItem(self.column_index + 1)
loading_item.parent_item = self
self.child_items = [loading_item]
self.add_child_item()
LoadingThread(self).start()
def add_child_item(self):
self.add_items_callback(self.child_items, self.row_index + 1)
def delete_chlid_item(self):
for child_item in self.child_items:
if isinstance(child_item, DirItem) and child_item.is_expand:
child_item.unexpand()
self.delete_items_callback(self.child_items)
def get_height(self):
return ITEM_HEIGHT
def get_column_widths(self):
return [self.name_width, self.size_width, self.content_type_width, self.modification_time_width]
def get_column_renders(self):
return [self.render_name,
self.render_size,
self.render_type,
self.render_modification_time,
]
def unselect(self):
self.is_select = False
if self.redraw_request_callback:
self.redraw_request_callback(self)
def select(self):
self.is_select = True
if self.redraw_request_callback:
self.redraw_request_callback(self)
def double_click(self, column, offset_x, offset_y):
if self.is_expand:
self.unexpand()
else:
self.expand()
def draw_drag_line(self, drag_line, drag_line_at_bottom=False):
self.drag_line = drag_line
self.drag_line_at_bottom = drag_line_at_bottom
if self.redraw_request_callback:
self.redraw_request_callback(self)
def release_resource(self):
'''
'''
# print "release dir: %s" % self.directory_path
if self.pixbuf:
del self.pixbuf
self.pixbuf = None
return True
def highlight(self):
self.is_highlight = True
if self.redraw_request_callback:
self.redraw_request_callback(self)
def unhighlight(self):
self.is_highlight = False
if self.redraw_request_callback:
self.redraw_request_callback(self)
gobject.type_register(DirItem)
class FileItem(TreeItem):
'''
File item.
'''
def __init__(self, gfile, column_index=0):
'''
Initialize FileItem class.
'''
TreeItem.__init__(self)
self.gfile = gfile
self.type = get_gfile_type(self.gfile)
self.name = get_gfile_name(self.gfile)
self.modification_time = get_gfile_modification_time(self.gfile)
self.content_type = get_gfile_content_type(self.gfile)
self.size = get_gfile_size(self.gfile)
self.size_name = format_file_size(self.size)
self.file_path = gfile.get_path()
self.pixbuf = None
self.column_index = column_index
self.name_width = get_name_width(self.column_index, self.name)
self.modification_time_width = get_modification_time_width(self.modification_time)
self.content_type_width = get_type_width(self.content_type)
self.size_width = get_size_width(self.size_name)
def render_name(self, cr, rect):
'''
        Render icon and name of FileItem.
'''
if self.pixbuf == None:
self.pixbuf = get_file_icon_pixbuf(self.file_path, ICON_SIZE)
# Draw select background.
if self.is_select or self.is_highlight:
draw_vlinear(cr, rect.x ,rect.y, rect.width, rect.height,
ui_theme.get_shadow_color("listview_select").get_color_info())
# Init.
expand_indicator_pixbuf = ui_theme.get_pixbuf("treeview/arrow_right.png").get_pixbuf()
        # Draw file icon.
draw_pixbuf(cr, self.pixbuf,
rect.x + COLUMN_OFFSET * self.column_index + INDICATOR_PADDING_LEFT + expand_indicator_pixbuf.get_width() + INDICATOR_PADDING_RIGHT + ICON_PADDING_LEFT,
rect.y + (rect.height - ICON_SIZE) / 2,
)
        # Draw file name.
draw_text(cr, self.name,
rect.x + COLUMN_OFFSET * self.column_index + INDICATOR_PADDING_LEFT + expand_indicator_pixbuf.get_width() + INDICATOR_PADDING_RIGHT + ICON_PADDING_LEFT + ICON_SIZE + ICON_PADDING_RIGHT,
rect.y,
rect.width, rect.height)
# Draw drag line.
if self.drag_line:
with cairo_disable_antialias(cr):
cr.set_line_width(1)
if self.drag_line_at_bottom:
cr.rectangle(rect.x, rect.y + rect.height - 1, rect.width, 1)
else:
cr.rectangle(rect.x, rect.y, rect.width, 1)
cr.fill()
def render_modification_time(self, cr, rect):
'''
        Render modification time of FileItem.
'''
# Draw select background.
if self.is_select or self.is_highlight:
draw_vlinear(cr, rect.x ,rect.y, rect.width, rect.height,
ui_theme.get_shadow_color("listview_select").get_color_info())
        # Draw modification time.
draw_text(cr, self.modification_time,
rect.x + MODIFICATION_TIME_PADDING_LEFT,
rect.y,
rect.width, rect.height)
# Draw drag line.
if self.drag_line:
with cairo_disable_antialias(cr):
cr.set_line_width(1)
if self.drag_line_at_bottom:
cr.rectangle(rect.x, rect.y + rect.height - 1, rect.width, 1)
else:
cr.rectangle(rect.x, rect.y, rect.width, 1)
cr.fill()
def render_type(self, cr, rect):
'''
        Render content type of FileItem.
'''
# Draw select background.
if self.is_select or self.is_highlight:
draw_vlinear(cr, rect.x ,rect.y, rect.width, rect.height,
ui_theme.get_shadow_color("listview_select").get_color_info())
        # Draw content type.
draw_text(cr, self.content_type,
rect.x + CONTENT_TYPE_PADDING_LEFT,
rect.y,
rect.width, rect.height)
# Draw drag line.
if self.drag_line:
with cairo_disable_antialias(cr):
cr.set_line_width(1)
if self.drag_line_at_bottom:
cr.rectangle(rect.x, rect.y + rect.height - 1, rect.width, 1)
else:
cr.rectangle(rect.x, rect.y, rect.width, 1)
cr.fill()
def render_size(self, cr, rect):
'''
        Render size of FileItem.
'''
# Draw select background.
if self.is_select or self.is_highlight:
draw_vlinear(cr, rect.x ,rect.y, rect.width, rect.height,
ui_theme.get_shadow_color("listview_select").get_color_info())
        # Draw file size.
draw_text(cr, self.size_name,
rect.x,
rect.y,
rect.width,
rect.height,
alignment=pango.ALIGN_RIGHT,
)
# Draw drag line.
if self.drag_line:
with cairo_disable_antialias(cr):
cr.set_line_width(1)
if self.drag_line_at_bottom:
cr.rectangle(rect.x, rect.y + rect.height - 1, rect.width, 1)
else:
cr.rectangle(rect.x, rect.y, rect.width, 1)
cr.fill()
def expand(self):
pass
def unexpand(self):
pass
def get_height(self):
return ITEM_HEIGHT
def get_column_widths(self):
return [self.name_width, self.size_width, self.content_type_width, self.modification_time_width]
def get_column_renders(self):
return [self.render_name,
self.render_size,
self.render_type,
self.render_modification_time,
]
def unselect(self):
self.is_select = False
if self.redraw_request_callback:
self.redraw_request_callback(self)
def select(self):
self.is_select = True
if self.redraw_request_callback:
self.redraw_request_callback(self)
def double_click(self, column, offset_x, offset_y):
app_info = gio.app_info_get_default_for_type(self.gfile.query_info("standard::content-type").get_content_type(), False)
if app_info:
app_info.launch([self.gfile], None)
else:
print "Don't know how to open file: %s" % (self.name)
def draw_drag_line(self, drag_line, drag_line_at_bottom=False):
self.drag_line = drag_line
self.drag_line_at_bottom = drag_line_at_bottom
if self.redraw_request_callback:
self.redraw_request_callback(self)
def release_resource(self):
'''
'''
# print "release file: %s" % self.file_path
if self.pixbuf:
del self.pixbuf
self.pixbuf = None
return True
def highlight(self):
self.is_highlight = True
if self.redraw_request_callback:
self.redraw_request_callback(self)
def unhighlight(self):
self.is_highlight = False
if self.redraw_request_callback:
self.redraw_request_callback(self)
gobject.type_register(FileItem)
class LoadingItem(TreeItem):
'''
    Loading item.
'''
def __init__(self, column_index=0):
'''
Initialize LoadingItem class.
'''
TreeItem.__init__(self)
self.column_index = column_index
def get_height(self):
return ITEM_HEIGHT
def get_column_widths(self):
        return [200, 1, 1, 1]
def get_column_renders(self):
return [self.render,
lambda cr, rect: render_background(self, cr, rect),
lambda cr, rect: render_background(self, cr, rect),
lambda cr, rect: render_background(self, cr, rect),
]
def render(self, cr, rect):
# Draw select background.
if self.is_select or self.is_highlight:
draw_vlinear(cr, rect.x ,rect.y, rect.width, rect.height,
ui_theme.get_shadow_color("listview_select").get_color_info())
# Draw loading text.
draw_text(cr, "正在加载...",
rect.x + COLUMN_OFFSET * self.column_index,
rect.y,
rect.width, rect.height)
def unselect(self):
self.is_select = False
if self.redraw_request_callback:
self.redraw_request_callback(self)
def select(self):
self.is_select = True
if self.redraw_request_callback:
self.redraw_request_callback(self)
def highlight(self):
self.is_highlight = True
if self.redraw_request_callback:
self.redraw_request_callback(self)
def unhighlight(self):
self.is_highlight = False
if self.redraw_request_callback:
self.redraw_request_callback(self)
gobject.type_register(LoadingItem)
class EmptyItem(TreeItem):
'''
    Empty item.
'''
def __init__(self, column_index=0):
'''
Initialize EmptyItem class.
'''
TreeItem.__init__(self)
self.column_index = column_index
def get_height(self):
return ITEM_HEIGHT
def get_column_widths(self):
        return [200, 1, 1, 1]
def get_column_renders(self):
return [self.render,
lambda cr, rect: render_background(self, cr, rect),
lambda cr, rect: render_background(self, cr, rect),
lambda cr, rect: render_background(self, cr, rect)
]
def render(self, cr, rect):
# Draw select background.
if self.is_select or self.is_highlight:
draw_vlinear(cr, rect.x ,rect.y, rect.width, rect.height,
ui_theme.get_shadow_color("listview_select").get_color_info())
# Draw loading text.
draw_text(cr, "(空)",
rect.x + COLUMN_OFFSET * self.column_index,
rect.y,
rect.width, rect.height)
def unselect(self):
self.is_select = False
if self.redraw_request_callback:
self.redraw_request_callback(self)
def select(self):
self.is_select = True
if self.redraw_request_callback:
self.redraw_request_callback(self)
def release_resource(self):
'''
'''
return False
def highlight(self):
self.is_highlight = True
if self.redraw_request_callback:
self.redraw_request_callback(self)
def unhighlight(self):
self.is_highlight = False
if self.redraw_request_callback:
self.redraw_request_callback(self)
gobject.type_register(EmptyItem)
def get_dir_items(dir_path, column_index=0, show_hidden=False):
'''
Get children items with given directory path.
'''
items = []
for gfile in get_dir_child_files(dir_path, sort_file_by_name, False, show_hidden):
if is_directory(gfile):
items.append(DirItem(gfile, column_index))
else:
items.append(FileItem(gfile, column_index))
return items
```
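A quick way to sanity-check the item classes above is to build them for a real directory and dump their column data. The snippet below is an editor's sketch rather than part of the module: it only touches attributes defined in this file, `/tmp` is a placeholder path, and in a full application these items are handed to the dtk TreeView widget instead of being printed.

```python
# Editor's sketch: inspect the column data of items built by get_dir_items().
# Assumes this module is importable; "/tmp" is a placeholder directory.
if __name__ == "__main__":
    for item in get_dir_items("/tmp", show_hidden=False):
        kind = "dir " if isinstance(item, DirItem) else "file"
        print "%s %-30s %-10s %s" % (kind, item.name, item.size_name, item.content_type)
```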
#### File: dtk/ui/global_key.py
```python
from Xlib import X
from Xlib.display import Display
from keymap import parse_keyevent_name
from threading import Lock
import gtk
import gtk.gdk as gdk
import threading
import sys
import traceback
global_key_running = True
global_key_lock = Lock()
def enable_global_key():
'''
Enable global key.
'''
global global_key_running
global_key_lock.acquire()
global_key_running = True
global_key_lock.release()
def disable_global_key():
'''
Disable global key.
'''
global global_key_running
global_key_lock.acquire()
global_key_running = False
global_key_lock.release()
class GlobalKey(threading.Thread):
'''
Class to handle global key.
'''
def __init__(self):
'''
Initialize GlobalKey class.
'''
super(GlobalKey, self).__init__()
self.daemon = True
self.display = Display()
self.root = self.display.screen().root
self._binding_map = {}
self.stop = False
self.known_modifiers_mask = 0
gdk_modifiers = (gtk.gdk.CONTROL_MASK,
gtk.gdk.SHIFT_MASK,
gtk.gdk.MOD1_MASK,
gtk.gdk.MOD2_MASK,
gtk.gdk.MOD3_MASK,
gtk.gdk.MOD4_MASK,
gtk.gdk.MOD5_MASK,
gtk.gdk.SUPER_MASK,
gtk.gdk.HYPER_MASK,
)
for mod in gdk_modifiers:
self.known_modifiers_mask |= mod
def bind(self, binding_string, action):
'''
Binding keymap with given action.
        @param binding_string: Keymap string, returned by function `get_keyevent_name` of module dtk.ui.keymap.
@param action: Callback.
'''
# Get keybinding's keyval and modifiers.
keyval, modifiers = parse_keyevent_name(binding_string)
# Get key code.
keycode = gtk.gdk.keymap_get_default().get_entries_for_keyval(keyval)[0][0]
# Binding key.
self._binding_map[(keycode, modifiers)] = action
        # Make the keybinding respond even when the user has Num-Lock enabled.
num_lock_modifiers = modifiers | gdk.MOD2_MASK
self._binding_map[(keycode, num_lock_modifiers)] = action
# Restart grab keybinding.
self.regrab()
def unbind(self, binding_string):
'''
Unbind keymap.
        @param binding_string: Keymap string, returned by function `get_keyevent_name` of module dtk.ui.keymap.
'''
# Get keybinding.
keyval, modifiers = parse_keyevent_name(binding_string)
# Get key code.
keycode = gtk.gdk.keymap_get_default().get_entries_for_keyval(keyval)[0][0]
# Get modifiers with Num-Lock mask.
num_lock_modifiers = modifiers | gdk.MOD2_MASK
# Remove keybinding from binding map.
regrab_flag = False
if self._binding_map.has_key((keycode, modifiers)):
del self._binding_map[(keycode, modifiers)]
regrab_flag = True
# Try remove key binding (with Num-Lock mask) from binding map.
if self._binding_map.has_key((keycode, num_lock_modifiers)):
del self._binding_map[(keycode, num_lock_modifiers)]
regrab_flag = True
if regrab_flag:
self.regrab()
def grab(self):
'''
Grab key.
'''
for (keycode, modifiers) in self._binding_map.keys():
try:
self.root.grab_key(keycode, int(modifiers), True, X.GrabModeAsync, X.GrabModeSync)
except Exception, e:
print "function grab got error: %s" % (e)
traceback.print_exc(file=sys.stdout)
def ungrab(self):
'''
Ungrab key.
'''
for (keycode, modifiers) in self._binding_map.keys():
try:
self.root.ungrab_key(keycode, modifiers, self.root)
except Exception, e:
print "function ungrab got error: %s" % (e)
traceback.print_exc(file=sys.stdout)
def regrab(self):
'''
Regrab key.
'''
self.ungrab()
self.grab()
def run(self):
'''
GlobalKey thread loop.
'''
global global_key_running
wait_for_release = False
while not self.stop:
event = self.display.next_event()
if global_key_running:
if event.type == X.KeyPress and not wait_for_release:
keycode = event.detail
modifiers = event.state & self.known_modifiers_mask
try:
action = self._binding_map[(keycode, modifiers)]
except KeyError:
self.display.allow_events(X.ReplayKeyboard, event.time)
else:
wait_for_release = True
self.display.allow_events(X.AsyncKeyboard, event.time)
self._upcoming_action = (keycode, modifiers, action)
elif event.type == X.KeyRelease and wait_for_release and event.detail == self._upcoming_action[0]:
wait_for_release = False
action = self._upcoming_action[2]
del self._upcoming_action
action()
self.display.allow_events(X.AsyncKeyboard, event.time)
else:
self.display.allow_events(X.ReplayKeyboard, event.time)
else:
self.display.allow_events(X.ReplayKeyboard, event.time)
def exit(self):
'''
Exit global key.
'''
self.stop = True
self.ungrab()
self.display.close()
if __name__ == "__main__":
gtk.gdk.threads_init()
def t(*args, **kwargs):
print 'Called!'
manager = GlobalKey()
# manager.bind('Ctrl + Alt + Shift + s', t)
manager.bind('Ctrl + Alt + S', t)
manager.start()
gtk.main()
```
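The module-level `enable_global_key()` / `disable_global_key()` switches gate the event loop above but are not exercised by the `__main__` demo. A common pattern, shown as an editor's sketch using only names defined in this file plus standard gtk, is to pause global shortcuts while a modal dialog owns the keyboard:

```python
# Hypothetical helper showing when the module-level switches are flipped.
def run_modal_dialog(dialog):
    disable_global_key()       # stop dispatching global shortcuts
    try:
        return dialog.run()    # e.g. a gtk.Dialog
    finally:
        enable_global_key()    # restore global shortcuts afterwards
```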
#### File: dtk/ui/iconview.py
```python
import cairo
import math
from draw import draw_vlinear, draw_text
from keymap import get_keyevent_name
from skin_config import skin_config
from theme import ui_theme
from locales import _
import gc
import gobject
import gtk
from utils import (get_match_parent, cairo_state, get_event_coords,
is_left_button, is_double_click, is_right_button,
is_single_click, get_window_shadow_size, get_content_size)
class IconView(gtk.DrawingArea):
'''
Icon view.
@undocumented: realize_icon_view
@undocumented: button_release_scrolled_window
@undocumented: size_allocated_icon_view
@undocumented: expose_icon_view
@undocumented: motion_icon_view
@undocumented: icon_view_get_event_index
@undocumented: button_press_icon_view
@undocumented: button_release_icon_view
@undocumented: leave_icon_view
@undocumented: key_press_icon_view
@undocumented: key_release_icon_view
@undocumented: update_redraw_request_list
@undocumented: redraw_item
@undocumented: get_offset_coordinate
@undocumented: get_render_item_indexes
@undocumented: get_render_item_info
@undocumented: return_item
@undocumented: draw_background
@undocumented: draw_items
@undocumented: draw_row_mask
'''
__gsignals__ = {
"items-change" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
"lost-focus-item" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
"motion-notify-item" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, int, int)),
"motion-item" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, int, int)),
"highlight-item" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
"normal-item" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
"button-press-item" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, int, int)),
"button-release-item" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, int, int)),
"single-click-item" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, int, int)),
"double-click-item" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, int, int)),
"right-click-item" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, int, int)),
}
def __init__(self,
padding_x=0,
padding_y=0,
mask_bound_height=12,
):
'''
Initialize IconView class.
@param padding_x: Horizontal padding value.
@param padding_y: Vertical padding value.
@param mask_bound_height: The height of mask bound, default is 12 pixels.
'''
# Init.
gtk.DrawingArea.__init__(self)
self.padding_x = padding_x
self.padding_y = padding_y
self.mask_bound_height = mask_bound_height
self.add_events(gtk.gdk.ALL_EVENTS_MASK)
self.set_can_focus(True) # can focus to response key-press signal
self.items = []
self.focus_index = None
self.highlight_item = None
self.double_click_item = None
self.single_click_item = None
self.right_click_item = None
self.is_loading = False
# Signal.
self.connect("realize", self.realize_icon_view)
self.connect("realize", lambda w: self.grab_focus()) # focus key after realize
self.connect("size-allocate", self.size_allocated_icon_view)
self.connect("expose-event", self.expose_icon_view)
self.connect("motion-notify-event", self.motion_icon_view)
self.connect("button-press-event", self.button_press_icon_view)
self.connect("button-release-event", self.button_release_icon_view)
self.connect("leave-notify-event", self.leave_icon_view)
self.connect("key-press-event", self.key_press_icon_view)
        # Add item signals.
self.connect("lost-focus-item", lambda view, item: item.icon_item_lost_focus())
self.connect("motion-notify-item", lambda view, item, x, y: item.icon_item_motion_notify(x, y))
self.connect("highlight-item", lambda view, item: item.icon_item_highlight())
self.connect("normal-item", lambda view, item: item.icon_item_normal())
self.connect("button-press-item", lambda view, item, x, y: item.icon_item_button_press(x, y))
self.connect("button-release-item", lambda view, item, x, y: item.icon_item_button_release(x, y))
self.connect("single-click-item", lambda view, item, x, y: item.icon_item_single_click(x, y))
self.connect("double-click-item", lambda view, item, x, y: item.icon_item_double_click(x, y))
# Redraw.
self.redraw_request_list = []
self.redraw_delay = 50 # 50 milliseconds should be enough for redraw
gtk.timeout_add(self.redraw_delay, self.update_redraw_request_list)
self.keymap = {
"Home" : self.select_first_item,
"End" : self.select_last_item,
"Return" : self.return_item,
"Up" : self.select_up_item,
"Down" : self.select_down_item,
"Left" : self.select_left_item,
"Right" : self.select_right_item,
"Page_Up" : self.scroll_page_up,
"Page_Down" : self.scroll_page_down,
}
def realize_icon_view(self, widget):
'''
Realize icon view.
'''
scrolled_window = get_match_parent(self, ["ScrolledWindow"])
scrolled_window.connect("button-release-event", self.button_release_scrolled_window)
def button_release_scrolled_window(self, widget, event):
'''
Internal callback for `button-release-event` signal of scrolled window.
'''
# Get items information.
(item_width, item_height, columns, start_index, end_index) = self.get_render_item_info()
# Release item resource.
need_gc_collect = False
        for item in self.items[0:start_index] + self.items[end_index:]:
if hasattr(item, "icon_item_release_resource") and item.icon_item_release_resource():
need_gc_collect = True
# Just do gc work when need collect.
if need_gc_collect:
gc.collect()
def select_first_item(self):
'''
Select first item.
'''
if len(self.items) > 0:
self.clear_focus_item()
self.focus_index = 0
self.emit("motion-notify-item", self.items[self.focus_index], 0, 0)
# Scroll to top.
vadjust = get_match_parent(self, ["ScrolledWindow"]).get_vadjustment()
vadjust.set_value(vadjust.get_lower())
def select_last_item(self):
'''
Select last item.
'''
if len(self.items) > 0:
self.clear_focus_item()
self.focus_index = len(self.items) - 1
self.emit("motion-notify-item", self.items[self.focus_index], 0, 0)
# Scroll to bottom.
vadjust = get_match_parent(self, ["ScrolledWindow"]).get_vadjustment()
vadjust.set_value(vadjust.get_upper() - vadjust.get_page_size())
def return_item(self):
'''
Do return action.
This function will emit `double-click-item` signal.
'''
if self.focus_index != None:
self.emit("double-click-item", self.items[self.focus_index], 0, 0)
def select_up_item(self):
'''
Select up row item.
'''
if len(self.items) > 0:
vadjust = get_match_parent(self, ["ScrolledWindow"]).get_vadjustment()
if self.focus_index == None:
self.focus_index = 0
self.emit("motion-notify-item", self.items[self.focus_index], 0, 0)
# Scroll to top.
vadjust.set_value(vadjust.get_lower())
else:
item_width, item_height = self.items[0].get_width(), self.items[0].get_height()
scrolled_window = get_match_parent(self, ["ScrolledWindow"])
columns = int((scrolled_window.allocation.width - self.padding_x * 2) / item_width)
if self.focus_index - columns >= 0:
self.emit("lost-focus-item", self.items[self.focus_index])
self.focus_index -= columns
self.emit("motion-notify-item", self.items[self.focus_index], 0, 0)
# Scroll to item.
row = int(self.focus_index / columns)
if vadjust.get_value() - self.padding_y > row * item_height:
vadjust.set_value(vadjust.get_lower() + row * item_height + self.padding_y)
elif vadjust.get_value() - self.padding_y == row * item_height:
vadjust.set_value(vadjust.get_lower())
def select_down_item(self):
'''
Select next row item.
'''
if len(self.items) > 0:
vadjust = get_match_parent(self, ["ScrolledWindow"]).get_vadjustment()
if self.focus_index == None:
self.focus_index = 0
self.emit("motion-notify-item", self.items[self.focus_index], 0, 0)
# Scroll to top.
vadjust.set_value(vadjust.get_lower())
else:
item_width, item_height = self.items[0].get_width(), self.items[0].get_height()
scrolled_window = get_match_parent(self, ["ScrolledWindow"])
columns = int((scrolled_window.allocation.width - self.padding_x * 2) / item_width)
if self.focus_index + columns <= len(self.items) - 1:
self.emit("lost-focus-item", self.items[self.focus_index])
self.focus_index += columns
self.emit("motion-notify-item", self.items[self.focus_index], 0, 0)
# Scroll to item.
row = int(self.focus_index / columns)
if vadjust.get_value() + vadjust.get_page_size() - self.padding_y < (row + 1) * item_height:
vadjust.set_value(vadjust.get_lower() + (row + 1) * item_height - vadjust.get_page_size() + self.padding_y)
elif vadjust.get_value() + vadjust.get_page_size() - self.padding_y == (row + 1) * item_height:
vadjust.set_value(vadjust.get_upper() - vadjust.get_page_size())
def select_left_item(self):
'''
Select left item.
'''
if len(self.items) > 0:
vadjust = get_match_parent(self, ["ScrolledWindow"]).get_vadjustment()
if self.focus_index == None:
self.focus_index = 0
self.emit("motion-notify-item", self.items[self.focus_index], 0, 0)
# Scroll to top.
vadjust.set_value(vadjust.get_lower())
else:
item_width, item_height = self.items[0].get_width(), self.items[0].get_height()
scrolled_window = get_match_parent(self, ["ScrolledWindow"])
columns = int((scrolled_window.allocation.width - self.padding_x * 2) / item_width)
row = int(self.focus_index / columns)
min_index = row * columns
if self.focus_index - 1 >= min_index:
self.emit("lost-focus-item", self.items[self.focus_index])
self.focus_index -= 1
self.emit("motion-notify-item", self.items[self.focus_index], 0, 0)
def select_right_item(self):
'''
Select right item.
'''
if len(self.items) > 0:
vadjust = get_match_parent(self, ["ScrolledWindow"]).get_vadjustment()
if self.focus_index == None:
self.focus_index = 0
self.emit("motion-notify-item", self.items[self.focus_index], 0, 0)
# Scroll to top.
vadjust.set_value(vadjust.get_lower())
else:
item_width, item_height = self.items[0].get_width(), self.items[0].get_height()
scrolled_window = get_match_parent(self, ["ScrolledWindow"])
columns = int((scrolled_window.allocation.width - self.padding_x * 2) / item_width)
row = int(self.focus_index / columns)
max_index = min((row + 1) * columns - 1, len(self.items) - 1)
if self.focus_index + 1 <= max_index:
self.emit("lost-focus-item", self.items[self.focus_index])
self.focus_index += 1
self.emit("motion-notify-item", self.items[self.focus_index], 0, 0)
def scroll_page_up(self):
'''
Scroll page up of iconview.
'''
if len(self.items) > 0:
vadjust = get_match_parent(self, ["ScrolledWindow"]).get_vadjustment()
if self.focus_index == None:
self.focus_index = 0
self.emit("motion-notify-item", self.items[self.focus_index], 0, 0)
# Scroll to top.
vadjust.set_value(vadjust.get_lower())
else:
item_width, item_height = self.items[0].get_width(), self.items[0].get_height()
scrolled_window = get_match_parent(self, ["ScrolledWindow"])
columns = int((scrolled_window.allocation.width - self.padding_x * 2) / item_width)
column = int(self.focus_index % columns)
if (vadjust.get_value() - vadjust.get_lower()) % item_height == 0:
row = int((vadjust.get_value() - vadjust.get_lower() - self.padding_y) / item_height) - 1
else:
row = int((vadjust.get_value() - vadjust.get_lower() - self.padding_y) / item_height)
if row * columns + column >= 0:
self.emit("lost-focus-item", self.items[self.focus_index])
self.focus_index = row * columns + column
self.emit("motion-notify-item", self.items[self.focus_index], 0, 0)
else:
self.emit("lost-focus-item", self.items[self.focus_index])
self.focus_index = column
self.emit("motion-notify-item", self.items[self.focus_index], 0, 0)
vadjust.set_value(max(0, vadjust.get_value() - vadjust.get_page_size() + self.padding_y))
def scroll_page_down(self):
'''
Scroll page down of iconview.
'''
if len(self.items):
vadjust = get_match_parent(self, ["ScrolledWindow"]).get_vadjustment()
if self.focus_index == None:
self.focus_index = len(self.items) - 1
self.emit("motion-notify-item", self.items[self.focus_index], 0, 0)
# Scroll to top.
vadjust.set_value(vadjust.get_upper() - vadjust.get_page_size())
else:
item_width, item_height = self.items[0].get_width(), self.items[0].get_height()
scrolled_window = get_match_parent(self, ["ScrolledWindow"])
columns = int((scrolled_window.allocation.width - self.padding_x * 2) / item_width)
column = int(self.focus_index % columns)
if (vadjust.get_value() - vadjust.get_lower() + vadjust.get_page_size()) % item_height == 0:
row = int((vadjust.get_value() - vadjust.get_lower() + vadjust.get_page_size() - self.padding_y) / item_height) - 1
else:
row = int((vadjust.get_value() - vadjust.get_lower() + vadjust.get_page_size() - self.padding_y) / item_height)
if row * columns + column <= len(self.items) - 1:
self.emit("lost-focus-item", self.items[self.focus_index])
self.focus_index = row * columns + column
self.emit("motion-notify-item", self.items[self.focus_index], 0, 0)
else:
self.emit("lost-focus-item", self.items[self.focus_index])
self.focus_index = (row - 1) * columns + column
self.emit("motion-notify-item", self.items[self.focus_index], 0, 0)
vadjust.set_value(min(vadjust.get_upper() - vadjust.get_page_size(),
vadjust.get_value() + vadjust.get_page_size() - self.padding_y))
def set_items(self, items):
'''
Set items of IconView.
@param items: The items that need set.
'''
if items != self.items:
self.items = items
self.emit("items-change")
def add_items(self, items, insert_pos=None):
'''
Add items to iconview.
        @param items: A list of items that implement the IconItem interface.
        @param insert_pos: Insert position; default None appends new items at the B{end}.
'''
if insert_pos == None:
self.set_items(self.items + items)
else:
self.set_items(self.items[0:insert_pos] + items + self.items[insert_pos::])
for item in items:
item.connect("redraw-request", self.redraw_item)
self.queue_draw()
def delete_items(self, items):
'''
Delete items.
        @param items: Items to remove.
'''
if len(items) > 0:
match_item = False
for item in items:
if item in self.items:
self.items.remove(item)
match_item = True
if match_item:
self.emit("items-change")
self.queue_draw()
def clear(self):
'''
Clear all items.
'''
self.set_items([])
self.queue_draw()
def size_allocated_icon_view(self, widget, rect):
# Cairo render surface.
scrolled_window = get_match_parent(self, ["ScrolledWindow"])
try:
self.render_surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, scrolled_window.allocation.width, scrolled_window.allocation.height)
self.render_surface_cr = gtk.gdk.CairoContext(cairo.Context(self.render_surface))
except Exception:
pass
def set_loading(self, is_loading):
'''
Set loading status of icon view.
@param is_loading: Set as True to make loading status active.
'''
self.is_loading = is_loading
self.queue_draw()
def expose_icon_view(self, widget, event):
'''
Internal callback for `expose-event` signal.
'''
# Update vadjustment.
self.update_vadjustment()
# Init.
cr = widget.window.cairo_create()
rect = widget.allocation
# Get offset.
(offset_x, offset_y, viewport) = self.get_offset_coordinate(widget)
# Draw background on widget cairo.
self.draw_background(widget, cr)
# Draw mask on widget cairo.
scrolled_window = get_match_parent(self, ["ScrolledWindow"])
vadjust = scrolled_window.get_vadjustment()
vadjust_value = int(vadjust.get_value())
hadjust = scrolled_window.get_hadjustment()
hadjust_value = int(hadjust.get_value())
self.draw_mask(cr, hadjust_value, vadjust_value, viewport.allocation.width, viewport.allocation.height)
# We need clear render surface every time.
with cairo_state(self.render_surface_cr):
self.render_surface_cr.set_operator(cairo.OPERATOR_CLEAR)
self.render_surface_cr.paint()
if self.is_loading:
load_text = _("Loading...")
load_width, load_height = get_content_size(load_text)
draw_text(cr,
load_text,
rect.x + (rect.width - load_width) / 2,
rect.y + rect.height - load_height,
rect.width,
load_height)
# Draw items on surface cairo.
self.draw_items(self.render_surface_cr, rect)
# Draw bound mask.
scrolled_window = get_match_parent(self, ["ScrolledWindow"])
width = scrolled_window.allocation.width
height = scrolled_window.allocation.height
vadjust_value = int(scrolled_window.get_vadjustment().get_value())
vadjust_upper = int(scrolled_window.get_vadjustment().get_upper())
vadjust_page_size = int(scrolled_window.get_vadjustment().get_page_size())
hadjust_value = int(scrolled_window.get_hadjustment().get_value())
if vadjust_value == 0:
with cairo_state(cr):
cr.rectangle(hadjust_value, vadjust_value, width, self.mask_bound_height)
cr.clip()
cr.set_source_surface(self.render_surface, hadjust_value, vadjust_value)
cr.paint()
elif self.mask_bound_height > 0:
i = 0
while (i <= self.mask_bound_height):
with cairo_state(cr):
cr.rectangle(hadjust_value, vadjust_value + i, width, 1)
cr.clip()
cr.set_source_surface(self.render_surface, hadjust_value, vadjust_value)
cr.paint_with_alpha(math.sin(i * math.pi / 2 / self.mask_bound_height))
i += 1
with cairo_state(cr):
cr.rectangle(hadjust_value, vadjust_value + self.mask_bound_height, width, height - self.mask_bound_height * 2)
cr.clip()
cr.set_source_surface(self.render_surface, hadjust_value, vadjust_value)
cr.paint()
if vadjust_value + vadjust_page_size == vadjust_upper:
with cairo_state(cr):
cr.rectangle(hadjust_value, vadjust_value + height - self.mask_bound_height, width, self.mask_bound_height)
cr.clip()
cr.set_source_surface(self.render_surface, hadjust_value, vadjust_value)
cr.paint()
elif self.mask_bound_height > 0:
i = 0
while (i < self.mask_bound_height):
with cairo_state(cr):
cr.rectangle(hadjust_value, vadjust_value + height - self.mask_bound_height + i, width, 1)
cr.clip()
cr.set_source_surface(self.render_surface, hadjust_value, vadjust_value)
cr.paint_with_alpha(1.0 - (math.sin(i * math.pi / 2 / self.mask_bound_height)))
i += 1
return False
def draw_background(self, widget, cr):
with cairo_state(cr):
scrolled_window = get_match_parent(self, ["ScrolledWindow"])
(shadow_x, shadow_y) = get_window_shadow_size(self.get_toplevel())
(offset_x, offset_y) = self.translate_coordinates(self.get_toplevel(), 0, 0)
vadjust = scrolled_window.get_vadjustment()
vadjust_value = int(vadjust.get_value())
hadjust = scrolled_window.get_hadjustment()
hadjust_value = int(hadjust.get_value())
x = shadow_x - offset_x
y = shadow_y - offset_y
cr.rectangle(hadjust_value, vadjust_value, scrolled_window.allocation.width, scrolled_window.allocation.height)
cr.clip()
cr.translate(x, y)
skin_config.render_background(cr, widget, 0, 0)
def draw_mask(self, cr, x, y, w, h):
'''
Draw mask interface.
@param cr: Cairo context.
        @param x: X coordinate of draw area.
        @param y: Y coordinate of draw area.
@param w: Width of draw area.
@param h: Height of draw area.
'''
draw_vlinear(cr, x, y, w, h,
ui_theme.get_shadow_color("linear_background").get_color_info()
)
def draw_items(self, cr, rect):
# Draw items.
if len(self.items) > 0:
with cairo_state(cr):
scrolled_window = get_match_parent(self, ["ScrolledWindow"])
vadjust_value = int(scrolled_window.get_vadjustment().get_value())
hadjust_value = int(scrolled_window.get_hadjustment().get_value())
# Draw on drawing area.
(item_width, item_height, columns, start_index, end_index) = self.get_render_item_info()
for (index, item) in enumerate(self.items[start_index:end_index]):
row = int((start_index + index) / columns)
column = (start_index + index) % columns
render_x = self.padding_x + column * item_width - hadjust_value
render_y = self.padding_y + row * item_height - vadjust_value
# Draw row background.
self.draw_row_mask(cr, gtk.gdk.Rectangle(render_x, render_y, rect.width, item_height), row)
item.row_index = row
with cairo_state(cr):
# Don't allow draw out of item area.
cr.rectangle(render_x, render_y, item_width, item_height)
cr.clip()
item.render(cr, gtk.gdk.Rectangle(render_x, render_y, item_width, item_height))
def draw_row_mask(self, cr, rect, row):
pass
def get_render_item_info(self):
'''
Internal function to get information of render items.
'''
# Get offset.
(offset_x, offset_y, viewport) = self.get_offset_coordinate(self)
# Get item size.
item_width = 1
item_height = 1
if len(self.items):
item_width, item_height = self.items[0].get_width(), self.items[0].get_height()
scrolled_window = get_match_parent(self, ["ScrolledWindow"])
columns = int((scrolled_window.allocation.width - self.padding_x * 2) / item_width)
# Get viewport index.
start_y = offset_y - self.padding_y
start_row = max(int(start_y / item_height), 0)
start_index = start_row * columns
end_y = offset_y - self.padding_y + viewport.allocation.height
if end_y % item_height == 0:
end_row = end_y / item_height - 1
else:
end_row = end_y / item_height
end_index = min((end_row + 1) * columns, len(self.items))
return (item_width, item_height, columns, start_index, end_index)
def clear_focus_item(self):
'''
Clear item's focus status.
'''
if self.focus_index != None:
if 0 <= self.focus_index < len(self.items):
self.emit("lost-focus-item", self.items[self.focus_index])
self.focus_index = None
def motion_icon_view(self, widget, event):
'''
Internal callback for `motion-notify-event` signal.
'''
if len(self.items) > 0:
index_info = self.icon_view_get_event_index(event)
if index_info == None:
self.clear_focus_item()
else:
(row_index, column_index, item_index, offset_x, offset_y) = index_info
# Don't clear focus item when motion index is current one.
'''
                TODO: this needs to handle the case self.focus_index == None,
                otherwise it acts like LianLianKan.
'''
if self.focus_index != item_index:
self.clear_focus_item()
self.focus_index = item_index
'''
TODO: get rid of list index out of range when self.focus_index < 0
'''
if self.focus_index >= 0:
self.emit("motion-notify-item", self.items[self.focus_index], offset_x - self.padding_x, offset_y - self.padding_y)
self.emit("motion-item",
self.items[self.focus_index],
event.x_root - (offset_x - self.padding_x),
event.y_root - (offset_y - self.padding_y))
def icon_view_get_event_index(self, event):
'''
        Internal function to get the item index at the event coordinate.
'''
if len(self.items) > 0:
(event_x, event_y) = get_event_coords(event)
item_width, item_height = self.items[0].get_width(), self.items[0].get_height()
scrolled_window = get_match_parent(self, ["ScrolledWindow"])
columns = int((scrolled_window.allocation.width - self.padding_x * 2) / item_width)
if columns == 0:
return None
if len(self.items) % max(columns, 1) == 0:
rows = int(len(self.items) / columns)
else:
rows = int(len(self.items) / columns) + 1
if event_x > columns * item_width + self.padding_x:
return None
elif event_y > rows * item_height + self.padding_y:
return None
else:
'''
                TODO: total_width % item_width is the item count in the row, but because
                padding_x reduces the total width, event_x needs self.padding_x subtracted.
'''
padding_event_x = event_x - self.padding_x
padding_event_y = event_y - self.padding_y
if padding_event_x % item_width == 0:
column_index = max(padding_event_x / item_width - 1, 0)
else:
column_index = min(padding_event_x / item_width, columns - 1)
if padding_event_y % item_height == 0:
row_index = max(padding_event_y / item_height - 1, 0)
else:
row_index = min(padding_event_y / item_height, rows - 1)
item_index = row_index * columns + column_index
if item_index > len(self.items) - 1:
return None
else:
'''
                    TODO: event_x (NOT padding_event_x) must be used to return the item pos_x.
'''
return (row_index, column_index, item_index,
event_x - column_index * item_width,
event_y - row_index * item_height)
def button_press_icon_view(self, widget, event):
'''
Internal callback for `button-press-event` signal.
'''
        # Grab focus on button press, otherwise the key-press signal can't be received.
self.grab_focus()
if len(self.items) > 0:
index_info = self.icon_view_get_event_index(event)
if index_info:
(row_index, column_index, item_index, offset_x, offset_y) = index_info
if is_left_button(event):
self.emit("button-press-item", self.items[item_index], offset_x - self.padding_x, offset_y - self.padding_y)
if is_double_click(event):
if index_info:
self.double_click_item = index_info[2]
else:
self.double_click_item = None
elif is_single_click(event):
if index_info:
self.single_click_item = index_info[2]
else:
self.single_click_item = None
elif is_right_button(event):
if index_info:
self.right_click_item = index_info[2]
else:
self.right_click_item = None
# Set highlight.
if index_info:
self.clear_highlight()
self.set_highlight(self.items[index_info[2]])
def set_highlight(self, item):
'''
Set highlight status with given item.
@param item: Item need highlight.
'''
self.highlight_item = item
self.emit("highlight-item", self.highlight_item)
def clear_highlight(self):
'''
Clear all highlight status.
'''
if self.highlight_item != None:
self.emit("normal-item", self.highlight_item)
self.highlight_item = None
def button_release_icon_view(self, widget, event):
'''
Internal callback for `button-release-event` signal.
'''
if len(self.items) > 0:
index_info = self.icon_view_get_event_index(event)
if index_info:
(row_index, column_index, item_index, offset_x, offset_y) = index_info
if is_left_button(event):
self.emit("button-release-item", self.items[item_index], offset_x - self.padding_x, offset_y - self.padding_y)
if self.double_click_item == item_index:
self.emit("double-click-item", self.items[self.double_click_item], offset_x - self.padding_x, offset_y - self.padding_y)
elif self.single_click_item == item_index:
self.emit("single-click-item", self.items[self.single_click_item], offset_x - self.padding_x, offset_y - self.padding_y)
elif is_right_button(event):
if self.right_click_item == item_index:
self.emit("right-click-item", self.items[self.right_click_item], event.x_root, event.y_root)
self.double_click_item = None
self.single_click_item = None
self.right_click_item = None
def leave_icon_view(self, widget, event):
'''
Internal callback for `leave-notify` signal.
'''
self.clear_focus_item()
def key_press_icon_view(self, widget, event):
'''
Internal callback for `key-press-event` signal.
'''
key_name = get_keyevent_name(event)
if self.keymap.has_key(key_name):
self.keymap[key_name]()
return True
def key_release_icon_view(self, widget, event):
'''
Internal callback for `key-release-event` signal.
'''
pass
def update_redraw_request_list(self):
'''
Internal function to update redraw request list.
'''
# Redraw when request list is not empty.
if len(self.redraw_request_list) > 0:
# Get offset.
(offset_x, offset_y, viewport) = self.get_offset_coordinate(self)
# Get viewport index.
item_width, item_height = self.items[0].get_width(), self.items[0].get_height()
scrolled_window = get_match_parent(self, ["ScrolledWindow"])
columns = int((scrolled_window.allocation.width - self.padding_x * 2) / item_width)
start_y = offset_y - self.padding_y
start_row = max(int(start_y / item_height), 0)
start_index = start_row * columns
end_y = offset_y - self.padding_y + viewport.allocation.height
if end_y % item_height == 0:
end_row = end_y / item_height - 1
else:
end_row = end_y / item_height
end_index = min((end_row + 1) * columns, len(self.items))
# Redraw whole viewport area once found any request item in viewport.
for item in self.redraw_request_list:
if item in self.items[start_index:end_index]:
self.queue_draw()
break
# Clear redraw request list.
self.redraw_request_list = []
return True
def redraw_item(self, list_item):
'''
Internal function to redraw item.
'''
self.redraw_request_list.append(list_item)
def get_offset_coordinate(self, widget):
'''
Internal function to get offset coordinate.
'''
# Init.
rect = widget.allocation
# Get coordinate.
viewport = get_match_parent(widget, "Viewport")
if viewport:
coordinate = widget.translate_coordinates(viewport, rect.x, rect.y)
if len(coordinate) == 2:
(offset_x, offset_y) = coordinate
return (-offset_x, -offset_y, viewport)
else:
return (0, 0, viewport)
else:
return (0, 0, viewport)
def update_vadjustment(self):
'''
Update vertical adjustment.
'''
scrolled_window = get_match_parent(self, ["ScrolledWindow"])
if len(self.items) > 0:
item_width, item_height = self.items[0].get_width(), self.items[0].get_height()
columns = int((scrolled_window.allocation.width - self.padding_x * 2) / item_width)
if columns > 0:
if len(self.items) % columns == 0:
view_height = int(len(self.items) / columns) * item_height
else:
view_height = (int(len(self.items) / columns) + 1) * item_height
self.set_size_request(columns * item_width + self.padding_x * 2,
view_height + self.padding_y * 2)
if scrolled_window != None:
vadjust = scrolled_window.get_vadjustment()
vadjust.set_upper(max(view_height + self.padding_y * 2,
scrolled_window.allocation.height))
else:
self.set_size_request(scrolled_window.allocation.width,
scrolled_window.allocation.height)
vadjust = scrolled_window.get_vadjustment()
vadjust.set_upper(scrolled_window.allocation.height)
gobject.type_register(IconView)
class IconItem(gobject.GObject):
'''
Icon item.
'''
__gsignals__ = {
"redraw-request" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
}
def __init__(self):
'''
        Initialize IconItem class.
'''
gobject.GObject.__init__(self)
self.hover_flag = False
self.highlight_flag = False
def emit_redraw_request(self):
'''
Emit `redraw-request` signal.
This is IconView interface, you should implement it.
'''
self.emit("redraw-request")
def get_width(self):
'''
Get item width.
This is IconView interface, you should implement it.
'''
pass
def get_height(self):
'''
Get item height.
This is IconView interface, you should implement it.
'''
pass
def render(self, cr, rect):
'''
Render item.
This is IconView interface, you should implement it.
'''
pass
def icon_item_motion_notify(self, x, y):
'''
Handle `motion-notify-event` signal.
This is IconView interface, you should implement it.
'''
self.hover_flag = True
self.emit_redraw_request()
def icon_item_lost_focus(self):
'''
Lost focus.
This is IconView interface, you should implement it.
'''
self.hover_flag = False
self.emit_redraw_request()
def icon_item_highlight(self):
'''
Highlight item.
This is IconView interface, you should implement it.
'''
self.highlight_flag = True
self.emit_redraw_request()
def icon_item_normal(self):
'''
Set item with normal status.
This is IconView interface, you should implement it.
'''
self.highlight_flag = False
self.emit_redraw_request()
def icon_item_button_press(self, x, y):
'''
Handle button-press event.
This is IconView interface, you should implement it.
'''
pass
def icon_item_button_release(self, x, y):
'''
Handle button-release event.
This is IconView interface, you should implement it.
'''
pass
def icon_item_single_click(self, x, y):
'''
Handle single click event.
This is IconView interface, you should implement it.
'''
pass
def icon_item_double_click(self, x, y):
'''
Handle double click event.
This is IconView interface, you should implement it.
'''
pass
def icon_item_release_resource(self):
'''
Release item resource.
        If your item holds a pixbuf, release the memory resource like the code below:
>>> if self.pixbuf:
>>> del self.pixbuf
>>> self.pixbuf = None
>>>
>>> return True
This is IconView interface, you should implement it.
        @return: Return True if release work was done, otherwise return False.
        When this function returns True, IconView will call gc.collect() to reclaim the released memory.
'''
return False
gobject.type_register(IconItem)
```
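For reference, a minimal `IconItem` subclass and the wiring it expects might look as follows. This is an editor's sketch, not part of the module: `ColorItem` and its drawing are illustrative only, the view is packed with `add_with_viewport()` because `IconView` looks up its `ScrolledWindow`/`Viewport` parents, and a real program may also need the dtk theme/skin initialised the way the library's own demos do.

```python
import gtk
import gobject

class ColorItem(IconItem):
    '''Illustrative item: a plain coloured square.'''

    def __init__(self, color):
        IconItem.__init__(self)
        self.color = color

    def get_width(self):
        return 96

    def get_height(self):
        return 96

    def render(self, cr, rect):
        # hover_flag / highlight_flag are toggled by IconView via the interface above.
        cr.set_source_rgb(*self.color)
        cr.rectangle(rect.x + 4, rect.y + 4, rect.width - 8, rect.height - 8)
        cr.fill()

gobject.type_register(ColorItem)

if __name__ == "__main__":
    window = gtk.Window()
    window.set_default_size(480, 360)
    scrolled_window = gtk.ScrolledWindow()
    icon_view = IconView(padding_x=10, padding_y=10)
    icon_view.add_items([ColorItem((0.2, 0.4, 0.8)) for i in range(30)])
    scrolled_window.add_with_viewport(icon_view)
    window.add(scrolled_window)
    window.connect("destroy", gtk.main_quit)
    window.show_all()
    gtk.main()
```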
#### File: dtk/ui/mplayer_view.py
```python
import gobject
import gtk
class MplayerView(gtk.DrawingArea):
'''
View to offer a drawing area for mplayer.
    MplayerView disables double buffering by default to avoid video flicker when mplayer draws on it.
@undocumented: realize_mplayer_view
'''
__gsignals__ = {
"get-xid" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (long,))
}
def __init__(self):
'''
Initialize MplayerView class.
'''
# Init.
gtk.DrawingArea.__init__(self)
self.unset_flags(gtk.DOUBLE_BUFFERED) # disable double buffered to avoid video blinking
# Handle signal.
self.connect("realize", self.realize_mplayer_view)
def realize_mplayer_view(self, widget):
'''
Internal callback for `realize` signal.
'''
if self.get_window() and self.get_window().xid:
self.emit("get-xid", self.get_window().xid)
gobject.type_register(MplayerView)
```
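A typical consumer of the `get-xid` signal hands the X window id to an external mplayer process through its `-wid` option. The sketch below is an editor's addition: the video path and the extra mplayer flags are placeholders, and error handling is omitted.

```python
import subprocess
import gtk

def on_get_xid(view, xid):
    # Embed mplayer's video output into the drawing area's X window.
    subprocess.Popen(["mplayer", "-wid", str(xid), "-quiet", "/path/to/video.avi"])

window = gtk.Window()
window.set_default_size(640, 480)
view = MplayerView()
view.connect("get-xid", on_get_xid)
window.add(view)
window.connect("destroy", gtk.main_quit)
window.show_all()
gtk.main()
```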
#### File: dtk/ui/paned.py
```python
from draw import draw_pixbuf
from utils import is_in_rect, color_hex_to_cairo
from constant import PANED_HANDLE_SIZE
import gobject
import gtk
import math
from theme import ui_theme
# Load customize rc style before any other.
gtk.rc_parse_string("style 'my_style' {\n GtkPaned::handle-size = %s\n }\nwidget '*' style 'my_style'" % (PANED_HANDLE_SIZE))
class Paned(gtk.Paned):
'''
Paned.
@undocumented: do_enter_notify_event
@undocumented: do_button_press_event
@undocumented: do_size_allocate
@undocumented: is_in_button
@undocumented: draw_handle
@undocumented: do_expose_event
    gtk.Paned with a customized, better appearance.
'''
def __init__(self,
shrink_first,
enable_animation=False,
always_show_button=False,
enable_drag=False,
handle_color=ui_theme.get_color("paned_line")
):
'''
Initialize Paned class.
'''
gtk.Paned.__init__(self)
self.shrink_first = shrink_first
self.enable_animation = enable_animation
self.always_show_button = always_show_button
self.enable_drag = enable_drag
self.handle_color = handle_color
self.bheight = ui_theme.get_pixbuf("paned/paned_up_normal.png").get_pixbuf().get_width()
self.saved_position = -1
self.handle_size = PANED_HANDLE_SIZE - 1
self.show_button = False
self.init_button("normal")
self.animation_delay = 20 # milliseconds
self.animation_times = 10
self.animation_position_frames = []
self.press_coordinate = None
def init_button(self, status):
if self.get_orientation() == gtk.ORIENTATION_HORIZONTAL:
if self.shrink_first:
self.button_pixbuf = ui_theme.get_pixbuf("paned/paned_left_%s.png" % status).get_pixbuf()
else:
self.button_pixbuf = ui_theme.get_pixbuf("paned/paned_right_%s.png" % status).get_pixbuf()
else:
if self.shrink_first:
self.button_pixbuf = ui_theme.get_pixbuf("paned/paned_up_%s.png" % status).get_pixbuf()
else:
self.button_pixbuf = ui_theme.get_pixbuf("paned/paned_down_%s.png" % status).get_pixbuf()
def do_expose_event(self, e):
'''
        Intercept the default expose event and draw the custom handle after the
        **gtk.Container** expose event, so gtk.Paned's own expose callback is skipped.
'''
gtk.Container.do_expose_event(self, e)
self.draw_handle(e)
return False
def draw_handle(self, e):
'''
        Draw the custom handle appearance.
'''
handle = self.get_handle_window()
line_width = 1
cr = handle.cairo_create()
cr.set_source_rgb(*color_hex_to_cairo(self.handle_color.get_color()))
(width, height) = handle.get_size()
if self.get_orientation() == gtk.ORIENTATION_HORIZONTAL:
if self.shrink_first:
if self.get_position() != 0:
cr.rectangle(0, 0, line_width, height)
cr.fill()
if self.always_show_button or self.show_button:
if self.get_position() == 0:
pixbuf = ui_theme.get_pixbuf("paned/paned_right_normal.png").get_pixbuf()
else:
pixbuf = ui_theme.get_pixbuf("paned/paned_left_normal.png").get_pixbuf()
draw_pixbuf(cr,
pixbuf,
0,
(height - self.bheight) / 2)
else:
cr.rectangle(width - line_width, 0, line_width, height)
cr.fill()
if self.always_show_button or self.show_button:
if self.get_position() == 0:
pixbuf = ui_theme.get_pixbuf("paned/paned_left_normal.png").get_pixbuf()
else:
pixbuf = ui_theme.get_pixbuf("paned/paned_right_normal.png").get_pixbuf()
draw_pixbuf(cr,
pixbuf,
0,
(height - self.bheight) / 2)
else:
if self.shrink_first:
cr.rectangle(0, 0, width, line_width)
cr.fill()
if self.always_show_button or self.show_button:
if self.get_position() == 0:
pixbuf = ui_theme.get_pixbuf("paned/paned_down_normal.png").get_pixbuf()
else:
pixbuf = ui_theme.get_pixbuf("paned/paned_up_normal.png").get_pixbuf()
draw_pixbuf(cr,
pixbuf,
(width - self.bheight) / 2,
0)
else:
cr.rectangle(0, height - line_width, width, line_width)
cr.fill()
if self.always_show_button or self.show_button:
if self.get_position() == 0:
pixbuf = ui_theme.get_pixbuf("paned/paned_up_normal.png").get_pixbuf()
else:
pixbuf = ui_theme.get_pixbuf("paned/paned_down_normal.png").get_pixbuf()
draw_pixbuf(cr,
pixbuf,
(width - self.bheight) / 2,
0)
def is_in_button(self, x, y):
'''
        Detect whether the mouse pointer is inside the handle's button.
'''
handle = self.get_handle_window()
(width, height) = handle.get_size()
if self.get_orientation() == gtk.ORIENTATION_HORIZONTAL:
rect = (0, (height - self.bheight) / 2, width, self.bheight)
else:
rect = ((width - self.bheight) / 2, 0, self.bheight, height)
return is_in_rect((x, y), rect)
def do_enter_notify_event(self, e):
self.show_button = True
self.queue_draw()
def do_leave_notify_event(self, e):
self.show_button = False
self.init_button("normal")
self.queue_draw()
def do_motion_notify_event(self, e):
'''
        Change the cursor style when the pointer moves inside the handle.
'''
# Reset press coordinate if motion mouse after press event.
self.press_coordinate = None
handle = self.get_handle_window()
(width, height) = handle.get_size()
if self.is_in_button(e.x, e.y):
handle.set_cursor(gtk.gdk.Cursor(gtk.gdk.HAND1))
self.init_button("hover")
else:
if self.enable_drag:
handle.set_cursor(self.cursor_type)
gtk.Paned.do_motion_notify_event(self, e)
else:
handle.set_cursor(None)
self.init_button("normal")
def do_button_press_event(self, e):
'''
        When the handle's button is pressed, toggle the pane position.
'''
handle = self.get_handle_window()
if e.window == handle:
if self.is_in_button(e.x, e.y):
self.init_button("press")
self.do_press_actoin()
else:
(width, height) = handle.get_size()
if is_in_rect((e.x, e.y), (0, 0, width, height)):
self.press_coordinate = (e.x, e.y)
gtk.Paned.do_button_press_event(self, e)
else:
gtk.Paned.do_button_press_event(self, e)
return True
def do_button_release_event(self, e):
'''
        Internal callback for `button-release-event`: a press and release at the same point toggles the pane position.
'''
gtk.Paned.do_button_release_event(self, e)
# Do press event if not in button and finish `click` event.
if (not self.is_in_button(e.x, e.y)) and self.press_coordinate == (e.x, e.y):
self.do_press_actoin()
return True
def do_press_actoin(self):
'''
        Toggle between the saved position and the fully shrunken/expanded position.
'''
if self.saved_position == -1:
self.saved_position = self.get_position()
if self.shrink_first:
self.change_position(0)
else:
if self.get_orientation() == gtk.ORIENTATION_HORIZONTAL:
self.change_position(self.allocation.width)
else:
self.change_position(self.allocation.height)
else:
self.change_position(self.saved_position)
self.saved_position = -1
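    # Editor's note: change_position() below eases toward the target with a
    # quarter-sine curve; for example, with animation_times = 10 and a 100 px
    # move the queued offsets grow roughly as 0, 16, 31, 45, 59, 71, 81, 89,
    # 95, 99, 100 before the final position is reached.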
def change_position(self, new_position):
current_position = self.get_position()
if self.enable_animation:
if new_position != current_position:
for i in range(0, self.animation_times + 1):
step = int(math.sin(math.pi * i / 2 / self.animation_times) * (new_position - current_position))
self.animation_position_frames.append(current_position + step)
if self.animation_position_frames[-1] != new_position:
self.animation_position_frames.append(new_position)
gtk.timeout_add(self.animation_delay, self.update_position)
else:
self.set_position(new_position)
def update_position(self):
self.set_position(self.animation_position_frames.pop(0))
if self.animation_position_frames == []:
return False
else:
return True
def do_size_allocate(self, e):
gtk.Paned.do_size_allocate(self, e)
if self.shrink_first:
child = self.get_child2()
else:
child = self.get_child1()
if child == None: return
rect = child.allocation
offset = self.handle_size
if self.get_orientation() == gtk.ORIENTATION_HORIZONTAL:
if self.shrink_first:
rect.x -= offset
rect.width += offset
else:
rect.width += offset
else:
if self.shrink_first:
rect.y -= offset
rect.height += offset
else:
rect.height += offset
child.size_allocate(rect)
class HPaned(Paned):
def __init__(self,
shrink_first=True,
enable_animation=False,
always_show_button=False,
enable_drag=False,
handle_color=ui_theme.get_color("paned_line")
):
Paned.__init__(self, shrink_first, enable_animation, always_show_button, enable_drag, handle_color)
self.set_orientation(gtk.ORIENTATION_HORIZONTAL)
self.cursor_type = gtk.gdk.Cursor(gtk.gdk.SB_H_DOUBLE_ARROW)
class VPaned(Paned):
def __init__(self,
shrink_first=True,
enable_animation=False,
always_show_button=False,
enable_drag=False,
handle_color=ui_theme.get_color("paned_line")
):
Paned.__init__(self, shrink_first, enable_animation, always_show_button, enable_drag, handle_color)
self.set_orientation(gtk.ORIENTATION_VERTICAL)
self.cursor_type = gtk.gdk.Cursor(gtk.gdk.SB_V_DOUBLE_ARROW)
gobject.type_register(Paned)
gobject.type_register(HPaned)
gobject.type_register(VPaned)
if __name__ == '__main__':
w = gtk.Window()
w.set_size_request(700, 400)
#w.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color('yellow'))
box = gtk.VBox()
p = VPaned()
c1 = gtk.Button("11111111111111111111111")
c1.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color('blue'))
c2 = gtk.Button("122222222222222222222222")
c1.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color('red'))
p.add1(c1)
p.add2(c2)
box.pack_start(p)
p = HPaned()
c1 = gtk.Button("11111111111111111111111")
c1.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color('blue'))
c2 = gtk.Button("122222222222222222222222")
c1.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color('red'))
p.add1(c1)
p.add2(c2)
box.pack_start(p)
w.add(box)
w.connect('destroy', gtk.main_quit)
w.show_all()
gtk.main()
```
#### File: dtk/ui/scrolled_window.py
```python
from gtk import gdk
from theme import ui_theme
from utils import remove_signal_id, color_hex_to_cairo
import gobject
import gtk
__all__ = ['ScrolledWindow']
# The p_range is the virtual width/height; its value is smaller than
# allocation.width/height when the scrollbar's natural length would be
# smaller than the minimum scrollbar length:
#   p_range = allocation.width/height - (min_bar_len - *bar_len*)
# where *bar_len* = (adj.page_size / adj.upper) * allocation.width/height.
# With this mapping, 0~(adj.upper - adj.page_size) is mapped onto 0~p_range.
def value2pos(value, p_range, upper):
'''
Compute the scrollbar position by the adjustment value.
'''
if upper == 0: return 0
return p_range * float(value) / upper
def pos2value(pos, p_range, upper):
'''
Compute the adjustment value by the scrollbar position.
'''
if p_range == 0 : return 0
return pos * upper / p_range
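# Editor's worked example of the mapping above: with allocation.height = 400,
# adj.upper = 2000, adj.page_size = 400 and min_bar_len = 50, the natural bar
# length is (400 / 2000) * 400 = 80 >= 50, so p_range stays 400 and
# value2pos(800, 400, 2000) = 400 * 800 / 2000 = 160. If upper were 20000 the
# natural bar length would be 8, so p_range shrinks to 400 - (50 - 8) = 358
# and scrollbar positions 0..358 map back onto the full value range.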
class ScrolledWindow(gtk.Bin):
'''
The scrolled window with deepin's custom scrollbar.
@undocumented: do_enter_notify_event
@undocumented: _test_calc
@undocumented: do_remove
@undocumented: do_unmap
@undocumented: do_map
@undocumented: set_policy
@undocumented: set_shadow_type
@undocumented: do_realize
@undocumented: do_size_request
@undocumented: do_unrealize
@undocumented: update_scrollbar
@undocumented: do_size_allocate
@undocumented: do_add
@undocumented: hadjustment_changed
@undocumented: vadjustment_changed
@undocumented: calc_hbar_allocation
@undocumented: calc_hbar_length
@undocumented: calc_vbar_allocation
@undocumented: calc_vbar_length
@undocumented: do_motion_notify_event
@undocumented: do_leave_notify_event
@undocumented: do_scroll_event
@undocumented: make_bar_bigger
@undocumented: make_bar_smaller
@undocumented: do_button_release_event
@undocumented: draw_vbar
@undocumented: draw_hbar
@undocumented: do_expose_event
'''
__gsignals__ = {
"vscrollbar-state-changed":(gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE,(gobject.TYPE_STRING,))
}
def __init__(self,
right_space=2,
top_bottom_space=3,
):
'''
Init scrolled window.
@param right_space: the space between right border and the vertical scrollbar.
        @param top_bottom_space: the space between the top/bottom borders and the vertical scrollbar.
'''
gtk.Bin.__init__(self)
self.bar_min_length = 50 #scrollbar smallest height
self.bar_small_width = 7
self.bar_width = 14 #normal scrollbar width
self.bar_background = ui_theme.get_color("scrolledbar")
self.right_space = right_space
self.top_bottom_space = top_bottom_space
self.h_value_change_id = None
self.h_change_id = None
self.v_value_change_id = None
self.v_change_id = None
self.vscrollbar_state = None
class Record():
def __init__(self):
self.bar_len = 0 # scrollbar length
self.last_pos = 0 # last mouse motion pointer's position (x or y)
# Last mouse motion timestamp; if the user moved the window,
# then last_pos has likely become invalid, so we need "last_time"
# to deal with this situation.
self.last_time = 0
self.virtual_len = 0 # the virtual window height or width length
self.bar_pos = 0 # the scrollbar top-corner/left-corner position
self.is_inside = False # is pointer in the scrollbar region?
self.in_motion = False # is the user dragging the scrollbar?
self.policy = gtk.POLICY_AUTOMATIC
self.need_update_region = False # update gdk.Window's shape_region when needed
self._horizaontal = Record()
self._vertical = Record()
self.set_can_focus(True)
self.vallocation = gdk.Rectangle()
self.hallocation = gdk.Rectangle()
self.set_vadjustment(gtk.Adjustment())
self.set_hadjustment(gtk.Adjustment())
self.set_has_window(False)
def do_expose_event(self, e):
if e.window == self.vwindow:
self.draw_vbar()
return True
elif e.window == self.hwindow:
self.draw_hbar()
return True
else:
return False
def draw_vbar(self):
cr = self.vwindow.cairo_create()
cr.set_source_rgb(*color_hex_to_cairo(self.bar_background.get_color()))
cr.rectangle(0, 0, self.vallocation.width, self.vallocation.height)
cr.fill()
def draw_hbar(self):
cr = self.hwindow.cairo_create()
cr.set_source_rgb(*color_hex_to_cairo(self.bar_background.get_color()))
cr.rectangle(0, 0, self.hallocation.width, self.hallocation.height)
cr.fill()
def do_button_press_event(self, e):
if 0 <= e.x - self.vallocation.x <= self.bar_width:
# Button press on vadjustment.
press_pos = e.y - self.vadjustment.value
value = pos2value(press_pos - self.vallocation.height / 2, self._vertical.virtual_len, self.vadjustment.upper)
value = max(0, min(value, self.vadjustment.upper - self.vadjustment.page_size))
if press_pos < self.vallocation.y:
if self.vadjustment.value - value > self.vadjustment.page_size:
self.vadjustment.set_value(max(self.vadjustment.value - self.vadjustment.page_size,
0))
else:
self.vadjustment.set_value(value)
return True
elif press_pos > self.vallocation.y + self.vallocation.height:
if value - self.vadjustment.value > self.vadjustment.page_size:
self.vadjustment.set_value(min(self.vadjustment.value + self.vadjustment.page_size,
self.vadjustment.upper - self.vadjustment.page_size))
else:
self.vadjustment.set_value(value)
return True
else:
return False
elif 0 <= e.y - self.hallocation.y <= self.bar_width:
# Button press on hadjustment.
press_pos = e.x - self.hadjustment.value
value = pos2value(press_pos - self.hallocation.width / 2, self._horizaontal.virtual_len, self.hadjustment.upper)
value = max(0, min(value, self.hadjustment.upper - self.hadjustment.page_size))
if press_pos < self.hallocation.x:
if self.hadjustment.value - value > self.hadjustment.page_size:
self.hadjustment.set_value(max(self.hadjustment.value - self.hadjustment.page_size,
0))
else:
self.hadjustment.set_value(value)
return True
elif press_pos > self.hallocation.x + self.hallocation.width:
if value - self.hadjustment.value > self.hadjustment.page_size:
self.hadjustment.set_value(min(self.hadjustment.value + self.hadjustment.page_size,
self.hadjustment.upper - self.hadjustment.page_size))
else:
self.hadjustment.set_value(value)
return True
else:
return False
else:
return False
def do_button_release_event(self, e):
if e.window == self.hwindow:
self._horizaontal.in_motion = False
if not self._horizaontal.is_inside:
self.make_bar_smaller(gtk.ORIENTATION_HORIZONTAL)
return True
elif e.window == self.vwindow:
self._vertical.in_motion = False
if not self._vertical.is_inside:
self.make_bar_smaller(gtk.ORIENTATION_VERTICAL)
return True
else:
return False
def make_bar_smaller(self, orientation):
if orientation == gtk.ORIENTATION_HORIZONTAL:
bar_len = self._horizaontal.bar_len
if bar_len == 0:
self._horizaontal.need_update_region = True
return
region = gdk.region_rectangle(gdk.Rectangle(0, 0, int(bar_len), self.bar_small_width))
if self.hallocation.x == 0:
self.hwindow.shape_combine_region(region, self.top_bottom_space, self.bar_width - self.bar_small_width -self.right_space)
else:
self.hwindow.shape_combine_region(region, -self.top_bottom_space, self.bar_width - self.bar_small_width -self.right_space)
elif orientation == gtk.ORIENTATION_VERTICAL:
bar_len = self._vertical.bar_len
if bar_len == 0:
self._vertical.need_update_region = True
return
region = gdk.region_rectangle(gdk.Rectangle(0, 0, self.bar_small_width, int(bar_len)))
if self.vallocation.y == 0:
self.vwindow.shape_combine_region(region, self.bar_width-self.bar_small_width - self.right_space, self.top_bottom_space)
else:
self.vwindow.shape_combine_region(region, self.bar_width-self.bar_small_width - self.right_space, -self.top_bottom_space)
else:
raise "make_bar_smaller's orientation must be gtk.ORIENTATION_VERTICAL or gtk.ORIENTATION_HORIZONTAL"
return False
def make_bar_bigger(self, orientation):
if orientation == gtk.ORIENTATION_HORIZONTAL:
region = gdk.region_rectangle(gdk.Rectangle(0, 0, int(self._horizaontal.bar_len), self.bar_width))
if self.hallocation.x == 0:
self.hwindow.shape_combine_region(region, self.top_bottom_space, -self.right_space)
else:
self.hwindow.shape_combine_region(region, -self.top_bottom_space, -self.right_space)
elif orientation == gtk.ORIENTATION_VERTICAL:
region = gdk.region_rectangle(gdk.Rectangle(0, 0, self.bar_width, int(self._vertical.bar_len)))
if self.vallocation.y == 0:
self.vwindow.shape_combine_region(region, -self.right_space, self.top_bottom_space)
else:
self.vwindow.shape_combine_region(region, -self.right_space, -self.top_bottom_space)
else:
raise "make_bar_bigger's orientation must be gtk.ORIENTATION_VERTICAL or gtk.ORIENTATION_HORIZONTAL"
def do_scroll_event(self, e):
value = self.vadjustment.value
step = self.vadjustment.step_increment
upper = self.vadjustment.upper
page_size = self.vadjustment.page_size
# Emit signal 'vscrollbar_state_changed'.
self.emit_vscrollbar_state_changed(e)
if e.direction == gdk.SCROLL_DOWN:
self.vadjustment.set_value(min(upper-page_size-1, value+step))
elif e.direction == gdk.SCROLL_UP:
self.vadjustment.set_value(max(0, value-step))
# WARNING: We must always return False here, otherwise nested scrolled windows can't work correctly.
return False
def do_leave_notify_event(self, e):
if e.window == self.hwindow :
self._horizaontal.is_inside = False
if not self._horizaontal.in_motion:
self.make_bar_smaller(gtk.ORIENTATION_HORIZONTAL)
return True
elif e.window == self.vwindow:
self._vertical.is_inside = False
if not self._vertical.in_motion:
self.make_bar_smaller(gtk.ORIENTATION_VERTICAL)
return True
else:
return False
def do_enter_notify_event(self, e):
if e.window == self.hwindow:
self.make_bar_bigger(gtk.ORIENTATION_HORIZONTAL)
self._horizaontal.is_inside = True
return True
elif e.window == self.vwindow:
self.make_bar_bigger(gtk.ORIENTATION_VERTICAL)
self._vertical.is_inside = True
return True
else:
return False
def do_visibility_notify_event(self, e):
self.make_bar_smaller(gtk.ORIENTATION_HORIZONTAL)
self.make_bar_smaller(gtk.ORIENTATION_VERTICAL)
return False
def do_motion_notify_event(self, e):
if not (e.window == self.hwindow or e.window == self.vwindow): return False
if e.window == self.hwindow and (e.state & gtk.gdk.BUTTON1_MASK) == gtk.gdk.BUTTON1_MASK:
self.make_bar_bigger(gtk.ORIENTATION_HORIZONTAL)
if self._horizaontal.last_time == 0:
self._horizaontal.last_time = e.time
elif e.time - self._horizaontal.last_time > 1000:
self._horizaontal.last_time = 0
self._horizaontal.last_pos = 0
if self._horizaontal.last_pos == 0 or self._horizaontal.last_time == 0:
self._horizaontal.last_pos = e.x_root
return True
deltaX = e.x_root - self._horizaontal.last_pos
upper = self.hadjustment.upper
# The pos may be beyond the effective range,
# but we immediately correct its value.
# The invariant: "value" always stays inside the effective range.
value = pos2value(self._horizaontal.bar_pos+deltaX, self._horizaontal.virtual_len, upper)
value = max(0, min(value, self.hadjustment.upper-self.hadjustment.page_size))
self.hadjustment.set_value(value)
self._horizaontal.last_pos = e.x_root
self._horizaontal.last_time = e.time
self._horizaontal.in_motion = True
return True
elif e.window == self.vwindow and (e.state & gtk.gdk.BUTTON1_MASK) == gtk.gdk.BUTTON1_MASK:
self.make_bar_bigger(gtk.ORIENTATION_VERTICAL)
if self._vertical.last_time == 0:
self._vertical.last_time = e.time
elif e.time - self._vertical.last_time > 1000:
self._vertical.last_time = 0
self._vertical.last_pos = 0
if self._vertical.last_pos == 0 or self._vertical.last_time == 0:
self._vertical.last_pos = e.y_root
return True
upper = self.vadjustment.upper
deltaY = e.y_root - self._vertical.last_pos
value = pos2value(self._vertical.bar_pos+deltaY, self._vertical.virtual_len, upper)
value = max(0, min(value, self.vadjustment.upper-self.vadjustment.page_size))
self.vadjustment.set_value(value)
self._vertical.last_pos = e.y_root
self._vertical.last_time = e.time
self._vertical.in_motion = True
return True
def calc_vbar_length(self):
self._vertical.virtual_len = self.allocation.height
if self.vadjustment.upper <= 1 or self._vertical.policy == gtk.POLICY_NEVER:
self._vertical.bar_len = 0
return
ratio = float(self.vadjustment.page_size) / (self.vadjustment.upper-self.vadjustment.lower)
if ratio == 1:
self._vertical.bar_len = 0
else:
bar_len = self._vertical.virtual_len * ratio
if bar_len < self.bar_min_length:
self._vertical.virtual_len -= (self.bar_min_length - bar_len)
self._vertical.bar_len = max(bar_len, self.bar_min_length)
def calc_vbar_allocation(self):
bar_len = int(self._vertical.bar_len)
if bar_len == 0:
self.vallocation = gdk.Rectangle(0, 0, 0, 0)
self.vwindow.hide()
else:
self.vwindow.show()
self.vallocation = gdk.Rectangle(
self.allocation.width - self.bar_width, int(self._vertical.bar_pos),
self.bar_width, bar_len)
def calc_hbar_length(self):
self._horizaontal.virtual_len = self.allocation.width
if self.hadjustment.upper <= 1 or self._horizaontal.policy == gtk.POLICY_NEVER:
self._horizaontal.bar_len = 0
return
ratio = float(self.hadjustment.page_size) / (self.hadjustment.upper-self.hadjustment.lower)
if ratio == 1:
self._horizaontal.bar_len = 0
else:
bar_len = self._horizaontal.virtual_len * ratio
if bar_len < self.bar_min_length:
self._horizaontal.virtual_len -= (self.bar_min_length - bar_len)
self._horizaontal.bar_len = max(bar_len, self.bar_min_length)
def calc_hbar_allocation(self):
bar_len = int(self._horizaontal.bar_len)
if bar_len == 0:
self.hallocation = gdk.Rectangle(0, 0, 0, 0)
self.hwindow.hide()
else:
self.hwindow.show()
self.hallocation = gdk.Rectangle(
int(self._horizaontal.bar_pos), self.allocation.height - self.bar_width,
bar_len, self.bar_width)
def vadjustment_changed(self, adj):
if self.get_realized():
upper = self.vadjustment.upper
self._vertical.bar_pos = value2pos(adj.value, self._vertical.virtual_len, upper)
self.calc_vbar_allocation()
self.vwindow.move_resize(*self.vallocation)
self.emit_vscrollbar_state_changed()
self.queue_draw()
def hadjustment_changed(self, adj):
if self.get_realized():
upper = self.hadjustment.upper
self._horizaontal.bar_pos = value2pos(adj.value, self._horizaontal.virtual_len, upper)
self.calc_hbar_allocation()
self.hwindow.move_resize(*self.hallocation)
self.queue_draw()
def add_with_viewport(self, child):
'''
Used to add children without native scrolling capabilities.
If a child has native scrolling, use ScrolledWindow.add() instead
of this function.
@param child: the child without native scrolling.
'''
vp = gtk.Viewport()
vp.set_shadow_type(gtk.SHADOW_NONE)
vp.add(child)
vp.show()
self.add(vp)
def add_child(self, child):
'''
Add the child to this ScrolledWindow. The child is wrapped in a
viewport via add_with_viewport(), so it does not need native scrolling
capabilities.
@param child: the child widget to add.
'''
self.add_with_viewport(child)
def do_add(self, child):
self.child = None
gtk.Bin.do_add(self, child)
child.set_scroll_adjustments(self.hadjustment, self.vadjustment)
def do_size_request(self, requisition):
if self.child:
self.child.do_size_request(self.child, requisition)
def do_size_allocate(self, allocation):
self.allocation = allocation
if self.get_realized():
self.binwindow.move_resize(*self.allocation)
# Must run before calc_xxx_length, because we need the child to compute the adjustment value.
if self.child:
(allocation.x, allocation.y) = (0, 0)
self.child.do_size_allocate(self.child, allocation)
self.update_scrollbar()
if self.get_realized():
self.make_bar_smaller(gtk.ORIENTATION_VERTICAL)
self.make_bar_smaller(gtk.ORIENTATION_HORIZONTAL)
def update_scrollbar(self, *arg, **argk):
if self.get_realized():
self.calc_vbar_length()
self.calc_hbar_length()
self.vadjustment.emit('value-changed')
self.hadjustment.emit('value-changed')
if self._horizaontal.need_update_region:
self.make_bar_smaller(gtk.ORIENTATION_HORIZONTAL)
self._horizaontal.need_update_region = False
self.hwindow.show()
if self._vertical.need_update_region:
self.make_bar_smaller(gtk.ORIENTATION_VERTICAL)
self._vertical.need_update_region = False
self.vwindow.show()
def do_unrealize(self):
self.binwindow.set_user_data(None)
self.binwindow.destroy()
self.binwindow = None
self.vwindow.set_user_data(None)
self.vwindow.destroy()
self.vwindow = None
self.hwindow.set_user_data(None)
self.hwindow.destroy()
self.hwindow = None
gtk.Bin.do_unrealize(self)
def do_realize(self):
gtk.Bin.do_realize(self)
self.binwindow = gtk.gdk.Window(self.get_parent_window(),
x=self.allocation.x,
y=self.allocation.y,
width=self.allocation.width,
height=self.allocation.height,
window_type=gtk.gdk.WINDOW_CHILD,
wclass=gtk.gdk.INPUT_OUTPUT,
event_mask=(self.get_events()| gdk.EXPOSURE_MASK | gdk.VISIBILITY_NOTIFY_MASK),
visual=self.get_visual(),
colormap=self.get_colormap(),
)
self.binwindow.set_user_data(self)
self.vwindow = gtk.gdk.Window(self.binwindow,
x=self.vallocation.x,
y=self.vallocation.y,
width=self.vallocation.width,
height=self.vallocation.height,
window_type=gtk.gdk.WINDOW_CHILD,
wclass=gtk.gdk.INPUT_OUTPUT,
visual=self.get_visual(),
colormap=self.get_colormap(),
event_mask=(self.get_events()
| gdk.EXPOSURE_MASK
| gdk.ENTER_NOTIFY_MASK | gdk.LEAVE_NOTIFY_MASK | gdk.BUTTON_RELEASE_MASK
| gdk.BUTTON_MOTION_MASK
| gdk.POINTER_MOTION_HINT_MASK | gdk.BUTTON_PRESS_MASK
)
)
self.vwindow.set_user_data(self)
self.hwindow = gtk.gdk.Window(self.binwindow,
x=self.hallocation.x,
y=self.hallocation.y,
width=self.hallocation.width,
height=self.hallocation.height,
window_type=gtk.gdk.WINDOW_CHILD,
wclass=gtk.gdk.INPUT_OUTPUT,
colormap=self.get_colormap(),
visual=self.get_visual(),
event_mask=(self.get_events()
| gdk.EXPOSURE_MASK
| gdk.ENTER_NOTIFY_MASK | gdk.LEAVE_NOTIFY_MASK | gdk.BUTTON_RELEASE_MASK
| gdk.BUTTON_MOTION_MASK
| gdk.POINTER_MOTION_HINT_MASK | gdk.BUTTON_PRESS_MASK
)
)
self.hwindow.set_user_data(self)
if self.child:
self.child.set_parent_window(self.binwindow)
self.queue_resize()
def set_shadow_type(self, t):
return
def set_policy(self, h, v):
'''
Set the policy of ScrolledWindow's scrollbar
@param h: the horizontal scrollbar policy
@param v: the vertical scrollbar policy
'''
self._horizaontal.policy = h
self._vertical.policy = v
return
def do_map(self):
gtk.Bin.do_map(self) # must be called before self.xwindow.show(); the reason is unclear.
self.binwindow.show()
if self.child and not self.child.get_mapped() and self.child.get_visible():
self.child.do_map(self.child)
def do_unmap(self):
self.binwindow.hide()
self.hwindow.hide()
self.vwindow.hide()
gtk.Bin.do_unmap(self)
def do_remove(self, child):
gtk.Bin.do_remove(self, child)
def get_vadjustment(self):
'''
Returns the vertical scrollbar's adjustment,
used to connect the vertical scrollbar to the child widget's
vertical scroll functionality.
'''
return self.vadjustment
def get_hadjustment(self):
'''
Returns the horizontal scrollbar's adjustment,
used to connect the horizontal scrollbar to the child
widget's horizontal scroll functionality.
'''
return self.hadjustment
def set_hadjustment(self, adj):
'''
Sets the gtk.Adjustment for the horizontal scrollbar.
@param adj: horizontal scroll adjustment
'''
remove_signal_id(self.h_value_change_id)
remove_signal_id(self.h_change_id)
self.hadjustment = adj
h_value_change_handler_id = self.hadjustment.connect('value-changed', self.hadjustment_changed)
h_change_handler_id = self.hadjustment.connect('changed', self.update_scrollbar)
self.h_value_change_id = (self.hadjustment, h_value_change_handler_id)
self.h_change_id = (self.hadjustment, h_change_handler_id)
def set_vadjustment(self, adj):
'''
Sets the gtk.Adjustment for the vertical scrollbar.
@param adj: vertical scroll adjustment
'''
remove_signal_id(self.v_value_change_id)
remove_signal_id(self.v_change_id)
self.vadjustment = adj
v_value_change_handler_id = self.vadjustment.connect('value-changed', self.vadjustment_changed)
v_change_handler_id = self.vadjustment.connect('changed', self.update_scrollbar)
self.v_value_change_id = (self.vadjustment, v_value_change_handler_id)
self.v_change_id = (self.vadjustment, v_change_handler_id)
def _test_calc(self):
for i in xrange(0, int(self.vadjustment.upper-self.vadjustment.page_size), 30):
pos = value2pos(i, self._vertical.virtual_len, self.vadjustment.upper)
print "value:%f --> pos:%d" % (i, pos),
assert(pos <= self.allocation.height-self._vertical.bar_len),\
"pos(%f) should small than(%f)" % (pos, self.allocation.height-self._vertical.bar_len)
value = pos2value(pos, self._vertical.virtual_len, self.vadjustment.upper)
print "\t pos:%d -->value:%f" % (pos, value)
print "v_len:%f, height:%f, vir_bar_len:%d" % ( self._vertical.virtual_len,
self.allocation.height, self._vertical.bar_len)
def emit_vscrollbar_state_changed(self, e=None):
value = self.vadjustment.value
page_size = self.vadjustment.page_size
upper = self.vadjustment.upper
if e == None:
bottom_value = upper - page_size
elif e.type == gtk.gdk.MOTION_NOTIFY:
bottom_value = upper - page_size
elif e.type == gtk.gdk.SCROLL:
bottom_value = upper - page_size - 1
if upper != page_size:
if value == 0 and self.vscrollbar_state != "top":
self.vscrollbar_state = "top"
self.emit("vscrollbar-state-changed", self.vscrollbar_state)
elif value > 0 and value < bottom_value and self.vscrollbar_state != "center":
self.vscrollbar_state = "center"
self.emit("vscrollbar-state-changed", self.vscrollbar_state)
elif value == bottom_value and self.vscrollbar_state != "bottom":
self.vscrollbar_state = "bottom"
self.emit("vscrollbar-state-changed", self.vscrollbar_state)
gobject.type_register(ScrolledWindow)
```
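A minimal usage sketch for the ScrolledWindow above, assuming the module is importable as `dtk.ui.scrolled_window` in a PyGTK (GTK+ 2) environment; the window, the label rows and the signal handler are illustrative placeholders rather than part of the original module:
```python
# Hedged sketch: wrap a non-scrolling child with add_with_viewport() and
# listen for the custom "vscrollbar-state-changed" signal.
import gtk
from dtk.ui.scrolled_window import ScrolledWindow   # assumed module path

def on_vscrollbar_state_changed(widget, state):
    # state is "top", "center" or "bottom", as emitted by the widget.
    print "vscrollbar state:", state

win = gtk.Window()
win.set_size_request(300, 200)
scrolled = ScrolledWindow()
scrolled.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
scrolled.connect("vscrollbar-state-changed", on_vscrollbar_state_changed)
content = gtk.VBox()
for i in range(50):
    content.pack_start(gtk.Label("row %d" % i), False, False)
# The VBox has no native scrolling, so let the viewport wrapper handle it.
scrolled.add_with_viewport(content)
win.add(scrolled)
win.connect("destroy", gtk.main_quit)
win.show_all()
gtk.main()
```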
#### File: dtk/ui/skin_config.py
```python
from dominant_color import get_dominant_color
from cache_pixbuf import CachePixbuf
from deepin_utils.config import Config
from constant import SHADOW_SIZE, COLOR_SEQUENCE
from draw import draw_pixbuf, draw_vlinear, draw_hlinear
from deepin_utils.file import create_directory, remove_file, touch_file, remove_directory
from utils import color_hex_to_cairo, find_similar_color
import shutil
import gobject
import gtk
import os
import tarfile
import uuid
import sys
import traceback
class SkinConfig(gobject.GObject):
'''
SkinConfig class.
@undocumented: update_image_size
@undocumented: get_skin_file_path
@undocumented: is_skin_exist
@undocumented: get_default_skin
@undocumented: get_skin_dir
@undocumented: save_skin_name
@undocumented: reload_skin
@undocumented: load_skin
@undocumented: save_skin
@undocumented: change_theme
@undocumented: apply_skin
@undocumented: add_theme
@undocumented: remove_theme
@undocumented: wrap_skin_window
@undocumented: add_skin_window
@undocumented: remove_skin_window
@undocumented: reset
@undocumented: auto_resize
@undocumented: vertical_mirror_background
@undocumented: horizontal_mirror_background
@undocumented: render_background
@undocumented: export_skin
@undocumented: load_skin_from_image
@undocumented: load_skin_from_package
'''
__gsignals__ = {
"theme-changed" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
}
def __init__(self):
'''
Initialize SkinConfig class.
'''
# Init.
gobject.GObject.__init__(self)
self.cache_pixbuf = CachePixbuf()
self.theme_list = []
self.window_list = []
def set_application_window_size(self, app_window_width, app_window_height):
'''
Set application window with given size.
@param app_window_width: Application window width.
@param app_window_height: Application window height.
'''
self.app_window_width = app_window_width
self.app_window_height = app_window_height
def update_image_size(self, x, y, scale_x, scale_y):
'''
Internal function to update image size.
'''
self.x = x
self.y = y
self.scale_x = scale_x
self.scale_y = scale_y
def get_skin_file_path(self, filename):
'''
Internal function to get skin file path.
'''
skin_file_dir = None
for skin_dir in [self.system_skin_dir, self.user_skin_dir]:
if os.path.exists(skin_dir):
if self.skin_name in os.listdir(os.path.expanduser(skin_dir)):
skin_file_dir = skin_dir
break
if skin_file_dir:
return os.path.join(skin_file_dir, self.skin_name, filename)
else:
return None
def is_skin_exist(self, skin_name, system_skin_dir, user_skin_dir):
'''
Internal function to check whether a skin exists in the skin directories.
'''
for skin_dir in [system_skin_dir, user_skin_dir]:
if os.path.exists(skin_dir):
if skin_name in os.listdir(os.path.expanduser(skin_dir)):
return True
return False
def get_default_skin(self, system_skin_dir, user_skin_dir):
'''
Internal function to get default skin.
'''
for skin_dir in [system_skin_dir, user_skin_dir]:
if os.path.exists(skin_dir):
skin_list = os.listdir(os.path.expanduser(skin_dir))
if len(skin_list) > 0:
return skin_list[0]
return None
def get_skin_dir(self):
'''
Internal function to get skin dir.
'''
for skin_dir in [self.system_skin_dir, self.user_skin_dir]:
if os.path.exists(skin_dir):
if self.skin_name in os.listdir(os.path.expanduser(skin_dir)):
return os.path.join(skin_dir, self.skin_name)
return None
def init_skin(self,
skin_name,
system_skin_dir,
user_skin_dir,
skin_config_file,
app_given_id,
app_given_version):
'''
Init skin.
@param skin_name: Skin name.
@param system_skin_dir: Default skin directory.
@param user_skin_dir: User's skin directory, typically ~/.config/project-name/skin
@param skin_config_file: Skin's config filepath, typically ~/.config/project-name/skin_config.ini
@param app_given_id: Project name.
@param app_given_version: Project version.
'''
self.skin_config_file = skin_config_file
if os.path.exists(skin_config_file):
# Read skin name from config file.
skin_config = Config(skin_config_file)
skin_config.load()
# Load skin.
init_skin_name = skin_config.get("skin", "skin_name")
else:
# Create skin config if it does not exist.
touch_file(self.skin_config_file)
init_skin_name = skin_name
if self.is_skin_exist(init_skin_name, system_skin_dir, user_skin_dir):
self.load_skin(init_skin_name, system_skin_dir, user_skin_dir)
else:
# Try to load the default skin if the user's selected skin does not exist.
default_skin_name = self.get_default_skin(system_skin_dir, user_skin_dir)
assert(default_skin_name != None)
self.load_skin(default_skin_name, system_skin_dir, user_skin_dir)
self.app_given_id = app_given_id
self.app_given_version = app_given_version
def save_skin_name(self):
'''
Internal function to save skin name.
'''
skin_config = Config(self.skin_config_file)
skin_config.load()
if skin_config.get("skin", "skin_name") != self.skin_name:
skin_config.set("skin", "skin_name", self.skin_name)
skin_config.write(self.skin_config_file)
def reload_skin(self, skin_name=None):
'''
Internal function to reload skin.
'''
if skin_name:
return self.load_skin(skin_name)
else:
return self.load_skin(self.skin_name)
def load_skin(self, skin_name, system_skin_dir=None, user_skin_dir=None):
'''
Internal function to load skin.
@return: Return True if the skin loads successfully, otherwise return False.
'''
try:
# Save skin dir.
self.skin_name = skin_name
if system_skin_dir:
self.system_skin_dir = system_skin_dir
create_directory(self.system_skin_dir)
if user_skin_dir:
self.user_skin_dir = user_skin_dir
create_directory(self.user_skin_dir)
self.skin_dir = self.get_skin_dir()
# Load config file.
self.config = Config(self.get_skin_file_path("config.ini"))
self.config.load()
# Get theme config.
self.theme_name = self.config.get("theme", "theme_name")
# Get application config.
self.app_id = self.config.get("application", "app_id")
self.app_version = self.config.getfloat("application", "app_version")
# Get background config.
self.image = self.config.get("background", "image")
self.x = self.config.getfloat("background", "x")
self.y = self.config.getfloat("background", "y")
self.scale_x = self.config.getfloat("background", "scale_x")
self.scale_y = self.config.getfloat("background", "scale_y")
self.dominant_color = self.config.get("background", "dominant_color")
# Get action config.
self.deletable = self.config.getboolean("action", "deletable")
self.editable = self.config.getboolean("action", "editable")
self.vertical_mirror = self.config.getboolean("action", "vertical_mirror")
self.horizontal_mirror = self.config.getboolean("action", "horizontal_mirror")
# Generate background pixbuf.
self.background_pixbuf = gtk.gdk.pixbuf_new_from_file(self.get_skin_file_path(self.image))
# Save skin name.
self.save_skin_name()
return True
except Exception, e:
print "function load_skin got error: %s" % (e)
traceback.print_exc(file=sys.stdout)
return False
def save_skin(self, given_filepath=None):
'''
Internal function to save skin.
'''
self.config.set("theme", "theme_name", self.theme_name)
self.config.set("background", "x", self.x)
self.config.set("background", "y", self.y)
self.config.set("background", "scale_x", self.scale_x)
self.config.set("background", "scale_y", self.scale_y)
self.config.set("action", "vertical_mirror", self.vertical_mirror)
self.config.set("action", "horizontal_mirror", self.horizontal_mirror)
self.config.write(given_filepath)
def change_theme(self, theme_name):
'''
Internal function to change theme.
'''
self.theme_name = theme_name
self.apply_skin()
def apply_skin(self):
'''
Internal function to apply skin.
'''
# Change theme.
for theme in self.theme_list:
if theme.theme_name != self.theme_name:
theme.change_theme(self.theme_name)
# Redraw application.
for window in self.window_list:
window.queue_draw()
# Emit `theme-changed` signal.
self.emit("theme-changed", self.theme_name)
def add_theme(self, theme):
'''
Internal function to add theme.
'''
if not theme in self.theme_list:
self.theme_list.append(theme)
def remove_theme(self, theme):
'''
Internal function to remove theme.
'''
if theme in self.theme_list:
self.theme_list.remove(theme)
def wrap_skin_window(self, window):
'''
Internal function to wrap skin window.
'''
self.add_skin_window(window)
window.connect("destroy", lambda w: self.remove_skin_window(w))
def add_skin_window(self, window):
'''
Internal function to add skin window.
'''
if not window in self.window_list:
self.window_list.append(window)
def remove_skin_window(self, window):
'''
Internal function to remove skin window.
'''
if window in self.window_list:
self.window_list.remove(window)
def reset(self):
'''
Internal function to reset.
'''
self.x = 0
self.y = 0
self.scale_x = 1.0
self.scale_y = 1.0
self.vertical_mirror = False
self.horizontal_mirror = False
def auto_resize(self):
'''
Internal function to auto resize.
'''
self.x = 0
self.y = 0
pixbuf = gtk.gdk.pixbuf_new_from_file(self.get_skin_file_path(self.image))
if self.app_window_width > self.app_window_height:
self.scale_x = self.scale_y = float(self.app_window_height) / pixbuf.get_height()
else:
self.scale_x = self.scale_y = float(self.app_window_width) / pixbuf.get_width()
self.vertical_mirror = False
self.horizontal_mirror = False
def vertical_mirror_background(self):
'''
Internal function to vertical mirror background.
'''
self.vertical_mirror = not self.vertical_mirror
self.apply_skin()
def horizontal_mirror_background(self):
'''
Internal function to horizontal mirror background.
'''
self.horizontal_mirror = not self.horizontal_mirror
self.apply_skin()
def render_background(self, cr, widget, x, y,
translate_width=0,
translate_height=0):
'''
Internal function to render background.
'''
# Init.
toplevel_rect = widget.get_toplevel().allocation
render_width = toplevel_rect.width + translate_width
render_height = toplevel_rect.height + translate_height
# Draw background.
background_x = int(self.x * self.scale_x)
background_y = int(self.y * self.scale_y)
background_width = int(self.background_pixbuf.get_width() * self.scale_x)
background_height = int(self.background_pixbuf.get_height() * self.scale_y)
self.cache_pixbuf.scale(self.background_pixbuf, background_width, background_height,
self.vertical_mirror, self.horizontal_mirror)
draw_pixbuf(
cr,
self.cache_pixbuf.get_cache(),
x + background_x,
y + background_y)
# Draw dominant color if necessary.
if ((background_width + background_x) < render_width
and (background_height + background_y) < render_height):
cr.set_source_rgb(*color_hex_to_cairo(self.dominant_color))
cr.rectangle(
x + background_x + background_width,
y + background_y + background_height,
render_width - (background_width + background_x),
render_height - (background_height + background_y))
cr.fill()
if (background_width + background_x) < render_width:
draw_hlinear(
cr,
x + (background_width + background_x) - SHADOW_SIZE,
y,
SHADOW_SIZE,
(background_height + background_y),
[(0, (self.dominant_color, 0)),
(1, (self.dominant_color, 1))])
cr.set_source_rgb(*color_hex_to_cairo(self.dominant_color))
cr.rectangle(
x + (background_width + background_x),
y,
render_width - (background_width + background_x),
(background_height + background_y))
cr.fill()
if (background_height + background_y) < render_height:
draw_vlinear(
cr,
x,
y + (background_height + background_y) - SHADOW_SIZE,
(background_width + background_x),
SHADOW_SIZE,
[(0, (self.dominant_color, 0)),
(1, (self.dominant_color, 1))])
cr.set_source_rgb(*color_hex_to_cairo(self.dominant_color))
cr.rectangle(
x,
y + (background_height + background_y),
(background_width + background_x),
render_height - (background_height + background_y))
cr.fill()
def export_skin(self, filepath):
'''
Internal function to export skin.
'''
# Build temp config file.
config_filepath = os.path.join("/tmp", str(uuid.uuid4()))
touch_file(config_filepath)
self.save_skin(config_filepath)
# Build skin package.
with tarfile.open("%s.tar.gz" % filepath, "w:gz") as tar:
# Add config file.
tar.add(config_filepath, "config.ini", False)
# Add background image file.
tar.add(self.get_skin_file_path(self.image), self.image, False)
# Copy theme files if the theme is not a standard theme.
if not self.theme_name in COLOR_SEQUENCE:
tar.add(os.path.join(self.ui_theme_dir, self.theme_name), os.path.join("ui_theme", self.theme_name))
if self.app_theme_dir != None:
tar.add(os.path.join(self.app_theme_dir, self.theme_name), os.path.join("app_theme", self.theme_name))
# Remove temp config file.
remove_file(config_filepath)
def load_themes(self, ui_theme, app_theme=None):
'''
Load theme from given directories.
@param ui_theme: dtk.ui.theme.ui_theme.
@param app_theme: Theme instance, built like the example below; set as None if you don't want to build your own theme.
>>> app_theme = Theme(
>>> os.path.join(get_parent_dir(__file__), "app_theme"),
>>> os.path.expanduser("~/.config/project-name/theme")
>>> )
'''
# Load theme.
ui_theme.load_theme()
if app_theme != None:
app_theme.load_theme()
# Init theme directories.
self.ui_theme_dir = ui_theme.user_theme_dir
if app_theme != None:
self.app_theme_dir = app_theme.user_theme_dir
else:
self.app_theme_dir = None
def load_skin_from_image(self, filepath):
'''
Load theme from given image.
@param filepath: The file path of image.
'''
# Init.
skin_dir = os.path.join(self.user_skin_dir, str(uuid.uuid4()))
skin_image_file = os.path.basename(filepath)
config_file = os.path.join(skin_dir, "config.ini")
dominant_color = get_dominant_color(filepath)
similar_color = find_similar_color(dominant_color)[0]
default_config = [
("theme", [("theme_name", similar_color)]),
("application", [("app_id", self.app_given_id),
("app_version", self.app_given_version)]),
("background", [("image", skin_image_file),
("x", "0"),
("y", "0"),
("scale_x", "1.0"),
("scale_y", "1.0"),
("dominant_color", dominant_color)]),
("action", [("deletable", "True"),
("editable", "True"),
("vertical_mirror", "False"),
("horizontal_mirror", "False")])]
# Create skin directory.
create_directory(skin_dir, True)
# Copy skin image file.
shutil.copy(filepath, skin_dir)
# Touch skin config file.
touch_file(config_file)
# Write default skin config information.
Config(config_file, default_config).write()
if self.reload_skin(os.path.basename(skin_dir)):
self.apply_skin()
return (True, skin_dir, skin_image_file)
else:
return (False, skin_dir, skin_image_file)
def load_skin_from_package(self, filepath):
'''
Load theme from given package.
@param filepath: The file path of package.
'''
# Init.
skin_dir = os.path.join(self.user_skin_dir, str(uuid.uuid4()))
# Create skin directory.
create_directory(skin_dir, True)
# Extract skin package.
tar = tarfile.open(filepath, "r:gz")
tar.extractall(skin_dir)
# Get skin image file.
config = Config(os.path.join(skin_dir, "config.ini"))
config.load()
# Move theme files to given directory if theme is not in default theme list.
skin_theme_name = config.get("theme", "theme_name")
if not skin_theme_name in COLOR_SEQUENCE:
# Check the version when the package has a special theme that is not included in the standard themes.
app_id = config.get("application", "app_id")
app_version = config.get("application", "app_version")
if app_id == self.app_given_id and app_version == self.app_given_version:
# Remove same theme from given directories.
remove_directory(os.path.join(self.ui_theme_dir, skin_theme_name))
if self.app_theme_dir != None:
remove_directory(os.path.join(self.app_theme_dir, skin_theme_name))
# Move new theme files to given directories.
shutil.move(os.path.join(skin_dir, "ui_theme", skin_theme_name), self.ui_theme_dir)
if self.app_theme_dir != None:
shutil.move(os.path.join(skin_dir, "app_theme", skin_theme_name), self.app_theme_dir)
# Remove temp theme directories under skin directory.
remove_directory(os.path.join(skin_dir, "ui_theme"))
remove_directory(os.path.join(skin_dir, "app_theme"))
else:
# Remove skin directory if version mismatch.
remove_directory(skin_dir)
return False
# Apply new skin.
skin_image_file = config.get("background", "image")
if self.reload_skin(os.path.basename(skin_dir)):
self.apply_skin()
return (True, skin_dir, skin_image_file)
else:
return (False, skin_dir, skin_image_file)
gobject.type_register(SkinConfig)
skin_config = SkinConfig()
```
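A hedged sketch of how an application might initialize the shared `skin_config` instance above; every directory path, the project id `my-app` and the version string below are placeholders rather than values from the original project, and `ui_theme` is assumed to come from `dtk.ui.theme` as in this file's own imports:
```python
# Illustrative initialization of skin_config (placeholder paths and ids).
import os
from dtk.ui.skin_config import skin_config   # assumed module path
from dtk.ui.theme import ui_theme

skin_config.init_skin(
    "blue",                                              # default skin name (placeholder)
    "/usr/share/my-app/skin",                            # system skin dir (placeholder)
    os.path.expanduser("~/.config/my-app/skin"),         # user skin dir (placeholder)
    os.path.expanduser("~/.config/my-app/skin_config.ini"),
    "my-app",                                            # app_given_id (placeholder)
    "1.0",                                               # app_given_version (placeholder)
    )
skin_config.load_themes(ui_theme)
skin_config.set_application_window_size(800, 600)
```
The call order mirrors what the class itself needs: `init_skin` loads the background pixbuf and dominant color that `render_background` later draws.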
#### File: dtk/ui/slider.py
```python
import gtk
import gobject
from timeline import Timeline, CURVE_SINE
from draw import draw_pixbuf
from utils import set_cursor
from theme import ui_theme
from window import Window
from utils import is_in_rect
class HSlider(gtk.Viewport):
'''
HSlider class.
@undocumented: slide_to_page
@undocumented: set_to_page
@undocumented: append_page
'''
__gsignals__ = {
"start_slide" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
"completed_slide" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
}
def __init__(self,
slide_time=200,
):
'''
Initialize HSlider class.
@param slide_time: The duration of the slide animation, default is 200ms.
'''
gtk.Viewport.__init__(self)
self.set_shadow_type(gtk.SHADOW_NONE)
self.fixed = gtk.Fixed()
self.add(self.fixed)
self.slide_time = slide_time
self.pre_widget = None
self.active_widget = None
self.connect("realize", self._update_size)
self.connect("size_allocate", self._update_size)
self.page_width = 0
self.page_height = 0
self.in_sliding = False
def _update_size(self, w=None, _w=None):
self.page_width = self.allocation.width
self.page_height = self.allocation.height
if self.active_widget:
self.active_widget.set_size_request(self.page_width, self.page_height)
if self.pre_widget:
self.pre_widget.set_size_request(self.page_width, self.page_height)
self.show_all()
def _to_right(self, percent):
self.offset = int(round(percent * self.page_width))
if self.pre_widget:
self.fixed.move(self.pre_widget, - self.offset, 0)
self.fixed.move(self.active_widget, self.page_width - self.offset, 0)
def _to_left(self, percent):
self.offset = int(round(percent * self.page_width))
if self.pre_widget:
self.fixed.move(self.pre_widget, self.offset, 0)
self.fixed.move(self.active_widget, self.offset - self.page_width, 0)
def _no_effect(self):
self.offset = self.page_width
if self.pre_widget:
self.fixed.remove(self.pre_widget)
self.fixed.move(self.active_widget, 0, 0)
def to_page(self, w, direction):
'''
Slide to given page.
@param w: gtk.Widget to slide.
@param direction: The direction of the slide animation; can be one of the values below:
- \"right\" slide from right to left
- \"left\" slide from left to right
- None no animation effect, slide directly
'''
if self.in_sliding:
return
if w != self.active_widget:
w.set_size_request(self.page_width, self.page_height)
if w.parent != self.fixed:
self.fixed.put(w, self.page_width, 0)
self.active_widget = w
self.timeline = Timeline(self.slide_time, CURVE_SINE)
if direction == "right":
self.timeline.connect('update', lambda source, status: self._to_right(status))
elif direction == "left":
self.timeline.connect('update', lambda source, status: self._to_left(status))
else:
self._no_effect()
self.timeline.connect("start", lambda source: self._start())
self.timeline.connect("completed", lambda source: self._completed())
self.timeline.run()
self.in_sliding = True
self.show_all()
def _start(self):
self.emit("start_slide")
def _completed(self):
if self.pre_widget and self.pre_widget.parent == self.fixed:
self.fixed.remove(self.pre_widget)
self.pre_widget = self.active_widget
#print "Pre: " + str(self.pre_widget) + " act: " + str(self.active_widget) + "children: " + str(self.get_children())
self.show_all()
self.in_sliding = False
self.emit("completed_slide")
def to_page_now(self, w, d=None):
'''
Slide to given page immediately.
@param w: gtk.Widget to slide.
'''
self.to_page(w, d)
def slide_to_page(self, w, d):
self.to_page(w, d)
def set_to_page(self, w):
self.to_page_now(w)
def append_page(self, w):
pass
gobject.type_register(HSlider)
class WizardBox(gtk.EventBox):
'''
WizardBox class.
@undocumented: init_size
@undocumented: on_expose_event
@undocumented: handle_animation
@undocumented: on_motion_notify
@undocumented: on_enter_notify
@undocumented: on_leave_notify
@undocumented: on_button_press
@undocumented: auto_animation
@undocumented: start_animation
@undocumented: update_animation
@undocumented: completed_animation
'''
__gsignals__ = {
'close': (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
}
def __init__(self,
slider_images=None,
pointer_images=None,
button_images=None,
show_button=True,
slide_delay=10000,
):
'''
Initialize WizardBox class.
@param slider_images: Slider images, default is None.
@param pointer_images: Pointer images, default is None.
@param button_images: Button images, default is None.
@param show_button: Set as True to show button.
@param slide_delay: The time of delay between slider image, default is 10000ms.
'''
gtk.EventBox.__init__(self)
self.set_visible_window(False)
self.add_events(gtk.gdk.BUTTON_PRESS_MASK |
gtk.gdk.BUTTON_RELEASE_MASK |
gtk.gdk.POINTER_MOTION_MASK |
gtk.gdk.ENTER_NOTIFY_MASK |
gtk.gdk.LEAVE_NOTIFY_MASK
)
self.connect("expose-event", self.on_expose_event)
self.connect("motion-notify-event", self.on_motion_notify)
self.connect("button-press-event", self.on_button_press)
# Init images.
self.slider_pixbufs = map(gtk.gdk.pixbuf_new_from_file, slider_images)
self.slider_numuber = len(slider_images)
self.dot_normal_pixbuf, self.dot_active_pixbuf = map(gtk.gdk.pixbuf_new_from_file, pointer_images)
self.button_normal_pixbuf, self.button_press_pixbuf = map(gtk.gdk.pixbuf_new_from_file, button_images)
self.close_dpixbuf = ui_theme.get_pixbuf("button/window_close_normal.png")
self.show_button = show_button
# Init sizes.
self.init_size()
self.pointer_coords = {}
# Move animation.
self.active_index = 0
self.target_index = None
self.active_alpha = 1.0
self.target_alpha = 0.0
self.button_hover_flag = False
self.active_x = 0
self.target_x = None
self.slider_y = 0
self.auto_animation_id = None
self.auto_animation_timeout = slide_delay # millisecond.
self.slider_timeout = 1000 # millisecond.
self.in_animation = False
self.motion_index = None
self.auto_animation()
def init_size(self):
slider_pixbuf = self.slider_pixbufs[0]
self.slider_width = slider_pixbuf.get_width()
self.slider_height = slider_pixbuf.get_height()
self.set_size_request(self.slider_width, self.slider_height)
self.dot_width = self.dot_normal_pixbuf.get_width()
self.dot_height = self.dot_normal_pixbuf.get_height()
dot_spacing = 10
self.dot_width_offset = self.dot_width + dot_spacing
dot_area_width = self.dot_width * self.slider_numuber + dot_spacing * (self.slider_numuber - 1)
dot_offset_y = 40
self.dot_start_x = (self.slider_width - dot_area_width) / 2
self.dot_y = self.slider_height - dot_offset_y
close_spacing = 0
close_x = self.slider_width - self.close_dpixbuf.get_pixbuf().get_width() - close_spacing
close_y = close_spacing
self.close_rect = gtk.gdk.Rectangle(close_x, close_y,
self.close_dpixbuf.get_pixbuf().get_width(),
self.close_dpixbuf.get_pixbuf().get_height())
button_bottom_size = 55
button_width = self.button_normal_pixbuf.get_width()
button_height = self.button_normal_pixbuf.get_height()
button_x = (self.slider_width - button_width) / 2
button_y = self.slider_height - button_height - button_bottom_size
self.button_rect = gtk.gdk.Rectangle(button_x, button_y, button_width, button_height)
def on_expose_event(self, widget, event):
cr = widget.window.cairo_create()
rect = widget.allocation
cr.save()
draw_pixbuf(cr, self.slider_pixbufs[self.active_index], rect.x + self.active_x,
rect.y + self.slider_y, self.active_alpha)
if self.target_index != None and self.target_x != None:
draw_pixbuf(cr, self.slider_pixbufs[self.target_index], rect.x + self.target_x,
rect.y + self.slider_y, self.target_alpha)
cr.restore()
# Draw select pointer.
dot_start_x = rect.x + self.dot_start_x
for index in range(self.slider_numuber):
if self.target_index == None:
if self.active_index == index:
dot_pixbuf = self.dot_active_pixbuf
else:
dot_pixbuf = self.dot_normal_pixbuf
else:
if self.target_index == index:
dot_pixbuf = self.dot_active_pixbuf
else:
dot_pixbuf = self.dot_normal_pixbuf
pointer_rect = gtk.gdk.Rectangle(
dot_start_x, rect.y + self.dot_y,
self.dot_width, self.dot_height)
self.pointer_coords[index] = pointer_rect
draw_pixbuf(cr, dot_pixbuf, dot_start_x, rect.y + self.dot_y)
dot_start_x += self.dot_width_offset
# Draw close pixbuf.
draw_pixbuf(cr, self.close_dpixbuf.get_pixbuf(),
rect.x + self.close_rect.x, rect.y + self.close_rect.y)
if self.show_button and self.target_index == self.slider_numuber - 1:
if self.button_hover_flag:
pixbuf = self.button_press_pixbuf
else:
pixbuf = self.button_normal_pixbuf
draw_pixbuf(cr, pixbuf, rect.x + self.button_rect.x, rect.y + self.button_rect.y)
return True
def handle_animation(self, widget, event):
self.motion_index = None
for index, rect in self.pointer_coords.items():
if rect.x <= event.x <= rect.x + rect.width and rect.y <= event.y <= rect.y + rect.height:
set_cursor(widget, gtk.gdk.HAND2)
self.motion_index = index
break
else:
self.motion_index = None
set_cursor(widget, None)
def on_motion_notify(self, widget, event):
self.handle_animation(widget, event)
if is_in_rect((event.x, event.y), self.button_rect):
self.button_hover_flag = True
else:
self.button_hover_flag = False
self.queue_draw()
def on_enter_notify(self, widget, event):
if self.auto_animation_id is not None:
gobject.source_remove(self.auto_animation_id)
self.auto_animation_id = None
def on_leave_notify(self, widget, event):
self.auto_animation()
set_cursor(widget, None)
def on_button_press(self, widget, event):
if self.motion_index != None:
self.start_animation(self.slider_timeout, self.motion_index)
if is_in_rect((event.x, event.y), self.close_rect):
self.emit("close")
if is_in_rect((event.x, event.y), self.button_rect):
self.emit("close")
def auto_animation(self):
self.auto_animation_id = gobject.timeout_add(self.auto_animation_timeout,
lambda : self.start_animation(self.slider_timeout))
def start_animation(self, animation_time, target_index=None, direction="left"):
if target_index is None:
if self.active_index >= self.slider_numuber - 1:
return False
target_index = 0
else:
target_index = self.active_index + 1
else:
if target_index < self.active_index:
direction = "right"
if not self.in_animation:
self.in_animation = True
self.target_index = target_index
self.timeline = Timeline(animation_time, CURVE_SINE)
self.timeline.connect("update", lambda source, status: self.update_animation(source, status, direction))
self.timeline.connect("completed", lambda source: self.completed_animation(source, target_index))
self.timeline.run()
return True
def update_animation(self, source, status, direction):
self.active_alpha = 1.0 - status
self.target_alpha = status
if direction == "right":
self._to_right(status)
else:
self._to_left(status)
self.queue_draw()
def completed_animation(self, source, index):
self.active_index = index
self.active_alpha = 1.0
# self.target_index = None
self.target_alpha = 0.0
self.in_animation = False
self.active_x = 0
self.target_x = None
self.queue_draw()
def _to_right(self, status):
self.active_x = self.slider_width * status
self.target_x = 0
def _to_left(self, status):
self.active_x = 0 - (self.slider_width * status)
self.target_x = 0
class Wizard(Window):
'''
Wizard class.
@undocumented: destroy_wizard
'''
def __init__(self,
slider_files,
pointer_files,
button_files,
show_button=True,
finish_callback=None,
slide_delay=8000,
):
'''
Initialize Wizard class.
@param slider_files: The slider image files.
@param pointer_files: The pointer image files.
@param button_files: The button image files.
@param show_button: if True will at last page show start button.
@param finish_callback: The callback invoked when the slider finishes; it takes no arguments, default is None.
@param slide_delay: The delay between slider images, default is 8000ms.
'''
Window.__init__(self)
self.finish_callback = finish_callback
self.set_position(gtk.WIN_POS_CENTER)
self.set_resizable(False)
self.wizard_box = WizardBox(slider_files, pointer_files, button_files, show_button, slide_delay)
self.wizard_box.connect("close", lambda widget: self.destroy())
self.connect("destroy", self.destroy_wizard)
self.window_frame.add(self.wizard_box)
self.add_move_event(self.wizard_box)
def destroy_wizard(self, widget):
if self.finish_callback:
self.finish_callback()
if __name__ == "__main__":
s = HSlider()
w = gtk.Window()
w.set_size_request(300, 300)
h = gtk.HBox()
w1 = gtk.Button("Widget 1")
w2 = gtk.Button("Widget 2")
s.to_page_now(w1)
b = gtk.Button("to1")
b.connect("clicked", lambda w: s.to_page(w1, "right" ))
h.add(b)
b = gtk.Button("to2")
b.connect("clicked", lambda w: s.to_page(w2, "left"))
h.add(b)
v = gtk.VBox()
v.add(h)
v.add(s)
w.add(v)
w.show_all()
w.connect("destroy", gtk.main_quit)
gtk.main()
```
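The `__main__` block above only exercises HSlider; a comparable hedged sketch for the Wizard class might look like the following, where all image file names are placeholders that must point to real files for the pixbuf loading inside WizardBox to succeed:
```python
# Illustrative Wizard usage; all image paths below are placeholders.
import gtk
from dtk.ui.slider import Wizard   # assumed module path

def on_wizard_finished():
    print "wizard finished"
    gtk.main_quit()

wizard = Wizard(
    ["slide1.png", "slide2.png", "slide3.png"],    # slider_files (placeholders)
    ["dot_normal.png", "dot_active.png"],          # pointer_files: normal/active dot
    ["start_normal.png", "start_press.png"],       # button_files: normal/pressed button
    show_button=True,
    finish_callback=on_wizard_finished,
    )
wizard.show_all()
gtk.main()
```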
#### File: dtk/ui/slide_switcher.py
```python
from box import EventBox
from draw import draw_pixbuf
from timeline import Timeline, CURVE_SINE
from utils import remove_timeout_id, is_in_rect
from utils import set_cursor
from constant import ALIGN_START, ALIGN_MIDDLE
from theme import ui_theme
import gobject
import gtk
class SlideSwitcher(EventBox):
'''
SlideSwitcher class.
@undocumented: expose_slide_switcher
@undocumented: enter_notify_slide_switcher
@undocumented: leave_notify_slide_switcher
@undocumented: update_animation
@undocumented: completed_animation
@undocumented: motion_notify_slide_switcher
@undocumented: handle_animation
@undocumented: start_animation
@undocumented: to_left_animation
@undocumented: to_right_animation
@undocumented: start_auto_slide
@undocumented: stop_auto_slide
'''
__gsignals__ = {
"motion-notify-index" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (int,)),
"button-press-index" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (int,)),
"leave-notify-index" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (int,)),
}
def __init__(self,
images,
pointer_offset_x=-130,
pointer_offset_y=-20,
pointer_padding=20,
hover_animation_time=500,
auto_animation_time=2000,
auto_slide_timeout=5000,
horizontal_align=ALIGN_START,
vertical_align=ALIGN_START,
height_offset=0,
hover_switch=True,
auto_switch=True,
navigate_switch=False,
active_dpixbuf=ui_theme.get_pixbuf("slide_switcher/active.png"),
inactive_dpixbuf=ui_theme.get_pixbuf("slide_switcher/inactive.png"),
):
'''
Initialize SlideSwitcher class.
@param images: The image list of sliders.
@param pointer_offset_x: The offset x of pointer relative to right edge of slider image, default is -130 pixels.
@param pointer_offset_y: The offset y of pointer relative to bottom edge of slider image, default is -20 pixels.
@param pointer_padding: The padding between pointers, default is 20 pixels.
@param hover_animation_time: The animation time of hover operation, default is 500 milliseconds.
@param auto_animation_time: The animation time of automatic play, default is 2000 milliseconds.
@param auto_slide_timeout: The slide timeout of automatic play, default is 5000 milliseconds.
@param horizontal_align: The horizontal alignment, default is ALIGN_START.
@param vertical_align: The vertical alignment, default is ALIGN_START.
@param height_offset: The height offset, default is 0 pixels.
@param hover_switch: Set as True to switch sliders when the pointer hovers over a pointer dot.
@param auto_switch: Set as True to make the slider play automatically.
@param navigate_switch: Set as True to enable the left/right navigation areas for switching.
@param active_dpixbuf: The dynamic pixbuf of active status.
@param inactive_dpixbuf: The dynamic pixbuf of inactive status.
'''
EventBox.__init__(self)
self.add_events(gtk.gdk.ALL_EVENTS_MASK)
self.slide_images = images
self.image_number = len(self.slide_images)
self.active_index = 0
self.motion_index = None
self.target_index = None
self.active_alpha = 1.0
self.target_alpha = 0.0
self.in_animiation = False
self.hover_animation_time = hover_animation_time # animation time of a hover-triggered switch, in milliseconds
self.auto_animation_time = auto_animation_time # animation time of an automatic switch, in milliseconds
self.auto_slide_timeout = auto_slide_timeout # slide timeout, in milliseconds
self.auto_slide_timeout_id = None
self.horizontal_align = horizontal_align
self.vertical_align = vertical_align
self.hover_switch = hover_switch
self.auto_switch = auto_switch
self.navigate_switch = navigate_switch
self.in_right_nav = False
self.in_left_nav = False
self.active_dpixbuf = active_dpixbuf
self.inactive_dpixbuf = inactive_dpixbuf
size_pixbuf = self.slide_images[0]
self.pointer_offset_x = pointer_offset_x
self.pointer_offset_y = pointer_offset_y
self.pointer_radious = self.active_dpixbuf.get_pixbuf().get_width() / 2
self.pointer_padding = pointer_padding
self.set_size_request(-1, size_pixbuf.get_height() + height_offset)
self.connect("expose-event", self.expose_slide_switcher)
self.connect("motion-notify-event", self.motion_notify_slide_switcher)
self.connect("leave-notify-event", self.leave_notify_slide_switcher)
self.connect("enter-notify-event", self.enter_notify_slide_switcher)
self.connect("button-press-event", lambda w, e: self.handle_animation(w, e, True))
self.start_auto_slide()
def expose_slide_switcher(self, widget, event):
# Init.
cr = widget.window.cairo_create()
rect = widget.allocation
# Draw background.
if self.active_alpha > 0.0:
active_pixbuf = self.slide_images[self.active_index]
if self.horizontal_align == ALIGN_START:
render_x = rect.x
elif self.horizontal_align == ALIGN_MIDDLE:
render_x = rect.x + (rect.width - active_pixbuf.get_width()) / 2
else:
render_x = rect.x + rect.width - active_pixbuf.get_width()
if self.vertical_align == ALIGN_START:
render_y = rect.y
elif self.vertical_align == ALIGN_MIDDLE:
render_y = rect.y + (rect.height - active_pixbuf.get_height()) / 2
else:
render_y = rect.y + rect.height - active_pixbuf.get_height()
draw_pixbuf(cr,
active_pixbuf,
render_x,
render_y,
self.active_alpha)
if self.target_index != None and self.target_alpha > 0.0:
target_pixbuf = self.slide_images[self.target_index]
if self.horizontal_align == ALIGN_START:
render_x = rect.x
elif self.horizontal_align == ALIGN_MIDDLE:
render_x = rect.x + (rect.width - target_pixbuf.get_width()) / 2
else:
render_x = rect.x + rect.width - target_pixbuf.get_width()
if self.vertical_align == ALIGN_START:
render_y = rect.y
elif self.vertical_align == ALIGN_MIDDLE:
render_y = rect.y + (rect.height - target_pixbuf.get_height()) / 2
else:
render_y = rect.y + rect.height - target_pixbuf.get_height()
draw_pixbuf(cr,
target_pixbuf,
render_x,
render_y,
self.target_alpha)
# Draw select pointer.
if self.image_number > 1:
for index in range(0, self.image_number):
if self.target_index == None:
if self.active_index == index:
pixbuf = self.active_dpixbuf.get_pixbuf()
else:
pixbuf = self.inactive_dpixbuf.get_pixbuf()
else:
if self.target_index == index:
pixbuf = self.active_dpixbuf.get_pixbuf()
else:
pixbuf = self.inactive_dpixbuf.get_pixbuf()
draw_pixbuf(cr,
pixbuf,
rect.x + rect.width + self.pointer_offset_x + index * self.pointer_padding,
rect.y + rect.height + self.pointer_offset_y
)
return True
def enter_notify_slide_switcher(self, widget, event):
self.stop_auto_slide()
def leave_notify_slide_switcher(self, widget, event):
rect = widget.allocation
if is_in_rect((event.x, event.y), (0, 0, rect.width, rect.height)):
self.handle_animation(widget, event)
else:
self.start_auto_slide()
set_cursor(widget, None)
self.emit("leave-notify-index", self.active_index)
def update_animation(self, source, status):
self.active_alpha = 1.0 - status
self.target_alpha = status
self.queue_draw()
def completed_animation(self, source, index):
self.active_index = index
self.active_alpha = 1.0
self.target_index = None
self.target_alpha = 0.0
self.in_animiation = False
self.queue_draw()
# Start a new animation if the cursor is on a different index when the current animation completes.
if self.motion_index:
if self.active_index != self.motion_index:
self.start_animation(self.hover_animation_time, self.motion_index)
def motion_notify_slide_switcher(self, widget, event):
self.handle_animation(widget, event)
def handle_animation(self, widget, event, button_press=False):
# Init.
rect = widget.allocation
start_x = rect.width + self.pointer_offset_x - self.pointer_radious
start_y = rect.height + self.pointer_offset_y
self.left_retangle = (0, 0, rect.width/3, rect.height - self.pointer_offset_y)
self.right_retangle = (0 + 2*rect.width/3, 0, rect.width/3, rect.height - self.pointer_offset_y)
if self.image_number > 1 and (start_y - 4 * self.pointer_radious < event.y < start_y + self.pointer_radious * 6
and start_x - 2 * self.pointer_radious < event.x < start_x + 4 * self.pointer_padding + 4 * self.pointer_radious):
set_cursor(widget, gtk.gdk.HAND2)
if self.hover_switch or button_press:
self.motion_index = None
for index in range(0, self.image_number):
if start_x + index * self.pointer_padding < event.x < start_x + (index + 1) * self.pointer_padding:
self.motion_index = index
if self.active_index != index:
self.start_animation(self.hover_animation_time, index)
break
elif self.image_number > 1 and is_in_rect((event.x, event.y), self.left_retangle) and self.navigate_switch:
if not self.in_left_nav:
self.in_left_nav = True
self.queue_draw()
set_cursor(widget, gtk.gdk.SB_LEFT_ARROW)
if button_press:
self.to_left_animation()
elif self.image_number > 1 and is_in_rect((event.x, event.y), self.right_retangle) and self.navigate_switch:
if not self.in_right_nav:
self.in_right_nav = True
self.queue_draw()
set_cursor(widget, gtk.gdk.SB_RIGHT_ARROW)
if button_press:
self.to_right_animation()
else:
if self.in_left_nav or self.in_right_nav:
self.in_left_nav = False
self.in_right_nav = False
self.queue_draw()
set_cursor(widget, None)
if button_press:
self.emit("button-press-index", self.active_index)
else:
self.emit("motion-notify-index", self.active_index)
def start_animation(self, animation_time, index=None):
# Advance to the next index automatically if the optional index argument is None.
if index == None:
if self.active_index >= self.image_number - 1:
index = 0
else:
index = self.active_index + 1
if not self.in_animiation:
self.in_animiation = True
self.target_index = index
timeline = Timeline(animation_time, CURVE_SINE)
timeline.connect('update', self.update_animation)
timeline.connect("completed", lambda source: self.completed_animation(source, index))
timeline.run()
return True
def to_left_animation(self, animation_time=500):
if self.active_index == 0:
index = self.image_number - 1
else:
index = self.active_index - 1
self.start_animation(animation_time, index)
def to_right_animation(self, animation_time=500):
self.start_animation(animation_time)
def start_auto_slide(self):
if self.auto_switch:
self.auto_slide_timeout_id = gtk.timeout_add(self.auto_slide_timeout, lambda : self.start_animation(self.auto_animation_time))
def stop_auto_slide(self):
if self.auto_slide_timeout_id:
remove_timeout_id(self.auto_slide_timeout_id)
gobject.type_register(SlideSwitcher)
```
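
The handlers above expose a small public surface: the custom signals emitted here ("leave-notify-index", "motion-notify-index", "button-press-index") plus the slide controls (`start_animation`, `to_left_animation`, `to_right_animation`, `start_auto_slide`, `stop_auto_slide`). A minimal sketch of wiring them up, assuming `switcher` is an already constructed SlideSwitcher instance (its constructor is not part of this excerpt):

```python
# Illustrative sketch only: reacting to the SlideSwitcher signals shown above.
def on_slide_pressed(widget, index):
    print "slide %d clicked" % index

switcher.connect("button-press-index", on_slide_pressed)
switcher.connect("leave-notify-index", lambda w, i: None)

# Begin cycling automatically; hovering the widget pauses it, because
# enter_notify_slide_switcher calls stop_auto_slide().
switcher.start_auto_slide()
```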
#### File: dtk/ui/tab_window.py
```python
from button import Button
from constant import DEFAULT_FONT_SIZE
from dialog import DialogBox, DIALOG_MASK_TAB_PAGE
from draw import draw_text, draw_round_rectangle
from skin_config import skin_config
from theme import ui_theme
from locales import _
import gobject
import gtk
from utils import (container_remove_all, get_content_size,
color_hex_to_cairo, alpha_color_hex_to_cairo,
cairo_disable_antialias, is_in_rect, cairo_state,
get_window_shadow_size)
class TabBox(gtk.VBox):
'''
Tab box.
@undocumented: press_tab_title_box
@undocumented: get_close_button_at_event
@undocumented: motion_notify_tab_title_box
@undocumented: update_tab_title_widths
@undocumented: draw_title_background
@undocumented: expose_tab_title_box
@undocumented: expose_tab_content_align
@undocumented: expose_tab_content_box
'''
__gsignals__ = {
"switch-tab" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (int,)),
}
def __init__(self,
can_close_tab=False,
dockfill=False,
current_tab_index=-1,
):
'''
Initialize TabBox class.
@param can_close_tab: Whether to display a close button on each tab, default is False.
@param dockfill: Whether to stretch tab widths so they fill the TabBox's width.
@param current_tab_index: The index of current tab, default is -1.
'''
# Init.
gtk.VBox.__init__(self)
self.tab_height = 29
self.tab_padding_x = 19
self.tab_padding_y = 9
self.tab_select_bg_color = ui_theme.get_color("tab_select_bg")
self.tab_select_frame_color = ui_theme.get_color("tab_select_frame")
self.tab_unselect_bg_color = ui_theme.get_color("tab_unselect_bg")
self.tab_unselect_frame_color = ui_theme.get_color("tab_unselect_bg")
self.can_close_tab = can_close_tab
self.close_button_size = 6
self.close_button_frame_size = 3
self.close_button_padding_x = 4
self.close_button_padding_y = 6
self.close_button_select_background_color = "#EE0000"
self.close_button_select_foreground_color = "#FFFFFF"
self.close_button_color = "#666666"
self.hover_close_button_index = None
self.dockfill = dockfill
self.tab_box_width = -1
self.current_tab_index = current_tab_index
self.tab_title_box = gtk.DrawingArea()
self.tab_title_box.add_events(gtk.gdk.ALL_EVENTS_MASK)
self.tab_title_box.set_size_request(-1, self.tab_height)
self.tab_title_align = gtk.Alignment()
self.tab_title_align.set(0.0, 0.0, 1.0, 1.0)
self.tab_title_align.set_padding(0, 0, 0, 0)
self.tab_title_align.add(self.tab_title_box)
self.tab_content_align = gtk.Alignment()
self.tab_content_align.set(0.0, 0.0, 1.0, 1.0)
self.tab_content_align.set_padding(0, 0, 0, 0)
self.tab_content_box = gtk.VBox()
self.tab_content_align.add(self.tab_content_box)
self.tab_items = []
self.tab_title_widths = []
self.tab_index = -1
self.default_widget = None
self.pack_start(self.tab_title_align, False, False)
self.pack_start(self.tab_content_align, True, True)
self.tab_title_box.connect("button-press-event", self.press_tab_title_box)
self.tab_title_box.connect("expose-event", self.expose_tab_title_box)
self.tab_title_box.connect("motion-notify-event", self.motion_notify_tab_title_box)
self.tab_content_align.connect("expose-event", self.expose_tab_content_align)
self.tab_content_box.connect("expose-event", self.expose_tab_content_box)
def show_default_page(self):
'''
Show default page.
'''
if self.default_widget != None and len(self.tab_items) == 0:
container_remove_all(self.tab_content_box)
self.tab_content_box.add(self.default_widget)
self.tab_title_box.queue_draw()
self.tab_content_box.queue_draw()
self.show_all()
def set_default_widget(self, widget):
'''
Set default page of tab window.
@param widget: Widget for default page.
'''
self.default_widget = widget
self.show_default_page()
def add_items(self, items, default_index=0):
'''
Add items.
@param items: A list of tab items; each item has the format (tab_name, tab_widget).
@param default_index: The initially selected index, default is 0.
'''
self.tab_items += items
for item in items:
self.tab_title_widths.append(get_content_size(item[0], DEFAULT_FONT_SIZE)[0] + self.tab_padding_x * 2)
if self.current_tab_index < 0:
self.switch_content(default_index)
else:
self.switch_content(self.current_tab_index)
def delete_items(self, items):
'''
Delete given items.
@param items: The list of items to delete.
'''
item_indexs = map(lambda item: self.tab_items.index(item), items)
for item in items:
self.tab_items.remove(item)
for title_width in map(lambda item_index: self.tab_title_widths[item_index], item_indexs):
self.tab_title_widths.remove(title_width)
if len(self.tab_items) == 0:
self.show_default_page()
print self.tab_items
def set_current_tab(self, index):
'''
Set current tab with given index.
@param index: The index of current tab.
'''
self.switch_content(index)
def switch_content(self, index):
'''
Switch content with given index.
@param index: Tab index.
'''
if self.tab_index != index and len(self.tab_items):
self.tab_index = index
widget = self.tab_items[index][1]
container_remove_all(self.tab_content_box)
self.tab_content_box.add(widget)
self.tab_title_box.queue_draw()
self.tab_content_box.queue_draw()
self.show_all()
def press_tab_title_box(self, widget, event):
'''
Internal callback for `button-press-event` signal.
'''
close_tab_index = self.get_close_button_at_event(event)
if close_tab_index != None:
self.delete_items([self.tab_items[close_tab_index]])
else:
for (index, item) in enumerate(self.tab_items):
if is_in_rect((event.x, event.y),
(sum(self.tab_title_widths[0:index]),
0,
self.tab_title_widths[index],
self.tab_height)):
self.switch_content(index)
self.emit("switch-tab", index)
break
def get_close_button_at_event(self, event):
if self.can_close_tab:
hover_index = None
for (index, item) in enumerate(self.tab_items):
button_x = sum(self.tab_title_widths[0:index + 1]) - self.close_button_padding_x - self.close_button_size
button_y = self.close_button_padding_y
if is_in_rect((event.x, event.y), (button_x, button_y, self.close_button_size, self.close_button_size)):
hover_index = index
break
return hover_index
# Return None if option can_close_tab is False.
else:
return None
def motion_notify_tab_title_box(self, widget, event):
hover_index = self.get_close_button_at_event(event)
if hover_index != self.hover_close_button_index:
self.hover_close_button_index = hover_index
widget.queue_draw()
def update_tab_title_widths(self, width):
tab_title_len = len(self.tab_title_widths)
for i in range(tab_title_len):
self.tab_title_widths[i] = width / tab_title_len
def draw_title_background(self, cr, widget):
(offset_x, offset_y) = widget.translate_coordinates(self.get_toplevel(), 0, 0)
with cairo_state(cr):
cr.translate(-offset_x, -offset_y)
(shadow_x, shadow_y) = get_window_shadow_size(self.get_toplevel())
skin_config.render_background(cr, widget, shadow_x, shadow_y)
def expose_tab_title_box(self, widget, event):
'''
Internal callback for `expose-event` signal.
'''
cr = widget.window.cairo_create()
rect = widget.allocation
if self.dockfill:
self.update_tab_title_widths(rect.width)
# Draw background.
self.draw_title_background(cr, widget)
if len(self.tab_items) > 0:
# Draw the unselected tab background.
tab_title_width = sum(self.tab_title_widths)
with cairo_state(cr):
with cairo_disable_antialias(cr):
cr.set_source_rgba(*alpha_color_hex_to_cairo((self.tab_unselect_bg_color.get_color(), 0.7)))
cr.rectangle(1, 1, tab_title_width, self.tab_height)
cr.fill()
cr.set_line_width(1)
cr.set_source_rgba(*alpha_color_hex_to_cairo((self.tab_unselect_frame_color.get_color(), 1.0)))
cr.rectangle(1, 1, tab_title_width, self.tab_height)
cr.stroke()
for (index, width) in enumerate(self.tab_title_widths[:-1]):
cr.set_source_rgba(*alpha_color_hex_to_cairo((self.tab_unselect_frame_color.get_color(), 1.0)))
cr.rectangle(1 + sum(self.tab_title_widths[0:index]) + width,
1,
1,
self.tab_height)
cr.fill()
cr.set_source_rgb(*color_hex_to_cairo(self.tab_select_frame_color.get_color()))
cr.rectangle(0,
rect.height - 1,
sum(self.tab_title_widths[0:self.tab_index]),
1)
cr.fill()
cr.set_source_rgb(*color_hex_to_cairo(self.tab_select_frame_color.get_color()))
cr.rectangle(1 + sum(self.tab_title_widths[0:self.tab_index]),
rect.height - 1,
rect.width - sum(self.tab_title_widths[0:self.tab_index]),
1)
cr.fill()
for (index, item) in enumerate(self.tab_items):
# Draw title background.
title = item[0]
# Draw title tab.
with cairo_disable_antialias(cr):
if index == self.tab_index:
# Draw the selected tab background.
cr.set_source_rgba(*alpha_color_hex_to_cairo((self.tab_select_bg_color.get_color(), 0.93)))
if index == 0:
cr.rectangle(sum(self.tab_title_widths[0:index]),
1,
self.tab_title_widths[index] + 1,
self.tab_height)
else:
cr.rectangle(1 + sum(self.tab_title_widths[0:index]),
1,
self.tab_title_widths[index],
self.tab_height)
cr.fill()
if index == 0:
cr.rectangle(0,
0,
rect.width,
self.tab_height)
cr.clip()
cr.set_line_width(1)
cr.set_source_rgb(*color_hex_to_cairo(self.tab_select_frame_color.get_color()))
if index == 0:
cr.rectangle(sum(self.tab_title_widths[0:index]),
1,
self.tab_title_widths[index] + 2,
self.tab_height)
else:
cr.rectangle(1 + sum(self.tab_title_widths[0:index]),
1,
self.tab_title_widths[index] + 1,
self.tab_height)
cr.stroke()
draw_text(cr, title,
sum(self.tab_title_widths[0:index]) + self.tab_padding_x,
self.tab_padding_y,
self.tab_title_widths[index] - self.tab_padding_x * 2,
self.tab_height - self.tab_padding_y * 2,
)
# Draw close button.
if self.can_close_tab:
button_x = sum(self.tab_title_widths[0:index + 1]) - self.close_button_padding_x - self.close_button_size
button_y = self.close_button_padding_y
if self.hover_close_button_index == index:
cr.set_source_rgb(*color_hex_to_cairo(self.close_button_select_background_color))
draw_round_rectangle(
cr,
button_x - self.close_button_frame_size,
button_y - self.close_button_frame_size,
self.close_button_size + self.close_button_frame_size * 2,
self.close_button_size + self.close_button_frame_size * 2,
2
)
cr.fill()
cr.set_line_width(1.5)
if self.hover_close_button_index == index:
cr.set_source_rgb(*color_hex_to_cairo(self.close_button_select_foreground_color))
else:
cr.set_source_rgb(*color_hex_to_cairo(self.close_button_color))
cr.move_to(button_x, button_y)
cr.line_to(button_x + self.close_button_size, button_y + self.close_button_size)
cr.stroke()
cr.move_to(button_x + self.close_button_size, button_y)
cr.line_to(button_x, button_y + self.close_button_size)
cr.stroke()
else:
cr.set_source_rgba(*alpha_color_hex_to_cairo((self.tab_select_bg_color.get_color(), 0.93)))
cr.rectangle(0, 0, rect.width, rect.height)
cr.fill()
def expose_tab_content_align(self, widget, event):
'''
Internal callback for `expose-event` signal.
'''
cr = widget.window.cairo_create()
rect = widget.allocation
with cairo_disable_antialias(cr):
cr.rectangle(rect.x, rect.y, sum(self.tab_title_widths[0:self.tab_index]), rect.height)
cr.rectangle(rect.x + sum(self.tab_title_widths[0:self.tab_index + 1]),
rect.y,
rect.width - sum(self.tab_title_widths[0:self.tab_index + 1]),
rect.height)
cr.clip()
cr.set_source_rgb(*color_hex_to_cairo(self.tab_select_frame_color.get_color()))
cr.rectangle(rect.x, rect.y, rect.width, rect.height)
cr.stroke()
def expose_tab_content_box(self, widget, event):
'''
Internal callback for `expose-event` signal.
'''
# Init.
cr = widget.window.cairo_create()
rect = widget.allocation
self.tab_box_width = rect.width
# Draw background.
toplevel = widget.get_toplevel()
coordinate = widget.translate_coordinates(toplevel, 0, 0)
(offset_x, offset_y) = coordinate
with cairo_state(cr):
cr.rectangle(rect.x, rect.y, rect.width, rect.height)
cr.clip()
(shadow_x, shadow_y) = get_window_shadow_size(self.get_toplevel())
skin_config.render_background(cr, self, shadow_x, shadow_y)
# Draw mask.
cr.set_source_rgba(*alpha_color_hex_to_cairo((self.tab_select_bg_color.get_color(), 0.93)))
cr.rectangle(rect.x, rect.y, rect.width, rect.height)
cr.fill()
gobject.type_register(TabBox)
class TabWindow(DialogBox):
'''
TabWindow class.
@undocumented: click_confirm_button
@undocumented: click_cancel_button
@undocumented: switched_tab
'''
def __init__(self, title, items,
confirm_callback=None,
cancel_callback=None,
window_width=458,
window_height=472,
dockfill=False,
current_tab_index=-1,
):
'''
Initialize TabWindow class.
@param title: Tab window title.
@param items: A list of tab items; each item has the format (tab_name, tab_widget).
@param confirm_callback: Callback when user click ok button.
@param cancel_callback: Callback when user click cancel button.
@param window_width: Default window width.
@param window_height: Default window height.
@param dockfill: Whether to stretch tab widths so they fill the tab box's width.
@param current_tab_index: The index of current tab, default is -1.
'''
DialogBox.__init__(self,
title,
window_width,
window_height,
mask_type=DIALOG_MASK_TAB_PAGE)
self.confirm_callback = confirm_callback
self.cancel_callback = cancel_callback
self.window_box = gtk.VBox()
self.tab_window_width = window_width
self.tab_window_height = window_height
self.tab_box = TabBox(can_close_tab=True,
dockfill=dockfill,
current_tab_index=current_tab_index)
self.tab_box.add_items(items)
self.tab_box.connect("switch-tab", self.switched_tab)
self.tab_align = gtk.Alignment()
self.tab_align.set(0.5, 0.5, 1.0, 1.0)
self.tab_align.set_padding(8, 0, 0, 0)
self.tab_align.add(self.tab_box)
self.confirm_button = Button(_("OK"))
self.cancel_button = Button(_("Cancel"))
self.window_box.pack_start(self.tab_align, True, True)
self.confirm_button.connect("clicked", lambda w: self.click_confirm_button())
self.cancel_button.connect("clicked", lambda w: self.click_cancel_button())
self.connect("destroy", lambda w: self.destroy())
self.body_box.pack_start(self.window_box, True, True)
self.right_button_box.set_buttons([self.confirm_button, self.cancel_button])
def set_current_tab(self, index):
'''
Set current tab with given index.
@param index: The index of current tab.
'''
self.tab_box.switch_content(index)
def switched_tab(self, widget, index):
pass
def click_confirm_button(self):
'''
Internal callback invoked when the user clicks the confirm button.
'''
if self.confirm_callback != None:
self.confirm_callback()
self.destroy()
def click_cancel_button(self):
'''
Internal callback invoked when the user clicks the cancel button.
'''
if self.cancel_callback != None:
self.cancel_callback()
self.destroy()
gobject.type_register(TabWindow)
```
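
For reference, TabWindow can be exercised with a few lines; this is an illustrative sketch that assumes the usual deepin-ui runtime (theme and skin initialized as the toolkit's demos do) and infers the import path from the file layout above:

```python
# Illustrative sketch: open a TabWindow with two pages.
import gtk
from dtk.ui.tab_window import TabWindow

def on_confirm():
    print "confirm clicked"

items = [("Page one", gtk.Label("first page")),
         ("Page two", gtk.Label("second page"))]

window = TabWindow("Preferences", items,
                   confirm_callback=on_confirm,
                   current_tab_index=0)
window.connect("destroy", lambda w: gtk.main_quit())
window.show_window()   # provided by WindowBase (see window_base.py later in this collection)
gtk.main()
```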
#### File: dtk/ui/timeline.py
```python
import gobject
import math
CURVE_LINEAR = lambda x: x
CURVE_SINE = lambda x: math.sin(math.pi / 2 * x)
FRAMERATE = 30.0
class Timeline(gobject.GObject):
'''
Timeline class.
'''
__gtype_name__ = 'Timeline'
__gsignals__ = {
'start': (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
'update': (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_FLOAT,)),
'stop': (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
'completed': (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
}
def __init__(self,
duration,
curve,
):
'''
Initialize Timeline class.
@param duration: Animation duration.
@param curve: Animation curve.
'''
gobject.GObject.__init__(self)
self.duration = duration
self.curve = curve
self._states = []
self._stopped = False
self._started = False
def run(self):
'''
Run.
'''
n_frames = (self.duration / 1000.0) * FRAMERATE
while len(self._states) <= n_frames:
self._states.append(self.curve(len(self._states) * (1.0 / n_frames)))
self._states.reverse()
self._started = True
gobject.timeout_add(int(self.duration / n_frames), self.update)
def stop(self):
'''
Stop.
'''
self._stopped = True
self._started = False
def update(self):
'''
Update.
'''
if self._started:
self.emit("start")
self._started = False
if self._stopped:
self.emit('stop')
return False
else:
self.emit('update', self._states.pop())
if len(self._states) == 0:
self.emit('completed')
return False
return True
gobject.type_register(Timeline)
```
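
The whole animation layer in this toolkit is built on this small class: `run` precomputes `duration / 1000 * FRAMERATE` eased progress values and pops one per `gobject` timeout tick. A quick sketch of using it directly, assuming a running GTK main loop and that the module is importable as dtk.ui.timeline (inferred from the file layout above):

```python
# Illustrative sketch: animate a value from 0 to 100 over 500 ms.
import gtk
from dtk.ui.timeline import Timeline, CURVE_SINE

def on_update(source, status):
    # status is the eased progress in [0.0, 1.0]
    print "value = %.1f" % (status * 100)

def on_completed(source):
    print "done"
    gtk.main_quit()

timeline = Timeline(500, CURVE_SINE)   # 500 ms duration, sine easing
timeline.connect("update", on_update)
timeline.connect("completed", on_completed)
timeline.run()                         # schedules gobject.timeout_add internally
gtk.main()
```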
#### File: dtk/ui/tooltip_test.py
```python
import pseudo_skin
import gtk
from color_selection import ColorButton
from gtk import gdk
import tooltip as TT
__all__ = []
def customTooltip_cb():
box = gtk.VBox()
#box.set_size_request(800, 400)
b = gtk.Button("abcdsdf")
l = gtk.Label("huhuhuhuhuhulabellooooooooooooooooooooooooooooooooooooooooooooA")
#b.connect('destroy', show_d)
#l.connect('destroy', show_d)
box.add(b)
box.add(l)
return box
def show_d(w, e):
print "destroying..", type(w), id(w)
def gen_control(widget):
box = gtk.VBox()
t = gtk.CheckButton("NeedShadow")
t.set_active(True)
t.connect('toggled', lambda w: TT.has_shadow(widget, w.get_active()))
box.pack_start(t, False, False)
TT.text(t, "toggle the shadow")
winfo = TT.WidgetInfo.get_info(widget)
t1 = gtk.Entry()
t1.set_text(winfo.text or "")
t1.connect('activate', lambda w: TT.text(widget, w.get_text()))
box.pack_start(t1, False, False)
t2 = gtk.SpinButton()
t2.set_range(0, 10)
t2.set_value((winfo.show_delay / 1000))
t2.connect('value-changed', lambda w: TT.show_delay(widget, w.get_value_as_int() * 1000 + 100))
box.pack_start(t2, False, False)
t3 = ColorButton()
t3.set_color(str(winfo.background))
t3.connect('color-select', lambda w, e: TT.background(widget, gtk.gdk.Color(w.get_color())))
box.pack_start(t3)
t4 = gtk.SpinButton()
t4.set_range(0, 100)
t4.set_value(winfo.padding_r)
t4.connect('value-changed', lambda w: TT.padding(widget, -1, -1, -1, w.get_value()))
box.pack_start(t4, False, False)
t5 = gtk.CheckButton("disable")
t5.set_active(False)
t5.connect('toggled', lambda w: TT.disable(widget, w.get_active()))
box.pack_start(t5, False, False)
#----------------------------------------------------------------------#
TT.text(t1, "The text value if the tooltip doesn't have a custom property")\
(t2, "The show delay value")\
(t3, "The background color")\
(t4, "The padding right value")\
(t5, "tmp disable tooltip")\
.show_delay([t1,t2,t3,t4,t5], 200)\
.background([t1,t2], gdk.Color("red"))\
.set_value([t1,t2,t3,t4,t5], {'text_kargs': {"text_size":15}})
#_____________________________________________________________________#
return box
w = gtk.Window()
w.set_size_request(500, 500)
box = gtk.VBox()
b1 = gtk.Button("button")
b2 = gtk.Button("button1")
ls = gtk.HBox()
l1 = gtk.Label("label1")
l2 = gtk.Label("label2")
ls.add(l1)
ls.add(l2)
#----------------------how to use tooltip api-------------------------#
TT.show_delay([b1,b2,l1,l2], 1000)\
.background(b1, gdk.Color("yellow"))(b2, gdk.Color("#95BE0D"))(l1,gdk.Color("blue"))\
.custom(b1, customTooltip_cb)\
.text([l1, l2], "tooliiiiit")(b2, "button2222", enable_gaussian=True)\
.padding(l1, -1, -1, -1, 50)(b2, -1, -1, -1, 0)(b1, 0, 50, 50, 50)
#_____________________________________________________________________#
b1c = gen_control(b1)
b = gtk.HBox()
b.add(b1)
b.pack_start(b1c, False)
box.pack_start(b)
b2c = gen_control(b2)
b = gtk.HBox()
b.add(b2)
b.pack_start(b2c, False)
box.pack_start(b)
lc = gen_control(l1)
b = gtk.HBox()
b.add(ls)
b.pack_start(lc, False)
box.pack_start(b)
w.add(box)
w.connect('destroy', gtk.main_quit)
w.show_all()
gtk.main()
#run_with_profile(gtk.main, '/dev/shm/ttt')
```
#### File: dtk/ui/utils.py
```python
from deepin_utils import core, file, process, ipc, date_time, net
from deepin_utils.core import merge_list
from contextlib import contextmanager
import cairo
import gobject
import gtk
import gio
import os
import pango
import pangocairo
import traceback
import sys
import time
from constant import (WIDGET_POS_TOP_LEFT, WIDGET_POS_TOP_RIGHT,
WIDGET_POS_TOP_CENTER, WIDGET_POS_BOTTOM_LEFT,
WIDGET_POS_BOTTOM_CENTER, WIDGET_POS_BOTTOM_RIGHT,
WIDGET_POS_LEFT_CENTER, WIDGET_POS_RIGHT_CENTER,
WIDGET_POS_CENTER, DEFAULT_FONT, COLOR_NAME_DICT,
BLACK_COLOR_MAPPED, WHITE_COLOR_MAPPED, SIMILAR_COLOR_SEQUENCE,
DEFAULT_FONT_SIZE)
def repeat(msg, num):
return ' '.join([msg] * num)
def get_entry_text(entry):
'''
Get text of entry.
@param entry: Gtk.Entry instance.
@return: Return text of entry.
'''
return entry.get_text().split(" ")[0]
def set_cursor(cursor_widget, cursor_type=None):
'''
Set cursor type with given widget.
@param cursor_widget: Gtk.Widget or Gdk.Window instance.
@param cursor_type: The cursor type of gtk.gdk.Cursor; pass None to reset the widget's cursor to the default.
@return: Always return False
'''
if isinstance(cursor_widget, gtk.Widget):
cursor_window = cursor_widget.window
elif isinstance(cursor_widget, gtk.gdk.Window):
cursor_window = cursor_widget
else:
print "set_cursor: impossible!"
if cursor_type == None:
cursor_window.set_cursor(None)
else:
cursor_window.set_cursor(gtk.gdk.Cursor(cursor_type))
return False
def set_clickable_cursor(widget):
'''
Show gtk.gdk.HAND2 cursor when mouse hover widget.
@param widget: Gtk.Widget instance.
'''
set_hover_cursor(widget, gtk.gdk.HAND2)
def set_hover_cursor(widget, cursor_type):
'''
Set cursor type when mouse hover widget.
@param widget: Gtk.Widget instance.
@param cursor_type: The cursor type of gtk.gdk.Cursor.
'''
widget.connect("enter-notify-event", lambda w, e: set_cursor(w, cursor_type))
widget.connect("leave-notify-event", lambda w, e: set_cursor(w))
def get_widget_root_coordinate(widget, pos_type=WIDGET_POS_BOTTOM_CENTER, translate_coordinate=True):
'''
Get root coordinate with given widget.
@param widget: Gtk.Widget instance.
@param pos_type: The position of widget's area, you can set with below constants:
- WIDGET_POS_TOP_LEFT
- WIDGET_POS_TOP_RIGHT
- WIDGET_POS_TOP_CENTER
- WIDGET_POS_BOTTOM_LEFT
- WIDGET_POS_BOTTOM_RIGHT
- WIDGET_POS_BOTTOM_CENTER
- WIDGET_POS_LEFT_CENTER
- WIDGET_POS_RIGHT_CENTER
- WIDGET_POS_CENTER
@return: Return (x, y) as root coordination.
'''
# Get coordinate.
(wx, wy) = widget.window.get_origin()
toplevel_window = widget.get_toplevel()
if translate_coordinate and toplevel_window:
'''
FIXME: translate_coordinates wrong toward ComboBox
'''
(x, y) = widget.translate_coordinates(toplevel_window, wx, wy)
else:
(x, y) = (wx, wy)
# Get offset.
rect = widget.allocation
if pos_type == WIDGET_POS_TOP_LEFT:
offset_x = 0
offset_y = 0
elif pos_type == WIDGET_POS_TOP_RIGHT:
offset_x = rect.width
offset_y = 0
elif pos_type == WIDGET_POS_TOP_CENTER:
offset_x = rect.width / 2
offset_y = 0
elif pos_type == WIDGET_POS_BOTTOM_LEFT:
offset_x = 0
offset_y = rect.height
elif pos_type == WIDGET_POS_BOTTOM_RIGHT:
offset_x = rect.width
offset_y = rect.height
elif pos_type == WIDGET_POS_BOTTOM_CENTER:
offset_x = rect.width / 2
offset_y = rect.height
elif pos_type == WIDGET_POS_LEFT_CENTER:
offset_x = 0
offset_y = rect.height / 2
elif pos_type == WIDGET_POS_RIGHT_CENTER:
offset_x = rect.width
offset_y = rect.height / 2
elif pos_type == WIDGET_POS_CENTER:
offset_x = rect.width / 2
offset_y = rect.height / 2
return (x + offset_x, y + offset_y)
def get_event_root_coords(event):
'''
Get root coordinate with given event.
@param event: Gdk.Event instance; generally obtained from a gtk signal callback.
@return: Return (x, y) as event's root coordination.
'''
(rx, ry) = event.get_root_coords()
return (int(rx), int(ry))
def get_event_coords(event):
'''
Get coordinate with given event.
@param event: Gdk.Event instance; generally obtained from a gtk signal callback.
@return: Return (x, y) as event's coordination.
'''
(rx, ry) = event.get_coords()
return (int(rx), int(ry))
def propagate_expose(widget, event):
'''
Propagate expose to children.
Generally, call this function at the end of an `expose-event` callback so that children are redrawn after the parent widget.
And you must put \"return True\" after \"propagate_expose(widget, event)\".
Example:
>>> def expose_event_callback(widget, event):
>>> # Do something.
>>>
>>> propagate_expose(widget, event)
>>> return True
@param widget: Gtk.Container instance.
This function does nothing if widget is not a Gtk.Container instance or has no child widget.
@param event: Gdk.Event instance.
'''
if hasattr(widget, "get_child") and widget.get_child() != None:
widget.propagate_expose(widget.get_child(), event)
def move_window(widget, event, window):
'''
Move window with given widget and event.
This function is generally used to move a window while the mouse drags on the target widget.
@param widget: Gtk.Widget instance to drag.
@param event: Gdk.Event instance; generally obtained from a gtk signal callback.
@param window: Gtk.Window instance.
'''
if is_left_button(event):
widget.set_can_focus(True)
widget.grab_focus()
window.begin_move_drag(
event.button,
int(event.x_root),
int(event.y_root),
event.time)
return False
def resize_window(widget, event, window, edge):
'''
Resize window with given widget and event.
This function is generally used to resize a window while the mouse drags on the target widget.
@param widget: Gtk.Widget instance to drag.
@param event: Gdk.Event instance; generally obtained from a gtk signal callback.
@param window: Gtk.Window instance.
'''
if is_left_button(event):
window.begin_resize_drag(
edge,
event.button,
int(event.x_root),
int(event.y_root),
event.time)
return False
def add_in_scrolled_window(scrolled_window, widget, shadow_type=gtk.SHADOW_NONE):
'''
Add widget in scrolled_window.
Wrap function `add_with_viewport` with shadow type of Gtk.Viewport.
@param scrolled_window: Gtk.ScrolledWindow instance.
@param widget: Gtk.Widget instance.
@param shadow_type: Shadow type of Viewport, default is gtk.SHADOW_NONE.
'''
scrolled_window.add_with_viewport(widget)
viewport = scrolled_window.get_child()
if viewport != None:
viewport.set_shadow_type(shadow_type)
else:
print "add_in_scrolled_window: Impossible, no viewport widget in ScrolledWindow!"
def is_single_click(event):
'''
Whether an event is single click event.
@param event: gtk.gdk.BUTTON_PRESS event.
@return: Return True if event is single click event.
'''
return event.button == 1 and event.type == gtk.gdk.BUTTON_PRESS
def is_double_click(event):
'''
Whether an event is double click event.
@param event: gtk.gdk.BUTTON_PRESS event.
@return: Return True if event is double click event.
'''
return event.button == 1 and event.type == gtk.gdk._2BUTTON_PRESS
def is_left_button(event):
'''
Whether event is left button event.
@param event: gtk.gdk.BUTTON_PRESS event.
@return: Return True if event is left button event.
'''
return event.button == 1
def is_right_button(event):
'''
Whether event is right button event.
@param event: gtk.gdk.BUTTON_PRESS event.
@return: Return True if event is right button event.
'''
return event.button == 3
def is_middle_button(event):
'''
Whether event is middle button event.
@param event: gtk.gdk.BUTTON_PRESS event.
@return: Return True if event is middle button event.
'''
return event.button == 2
def foreach_container(widget, callback):
'''
Make callback call for all children of widget.
@param widget: Gtk.Container instance.
@param callback: Callback.
'''
callback(widget)
if isinstance(widget, gtk.Container):
foreach_recursive(widget, callback)
def foreach_recursive(container, callback):
'''
Helper function for L{ I{foreach_container} <foreach_container>}.
@param container: Gtk.Container instance.
@param callback: Callback.
'''
container.foreach(lambda w: foreach_container(w, callback))
def container_remove_all(container):
'''
Handy function to remove all children widget from container.
@param container: Gtk.Container instance.
'''
container.foreach(lambda widget: container.remove(widget))
def get_screen_size(widget):
'''
Get screen size from the toplevel window associated with widget.
@param widget: Gtk.Widget instance.
@return: Return screen size as (screen_width, screen_height)
'''
screen = widget.get_screen()
width = screen.get_width()
height = screen.get_height()
return (width, height)
def is_in_rect((tx, ty), rect):
'''
Whether the target coordinate is inside the given rectangle.
@param tx: Target x coordinate.
@param ty: Target y coordinate.
@param rect: The rectangle to test.
@return: Return True if the target coordinate is inside the given rectangle.
'''
if isinstance(rect, gtk.gdk.Rectangle):
x, y, w, h = rect.x, rect.y, rect.width, rect.height
else:
x, y, w, h = rect
return (tx >= x and tx <= x + w and ty >= y and ty <= y + h)
def scroll_to_top(scrolled_window):
'''
Scroll scrolled_window to top position.
@param scrolled_window: Gtk.ScrolledWindow instance.
'''
scrolled_window.get_vadjustment().set_value(0)
def scroll_to_bottom(scrolled_window):
'''
Scroll scrolled_window to bottom position.
@param scrolled_window: Gtk.ScrolledWindow instance.
'''
vadjust = scrolled_window.get_vadjustment()
vadjust.set_value(vadjust.get_upper() - vadjust.get_page_size())
def get_content_size(text, text_size=DEFAULT_FONT_SIZE, text_font=DEFAULT_FONT, wrap_width=None):
'''
Get text size, in pixel.
@param text: String or markup string.
@param text_size: Text size, in pixel.
@param text_font: Text font.
@param wrap_width: The wrap width; by default the text is not wrapped.
@return: Return text size as (text_width, text_height), return (0, 0) if occur error.
'''
if text:
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 0, 0) # size is irrelevant, only used to measure text
cr = cairo.Context(surface)
context = pangocairo.CairoContext(cr)
layout = context.create_layout()
layout.set_font_description(pango.FontDescription("%s %s" % (text_font, text_size)))
layout.set_markup(text)
if wrap_width == None:
layout.set_single_paragraph_mode(True)
else:
layout.set_width(wrap_width * pango.SCALE)
layout.set_single_paragraph_mode(False)
layout.set_wrap(pango.WRAP_WORD)
return layout.get_pixel_size()
else:
return (0, 0)
def get_os_version():
'''
Get OS version with command `lsb_release -i`.
@return: Return OS version string.
'''
version_infos = get_command_output_first_line(["lsb_release", "-i"]).split()
if len(version_infos) > 0:
return version_infos[-1]
else:
return ""
def print_env():
'''
Print environment variable.
'''
for param in os.environ.keys():
print "*** %20s %s" % (param,os.environ[param])
def get_font_families():
'''
Get all font families in system.
@return: Return font families list in current system.
'''
fontmap = pangocairo.cairo_font_map_get_default()
return map (lambda f: f.get_name(), fontmap.list_families())
def add_color_stop_rgba(pat, pos, color_info):
'''
Add color stop as rgba format.
@param pat: Pattern.
@param pos: Stop position.
@param color_info: (color, alpha), color is hex value, alpha value range: [0, 1]
'''
# Pick color.
(color, alpha) = color_info
(r, g, b) = color_hex_to_cairo(color)
pat.add_color_stop_rgba(pos, r, g, b, alpha)
def alpha_color_hex_to_cairo((color, alpha)):
'''
Convert alpha color (color, alpha) to cairo color (r, g, b, alpha).
@param color: Hex color.
@param alpha: Alpha value.
@return: Return cairo value (red, green, blue, alpha).
'''
(r, g, b) = color_hex_to_cairo(color)
return (r, g, b, alpha)
def color_hex_to_rgb(color):
'''
Convert hex color to cairo color (r, g, b).
@param color: Hex color value.
@return: Return cairo value, (red, green, blue)
'''
if color[0] == '#':
color = color[1:]
return (int(color[:2], 16), int(color[2:4], 16), int(color[4:], 16))
def color_hex_to_cairo(color):
'''
Convert a HTML (hex) RGB value to cairo color.
@param color: The color to convert.
@return: A color in cairo format, (red, green, blue).
'''
gdk_color = gtk.gdk.color_parse(color)
return (gdk_color.red / 65535.0, gdk_color.green / 65535.0, gdk_color.blue / 65535.0)
def color_rgb_to_hex(rgb_color):
'''
Convert cairo color to hex color.
@param rgb_color: (red, green, blue)
@return: Return hex color.
'''
return "#%02X%02X%02X" % rgb_color
def color_rgb_to_cairo(color):
'''
Convert a 8 bit RGB value to cairo color.
@type color: a triple of integers between 0 and 255
@param color: The color to convert.
@return: A color in cairo format.
'''
return (color[0] / 255.0, color[1] / 255.0, color[2] / 255.0)
def get_match_parent(widget, match_types):
'''
Get parent widget match given type.
@param widget: Gtk.Widget instance.
@param match_types: A list gtk widget types.
@return: Return first parent widget match with given types.
Return None if nothing match.
'''
parent = widget.get_parent()
if parent == None:
return None
elif type(parent).__name__ in match_types:
return parent
else:
return get_match_parent(parent, match_types)
def get_match_children(widget, child_type):
'''
Get all child widgets that match given widget type.
@param widget: The container to search.
@param child_type: The widget type of search.
@return: Return all child widgets that match given widget type, or return empty list if nothing to find.
'''
child_list = widget.get_children()
if child_list:
match_widget_list = filter(lambda w: isinstance(w, child_type), child_list)
match_children = (merge_list(map(
lambda w: get_match_children(w, child_type),
filter(
lambda w: isinstance(w, gtk.Container),
child_list))))
return match_widget_list + match_children
else:
return []
def widget_fix_cycle_destroy_bug(widget):
'''
Fix bug that PyGtk destroys cycle too early.
@param widget: Gtk.Widget instance.
'''
# This code fixes the PyGtk bug <<Pygtk destroys cycle too early>>:
# the cycle is wrongly freed
# by Python's GC because Pygobject does not tell Python that the widget's
# wrapper object is referenced by the underlying GObject. As you have
# found, in order to break the cycle Python zeros out the callback
# closure's captured free variables, which is what causes the "referenced
# before assignment" exception.
# detail see: https://bugzilla.gnome.org/show_bug.cgi?id=546802 .
#
# Otherwise, will got error : "NameError: free variable 'self' referenced before assignment in enclosing scope".
widget.__dict__
def get_same_level_widgets(widget):
'''
Get widgets of the same type that sit at the same hierarchy level.
@param widget: Gtk.Widget instance to search.
@return: Return a list of widgets whose type matches the given widget at the same hierarchy level.
'''
parent = widget.get_parent()
if parent == None:
return []
else:
return filter(lambda w:type(w).__name__ == type(widget).__name__, parent.get_children())
def window_is_max(widget):
'''
Whether window is maximized.
@param widget: Gtk.Widget instance.
@return: Return True if widget's toplevel window is maximized.
'''
toplevel_window = widget.get_toplevel()
if toplevel_window.window.get_state() & gtk.gdk.WINDOW_STATE_MAXIMIZED == gtk.gdk.WINDOW_STATE_MAXIMIZED:
return True
else:
return False
@contextmanager
def cairo_state(cr):
'''
Protect the cairo context state so cairo operations can be performed safely.
@param cr: Cairo context.
'''
cr.save()
try:
yield
except Exception, e:
print 'function cairo_state got error: %s' % e
traceback.print_exc(file=sys.stdout)
else:
cr.restore()
@contextmanager
def cairo_disable_antialias(cr):
'''
Disable cairo antialiasing temporarily.
@param cr: Cairo context.
'''
# Save antialias.
antialias = cr.get_antialias()
cr.set_antialias(cairo.ANTIALIAS_NONE)
try:
yield
except Exception, e:
print 'function cairo_disable_antialias got error: %s' % e
traceback.print_exc(file=sys.stdout)
else:
# Restore antialias.
cr.set_antialias(antialias)
def remove_timeout_id(callback_id):
'''
Remove callback id.
@param callback_id: Callback id.
'''
if callback_id:
gobject.source_remove(callback_id)
callback_id = None
def remove_signal_id(signal_id):
'''
Remove signal id.
@param signal_id: Signal id that return by function gobject.connect.
'''
if signal_id:
(signal_object, signal_handler_id) = signal_id
if signal_object.handler_is_connected(signal_handler_id):
signal_object.disconnect(signal_handler_id)
signal_id = None
def print_callback_args(*args):
'''
Print callback arguments.
Usage:
>>> some_widget.connect(\"signal\", print_callback_args)
'''
print "Print callback argument: %s" % (args)
def enable_shadow(widget):
'''
Whether the widget is composited.
@param widget: Gtk.Widget instance.
@return: Return True if the widget is composited.
'''
return widget.is_composited()
def rgb2hsb(r_value, g_value, b_value):
'''
Convert color from RGB to HSB format.
@param r_value: Red.
@param g_value: Green.
@param b_value: Blue.
@return: Return color with HSB (h, s, b) format.
'''
r = r_value
g = g_value
b = b_value
max_v = max(r, g, b)
min_v = min(r, g, b)
h = 0.0
if max_v == min_v:
h = 0
elif max_v == r and g >= b:
h = 60 * (g - b) / (max_v - min_v)
elif max_v == r and g < b:
h = 60 * (g - b) / (max_v - min_v) + 360
elif max_v == g:
h = 60 * (b - r) / (max_v - min_v) + 120
elif max_v == b:
h = 60 * (r - g) / (max_v - min_v) + 240
if max_v == 0:
s = 0.0
else:
s = 1.0 - min_v / max_v
b = max_v
return (h, s, b)
def find_similar_color(search_color):
'''
Find similar color match search_color.
@param search_color: Color to search.
@return: Return similar color name and value, (color_name, color_value).
'''
(search_h, search_s, search_b) = rgb2hsb(*color_hex_to_cairo(search_color))
hsb_colors = map(lambda name: (name, rgb2hsb(*color_hex_to_cairo(COLOR_NAME_DICT[name]))), SIMILAR_COLOR_SEQUENCE)
# Debug.
# print (search_h, search_s, search_b)
similar_color_name = None
similar_color_value = None
# Return black color if brightness (height) < 0.35
if search_b < 0.35:
similar_color_name = BLACK_COLOR_MAPPED
# Return white color if saturation (radius) < 0.05
elif search_s < 0.05:
similar_color_name = WHITE_COLOR_MAPPED
# Otherwise find nearest color in hsb color space.
else:
min_color_distance = None
for (color_name, (h, s, b)) in hsb_colors:
color_distance = abs(h - search_h)
if min_color_distance == None or color_distance < min_color_distance:
min_color_distance = color_distance
similar_color_name = color_name
similar_color_value = COLOR_NAME_DICT[similar_color_name]
return (similar_color_name, similar_color_value)
def place_center(refer_window, place_window):
'''
Place place_window in center of refer_window.
@param refer_window: Reference window.
@param place_window: Place window.
'''
self_size = place_window.get_size()
refer_window_pos = refer_window.get_position()
refer_window_rect = refer_window.get_allocation()
place_window.move(
(refer_window_pos[0] + refer_window_rect.width / 2) - (self_size[0] / 2),
(refer_window_pos[1] + refer_window_rect.height / 2) - (self_size[1] / 2))
def get_system_icon_info(icon_theme="Deepin", icon_name="NULL", size=48):
'''
Get system level icon info
@param icon_theme: Gtk Icon Theme, for example, Deepin
@param icon_name: the name of the icon to lookup, for example, preferences-power
@param size: desired icon size, for example, 48
'''
__icon_theme = gtk.IconTheme()
__icon_theme.set_custom_theme(icon_theme)
return __icon_theme.lookup_icon(icon_name, size, gtk.ICON_LOOKUP_NO_SVG)
def get_pixbuf_support_formats():
'''
Get formats that support by pixbuf.
@return: Return formats that support by pixbuf.
'''
support_formats = []
for support_format in gtk.gdk.pixbuf_get_formats():
support_formats += support_format.get("extensions")
return support_formats
def gdkcolor_to_string(gdkcolor):
'''
Gdk color to string.
@param gdkcolor: Gdk.Color
@return: Return string of gdk color.
'''
return "#%0.2X%0.2X%0.2X" % (gdkcolor.red / 256, gdkcolor.green / 256, gdkcolor.blue / 256)
def get_window_shadow_size(window):
'''
Get window shadow size.
@param window: Test window.
@return: Return shadow size as (width, height), or return (0, 0) if window haven't shadow.
'''
if hasattr(window, "get_shadow_size"):
return window.get_shadow_size()
else:
return (0, 0)
def get_resize_pixbuf_with_height(filepath, expect_height):
pixbuf = gtk.gdk.pixbuf_new_from_file(filepath)
if pixbuf.get_height() > expect_height:
return pixbuf.scale_simple(
int(float(expect_height) / pixbuf.get_height() * pixbuf.get_width()),
expect_height,
gtk.gdk.INTERP_BILINEAR)
else:
return pixbuf
def get_optimum_pixbuf_from_pixbuf(pixbuf, expect_width, expect_height, cut_middle_area=True):
pixbuf_width, pixbuf_height = pixbuf.get_width(), pixbuf.get_height()
if pixbuf_width >= expect_width and pixbuf_height >= expect_height:
if float(pixbuf_width) / pixbuf_height == float(expect_width) / expect_height:
scale_width, scale_height = expect_width, expect_height
elif float(pixbuf_width) / pixbuf_height > float(expect_width) / expect_height:
scale_height = expect_height
scale_width = int(float(pixbuf_width) * expect_height / pixbuf_height)
else:
scale_width = expect_width
scale_height = int(float(pixbuf_height) * expect_width / pixbuf_width)
if cut_middle_area:
subpixbuf_x = (scale_width - expect_width) / 2
subpixbuf_y = (scale_height - expect_height) / 2
else:
subpixbuf_x = 0
subpixbuf_y = 0
return pixbuf.scale_simple(
scale_width,
scale_height,
gtk.gdk.INTERP_BILINEAR).subpixbuf(subpixbuf_x,
subpixbuf_y,
expect_width,
expect_height)
elif pixbuf_width >= expect_width:
scale_width = expect_width
scale_height = int(float(expect_width) * pixbuf_height / pixbuf_width)
if cut_middle_area:
subpixbuf_x = (scale_width - expect_width) / 2
subpixbuf_y = max((scale_height - expect_height) / 2, 0)
else:
subpixbuf_x = 0
subpixbuf_y = 0
return pixbuf.scale_simple(
scale_width,
scale_height,
gtk.gdk.INTERP_BILINEAR).subpixbuf(subpixbuf_x,
subpixbuf_y,
expect_width,
min(expect_height, scale_height))
elif pixbuf_height >= expect_height:
scale_width = int(float(expect_height) * pixbuf_width / pixbuf_height)
scale_height = expect_height
if cut_middle_area:
subpixbuf_x = max((scale_width - expect_width) / 2, 0)
subpixbuf_y = (scale_height - expect_height) / 2
else:
subpixbuf_x = 0
subpixbuf_y = 0
return pixbuf.scale_simple(
scale_width,
scale_height,
gtk.gdk.INTERP_BILINEAR).subpixbuf(subpixbuf_x,
subpixbuf_y,
min(expect_width, scale_width),
expect_height)
else:
return pixbuf
def get_optimum_pixbuf_from_file(filepath, expect_width, expect_height, cut_middle_area=True):
'''
Get optimum size pixbuf from file.
@param filepath: Filepath to contain image.
@param expect_width: Expect width.
@param expect_height: Expect height.
@param cut_middle_area: Default cut image with middle area.
@return: Return optimum size pixbuf with expect size.
'''
pixbuf = gtk.gdk.pixbuf_new_from_file(filepath)
return get_optimum_pixbuf_from_pixbuf(pixbuf, expect_width, expect_height, cut_middle_area)
def unique_print(text):
'''
Print with a timestamp prefix; generally used for test code.
@param text: Test text.
'''
print "%s: %s" % (time.time(), text)
def invisible_window(window):
'''
Make window invisible.
We use this function to hide a window that exists only to receive global events.
'''
def shape_window(widget, rect):
w, h = rect.width, rect.height
bitmap = gtk.gdk.Pixmap(None, w, h, 1)
cr = bitmap.cairo_create()
cr.set_source_rgb(0.0, 0.0, 0.0)
cr.set_operator(cairo.OPERATOR_CLEAR)
cr.paint()
widget.shape_combine_mask(bitmap, 0, 0)
window.move(-10, -10)
window.set_default_size(0, 0)
window.set_decorated(False)
window.connect("size-allocate", shape_window)
def split_with(split_list, condition_func):
print "Please import deepin_utils.core.split_with, this function will be deprecated in the next release version."
return core.split_with(split_list, condition_func)
def create_directory(directory, remove_first=False):
print "Please import deepin_utils.file.create_directory, this function will be deprecated in the next release version."
return file.create_directory(directory, remove_first=remove_first)
def remove_file(path):
print "Please import deepin_utils.file.remove_file, this function will be deprecated in the next release version."
return file.remove_file(path)
def remove_directory(path):
print "Please import deepin_utils.file.remove_directory, this function will be deprecated in the next release version."
return file.remove_directory(path)
def touch_file(filepath):
print "Please import deepin_utils.file.touch_file, this function will be deprecated in the next release version."
return file.touch_file(filepath)
def touch_file_dir(filepath):
print "Please import deepin_utils.file.touch_file_dir, this function will be deprecated in the next release version."
return file.touch_file_dir(filepath)
def read_file(filepath, check_exists=False):
print "Please import deepin_utils.file.read_file, this function will be deprecated in the next release version."
return file.read_file(filepath, check_exists=check_exists)
def read_first_line(filepath, check_exists=False):
print "Please import deepin_utils.file.read_first_line, this function will be deprecated in the next release version."
return file.read_first_line(filepath, check_exists=check_exists)
def eval_file(filepath, check_exists=False):
print "Please import deepin_utils.file.eval_file, this function will be deprecated in the next release version."
return file.eval_file(filepath, check_exists=check_exists)
def write_file(filepath, content, mkdir=False):
print "Please import deepin_utils.file.write_file, this function will be deprecated in the next release version."
return file.write_file(filepath, content, mkdir=mkdir)
def kill_process(proc):
print "Please import deepin_utils.process.kill_process, this function will be deprecated in the next release version."
return process.kill_process(proc)
def get_command_output_first_line(commands, in_shell=False):
print "Please import deepin_utils.process.get_command_output_first_line, this function will be deprecated in the next release version."
return process.get_command_output_first_line(commands, in_shell=in_shell)
def get_command_output(commands, in_shell=False):
print "Please import deepin_utils.process.get_command_output, this function will be deprecated in the next release version."
return process.get_command_output(commands, in_shell=in_shell)
def run_command(command):
print "Please import deepin_utils.process.run_command, this function will be deprecated in the next release version."
return process.run_command(command)
def get_current_time(time_format="%Y-%m-%d %H:%M:%S"):
print "Please import deepin_utils.date_time.get_current_time, this function will be deprecated in the next release version."
return date_time.get_current_time(time_format=time_format)
def add_in_list(e_list, element):
print "Please import deepin_utils.core.add_in_list, this function will be deprecated in the next release version."
return core.add_in_list(e_list, element)
def remove_from_list(e_list, element):
print "Please import deepin_utils.core.remove_from_list, this function will be deprecated in the next release version."
return core.remove_from_list(e_list, element)
def get_dir_size(dirname):
print "Please import deepin_utils.file.get_dir_size, this function will be deprecated in the next release version."
return file.get_dir_size(dirname)
def print_exec_time(func):
print "Please import deepin_utils.date_time.print_exec_time, this function will be deprecated in the next release version."
return date_time.print_exec_time(func)
def format_file_size(bytes, precision=2):
print "Please import deepin_utils.file.format_file_size, this function will be deprecated in the next release version."
return file.format_file_size(bytes, precision=precision)
def map_value(value_list, get_value_callback):
print "Please import deepin_utils.core.map_value, this function will be deprecated in the next release version."
return core.map_value(value_list, get_value_callback)
def mix_list_max(list_a, list_b):
print "Please import deepin_utils.core.mix_list_max, this function will be deprecated in the next release version."
return core.mix_list_max(list_a, list_b)
def unzip(unzip_list):
print "Please import deepin_utils.core.unzip, this function will be deprecated in the next release version."
return core.unzip(unzip_list)
def is_seriate_list(test_list):
print "Please import deepin_utils.core.is_seriate_list, this function will be deprecated in the next release version."
return core.is_seriate_list(test_list)
def get_disperse_index(disperse_list, value):
print "Please import deepin_utils.core.get_disperse_index, this function will be deprecated in the next release version."
return core.get_disperse_index(disperse_list, value)
def last_index(test_list):
print "Please import deepin_utils.core.last_index, this function will be deprecated in the next release version."
return core.last_index(test_list)
def end_with_suffixs(filepath, suffixs):
print "Please import deepin_utils.file.end_with_suffixs, this function will be deprecated in the next release version."
return file.end_with_suffixs(filepath, suffixs)
def get_current_dir(filepath):
print "Please import deepin_utils.file.get_current_dir, this function will be deprecated in the next release version."
return file.get_current_dir(filepath)
def get_parent_dir(filepath, level=1):
print "Please import deepin_utils.file.get_parent_dir, this function will be deprecated in the next release version."
return file.get_parent_dir(filepath, level)
def is_long(string):
print "Please import deepin_utils.core.is_long, this function will be deprecated in the next release version."
return core.is_long(string)
def is_int(string):
print "Please import deepin_utils.core.is_int, this function will be deprecated in the next release version."
return core.is_int(string)
def is_float(string):
print "Please import deepin_utils.core.is_float, this function will be deprecated in the next release version."
return core.is_float(string)
def is_hex_color(string):
print "Please import deepin_utils.core.is_hex_color, this function will be deprecated in the next release version."
return core.is_hex_color(string)
def check_connect_by_port(port, retry_times=6, sleep_time=0.5):
print "Please import deepin_utils.net.check_connect_by_port, this function will be deprecated in the next release version."
return net.check_connect_by_port(port, retry_times=retry_times, sleep_time=sleep_time)
def is_network_connected():
print "Please import deepin_utils.net.is_network_connected, this function will be deprecated in the next release version."
return net.is_network_connected()
def is_dbus_name_exists(dbus_name, request_session_bus=True):
print "Please import deepin_utils.ipc.is_dbus_name_exists, this function will be deprecated in the next release version."
return ipc.is_dbus_name_exists(dbus_name, request_session_bus=request_session_bus)
def get_unused_port(address="localhost"):
print "Please import deepin_utils.net.get_unused_port, this function will be deprecated in the next release version."
return net.get_unused_port(address=address)
def file_is_image(file, filter_type=get_pixbuf_support_formats()):
gfile = gio.File(file)
try:
fileinfo = gfile.query_info('standard::type,standard::content-type')
file_type = fileinfo.get_file_type()
if file_type == gio.FILE_TYPE_REGULAR:
content_type = fileinfo.get_attribute_as_string("standard::content-type")
split_content = content_type.split("/")
if len(split_content) == 2:
if split_content[0] == "image" and split_content[1] in filter_type:
file_path = gfile.get_path()
if not file_path.endswith(".part"):
return True
except:
pass
return False
```
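
Most widgets in this collection lean on a handful of these helpers for measuring text, converting theme colors and hit-testing. A small sketch, assuming the module is importable as dtk.ui.utils as the other files here do:

```python
# Illustrative sketch: common helpers from the module above.
from dtk.ui.utils import (get_content_size, color_hex_to_cairo,
                          alpha_color_hex_to_cairo, is_in_rect)

# Measure a label before allocating space for it (width, height in pixels).
(text_w, text_h) = get_content_size("Hello world", text_size=10)

# Convert theme colors for cairo drawing.
(r, g, b) = color_hex_to_cairo("#3399FF")
(r, g, b, a) = alpha_color_hex_to_cairo(("#3399FF", 0.7))

# Hit-testing, e.g. inside a button-press handler; rect may be a
# (x, y, width, height) tuple or a gtk.gdk.Rectangle.
if is_in_rect((12, 8), (0, 0, 100, 30)):
    print "pointer is inside the rectangle"
```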
#### File: dtk/ui/window_base.py
```python
import cairo
import gobject
from constant import EDGE_DICT
from skin_config import skin_config
import gtk
from utils import (resize_window, is_double_click, move_window)
class WindowBase(gtk.Window):
'''
WindowBase class.
@undocumented: draw_background
@undocumented: draw_skin
@undocumented: get_cursor_type_with_coordinate
'''
__gsignals__ = {
"window-resize" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
}
def __init__(self,
window_type=gtk.WINDOW_TOPLEVEL,
):
'''
Initialize WindowBase class.
@param window_type: The window type, default is gtk.WINDOW_TOPLEVEL
'''
gtk.Window.__init__(self, window_type)
self.move_window_x = 0
self.move_window_y = 0
self.move_start_x = 0
self.move_start_y = 0
self.move_end_x = 0
self.move_end_y = 0
def show_window(self):
'''
Show the window.
'''
self.show_all()
def toggle_max_window(self):
'''
Toggle the window size between maximized size and normal size.
'''
window_state = self.window.get_state()
if window_state & gtk.gdk.WINDOW_STATE_MAXIMIZED == gtk.gdk.WINDOW_STATE_MAXIMIZED:
self.unmaximize()
else:
self.maximize()
def toggle_fullscreen_window(self):
'''
Toggle the window between fullscreen mode and normal size.
'''
window_state = self.window.get_state()
if window_state & gtk.gdk.WINDOW_STATE_FULLSCREEN == gtk.gdk.WINDOW_STATE_FULLSCREEN:
self.unfullscreen()
else:
self.fullscreen()
def close_window(self):
'''
Close the window. Send the destroy signal to the program.
@return: Always return False.
'''
# Hide the window immediately when the user clicks the close button,
# so the application feels more responsive. ;p
self.hide_all()
self.emit("destroy")
return False
def min_window(self):
'''
Minimize the window. Make it iconified.
'''
self.iconify()
def resize_window(self, widget, event):
'''
Resize the window.
@param widget: The window of type gtk.Widget.
@param event: A signal of type gtk.gdk.Event.
'''
if self.enable_resize:
edge = self.get_edge()
if edge != None:
resize_window(self, event, self, edge)
self.emit("window-resize")
def is_disable_window_maximized(self):
'''
An interface which indicates whether maximizing the window is disabled; you should implement this function in your own subclass.
@return: Always return False.
'''
return False
def monitor_window_state(self, widget, event):
'''
Internal function that monitors window state:
it hides the shadow when the window is maximized or fullscreen, and shows it otherwise.
@param widget: The window of type gtk.Widget.
@param event: The event of gtk.gdk.Event.
'''
window_state = self.window.get_state()
if (window_state & gtk.gdk.WINDOW_STATE_MAXIMIZED == gtk.gdk.WINDOW_STATE_MAXIMIZED or
window_state & gtk.gdk.WINDOW_STATE_FULLSCREEN == gtk.gdk.WINDOW_STATE_FULLSCREEN):
self.hide_shadow()
if self.is_disable_window_maximized():
self.unmaximize()
else:
self.show_shadow()
def add_motion_move_event(self, widget):
'''
Add move event callback.
@param widget: A widget of type gtk.Widget.
'''
def handle_button_press(widget, event):
(self.move_window_x, self.move_window_y) = widget.get_toplevel().window.get_origin()
(self.move_start_x, self.move_start_y) = event.x_root, event.y_root
def handle_motion_event(widget, event):
(self.move_end_x, self.move_end_y) = event.x_root, event.y_root
widget.get_toplevel().move(
int(self.move_window_x + self.move_end_x - self.move_start_x),
int(self.move_window_y + self.move_end_y - self.move_start_y),
)
widget.connect("button-press-event", handle_button_press)
widget.connect("motion-notify-event", handle_motion_event)
def add_move_event(self, widget):
'''
Add move event callback.
@param widget: A widget of type gtk.Widget.
'''
widget.connect("button-press-event", lambda w, e: move_window(w, e, self))
def add_toggle_event(self, widget):
'''
Add toggle event callback.
@param widget: A widget of type gtk.Widget.
'''
widget.connect("button-press-event", self.double_click_window)
def double_click_window(self, widget, event):
'''
Double click event handler of the window. It toggles the window between maximized and normal size.
@param widget: A widget of type gtk.Widget.
@param event: A event of type gtk.gdk.Event.
@return: Always return False.
'''
if is_double_click(event):
self.toggle_max_window()
return False
def get_edge(self):
'''
Get the edge which the cursor is on, according to the cursor type.
@return: If there is a corresponding cursor type, an instance of gtk.gdk.WindowEdge is returned, else None is returned.
'''
if EDGE_DICT.has_key(self.cursor_type):
return EDGE_DICT[self.cursor_type]
else:
return None
def draw_background(self, cr, x, y, w, h):
cr.set_source_rgba(*self.background_color)
cr.set_operator(cairo.OPERATOR_SOURCE)
cr.paint()
def draw_skin(self, cr, x, y, w, h):
skin_config.render_background(cr, self, x, y)
def draw_mask(self, cr, x, y, w, h):
'''
Draw mask interface; you should implement this function in your own subclass.
@param cr: Cairo context.
@param x: X coordinate of draw area.
@param y: Y coordinate of draw area.
@param w: Width of draw area.
@param h: Height of draw area.
'''
pass
def get_cursor_type_with_coordinate(self, ex, ey, wx, wy, ww, wh):
'''
Get cursor type with given coordinate.
'''
if self.get_resizable():
if wx <= ex <= wx + self.shadow_padding:
if wy <= ey <= wy + self.shadow_padding * 2:
return gtk.gdk.TOP_LEFT_CORNER
elif wy + wh - (self.shadow_padding * 2) <= ey <= wy + wh:
return gtk.gdk.BOTTOM_LEFT_CORNER
elif wy + self.shadow_padding < ey < wy + wh - self.shadow_padding:
return gtk.gdk.LEFT_SIDE
else:
return None
elif wx + ww - self.shadow_padding <= ex <= wx + ww:
if wy <= ey <= wy + self.shadow_padding * 2:
return gtk.gdk.TOP_RIGHT_CORNER
elif wy + wh - (self.shadow_padding * 2) <= ey <= wy + wh:
return gtk.gdk.BOTTOM_RIGHT_CORNER
elif wy + self.shadow_padding < ey < wy + wh - self.shadow_padding:
return gtk.gdk.RIGHT_SIDE
else:
return None
elif wx + self.shadow_padding < ex < wx + ww - self.shadow_padding:
if wy <= ey <= wy + self.shadow_padding:
return gtk.gdk.TOP_SIDE
elif wy + wh - self.shadow_padding <= ey <= wy + wh:
return gtk.gdk.BOTTOM_SIDE
else:
return None
else:
return None
else:
return None
```
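
WindowBase is only a base class: it expects a concrete subclass to supply attributes such as `enable_resize`, `cursor_type`, `shadow_padding`, `background_color` and the shadow methods. The move and toggle helpers can still be sketched on their own; the `window` below is assumed to be an instance of such a subclass (for example the toolkit's Window class, which is not shown in this excerpt):

```python
# Illustrative sketch: wiring a custom titlebar to drag-move and
# double-click-maximize with the helpers defined above.
import gtk

# `window` is assumed to be an instance of a concrete WindowBase subclass
# (for example the toolkit's Window class, which is not shown in this excerpt).
titlebar = gtk.EventBox()
titlebar.add(gtk.Label("My application"))
# ... pack `titlebar` into the window's layout here (depends on the subclass) ...

window.add_move_event(titlebar)     # button-press starts begin_move_drag()
window.add_toggle_event(titlebar)   # double click calls toggle_max_window()
window.show_window()
gtk.main()
```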
#### File: dist-packages/duplicity/backend.py
```python
import os
import sys
import socket
import time
import re
import getpass
import gettext
import urllib
from duplicity import dup_temp
from duplicity import dup_threading
from duplicity import file_naming
from duplicity import globals
from duplicity import log
from duplicity import urlparse_2_5 as urlparser
from duplicity import progress
from duplicity import util
from duplicity.util import exception_traceback
from duplicity.errors import BackendException, FatalBackendError
from duplicity.errors import TemporaryLoadException
from duplicity.errors import ConflictingScheme
from duplicity.errors import InvalidBackendURL
from duplicity.errors import UnsupportedBackendScheme
import duplicity.backends
# todo: this should really NOT be done here
socket.setdefaulttimeout(globals.timeout)
_forced_backend = None
_backends = {}
def import_backends():
"""
Import files in the duplicity/backends directory where
the filename ends in 'backend.py' and ignore the rest.
@rtype: void
@return: void
"""
path = duplicity.backends.__path__[0]
assert path.endswith("duplicity/backends"), duplicity.backends.__path__
files = os.listdir(path)
for fn in files:
if fn.endswith("backend.py"):
fn = fn[:-3]
imp = "duplicity.backends.%s" % (fn,)
# ignore gio as it is explicitly loaded in commandline.parse_cmdline_options()
if fn == "giobackend": continue
try:
__import__(imp)
res = "Succeeded"
level = log.INFO
except Exception:
res = "Failed: " + str(sys.exc_info()[1])
level = log.WARNING
log.Log(_("Import of %s %s") % (imp, res), level)
else:
continue
def force_backend(backend):
"""
Forces the use of a particular backend, regardless of schema
"""
global _forced_backend
_forced_backend = backend
def register_backend(scheme, backend_factory):
"""
Register a given backend factory responsible for URL:s with the
given scheme.
The backend must be a callable which, when called with a URL as
the single parameter, returns an object implementing the backend
protocol (i.e., a subclass of Backend).
Typically the callable will be the Backend subclass itself.
This function is not thread-safe and is intended to be called
during module importation or start-up.
"""
global _backends
assert callable(backend_factory), "backend factory must be callable"
if scheme in _backends:
raise ConflictingScheme("the scheme %s already has a backend "
"associated with it"
"" % (scheme,))
_backends[scheme] = backend_factory
def is_backend_url(url_string):
"""
@return Whether the given string looks like a backend URL.
"""
pu = ParsedUrl(url_string)
# Be verbose to actually return True/False rather than string.
if pu.scheme:
return True
else:
return False
def get_backend(url_string):
"""
Instantiate a backend suitable for the given URL, or return None
if the given string looks like a local path rather than a URL.
Raise InvalidBackendURL if the URL is not a valid URL.
"""
if not is_backend_url(url_string):
return None
pu = ParsedUrl(url_string)
# Implicit local path
assert pu.scheme, "should be a backend url according to is_backend_url"
global _backends, _forced_backend
if _forced_backend:
return _forced_backend(pu)
elif not pu.scheme in _backends:
raise UnsupportedBackendScheme(url_string)
else:
try:
return _backends[pu.scheme](pu)
except ImportError:
raise BackendException(_("Could not initialize backend: %s") % str(sys.exc_info()[1]))
_urlparser_initialized = False
_urlparser_initialized_lock = dup_threading.threading_module().Lock()
def _ensure_urlparser_initialized():
"""
Ensure that the appropriate clobbering of variables in the
urlparser module has been done. In the future, the need for this
clobbering to begin with should preferably be eliminated.
"""
def init():
global _urlparser_initialized
if not _urlparser_initialized:
# These URL schemes have a backend with a notion of an RFC "network location".
# The 'file' and 's3+http' schemes should not be in this list.
# 'http' and 'https' are not actually used for duplicity backend urls, but are needed
# in order to properly support urls returned from some webdav servers. adding them here
# is a hack. we should instead not stomp on the url parsing module to begin with.
#
# todo: eliminate the need for backend specific hacking here completely.
urlparser.uses_netloc = ['ftp',
'ftps',
'hsi',
'rsync',
's3',
'u1',
'scp', 'ssh', 'sftp',
'webdav', 'webdavs',
'gdocs',
'http', 'https',
'imap', 'imaps',
'mega']
# Do not transform or otherwise parse the URL path component.
urlparser.uses_query = []
urlparser.uses_fragm = []
_urlparser_initialized = True
dup_threading.with_lock(_urlparser_initialized_lock, init)
class ParsedUrl:
"""
Parse the given URL as a duplicity backend URL.
Returns the data of a parsed URL with the same names as that of
the standard urlparse.urlparse() except that all values have been
resolved rather than deferred. There are no get_* members. This
makes sure that the URL parsing errors are detected early.
Raise InvalidBackendURL on invalid URLs.
"""
def __init__(self, url_string):
self.url_string = url_string
_ensure_urlparser_initialized()
# While useful in some cases, the fact is that the urlparser makes
# all the properties in the URL deferred or lazy. This means that
# problems don't get detected till called. We'll try to trap those
# problems here, so they will be caught early.
try:
pu = urlparser.urlparse(url_string)
except Exception:
raise InvalidBackendURL("Syntax error in: %s" % url_string)
try:
self.scheme = pu.scheme
except Exception:
raise InvalidBackendURL("Syntax error (scheme) in: %s" % url_string)
try:
self.netloc = pu.netloc
except Exception:
raise InvalidBackendURL("Syntax error (netloc) in: %s" % url_string)
try:
self.path = pu.path
except Exception:
raise InvalidBackendURL("Syntax error (path) in: %s" % url_string)
try:
self.username = pu.username
except Exception:
raise InvalidBackendURL("Syntax error (username) in: %s" % url_string)
if self.username:
self.username = urllib.unquote(pu.username)
else:
self.username = None
try:
self.password = pu.password
except Exception:
raise InvalidBackendURL("Syntax error (password) in: %s" % url_string)
if self.password:
self.password = urllib.unquote(self.password)
else:
self.password = None
try:
self.hostname = pu.hostname
except Exception:
raise InvalidBackendURL("Syntax error (hostname) in: %s" % url_string)
# init to None, overwrite with actual value on success
self.port = None
try:
self.port = pu.port
except Exception:
# old style rsync://host::[/]dest, are still valid, though they contain no port
if not ( self.scheme in ['rsync'] and re.search('::[^:]*$', self.url_string)):
raise InvalidBackendURL("Syntax error (port) in: %s A%s B%s C%s" % (url_string, (self.scheme in ['rsync']), re.search('::[^:]+$', self.netloc), self.netloc ) )
# This happens for implicit local paths.
if not pu.scheme:
return
# Our backends do not handle implicit hosts.
if pu.scheme in urlparser.uses_netloc and not pu.hostname:
raise InvalidBackendURL("Missing hostname in a backend URL which "
"requires an explicit hostname: %s"
"" % (url_string))
# Our backends do not handle implicit relative paths.
if pu.scheme not in urlparser.uses_netloc and not pu.path.startswith('//'):
raise InvalidBackendURL("missing // - relative paths not supported "
"for scheme %s: %s"
"" % (pu.scheme, url_string))
def geturl(self):
return self.url_string
def strip_auth_from_url(parsed_url):
"""Return a URL from a urlparse object without a username or password."""
# Get a copy of the network location without the username or password.
straight_netloc = parsed_url.netloc.split('@')[-1]
# Replace the full network location with the stripped copy.
return parsed_url.geturl().replace(parsed_url.netloc, straight_netloc, 1)
# Decorator for backend operation functions to simplify writing one that
# retries. Make sure to add a keyword argument 'raise_errors' to your function
# and if it is true, raise an exception on an error. If false, fatal-log it.
def retry(fn):
def iterate(*args):
for n in range(1, globals.num_retries):
try:
kwargs = {"raise_errors" : True}
return fn(*args, **kwargs)
except Exception, e:
log.Warn(_("Attempt %s failed: %s: %s")
% (n, e.__class__.__name__, util.uexc(e)))
log.Debug(_("Backtrace of previous error: %s")
% exception_traceback())
if isinstance(e, TemporaryLoadException):
time.sleep(30) # wait longer before trying again
else:
time.sleep(10) # wait a bit before trying again
# Now try one last time, but fatal-log instead of raising errors
kwargs = {"raise_errors" : False}
return fn(*args, **kwargs)
return iterate
# same as above, a bit dumber and always dies fatally if last trial fails
# hence no need for the raise_errors var ;), we really catch everything here
# as we don't know what the underlying code comes up with and we really *do*
# want to retry globals.num_retries times under all circumstances
def retry_fatal(fn):
def _retry_fatal(self, *args):
try:
n = 0
for n in range(1, globals.num_retries):
try:
self.retry_count = n
return fn(self, *args)
except FatalBackendError, e:
# die on fatal errors
raise e
except Exception, e:
# retry on anything else
log.Warn(_("Attempt %s failed. %s: %s")
% (n, e.__class__.__name__, util.uexc(e)))
log.Debug(_("Backtrace of previous error: %s")
% exception_traceback())
time.sleep(10) # wait a bit before trying again
# final trial, die on exception
self.retry_count = n+1
return fn(self, *args)
except Exception, e:
log.Debug(_("Backtrace of previous error: %s")
% exception_traceback())
log.FatalError(_("Giving up after %s attempts. %s: %s")
% (self.retry_count, e.__class__.__name__, util.uexc(e)),
log.ErrorCode.backend_error)
self.retry_count = 0
return _retry_fatal
class Backend:
"""
Represents a generic duplicity backend, capable of storing and
retrieving files.
Concrete sub-classes are expected to implement:
- put
- get
- list
- delete
- close (if needed)
Optional:
- move
"""
def __init__(self, parsed_url):
self.parsed_url = parsed_url
def put(self, source_path, remote_filename = None):
"""
Transfer source_path (Path object) to remote_filename (string)
If remote_filename is None, get the filename from the last
path component of pathname.
"""
raise NotImplementedError()
def move(self, source_path, remote_filename = None):
"""
Move source_path (Path object) to remote_filename (string)
Same as put(), but unlinks source_path in the process. This allows the
local backend to do this more efficiently using rename.
"""
self.put(source_path, remote_filename)
source_path.delete()
def get(self, remote_filename, local_path):
"""Retrieve remote_filename and place in local_path"""
raise NotImplementedError()
def list(self):
"""
Return list of filenames (byte strings) present in backend
"""
def tobytes(filename):
"Convert a (maybe unicode) filename to bytes"
if isinstance(filename, unicode):
# There shouldn't be any encoding errors for files we care
# about, since duplicity filenames are ascii. But user files
# may be in the same directory. So just replace characters.
return filename.encode(sys.getfilesystemencoding(), 'replace')
else:
return filename
if hasattr(self, '_list'):
# Make sure that duplicity internals only ever see byte strings
# for filenames, no matter what the backend thinks it is talking about.
return map(tobytes, self._list())
else:
raise NotImplementedError()
def delete(self, filename_list):
"""
Delete each filename in filename_list, in order if possible.
"""
raise NotImplementedError()
# Should never cause FatalError.
# Returns a dictionary of dictionaries. The outer dictionary maps
# filenames to metadata dictionaries. Supported metadata are:
#
# 'size': if >= 0, size of file
# if -1, file is not found
# if None, error querying file
#
# Returned dictionary is guaranteed to contain a metadata dictionary for
# each filename, but not all metadata are guaranteed to be present.
def query_info(self, filename_list, raise_errors=True):
"""
Return metadata about each filename in filename_list
"""
info = {}
if hasattr(self, '_query_list_info'):
info = self._query_list_info(filename_list)
elif hasattr(self, '_query_file_info'):
for filename in filename_list:
info[filename] = self._query_file_info(filename)
# Fill out any missing entries (may happen if backend has no support
# or its query_list support is lazy)
for filename in filename_list:
if filename not in info:
info[filename] = {}
return info
""" use getpass by default, inherited backends may overwrite this behaviour """
use_getpass = True
def get_password(self):
"""
Return a password for authentication purposes. The password
will be obtained from the backend URL, the environment, by
asking the user, or by some other method. When applicable, the
result will be cached for future invocations.
"""
if self.parsed_url.password:
return self.parsed_url.password
try:
password = os.environ['FTP_PASSWORD']
except KeyError:
if self.use_getpass:
password = getpass.getpass("Password for '%s@%s': " %
(self.parsed_url.username,self.parsed_url.hostname) )
os.environ['FTP_PASSWORD'] = password
else:
password = None
return password
def munge_password(self, commandline):
"""
Remove password from commandline by substituting the password
found in the URL, if any, with a generic place-holder.
This is intended for display purposes only, and it is not
guaranteed that the results are correct (i.e., more than just
the ':password@' may be substituted).
"""
if self.parsed_url.password:
return re.sub( r'(:([^\s:/@]+)@([^\s@]+))', r':*****@\3', commandline )
else:
return commandline
"""
DEPRECATED:
run_command(_persist) - legacy wrappers for subprocess_popen(_persist)
"""
def run_command(self, commandline):
return self.subprocess_popen(commandline)
def run_command_persist(self, commandline):
return self.subprocess_popen_persist(commandline)
"""
DEPRECATED:
popen(_persist) - legacy wrappers for subprocess_popen(_persist)
"""
def popen(self, commandline):
result, stdout, stderr = self.subprocess_popen(commandline)
return stdout
def popen_persist(self, commandline):
result, stdout, stderr = self.subprocess_popen_persist(commandline)
return stdout
def _subprocess_popen(self, commandline):
"""
For internal use.
Execute the given command line, interpreted as a shell command.
Returns int Exitcode, string StdOut, string StdErr
"""
from subprocess import Popen, PIPE
p = Popen(commandline, shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
return p.returncode, stdout, stderr
def subprocess_popen(self, commandline):
"""
Execute the given command line with error check.
Returns int Exitcode, string StdOut, string StdErr
Raise a BackendException on failure.
"""
private = self.munge_password(commandline)
log.Info(_("Reading results of '%s'") % private)
result, stdout, stderr = self._subprocess_popen(commandline)
if result != 0:
raise BackendException("Error running '%s'" % private)
return result, stdout, stderr
""" a dictionary for persist breaking exceptions, syntax is
{ 'command' : [ code1, code2 ], ... } see ftpbackend for an example """
popen_persist_breaks = {}
def subprocess_popen_persist(self, commandline):
"""
Execute the given command line with error check.
Retries globals.num_retries times with 30s delay.
Returns int Exitcode, string StdOut, string StdErr
Raise a BackendException on failure.
"""
private = self.munge_password(commandline)
for n in range(1, globals.num_retries+1):
# sleep before retry
if n > 1:
time.sleep(30)
log.Info(_("Reading results of '%s'") % private)
result, stdout, stderr = self._subprocess_popen(commandline)
if result == 0:
return result, stdout, stderr
try:
m = re.search("^\s*([\S]+)", commandline)
cmd = m.group(1)
ignores = self.popen_persist_breaks[ cmd ]
ignores.index(result)
""" ignore a predefined set of error codes """
return 0, '', ''
except (KeyError, ValueError):
pass
log.Warn(ngettext("Running '%s' failed with code %d (attempt #%d)",
"Running '%s' failed with code %d (attempt #%d)", n) %
(private, result, n))
if stdout or stderr:
log.Warn(_("Error is:\n%s") % stderr + (stderr and stdout and "\n") + stdout)
log.Warn(ngettext("Giving up trying to execute '%s' after %d attempt",
"Giving up trying to execute '%s' after %d attempts",
globals.num_retries) % (private, globals.num_retries))
raise BackendException("Error running '%s'" % private)
def get_fileobj_read(self, filename, parseresults = None):
"""
Return fileobject opened for reading of filename on backend
The file will be downloaded first into a temp file. When the
returned fileobj is closed, the temp file will be deleted.
"""
if not parseresults:
parseresults = file_naming.parse(filename)
assert parseresults, "Filename not correctly parsed"
tdp = dup_temp.new_tempduppath(parseresults)
self.get(filename, tdp)
tdp.setdata()
return tdp.filtered_open_with_delete("rb")
def get_fileobj_write(self, filename,
parseresults = None,
sizelist = None):
"""
Return fileobj opened for writing, which will cause the file
to be written to the backend on close().
The file will be encoded as specified in parseresults (or as
read from the filename), and stored in a temp file until it
can be copied over and deleted.
If sizelist is not None, it should be set to an empty list.
The number of bytes will be inserted into the list.
"""
if not parseresults:
parseresults = file_naming.parse(filename)
assert parseresults, u"Filename %s not correctly parsed" % util.ufn(filename)
tdp = dup_temp.new_tempduppath(parseresults)
def close_file_hook():
"""This is called when returned fileobj is closed"""
self.put(tdp, filename)
if sizelist is not None:
tdp.setdata()
sizelist.append(tdp.getsize())
tdp.delete()
fh = dup_temp.FileobjHooked(tdp.filtered_open("wb"))
fh.addhook(close_file_hook)
return fh
def get_data(self, filename, parseresults = None):
"""
Retrieve a file from backend, process it, return contents.
"""
fin = self.get_fileobj_read(filename, parseresults)
buf = fin.read()
assert not fin.close()
return buf
def put_data(self, buffer, filename, parseresults = None):
"""
Put buffer into filename on backend after processing.
"""
fout = self.get_fileobj_write(filename, parseresults)
fout.write(buffer)
assert not fout.close()
def close(self):
"""
Close the backend, releasing any resources held and
invalidating any file objects obtained from the backend.
"""
pass
```
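The docstrings of `register_backend()` and the `Backend` class above spell out the protocol a concrete backend has to follow: implement `put`, `get`, `_list`/`list` and `delete`, then register a factory per URL scheme. The following is a minimal sketch of that protocol, assuming a made-up `example` scheme and a deliberately simplified local-directory store (not an actual duplicity backend):

```python
# Minimal sketch, not part of duplicity: a toy backend that follows the
# protocol documented above. The "example" scheme and LocalDirBackend
# name are invented for illustration.
import os
import shutil

import duplicity.backend


class LocalDirBackend(duplicity.backend.Backend):
    def __init__(self, parsed_url):
        duplicity.backend.Backend.__init__(self, parsed_url)
        # Simplified path handling: treat the URL path as a local directory.
        self.dir = parsed_url.path.lstrip('/')

    def put(self, source_path, remote_filename=None):
        if not remote_filename:
            remote_filename = source_path.get_filename()
        shutil.copyfile(source_path.name,
                        os.path.join(self.dir, remote_filename))

    def get(self, remote_filename, local_path):
        shutil.copyfile(os.path.join(self.dir, remote_filename),
                        local_path.name)
        local_path.setdata()

    def _list(self):
        # Backend.list() wraps _list() and coerces names to byte strings.
        return os.listdir(self.dir)

    def delete(self, filename_list):
        for filename in filename_list:
            os.unlink(os.path.join(self.dir, filename))


# After registration, get_backend() routes URLs with this scheme here.
duplicity.backend.register_backend("example", LocalDirBackend)
```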
#### File: duplicity/backends/_boto_single.py
```python
import time
import duplicity.backend
from duplicity import globals
from duplicity import log
from duplicity.errors import * #@UnusedWildImport
from duplicity.util import exception_traceback
from duplicity.backend import retry
from duplicity import progress
BOTO_MIN_VERSION = "2.0"
class BotoBackend(duplicity.backend.Backend):
"""
Backend for Amazon's Simple Storage Service (aka Amazon S3), through
the use of the boto module (http://code.google.com/p/boto/).
To make use of this backend you must set aws_access_key_id
and aws_secret_access_key in your ~/.boto or /etc/boto.cfg
with your Amazon Web Services key id and secret respectively.
Alternatively you can export the environment variables
AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY.
"""
def __init__(self, parsed_url):
duplicity.backend.Backend.__init__(self, parsed_url)
import boto
assert boto.Version >= BOTO_MIN_VERSION
# This folds the null prefix and all null parts, which means that:
# //MyBucket/ and //MyBucket are equivalent.
# //MyBucket//My///Prefix/ and //MyBucket/My/Prefix are equivalent.
self.url_parts = filter(lambda x: x != '', parsed_url.path.split('/'))
if self.url_parts:
self.bucket_name = self.url_parts.pop(0)
else:
# Duplicity hangs if boto gets a null bucket name.
# HC: Caught a socket error, trying to recover
raise BackendException('Boto requires a bucket name.')
self.scheme = parsed_url.scheme
if self.url_parts:
self.key_prefix = '%s/' % '/'.join(self.url_parts)
else:
self.key_prefix = ''
self.straight_url = duplicity.backend.strip_auth_from_url(parsed_url)
self.parsed_url = parsed_url
# duplicity and boto.storage_uri() have different URI formats.
# boto uses scheme://bucket[/name] and specifies hostname on connect()
self.boto_uri_str = '://'.join((parsed_url.scheme[:2],
parsed_url.path.lstrip('/')))
self.storage_uri = boto.storage_uri(self.boto_uri_str)
self.resetConnection()
def resetConnection(self):
self.bucket = None
self.conn = None
try:
from boto.s3.connection import S3Connection
from boto.s3.key import Key
assert hasattr(S3Connection, 'lookup')
# Newer versions of boto default to using
# virtual hosting for buckets as a result of
# upstream deprecation of the old-style access
# method by Amazon S3. This change is not
# backwards compatible (in particular with
# respect to upper case characters in bucket
# names); so we default to forcing use of the
# old-style method unless the user has
# explicitly asked us to use new-style bucket
# access.
#
# Note that if the user wants to use new-style
# buckets, we use the subdomain calling form
# rather than given the option of both
# subdomain and vhost. The reason being that
# anything addressable as a vhost, is also
# addressable as a subdomain. Seeing as the
# latter is mostly a convenience method of
# allowing browseable content semi-invisibly
# being hosted on S3, the former format makes
# a lot more sense for us to use - being
# explicit about what is happening (the fact
# that we are talking to S3 servers).
try:
from boto.s3.connection import OrdinaryCallingFormat
from boto.s3.connection import SubdomainCallingFormat
cfs_supported = True
calling_format = OrdinaryCallingFormat()
except ImportError:
cfs_supported = False
calling_format = None
if globals.s3_use_new_style:
if cfs_supported:
calling_format = SubdomainCallingFormat()
else:
log.FatalError("Use of new-style (subdomain) S3 bucket addressing was"
"requested, but does not seem to be supported by the "
"boto library. Either you need to upgrade your boto "
"library or duplicity has failed to correctly detect "
"the appropriate support.",
log.ErrorCode.boto_old_style)
else:
if cfs_supported:
calling_format = OrdinaryCallingFormat()
else:
calling_format = None
except ImportError:
log.FatalError("This backend (s3) requires boto library, version %s or later, "
"(http://code.google.com/p/boto/)." % BOTO_MIN_VERSION,
log.ErrorCode.boto_lib_too_old)
if not self.parsed_url.hostname:
# Use the default host.
self.conn = self.storage_uri.connect(
is_secure=(not globals.s3_unencrypted_connection))
else:
assert self.scheme == 's3'
self.conn = self.storage_uri.connect(
host=self.parsed_url.hostname,
is_secure=(not globals.s3_unencrypted_connection))
if hasattr(self.conn, 'calling_format'):
if calling_format is None:
log.FatalError("It seems we previously failed to detect support for calling "
"formats in the boto library, yet the support is there. This is "
"almost certainly a duplicity bug.",
log.ErrorCode.boto_calling_format)
else:
self.conn.calling_format = calling_format
else:
# Duplicity hangs if boto gets a null bucket name.
# HC: Caught a socket error, trying to recover
raise BackendException('Boto requires a bucket name.')
self.bucket = self.conn.lookup(self.bucket_name)
def put(self, source_path, remote_filename=None):
from boto.s3.connection import Location
if globals.s3_european_buckets:
if not globals.s3_use_new_style:
log.FatalError("European bucket creation was requested, but not new-style "
"bucket addressing (--s3-use-new-style)",
log.ErrorCode.s3_bucket_not_style)
#Network glitch may prevent first few attempts of creating/looking up a bucket
for n in range(1, globals.num_retries+1):
if self.bucket:
break
if n > 1:
time.sleep(30)
try:
try:
self.bucket = self.conn.get_bucket(self.bucket_name, validate=True)
except Exception, e:
if "NoSuchBucket" in str(e):
if globals.s3_european_buckets:
self.bucket = self.conn.create_bucket(self.bucket_name,
location=Location.EU)
else:
self.bucket = self.conn.create_bucket(self.bucket_name)
else:
raise e
except Exception, e:
log.Warn("Failed to create bucket (attempt #%d) '%s' failed (reason: %s: %s)"
"" % (n, self.bucket_name,
e.__class__.__name__,
str(e)))
self.resetConnection()
if not remote_filename:
remote_filename = source_path.get_filename()
key = self.bucket.new_key(self.key_prefix + remote_filename)
for n in range(1, globals.num_retries+1):
if n > 1:
# sleep before retry (new connection to a **hopeful** new host, so no need to wait so long)
time.sleep(10)
if globals.s3_use_rrs:
storage_class = 'REDUCED_REDUNDANCY'
else:
storage_class = 'STANDARD'
log.Info("Uploading %s/%s to %s Storage" % (self.straight_url, remote_filename, storage_class))
try:
key.set_contents_from_filename(source_path.name, {'Content-Type': 'application/octet-stream',
'x-amz-storage-class': storage_class},
cb=progress.report_transfer,
num_cb=(max(2, 8 * globals.volsize / (1024 * 1024)))
) # Max num of callbacks = 8 times x megabyte
key.close()
self.resetConnection()
return
except Exception, e:
log.Warn("Upload '%s/%s' failed (attempt #%d, reason: %s: %s)"
"" % (self.straight_url,
remote_filename,
n,
e.__class__.__name__,
str(e)))
log.Debug("Backtrace of previous error: %s" % (exception_traceback(),))
self.resetConnection()
log.Warn("Giving up trying to upload %s/%s after %d attempts" %
(self.straight_url, remote_filename, globals.num_retries))
raise BackendException("Error uploading %s/%s" % (self.straight_url, remote_filename))
def get(self, remote_filename, local_path):
for n in range(1, globals.num_retries+1):
if n > 1:
# sleep before retry (new connection to a **hopeful** new host, so no need to wait so long)
time.sleep(10)
log.Info("Downloading %s/%s" % (self.straight_url, remote_filename))
try:
key_name = self.key_prefix + remote_filename
key = self.bucket.get_key(key_name)
if key is None:
raise BackendException("%s: key not found" % key_name)
key.get_contents_to_filename(local_path.name)
local_path.setdata()
self.resetConnection()
return
except Exception, e:
log.Warn("Download %s/%s failed (attempt #%d, reason: %s: %s)"
"" % (self.straight_url,
remote_filename,
n,
e.__class__.__name__,
str(e)), 1)
log.Debug("Backtrace of previous error: %s" % (exception_traceback(),))
self.resetConnection()
log.Warn("Giving up trying to download %s/%s after %d attempts" %
(self.straight_url, remote_filename, globals.num_retries))
raise BackendException("Error downloading %s/%s" % (self.straight_url, remote_filename))
def _list(self):
if not self.bucket:
raise BackendException("No connection to backend")
for n in range(1, globals.num_retries+1):
if n > 1:
# sleep before retry
time.sleep(30)
log.Info("Listing %s" % self.straight_url)
try:
return self._list_filenames_in_bucket()
except Exception, e:
log.Warn("List %s failed (attempt #%d, reason: %s: %s)"
"" % (self.straight_url,
n,
e.__class__.__name__,
str(e)), 1)
log.Debug("Backtrace of previous error: %s" % (exception_traceback(),))
log.Warn("Giving up trying to list %s after %d attempts" %
(self.straight_url, globals.num_retries))
raise BackendException("Error listng %s" % self.straight_url)
def _list_filenames_in_bucket(self):
# We add a 'd' to the prefix to make sure it is not null (for boto) and
# to optimize the listing of our filenames, which always begin with 'd'.
# This will cause a failure in the regression tests as below:
# FAIL: Test basic backend operations
# <tracback snipped>
# AssertionError: Got list: []
# Wanted: ['testfile']
# Because of the need for this optimization, it should be left as is.
#for k in self.bucket.list(prefix = self.key_prefix + 'd', delimiter = '/'):
filename_list = []
for k in self.bucket.list(prefix = self.key_prefix, delimiter = '/'):
try:
filename = k.key.replace(self.key_prefix, '', 1)
filename_list.append(filename)
log.Debug("Listed %s/%s" % (self.straight_url, filename))
except AttributeError:
pass
return filename_list
def delete(self, filename_list):
for filename in filename_list:
self.bucket.delete_key(self.key_prefix + filename)
log.Debug("Deleted %s/%s" % (self.straight_url, filename))
@retry
def _query_file_info(self, filename, raise_errors=False):
try:
key = self.bucket.lookup(self.key_prefix + filename)
if key is None:
return {'size': -1}
return {'size': key.size}
except Exception, e:
log.Warn("Query %s/%s failed: %s"
"" % (self.straight_url,
filename,
str(e)))
self.resetConnection()
if raise_errors:
raise e
else:
return {'size': None}
duplicity.backend.register_backend("gs", BotoBackend)
duplicity.backend.register_backend("s3", BotoBackend)
duplicity.backend.register_backend("s3+http", BotoBackend)
```
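The comments in `BotoBackend.__init__` above note that empty path components are folded away when the bucket name and key prefix are derived from the URL path. A standalone illustration of that folding (the helper name is just for demonstration):

```python
# Illustration only: replicates the bucket/prefix derivation done in
# BotoBackend.__init__ above. Empty path components are dropped, so
# '//MyBucket//My///Prefix/' and '//MyBucket/My/Prefix' come out the same.
def split_bucket_and_prefix(path):
    parts = filter(lambda x: x != '', path.split('/'))
    if not parts:
        return None, ''
    bucket_name = parts.pop(0)
    key_prefix = '%s/' % '/'.join(parts) if parts else ''
    return bucket_name, key_prefix

# Both print ('MyBucket', 'My/Prefix/'):
print split_bucket_and_prefix('//MyBucket//My///Prefix/')
print split_bucket_and_prefix('//MyBucket/My/Prefix')
```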
#### File: duplicity/backends/ftpsbackend.py
```python
import os
import os.path
import urllib
import re
import duplicity.backend
from duplicity import globals
from duplicity import log
from duplicity.errors import *
from duplicity import tempdir
class FTPSBackend(duplicity.backend.Backend):
"""Connect to remote store using File Transfer Protocol"""
def __init__(self, parsed_url):
duplicity.backend.Backend.__init__(self, parsed_url)
# we expect an output
try:
p = os.popen("lftp --version")
fout = p.read()
ret = p.close()
except Exception:
pass
# there is no output if lftp not found
if not fout:
log.FatalError("LFTP not found: Please install LFTP.",
log.ErrorCode.ftps_lftp_missing)
# version is the second word of the second part of the first line
version = fout.split('\n')[0].split(' | ')[1].split()[1]
log.Notice("LFTP version is %s" % version)
self.parsed_url = parsed_url
self.url_string = duplicity.backend.strip_auth_from_url(self.parsed_url)
# Use an explicit directory name.
if self.url_string[-1] != '/':
self.url_string += '/'
self.password = self.get_password()
if globals.ftp_connection == 'regular':
self.conn_opt = 'off'
else:
self.conn_opt = 'on'
if parsed_url.port != None and parsed_url.port != 21:
self.portflag = " -p '%s'" % (parsed_url.port)
else:
self.portflag = ""
self.tempfile, self.tempname = tempdir.default().mkstemp()
os.write(self.tempfile, "set ftp:ssl-allow true\n")
os.write(self.tempfile, "set ftp:ssl-protect-data true\n")
os.write(self.tempfile, "set ftp:ssl-protect-list true\n")
os.write(self.tempfile, "set net:timeout %s\n" % globals.timeout)
os.write(self.tempfile, "set net:max-retries %s\n" % globals.num_retries)
os.write(self.tempfile, "set ftp:passive-mode %s\n" % self.conn_opt)
os.write(self.tempfile, "open %s %s\n" % (self.portflag, self.parsed_url.hostname))
# allow .netrc auth by only setting user/pass when user was actually given
if self.parsed_url.username:
os.write(self.tempfile, "user %s %s\n" % (self.parsed_url.username, self.password))
os.close(self.tempfile)
self.flags = "-f %s" % self.tempname
def put(self, source_path, remote_filename = None):
"""Transfer source_path to remote_filename"""
if not remote_filename:
remote_filename = source_path.get_filename()
remote_path = os.path.join(urllib.unquote(self.parsed_url.path.lstrip('/')), remote_filename).rstrip()
commandline = "lftp -c 'source %s;put \'%s\' -o \'%s\''" % \
(self.tempname, source_path.name, remote_path)
l = self.run_command_persist(commandline)
def get(self, remote_filename, local_path):
"""Get remote filename, saving it to local_path"""
remote_path = os.path.join(urllib.unquote(self.parsed_url.path), remote_filename).rstrip()
commandline = "lftp -c 'source %s;get %s -o %s'" % \
(self.tempname, remote_path.lstrip('/'), local_path.name)
self.run_command_persist(commandline)
local_path.setdata()
def _list(self):
"""List files in directory"""
# Do a long listing to avoid connection reset
remote_dir = urllib.unquote(self.parsed_url.path.lstrip('/')).rstrip()
commandline = "lftp -c 'source %s;ls \'%s\''" % (self.tempname, remote_dir)
l = self.popen_persist(commandline).split('\n')
l = filter(lambda x: x, l)
# Look for our files as the last element of a long list line
return [x.split()[-1] for x in l]
def delete(self, filename_list):
"""Delete files in filename_list"""
filelist = ""
for filename in filename_list:
filelist += "\'%s\' " % filename
if filelist.rstrip():
remote_dir = urllib.unquote(self.parsed_url.path.lstrip('/')).rstrip()
commandline = "lftp -c 'source %s;cd \'%s\';rm %s'" % (self.tempname, remote_dir, filelist.rstrip())
self.popen_persist(commandline)
duplicity.backend.register_backend("ftps", FTPSBackend)
```
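`FTPSBackend.__init__` above writes an lftp settings script to a temporary file, and every operation is then executed as `lftp -c 'source <tempfile>;<command>'`. For a hypothetical URL such as `ftps://backupuser@ftp.example.com:2121/dumps`, the generated script would look roughly like this (timeout, retry and passive-mode values come from the corresponding `globals` settings):

```python
# Sketch of the generated lftp settings file for the hypothetical URL above
# (shown as comments; the password comes from get_password()):
#
#   set ftp:ssl-allow true
#   set ftp:ssl-protect-data true
#   set ftp:ssl-protect-list true
#   set net:timeout <globals.timeout>
#   set net:max-retries <globals.num_retries>
#   set ftp:passive-mode on
#   open  -p '2121' ftp.example.com
#   user backupuser <password>
#
# put/get/list/delete then run, e.g.:
#   lftp -c 'source <tempfile>;put '/tmp/duplicity-xyz/volume1' -o 'dumps/volume1''
```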
#### File: duplicity/backends/imapbackend.py
```python
import imaplib
import re
import os
import time
import socket
import StringIO
import rfc822
import getpass
import email
import duplicity.backend
from duplicity import globals
from duplicity import log
from duplicity.errors import * #@UnusedWildImport
class ImapBackend(duplicity.backend.Backend):
def __init__(self, parsed_url):
duplicity.backend.Backend.__init__(self, parsed_url)
log.Debug("I'm %s (scheme %s) connecting to %s as %s" %
(self.__class__.__name__, parsed_url.scheme, parsed_url.hostname, parsed_url.username))
# Store url for reconnection on error
self._url = parsed_url
# Set the username
if ( parsed_url.username is None ):
username = raw_input('Enter account userid: ')
else:
username = parsed_url.username
# Set the password
if ( not parsed_url.password ):
if os.environ.has_key('IMAP_PASSWORD'):
password = os.environ.get('IMAP_PASSWORD')
else:
password = getpass.getpass("Enter account password: ")
else:
password = parsed_url.password
self._username = username
self._password = password
self._resetConnection()
def _resetConnection(self):
parsed_url = self._url
try:
imap_server = os.environ['IMAP_SERVER']
except KeyError:
imap_server = parsed_url.hostname
# Try to close the connection cleanly
try:
self._conn.close()
except Exception:
pass
if (parsed_url.scheme == "imap"):
cl = imaplib.IMAP4
self._conn = cl(imap_server, 143)
elif (parsed_url.scheme == "imaps"):
cl = imaplib.IMAP4_SSL
self._conn = cl(imap_server, 993)
log.Debug("Type of imap class: %s" % (cl.__name__))
self.remote_dir = re.sub(r'^/', r'', parsed_url.path, 1)
# Login
if (not(globals.imap_full_address)):
self._conn.login(self._username, self._password)
self._conn.select(globals.imap_mailbox)
log.Info("IMAP connected")
else:
self._conn.login(self._username + "@" + parsed_url.hostname, self._password)
self._conn.select(globals.imap_mailbox)
log.Info("IMAP connected")
def _prepareBody(self,f,rname):
mp = email.MIMEMultipart.MIMEMultipart()
# I am going to use the remote_dir as the From address so that
# multiple archives can be stored in an IMAP account and can be
# accessed separately
mp["From"]=self.remote_dir
mp["Subject"]=rname
a = email.MIMEBase.MIMEBase("application","binary")
a.set_payload(f.read())
email.Encoders.encode_base64(a)
mp.attach(a)
return mp.as_string()
def put(self, source_path, remote_filename = None):
if not remote_filename:
remote_filename = source_path.get_filename()
f=source_path.open("rb")
allowedTimeout = globals.timeout
if (allowedTimeout == 0):
# Allow a total timeout of 1 day
allowedTimeout = 2880
while allowedTimeout > 0:
try:
self._conn.select(remote_filename)
body=self._prepareBody(f,remote_filename)
# If we don't select the IMAP folder before
# append, the message goes into the INBOX.
self._conn.select(globals.imap_mailbox)
self._conn.append(globals.imap_mailbox, None, None, body)
break
except (imaplib.IMAP4.abort, socket.error, socket.sslerror):
allowedTimeout -= 1
log.Info("Error saving '%s', retrying in 30s " % remote_filename)
time.sleep(30)
while allowedTimeout > 0:
try:
self._resetConnection()
break
except (imaplib.IMAP4.abort, socket.error, socket.sslerror):
allowedTimeout -= 1
log.Info("Error reconnecting, retrying in 30s ")
time.sleep(30)
log.Info("IMAP mail with '%s' subject stored" % remote_filename)
def get(self, remote_filename, local_path):
allowedTimeout = globals.timeout
if (allowedTimeout == 0):
# Allow a total timeout of 1 day
allowedTimeout = 2880
while allowedTimeout > 0:
try:
self._conn.select(globals.imap_mailbox)
(result,list) = self._conn.search(None, 'Subject', remote_filename)
if result != "OK":
raise Exception(list[0])
#check if there is any result
if list[0] == '':
raise Exception("no mail with subject %s")
(result,list) = self._conn.fetch(list[0],"(RFC822)")
if result != "OK":
raise Exception(list[0])
rawbody=list[0][1]
p = email.Parser.Parser()
m = p.parsestr(rawbody)
mp = m.get_payload(0)
body = mp.get_payload(decode=True)
break
except (imaplib.IMAP4.abort, socket.error, socket.sslerror):
allowedTimeout -= 1
log.Info("Error loading '%s', retrying in 30s " % remote_filename)
time.sleep(30)
while allowedTimeout > 0:
try:
self._resetConnection()
break
except (imaplib.IMAP4.abort, socket.error, socket.sslerror):
allowedTimeout -= 1
log.Info("Error reconnecting, retrying in 30s ")
time.sleep(30)
tfile = local_path.open("wb")
tfile.write(body)
local_path.setdata()
log.Info("IMAP mail with '%s' subject fetched" % remote_filename)
def _list(self):
ret = []
(result,list) = self._conn.select(globals.imap_mailbox)
if result != "OK":
raise BackendException(list[0])
# Going to find all the archives which have remote_dir in the From
# address
# Search returns an error if you haven't selected an IMAP folder.
(result,list) = self._conn.search(None, 'FROM', self.remote_dir)
if result!="OK":
raise Exception(list[0])
if list[0]=='':
return ret
nums=list[0].split(" ")
set="%s:%s"%(nums[0],nums[-1])
(result,list) = self._conn.fetch(set,"(BODY[HEADER])")
if result!="OK":
raise Exception(list[0])
for msg in list:
if (len(msg)==1):continue
io = StringIO.StringIO(msg[1])
m = rfc822.Message(io)
subj = m.getheader("subject")
header_from = m.getheader("from")
# Catch messages with empty headers which cause an exception.
if (not (header_from == None)):
if (re.compile("^" + self.remote_dir + "$").match(header_from)):
ret.append(subj)
log.Info("IMAP LIST: %s %s" % (subj,header_from))
return ret
def _imapf(self,fun,*args):
(ret,list)=fun(*args)
if ret != "OK":
raise Exception(list[0])
return list
def _delete_single_mail(self,i):
self._imapf(self._conn.store,i,"+FLAGS",'\\DELETED')
def _expunge(self):
list=self._imapf(self._conn.expunge)
def delete(self, filename_list):
assert len(filename_list) > 0
for filename in filename_list:
list = self._imapf(self._conn.search,None,"(SUBJECT %s)"%filename)
list = list[0].split()
if len(list)==0 or list[0]=="":raise Exception("no such mail with subject '%s'"%filename)
self._delete_single_mail(list[0])
log.Notice("marked %s to be deleted" % filename)
self._expunge()
log.Notice("IMAP expunged %s files" % len(list))
def close(self):
self._conn.select(globals.imap_mailbox)
self._conn.close()
self._conn.logout()
duplicity.backend.register_backend("imap", ImapBackend);
duplicity.backend.register_backend("imaps", ImapBackend);
```
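The IMAP backend above takes its credentials from the URL, from the `IMAP_PASSWORD`/`IMAP_SERVER` environment variables, or interactively, and stores each volume as a mail whose Subject is the remote filename and whose From header is the URL path. A small sketch of the addressing conventions (host, user and folder names are made up):

```python
# Hypothetical sketch: how an IMAP backend URL is parsed. No connection is
# made here; ParsedUrl only validates and splits the URL.
import os
from duplicity import backend as dup_backend

os.environ['IMAP_PASSWORD'] = 'secret'           # read instead of prompting
os.environ['IMAP_SERVER'] = 'imap.example.com'   # optional host override

url = dup_backend.ParsedUrl("imaps://backupuser@imap.example.com/duplicity-archive")
print url.scheme, url.username, url.hostname, url.path
# -> imaps backupuser imap.example.com /duplicity-archive
# ImapBackend strips the leading '/' from the path and uses the rest as the
# From header of stored mails, so several archives can share one mailbox.
```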
#### File: dist-packages/duplicity/dup_time.py
```python
import time, types, re, calendar
from duplicity import globals
class TimeException(Exception):
pass
_interval_conv_dict = {"s": 1, "m": 60, "h": 3600, "D": 86400,
"W": 7*86400, "M": 30*86400, "Y": 365*86400}
_integer_regexp = re.compile("^[0-9]+$")
_interval_regexp = re.compile("^([0-9]+)([smhDWMY])")
_genstr_date_regexp1 = re.compile("^(?P<year>[0-9]{4})[-/]"
"(?P<month>[0-9]{1,2})[-/]"
"(?P<day>[0-9]{1,2})$")
_genstr_date_regexp2 = re.compile("^(?P<month>[0-9]{1,2})[-/]"
"(?P<day>[0-9]{1,2})[-/]"
"(?P<year>[0-9]{4})$")
_genstr_date_regexp3 = re.compile("^(?P<year>[0-9]{4})"
"(?P<month>[0-9]{2})"
"(?P<day>[0-9]{2})Z$")
curtime = curtimestr = None
prevtime = prevtimestr = None
bad_interval_string = _("""Bad interval string "%s"
Intervals are specified like 2Y (2 years) or 2h30m (2.5 hours). The
allowed special characters are s, m, h, D, W, M, and Y. See the man
page for more information.""")
bad_time_string = _("""Bad time string "%s"
The acceptable time strings are intervals (like "3D64s"), w3-datetime
strings, like "2002-04-26T04:22:01-07:00" (strings like
"2002-04-26T04:22:01" are also acceptable - duplicity will use the
current time zone), or ordinary dates like 2/4/1997 or 2001-04-23
(various combinations are acceptable, but the month always precedes
the day).""")
def setcurtime(time_in_secs = None):
"""Sets the current time in curtime and curtimestr"""
global curtime, curtimestr
t = time_in_secs or long(time.time())
assert type(t) in (types.LongType, types.IntType)
curtime, curtimestr = t, timetostring(t)
def setprevtime(time_in_secs):
"""Sets the previous time in prevtime and prevtimestr"""
global prevtime, prevtimestr
assert type(time_in_secs) in (types.LongType, types.IntType), prevtime
prevtime, prevtimestr = time_in_secs, timetostring(time_in_secs)
def timetostring(timeinseconds):
"""Return w3 or duplicity datetime compliant listing of timeinseconds"""
if globals.old_filenames:
# We need to know if DST applies to append the correct offset. So
# 1. Save the tuple returned by localtime.
# 2. Pass the DST flag into gettzd
lcltime = time.localtime(timeinseconds)
return time.strftime("%Y-%m-%dT%H" + globals.time_separator +
"%M" + globals.time_separator + "%S",
lcltime) + gettzd(lcltime[-1])
else:
# DST never applies to UTC
lcltime = time.gmtime(timeinseconds)
return time.strftime("%Y%m%dT%H%M%SZ", lcltime)
def stringtotime(timestring):
"""Return time in seconds from w3 or duplicity timestring
If there is an error parsing the string, or it doesn't look
like a valid datetime string, return None.
"""
try:
date, daytime = timestring[:19].split("T")
if len(timestring) == 16:
# new format for filename time
year, month, day = map(int,
[date[0:4], date[4:6], date[6:8]])
hour, minute, second = map(int,
[daytime[0:2], daytime[2:4], daytime[4:6]])
else:
# old format for filename time
year, month, day = map(int, date.split("-"))
hour, minute, second = map(int,
daytime.split(globals.time_separator))
assert 1900 < year < 2100, year
assert 1 <= month <= 12
assert 1 <= day <= 31
assert 0 <= hour <= 23
assert 0 <= minute <= 59
assert 0 <= second <= 61 # leap seconds
# We want to return the time in units of seconds since the
# epoch. Unfortunately the only function that does this
# works in terms of the current timezone and we have a
# timezone offset in the string.
timetuple = (year, month, day, hour, minute, second, -1, -1, 0)
if len(timestring) == 16:
# as said in documentation, time.gmtime() and timegm() are each others' inverse.
# As far as UTC format is used in new file format,
# do not rely on system's python DST and tzdata settings
# and use functions that working with UTC
utc_in_secs = calendar.timegm(timetuple)
else:
# mktime assumed that the tuple was a local time. Compensate
# by subtracting the value for the current timezone.
# We don't need to worry about DST here because we turned it
# off in the tuple
local_in_secs = time.mktime(timetuple)
utc_in_secs = local_in_secs - time.timezone
# Now apply the offset that we were given in the time string
# This gives the correct number of seconds from the epoch
# even when we're not in the same timezone that wrote the
# string
if len(timestring) == 16:
return long(utc_in_secs)
else:
return long(utc_in_secs + tzdtoseconds(timestring[19:]))
except (TypeError, ValueError, AssertionError):
return None
def timetopretty(timeinseconds):
"""Return pretty version of time"""
return time.asctime(time.localtime(timeinseconds))
def stringtopretty(timestring):
"""Return pretty version of time given w3 time string"""
return timetopretty(stringtotime(timestring))
def inttopretty(seconds):
"""Convert num of seconds to readable string like "2 hours"."""
partlist = []
hours, seconds = divmod(seconds, 3600)
if hours > 1:
partlist.append("%d hours" % hours)
elif hours == 1:
partlist.append("1 hour")
minutes, seconds = divmod(seconds, 60)
if minutes > 1:
partlist.append("%d minutes" % minutes)
elif minutes == 1:
partlist.append("1 minute")
if seconds == 1:
partlist.append("1 second")
elif not partlist or seconds > 1:
if isinstance(seconds, int) or isinstance(seconds, long):
partlist.append("%s seconds" % seconds)
else:
partlist.append("%.2f seconds" % seconds)
return " ".join(partlist)
def intstringtoseconds(interval_string):
"""Convert a string expressing an interval (e.g. "4D2s") to seconds"""
def error():
raise TimeException(bad_interval_string % interval_string)
if len(interval_string) < 2:
error()
total = 0
while interval_string:
match = _interval_regexp.match(interval_string)
if not match:
error()
num, ext = int(match.group(1)), match.group(2)
if not ext in _interval_conv_dict or num < 0:
error()
total += num*_interval_conv_dict[ext]
interval_string = interval_string[match.end(0):]
return total
def gettzd(dstflag):
"""Return w3's timezone identification string.
Expressed as [+/-]hh:mm. For instance, PST is -08:00. The zone
coincides with what localtime(), etc., use.
"""
# time.daylight doesn't help us. It's a flag that indicates that we
# have a dst option for the current timezone. Compensate by allowing
# the caller to pass a flag to indicate that DST applies. This flag
# is in the same format as the last member of the tuple returned by
# time.localtime()
if dstflag > 0:
offset = -1 * time.altzone/60
else:
offset = -1 * time.timezone/60
if offset > 0:
prefix = "+"
elif offset < 0:
prefix = "-"
else:
return "Z" # time is already in UTC
hours, minutes = map(abs, divmod(offset, 60))
assert 0 <= hours <= 23
assert 0 <= minutes <= 59
return "%s%02d%s%02d" % (prefix, hours, globals.time_separator, minutes)
def tzdtoseconds(tzd):
"""Given w3 compliant TZD, return how far ahead UTC is"""
if tzd == "Z":
return 0
assert len(tzd) == 6 # only accept forms like +08:00 for now
assert (tzd[0] == "-" or tzd[0] == "+") and \
tzd[3] == globals.time_separator
return -60 * (60 * int(tzd[:3]) + int(tzd[4:]))
def cmp(time1, time2):
"""Compare time1 and time2 and return -1, 0, or 1"""
if type(time1) is types.StringType:
time1 = stringtotime(time1)
assert time1 is not None
if type(time2) is types.StringType:
time2 = stringtotime(time2)
assert time2 is not None
if time1 < time2:
return -1
elif time1 == time2:
return 0
else:
return 1
def genstrtotime(timestr, override_curtime = None):
"""Convert a generic time string to a time in seconds"""
if override_curtime is None:
override_curtime = curtime
if timestr == "now":
return override_curtime
def error():
raise TimeException(bad_time_string % timestr)
# Test for straight integer
if _integer_regexp.search(timestr):
return int(timestr)
# Test for w3-datetime format, possibly missing tzd
# This is an ugly hack. We need to know if DST applies when doing
# gettzd. However, we don't have the flag to pass. Assume that DST
# doesn't apply and pass 0. Getting a reasonable default from
# localtime() is a bad idea, since we transition to/from DST between
# calls to this method on the same run
t = stringtotime(timestr) or stringtotime(timestr+gettzd(0))
if t:
return t
try: # test for an interval, like "2 days ago"
return override_curtime - intstringtoseconds(timestr)
except TimeException:
pass
# Now check for dates like 2001/3/23
match = _genstr_date_regexp1.search(timestr) or \
_genstr_date_regexp2.search(timestr) or \
_genstr_date_regexp3.search(timestr)
if not match:
error()
timestr = "%s-%02d-%02dT00:00:00%s" % (match.group('year'),
int(match.group('month')),
int(match.group('day')),
gettzd(0))
t = stringtotime(timestr)
if t:
return t
else:
error()
```
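The module above deals with two kinds of strings: interval strings such as `2h30m` (explained in `bad_interval_string`) and w3/duplicity timestamps. A short usage sketch, assuming the default (new, UTC) filename format:

```python
# Usage sketch for dup_time (expected values shown in comments):
from duplicity import dup_time

dup_time.setcurtime()                        # initialize curtime/curtimestr

print dup_time.intstringtoseconds("2h30m")   # 9000
print dup_time.intstringtoseconds("1W2D")    # 777600

# timetostring()/stringtotime() are inverses for the new UTC file format
# (globals.old_filenames left at its default):
s = dup_time.timetostring(0)                 # '19700101T000000Z'
print dup_time.stringtotime(s)               # 0

# genstrtotime() also accepts "now", plain dates like 2001-04-23, and
# intervals, which are taken as "that long before the current time":
print dup_time.genstrtotime("2D")            # curtime - 172800
```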
#### File: dist-packages/duplicity/path.py
```python
import stat, errno, socket, time, re, gzip
from duplicity import tarfile
from duplicity import file_naming
from duplicity import globals
from duplicity import gpg
from duplicity import util
from duplicity import librsync
from duplicity import log #@UnusedImport
from duplicity import dup_time
from duplicity import cached_ops
from duplicity.lazy import * #@UnusedWildImport
_copy_blocksize = 64 * 1024
_tmp_path_counter = 1
class StatResult:
"""Used to emulate the output of os.stat() and related"""
# st_mode is required by the TarInfo class, but it's unclear how
# to generate it from file permissions.
st_mode = 0
class PathException(Exception):
pass
class ROPath:
"""Read only Path
Objects of this class don't represent real files, so they don't
have a name. They are required to be indexed, though.
"""
def __init__(self, index, stat = None):
"""ROPath initializer"""
self.opened, self.fileobj = None, None
self.index = index
self.stat, self.type = None, None
self.mode, self.devnums = None, None
def set_from_stat(self):
"""Set the value of self.type, self.mode from self.stat"""
if not self.stat:
self.type = None
st_mode = self.stat.st_mode
if stat.S_ISREG(st_mode):
self.type = "reg"
elif stat.S_ISDIR(st_mode):
self.type = "dir"
elif stat.S_ISLNK(st_mode):
self.type = "sym"
elif stat.S_ISFIFO(st_mode):
self.type = "fifo"
elif stat.S_ISSOCK(st_mode):
raise PathException(util.ufn(self.get_relative_path()) +
u"is a socket, unsupported by tar")
self.type = "sock"
elif stat.S_ISCHR(st_mode):
self.type = "chr"
elif stat.S_ISBLK(st_mode):
self.type = "blk"
else:
raise PathException("Unknown type")
self.mode = stat.S_IMODE(st_mode)
if self.type in ("chr", "blk"):
self.devnums = (os.major(self.stat.st_rdev),
os.minor(self.stat.st_rdev))
def blank(self):
"""Black out self - set type and stat to None"""
self.type, self.stat = None, None
def exists(self):
"""True if corresponding file exists"""
return self.type
def isreg(self):
"""True if self corresponds to regular file"""
return self.type == "reg"
def isdir(self):
"""True if self is dir"""
return self.type == "dir"
def issym(self):
"""True if self is sym"""
return self.type == "sym"
def isfifo(self):
"""True if self is fifo"""
return self.type == "fifo"
def issock(self):
"""True is self is socket"""
return self.type == "sock"
def isdev(self):
"""True is self is a device file"""
return self.type == "chr" or self.type == "blk"
def getdevloc(self):
"""Return device number path resides on"""
return self.stat.st_dev
def getsize(self):
"""Return length in bytes from stat object"""
return self.stat.st_size
def getmtime(self):
"""Return mod time of path in seconds"""
return int(self.stat.st_mtime)
def get_relative_path(self):
"""Return relative path, created from index"""
if self.index:
return "/".join(self.index)
else:
return "."
def getperms(self):
"""Return permissions mode, owner and group"""
s1 = self.stat
return '%s:%s %o' % (s1.st_uid, s1.st_gid, self.mode)
def open(self, mode):
"""Return fileobj associated with self"""
assert mode == "rb" and self.fileobj and not self.opened, \
"%s %s %s" % (mode, self.fileobj, self.opened)
self.opened = 1
return self.fileobj
def get_data(self):
"""Return contents of associated fileobj in string"""
fin = self.open("rb")
buf = fin.read()
assert not fin.close()
return buf
def setfileobj(self, fileobj):
"""Set file object returned by open()"""
assert not self.fileobj
self.fileobj = fileobj
self.opened = None
def init_from_tarinfo(self, tarinfo):
"""Set data from tarinfo object (part of tarfile module)"""
# Set the type
type = tarinfo.type
if type == tarfile.REGTYPE or type == tarfile.AREGTYPE:
self.type = "reg"
elif type == tarfile.LNKTYPE:
raise PathException("Hard links not supported yet")
elif type == tarfile.SYMTYPE:
self.type = "sym"
self.symtext = tarinfo.linkname
elif type == tarfile.CHRTYPE:
self.type = "chr"
self.devnums = (tarinfo.devmajor, tarinfo.devminor)
elif type == tarfile.BLKTYPE:
self.type = "blk"
self.devnums = (tarinfo.devmajor, tarinfo.devminor)
elif type == tarfile.DIRTYPE:
self.type = "dir"
elif type == tarfile.FIFOTYPE:
self.type = "fifo"
else:
raise PathException("Unknown tarinfo type %s" % (type,))
self.mode = tarinfo.mode
self.stat = StatResult()
""" Set user and group id
use numeric id if name lookup fails
OR
--numeric-owner is set
"""
try:
if globals.numeric_owner:
raise KeyError
self.stat.st_uid = cached_ops.getpwnam(tarinfo.uname)[2]
except KeyError:
self.stat.st_uid = tarinfo.uid
try:
if globals.numeric_owner:
raise KeyError
self.stat.st_gid = cached_ops.getgrnam(tarinfo.gname)[2]
except KeyError:
self.stat.st_gid = tarinfo.gid
self.stat.st_mtime = int(tarinfo.mtime)
if self.stat.st_mtime < 0:
log.Warn(_("Warning: %s has negative mtime, treating as 0.")
% (util.ufn(tarinfo.name)))
self.stat.st_mtime = 0
self.stat.st_size = tarinfo.size
def get_ropath(self):
"""Return ropath copy of self"""
new_ropath = ROPath(self.index, self.stat)
new_ropath.type, new_ropath.mode = self.type, self.mode
if self.issym():
new_ropath.symtext = self.symtext
elif self.isdev():
new_ropath.devnums = self.devnums
if self.exists():
new_ropath.stat = self.stat
return new_ropath
def get_tarinfo(self):
"""Generate a tarfile.TarInfo object based on self
Doesn't set size based on stat, because we may want to replace the
data with another stream. Size should be set separately by the
calling function.
"""
ti = tarfile.TarInfo()
if self.index:
ti.name = "/".join(self.index)
else:
ti.name = "."
if self.isdir():
ti.name += "/" # tar dir naming convention
ti.size = 0
if self.type:
# Lots of this is specific to tarfile.py, hope it doesn't
# change much...
if self.isreg():
ti.type = tarfile.REGTYPE
ti.size = self.stat.st_size
elif self.isdir():
ti.type = tarfile.DIRTYPE
elif self.isfifo():
ti.type = tarfile.FIFOTYPE
elif self.issym():
ti.type = tarfile.SYMTYPE
ti.linkname = self.symtext
elif self.isdev():
if self.type == "chr":
ti.type = tarfile.CHRTYPE
else:
ti.type = tarfile.BLKTYPE
ti.devmajor, ti.devminor = self.devnums
else:
raise PathException("Unrecognized type " + str(self.type))
ti.mode = self.mode
ti.uid, ti.gid = self.stat.st_uid, self.stat.st_gid
if self.stat.st_mtime < 0:
log.Warn(_("Warning: %s has negative mtime, treating as 0.")
% (util.ufn(self.get_relative_path())))
ti.mtime = 0
else:
ti.mtime = int(self.stat.st_mtime)
try:
ti.uname = cached_ops.getpwuid(ti.uid)[0]
except KeyError:
ti.uname = ''
try:
ti.gname = cached_ops.getgrgid(ti.gid)[0]
except KeyError:
ti.gname = ''
if ti.type in (tarfile.CHRTYPE, tarfile.BLKTYPE):
if hasattr(os, "major") and hasattr(os, "minor"):
ti.devmajor, ti.devminor = self.devnums
else:
# Currently we depend on an uninitialized tarinfo file to
# already have appropriate headers. Still, might as well
# make sure mode and size are set.
ti.mode, ti.size = 0, 0
return ti
def __eq__(self, other):
"""Used to compare two ROPaths. Doesn't look at fileobjs"""
if not self.type and not other.type:
return 1 # neither exists
if not self.stat and other.stat or not other.stat and self.stat:
return 0
if self.type != other.type:
return 0
if self.isreg() or self.isdir() or self.isfifo():
# Don't compare sizes, because we might be comparing
# signature size to size of file.
if not self.perms_equal(other):
return 0
if int(self.stat.st_mtime) == int(other.stat.st_mtime):
return 1
# Below, treat negative mtimes as equal to 0
return self.stat.st_mtime <= 0 and other.stat.st_mtime <= 0
elif self.issym():
# here only symtext matters
return self.symtext == other.symtext
elif self.isdev():
return self.perms_equal(other) and self.devnums == other.devnums
assert 0
def __ne__(self, other):
return not self.__eq__(other)
def compare_verbose(self, other, include_data = 0):
"""Compare ROPaths like __eq__, but log reason if different
This is placed in a separate function from __eq__ because
__eq__ should be very time sensitive, and logging statements
would slow it down. Used when verifying.
If include_data is true, also read all the data of regular
files and see if they differ.
"""
def log_diff(log_string):
log_str = _("Difference found:") + u" " + log_string
log.Notice(log_str % (util.ufn(self.get_relative_path())))
if not self.type and not other.type:
return 1
if not self.stat and other.stat:
log_diff(_("New file %s"))
return 0
if not other.stat and self.stat:
log_diff(_("File %s is missing"))
return 0
if self.type != other.type:
log_diff(_("File %%s has type %s, expected %s") %
(other.type, self.type))
return 0
if self.isreg() or self.isdir() or self.isfifo():
if not self.perms_equal(other):
log_diff(_("File %%s has permissions %s, expected %s") %
(other.getperms(), self.getperms()))
return 0
if ((int(self.stat.st_mtime) != int(other.stat.st_mtime)) and
(self.stat.st_mtime > 0 or other.stat.st_mtime > 0)):
log_diff(_("File %%s has mtime %s, expected %s") %
(dup_time.timetopretty(int(other.stat.st_mtime)),
dup_time.timetopretty(int(self.stat.st_mtime))))
return 0
if self.isreg() and include_data:
if self.compare_data(other):
return 1
else:
log_diff(_("Data for file %s is different"))
return 0
else:
return 1
elif self.issym():
if self.symtext == other.symtext:
return 1
else:
log_diff(_("Symlink %%s points to %s, expected %s") %
(other.symtext, self.symtext))
return 0
elif self.isdev():
if not self.perms_equal(other):
log_diff(_("File %%s has permissions %s, expected %s") %
(other.getperms(), self.getperms()))
return 0
if self.devnums != other.devnums:
log_diff(_("Device file %%s has numbers %s, expected %s")
% (other.devnums, self.devnums))
return 0
return 1
assert 0
def compare_data(self, other):
"""Compare data from two regular files, return true if same"""
f1 = self.open("rb")
f2 = other.open("rb")
def close():
assert not f1.close()
assert not f2.close()
while 1:
buf1 = f1.read(_copy_blocksize)
buf2 = f2.read(_copy_blocksize)
if buf1 != buf2:
close()
return 0
if not buf1:
close()
return 1
def perms_equal(self, other):
"""True if self and other have same permissions and ownership"""
s1, s2 = self.stat, other.stat
return (self.mode == other.mode and
s1.st_gid == s2.st_gid and s1.st_uid == s2.st_uid)
def copy(self, other):
"""Copy self to other. Also copies data. Other must be Path"""
if self.isreg():
other.writefileobj(self.open("rb"))
elif self.isdir():
os.mkdir(other.name)
elif self.issym():
os.symlink(self.symtext, other.name)
os.lchown(other.name, self.stat.st_uid, self.stat.st_gid)
other.setdata()
return # no need to copy symlink attributes
elif self.isfifo():
os.mkfifo(other.name)
elif self.issock():
socket.socket(socket.AF_UNIX).bind(other.name)
elif self.isdev():
if self.type == "chr":
devtype = "c"
else:
devtype = "b"
other.makedev(devtype, *self.devnums)
self.copy_attribs(other)
def copy_attribs(self, other):
"""Only copy attributes from self to other"""
if isinstance(other, Path):
util.maybe_ignore_errors(lambda: os.chown(other.name, self.stat.st_uid, self.stat.st_gid))
util.maybe_ignore_errors(lambda: os.chmod(other.name, self.mode))
util.maybe_ignore_errors(lambda: os.utime(other.name, (time.time(), self.stat.st_mtime)))
other.setdata()
else:
# write results to fake stat object
assert isinstance(other, ROPath)
stat = StatResult()
stat.st_uid, stat.st_gid = self.stat.st_uid, self.stat.st_gid
stat.st_mtime = int(self.stat.st_mtime)
other.stat = stat
other.mode = self.mode
def __unicode__(self):
"""Return string representation"""
return u"(%s %s)" % (util.uindex(self.index), self.type)
class Path(ROPath):
"""
Path class - wrapper around ordinary local files
    Besides caching stat() results, this class organizes various
    file-manipulation code.
"""
regex_chars_to_quote = re.compile("[\\\\\\\"\\$`]")
def rename_index(self, index):
if not globals.rename or not index:
return index # early exit
path = os.path.normcase(os.path.join(*index))
tail = []
while path and path not in globals.rename:
path, extra = os.path.split(path)
tail.insert(0, extra)
if path:
return globals.rename[path].split(os.sep) + tail
else:
return index # no rename found
def __init__(self, base, index = ()):
"""Path initializer"""
# self.opened should be true if the file has been opened, and
# self.fileobj can override returned fileobj
self.opened, self.fileobj = None, None
self.base = base
self.index = self.rename_index(index)
self.name = os.path.join(base, *self.index)
self.setdata()
def setdata(self):
"""Refresh stat cache"""
try:
self.stat = os.lstat(self.name)
except OSError, e:
err_string = errno.errorcode[e[0]]
if err_string in ["ENOENT", "ENOTDIR", "ELOOP", "ENOTCONN"]:
self.stat, self.type = None, None # file doesn't exist
self.mode = None
else:
raise
else:
self.set_from_stat()
if self.issym():
self.symtext = os.readlink(self.name)
def append(self, ext):
"""Return new Path with ext added to index"""
return self.__class__(self.base, self.index + (ext,))
def new_index(self, index):
"""Return new Path with index index"""
return self.__class__(self.base, index)
def listdir(self):
"""Return list generated by os.listdir"""
return os.listdir(self.name)
def isemptydir(self):
"""Return true if path is a directory and is empty"""
return self.isdir() and not self.listdir()
def open(self, mode = "rb"):
"""
Return fileobj associated with self
Usually this is just the file data on disk, but can be
replaced with arbitrary data using the setfileobj method.
"""
assert not self.opened
if self.fileobj:
result = self.fileobj
else:
result = open(self.name, mode)
return result
def makedev(self, type, major, minor):
"""Make a device file with specified type, major/minor nums"""
cmdlist = ['mknod', self.name, type, str(major), str(minor)]
if os.spawnvp(os.P_WAIT, 'mknod', cmdlist) != 0:
raise PathException("Error running %s" % cmdlist)
self.setdata()
def mkdir(self):
"""Make directory(s) at specified path"""
log.Info(_("Making directory %s") % util.ufn(self.name))
try:
os.makedirs(self.name)
except OSError:
if (not globals.force):
raise PathException("Error creating directory %s" % util.ufn(self.name), 7)
self.setdata()
def delete(self):
"""Remove this file"""
log.Info(_("Deleting %s") % util.ufn(self.name))
if self.isdir():
util.ignore_missing(os.rmdir, self.name)
else:
util.ignore_missing(os.unlink, self.name)
self.setdata()
def touch(self):
"""Open the file, write 0 bytes, close"""
log.Info(_("Touching %s") % util.ufn(self.name))
fp = self.open("wb")
fp.close()
def deltree(self):
"""Remove self by recursively deleting files under it"""
from duplicity import selection # todo: avoid circ. dep. issue
log.Info(_("Deleting tree %s") % util.ufn(self.name))
itr = IterTreeReducer(PathDeleter, [])
for path in selection.Select(self).set_iter():
itr(path.index, path)
itr.Finish()
self.setdata()
def get_parent_dir(self):
"""Return directory that self is in"""
if self.index:
return Path(self.base, self.index[:-1])
else:
components = self.base.split("/")
if len(components) == 2 and not components[0]:
return Path("/") # already in root directory
else:
return Path("/".join(components[:-1]))
def writefileobj(self, fin):
"""Copy file object fin to self. Close both when done."""
fout = self.open("wb")
while 1:
buf = fin.read(_copy_blocksize)
if not buf:
break
fout.write(buf)
if fin.close() or fout.close():
raise PathException("Error closing file object")
self.setdata()
def rename(self, new_path):
"""Rename file at current path to new_path."""
os.rename(self.name, new_path.name)
self.setdata()
new_path.setdata()
def move(self, new_path):
"""Like rename but destination may be on different file system"""
self.copy(new_path)
self.delete()
def chmod(self, mode):
"""Change permissions of the path"""
os.chmod(self.name, mode)
self.setdata()
def patch_with_attribs(self, diff_ropath):
"""Patch self with diff and then copy attributes over"""
assert self.isreg() and diff_ropath.isreg()
temp_path = self.get_temp_in_same_dir()
patch_fileobj = librsync.PatchedFile(self.open("rb"),
diff_ropath.open("rb"))
temp_path.writefileobj(patch_fileobj)
diff_ropath.copy_attribs(temp_path)
temp_path.rename(self)
def get_temp_in_same_dir(self):
"""Return temp non existent path in same directory as self"""
global _tmp_path_counter
parent_dir = self.get_parent_dir()
while 1:
temp_path = parent_dir.append("duplicity_temp." +
str(_tmp_path_counter))
if not temp_path.type:
return temp_path
_tmp_path_counter += 1
assert _tmp_path_counter < 10000, \
u"Warning too many temp files created for " + util.ufn(self.name)
def compare_recursive(self, other, verbose = None):
"""Compare self to other Path, descending down directories"""
from duplicity import selection # todo: avoid circ. dep. issue
selfsel = selection.Select(self).set_iter()
othersel = selection.Select(other).set_iter()
return Iter.equal(selfsel, othersel, verbose)
def __repr__(self):
"""Return string representation"""
return "(%s %s %s)" % (self.index, self.name, self.type)
def quote(self, s = None):
"""
Return quoted version of s (defaults to self.name)
The output is meant to be interpreted with shells, so can be
used with os.system.
"""
if not s:
s = self.name
return '"%s"' % self.regex_chars_to_quote.sub(lambda m: "\\"+m.group(0), s)
def unquote(self, s):
"""Return unquoted version of string s, as quoted by above quote()"""
assert s[0] == s[-1] == "\"" # string must be quoted by above
result = ""; i = 1
while i < len(s)-1:
if s[i] == "\\":
result += s[i+1]
i += 2
else:
result += s[i]
i += 1
return result
def get_filename(self):
"""Return filename of last component"""
components = self.name.split("/")
assert components and components[-1]
return components[-1]
def get_canonical(self):
"""
Return string of canonical version of path
Remove ".", and trailing slashes where possible. Note that
it's harder to remove "..", as "foo/bar/.." is not necessarily
"foo", so we can't use path.normpath()
"""
newpath = "/".join(filter(lambda x: x and x != ".",
self.name.split("/")))
if self.name[0] == "/":
return "/" + newpath
elif newpath:
return newpath
else:
return "."
class DupPath(Path):
"""
Represent duplicity data files
Based on the file name, files that are compressed or encrypted
will have different open() methods.
"""
def __init__(self, base, index = (), parseresults = None):
"""
DupPath initializer
The actual filename (no directory) must be the single element
of the index, unless parseresults is given.
"""
if parseresults:
self.pr = parseresults
else:
assert len(index) == 1
self.pr = file_naming.parse(index[0])
assert self.pr, "must be a recognizable duplicity file"
Path.__init__(self, base, index)
def filtered_open(self, mode = "rb", gpg_profile = None):
"""
Return fileobj with appropriate encryption/compression
If encryption is specified but no gpg_profile, use
        globals.gpg_profile.
"""
assert not self.opened and not self.fileobj
assert not (self.pr.encrypted and self.pr.compressed)
if gpg_profile:
assert self.pr.encrypted
if self.pr.compressed:
return gzip.GzipFile(self.name, mode)
elif self.pr.encrypted:
if not gpg_profile:
gpg_profile = globals.gpg_profile
if mode == "rb":
return gpg.GPGFile(False, self, gpg_profile)
elif mode == "wb":
return gpg.GPGFile(True, self, gpg_profile)
else:
return self.open(mode)
class PathDeleter(ITRBranch):
"""Delete a directory. Called by Path.deltree"""
def start_process(self, index, path):
self.path = path
def end_process(self):
self.path.delete()
def can_fast_process(self, index, path):
return not path.isdir()
def fast_process(self, index, path):
path.delete()
```
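The `Path`/`ROPath` API above is easiest to see with a tiny round trip. The sketch below is illustrative only and not part of duplicity; it assumes duplicity is installed so the file above is importable as `duplicity.path`, and the file names are placeholders.

```python
# Sketch only (assumes duplicity is importable as duplicity.path).
import shutil
import tempfile

from duplicity.path import Path

workdir = tempfile.mkdtemp()

src = Path(workdir, ("hello.txt",))
fp = src.open("wb")
fp.write(b"hello world\n")
fp.close()
src.setdata()                    # refresh the cached stat() after writing

dst = Path(workdir, ("copy.txt",))
src.copy(dst)                    # writes the data, then copy_attribs()

print(src == dst)                                # ROPath.__eq__: perms + mtime
print(src.compare_verbose(dst, include_data=1))  # same check, logs on mismatch

shutil.rmtree(workdir)
```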
#### File: dist-packages/gi/importer.py
```python
from __future__ import absolute_import
import sys
from ._gi import Repository
from .module import DynamicModule
repository = Repository.get_default()
modules = {}
class DynamicImporter(object):
# Note: see PEP302 for the Importer Protocol implemented below.
def __init__(self, path):
self.path = path
def find_module(self, fullname, path=None):
if not fullname.startswith(self.path):
return
path, namespace = fullname.rsplit('.', 1)
if path != self.path:
return
if repository.enumerate_versions(namespace):
return self
else:
raise ImportError('cannot import name %s, '
'introspection typelib not found' % namespace)
def load_module(self, fullname):
if fullname in sys.modules:
return sys.modules[fullname]
path, namespace = fullname.rsplit('.', 1)
dynamic_module = DynamicModule(namespace)
modules[namespace] = dynamic_module
dynamic_module.__file__ = '<%s>' % fullname
dynamic_module.__loader__ = self
sys.modules[fullname] = dynamic_module
dynamic_module._load()
return dynamic_module
```
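For orientation, the finder above only does something once it is on `sys.meta_path`. The registration below is illustrative; in PyGObject itself the equivalent hook is installed when `gi.repository` is imported, and the example assumes the GLib introspection typelib is available.

```python
# Sketch only: wiring the PEP 302 finder above into the import machinery by hand.
import sys

from gi.importer import DynamicImporter

if not any(isinstance(finder, DynamicImporter) for finder in sys.meta_path):
    sys.meta_path.append(DynamicImporter('gi.repository'))

# This import is now answered by find_module()/load_module() above and
# returns a DynamicModule backed by introspection data.
from gi.repository import GLib
print(GLib)
```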
#### File: dist-packages/gobject/propertyhelper.py
```python
import sys
import gobject._gobject
_gobject = sys.modules['gobject._gobject']
from gobject.constants import \
TYPE_NONE, TYPE_INTERFACE, TYPE_CHAR, TYPE_UCHAR, \
TYPE_BOOLEAN, TYPE_INT, TYPE_UINT, TYPE_LONG, \
TYPE_ULONG, TYPE_INT64, TYPE_UINT64, TYPE_ENUM, \
TYPE_FLAGS, TYPE_FLOAT, TYPE_DOUBLE, TYPE_STRING, \
TYPE_POINTER, TYPE_BOXED, TYPE_PARAM, TYPE_OBJECT, \
TYPE_PYOBJECT
from gobject.constants import \
G_MINFLOAT, G_MAXFLOAT, G_MINDOUBLE, G_MAXDOUBLE, \
G_MININT, G_MAXINT, G_MAXUINT, G_MINLONG, G_MAXLONG, \
G_MAXULONG
if sys.version_info >= (3, 0):
_basestring = str
_long = int
else:
_basestring = basestring
_long = long
class property(object):
"""
Creates a new property which in conjunction with GObject subclass will
create a property proxy:
>>> class MyObject(gobject.GObject):
>>> ... prop = gobject.property(type=str)
>>> obj = MyObject()
>>> obj.prop = 'value'
>>> obj.prop
'value'
The API is similar to the builtin property:
class AnotherObject(gobject.GObject):
@gobject.property
def prop(self):
return ...
Which will create a read-only property called prop.
"""
class __metaclass__(type):
def __repr__(self):
return "<class 'gobject.property'>"
def __init__(self, getter=None, setter=None, type=None, default=None,
nick='', blurb='', flags=_gobject.PARAM_READWRITE,
minimum=None, maximum=None):
"""
@param getter: getter to get the value of the property
@type getter: callable
@param setter: setter to set the value of the property
@type setter: callable
@param type: type of property
@type type: type
@param default: default value
@param nick: short description
        @type nick: string
@param blurb: long description
@type blurb: string
@param flags: parameter flags, one of:
- gobject.PARAM_READABLE
- gobject.PARAM_READWRITE
- gobject.PARAM_WRITABLE
- gobject.PARAM_CONSTRUCT
- gobject.PARAM_CONSTRUCT_ONLY
- gobject.PARAM_LAX_VALIDATION
@keyword minimum: minimum allowed value (int, float, long only)
@keyword maximum: maximum allowed value (int, float, long only)
"""
if getter and not setter:
setter = self._readonly_setter
elif setter and not getter:
getter = self._writeonly_getter
elif not setter and not getter:
getter = self._default_getter
setter = self._default_setter
self.getter = getter
self.setter = setter
if type is None:
type = object
self.type = self._type_from_python(type)
self.default = self._get_default(default)
self._check_default()
if not isinstance(nick, _basestring):
raise TypeError("nick must be a string")
self.nick = nick
if not isinstance(blurb, _basestring):
raise TypeError("blurb must be a string")
self.blurb = blurb
if flags < 0 or flags > 32:
raise TypeError("invalid flag value: %r" % (flags,))
self.flags = flags
if minimum is not None:
if minimum < self._get_minimum():
raise TypeError(
"Minimum for type %s cannot be lower than %d" % (
self.type, self._get_minimum()))
else:
minimum = self._get_minimum()
self.minimum = minimum
if maximum is not None:
if maximum > self._get_maximum():
raise TypeError(
"Maximum for type %s cannot be higher than %d" % (
self.type, self._get_maximum()))
else:
maximum = self._get_maximum()
self.maximum = maximum
self.name = None
self._exc = None
def __repr__(self):
return '<gobject property %s (%s)>' % (
self.name or '(uninitialized)',
_gobject.type_name(self.type))
def __get__(self, instance, klass):
if instance is None:
return self
self._exc = None
value = instance.get_property(self.name)
if self._exc:
exc = self._exc
self._exc = None
raise exc
return value
def __set__(self, instance, value):
if instance is None:
raise TypeError
self._exc = None
instance.set_property(self.name, value)
if self._exc:
exc = self._exc
self._exc = None
raise exc
def _type_from_python(self, type_):
if type_ == _long:
return TYPE_LONG
elif type_ == int:
return TYPE_INT
elif type_ == bool:
return TYPE_BOOLEAN
elif type_ == float:
return TYPE_DOUBLE
elif type_ == str:
return TYPE_STRING
elif type_ == object:
return TYPE_PYOBJECT
elif (isinstance(type_, type) and
issubclass(type_, (_gobject.GObject,
_gobject.GEnum))):
return type_.__gtype__
elif type_ in [TYPE_NONE, TYPE_INTERFACE, TYPE_CHAR, TYPE_UCHAR,
TYPE_INT, TYPE_UINT, TYPE_BOOLEAN, TYPE_LONG,
TYPE_ULONG, TYPE_INT64, TYPE_UINT64,
TYPE_FLOAT, TYPE_DOUBLE, TYPE_POINTER,
TYPE_BOXED, TYPE_PARAM, TYPE_OBJECT, TYPE_STRING,
TYPE_PYOBJECT]:
return type_
else:
raise TypeError("Unsupported type: %r" % (type_,))
def _get_default(self, default):
ptype = self.type
if default is not None:
return default
if ptype in [TYPE_INT, TYPE_UINT, TYPE_LONG, TYPE_ULONG,
TYPE_INT64, TYPE_UINT64]:
return 0
elif ptype == TYPE_STRING:
return ''
elif ptype == TYPE_FLOAT or ptype == TYPE_DOUBLE:
return 0.0
else:
return None
def _check_default(self):
ptype = self.type
default = self.default
if (ptype == TYPE_BOOLEAN and (default not in (True, False))):
raise TypeError(
"default must be True or False, not %r" % (default,))
elif ptype == TYPE_PYOBJECT:
if default is not None:
raise TypeError("object types does not have default values")
elif gobject.type_is_a(ptype, TYPE_ENUM):
if default is None:
raise TypeError("enum properties needs a default value")
elif not gobject.type_is_a(default, ptype):
raise TypeError("enum value %s must be an instance of %r" %
(default, ptype))
def _get_minimum(self):
ptype = self.type
if ptype in [TYPE_UINT, TYPE_ULONG, TYPE_UINT64]:
return 0
# Remember that G_MINFLOAT and G_MINDOUBLE are something different.
elif ptype == TYPE_FLOAT:
return -G_MAXFLOAT
elif ptype == TYPE_DOUBLE:
return -G_MAXDOUBLE
elif ptype == TYPE_INT:
return G_MININT
elif ptype == TYPE_LONG:
return G_MINLONG
elif ptype == TYPE_INT64:
return -2 ** 62 - 1
return None
def _get_maximum(self):
ptype = self.type
if ptype == TYPE_UINT:
return G_MAXUINT
elif ptype == TYPE_ULONG:
return G_MAXULONG
elif ptype == TYPE_INT64:
return 2 ** 62 - 1
elif ptype == TYPE_UINT64:
return 2 ** 63 - 1
elif ptype == TYPE_FLOAT:
return G_MAXFLOAT
elif ptype == TYPE_DOUBLE:
return G_MAXDOUBLE
elif ptype == TYPE_INT:
return G_MAXINT
elif ptype == TYPE_LONG:
return G_MAXLONG
return None
#
# Getter and Setter
#
def _default_setter(self, instance, value):
setattr(instance, '_property_helper_'+self.name, value)
def _default_getter(self, instance):
return getattr(instance, '_property_helper_'+self.name, self.default)
def _readonly_setter(self, instance, value):
self._exc = TypeError("%s property of %s is read-only" % (
self.name, type(instance).__name__))
def _writeonly_getter(self, instance):
self._exc = TypeError("%s property of %s is write-only" % (
self.name, type(instance).__name__))
#
# Public API
#
def get_pspec_args(self):
ptype = self.type
if ptype in [TYPE_INT, TYPE_UINT, TYPE_LONG, TYPE_ULONG,
TYPE_INT64, TYPE_UINT64, TYPE_FLOAT, TYPE_DOUBLE]:
args = self._get_minimum(), self._get_maximum(), self.default
elif (ptype == TYPE_STRING or ptype == TYPE_BOOLEAN or
ptype.is_a(TYPE_ENUM)):
args = (self.default,)
elif ptype == TYPE_PYOBJECT:
args = ()
elif ptype.is_a(TYPE_OBJECT):
args = ()
else:
raise NotImplementedError(ptype)
return (self.type, self.nick, self.blurb) + args + (self.flags,)
```
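A short usage sketch mirroring the docstring of the helper above; it is not part of the file and assumes the static `gobject` bindings (PyGObject 2, Python 2) are importable.

```python
# Sketch only: the two declaration styles described in the docstring above.
import gobject

class Demo(gobject.GObject):
    # read-write string property using the default getter/setter pair
    prop = gobject.property(type=str, default='unset', nick='demo property')

    # read-only property built from a getter (setter becomes _readonly_setter)
    @gobject.property
    def answer(self):
        return 42

obj = Demo()
obj.prop = 'value'
print(obj.prop)      # -> 'value'
print(obj.answer)    # -> 42
# Assigning to obj.answer would raise the TypeError set by _readonly_setter.
```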
#### File: gst/extend/leveller.py
```python
import os
import sys
import math
import gobject
import pygst
pygst.require('0.10')
import gst
import utils
from pygobject import gsignal
import sources
from sources import EOS, ERROR, UNKNOWN_TYPE, WRONG_TYPE
class Leveller(gst.Pipeline):
"""
I am a pipeline that calculates RMS values and mix-in/out points.
I will signal 'done' when I'm done scanning the file, with return value
EOS, ERROR, UNKNOWN_TYPE, or WRONG_TYPE from gst.extend.sources
"""
gsignal('done', str)
def __init__(self, filename, threshold=-9.0):
gst.Pipeline.__init__(self)
self._filename = filename
self._thresholddB = threshold
self._threshold = math.pow(10, self._thresholddB / 10.0)
self._source = sources.AudioSource(filename)
self._source.connect('done', self._done_cb)
self._level = gst.element_factory_make("level")
self._fakesink = gst.element_factory_make("fakesink")
self.add(self._source, self._level, self._fakesink)
self._source.connect("pad-added", self._sourcePadAddedCb)
self._level.link(self._fakesink)
# temporary values for each timepoint
self._rmsdB = {} # hash of channel, rmsdB value
self._peakdB = 0.0 # highest value over all channels for this time
# results over the whole file
self._meansquaresums = [] # list of time -> mean square sum value
self._peaksdB = [] # list of time -> peak value
self._lasttime = 0
# will be set when done
self.mixin = 0
self.mixout = 0
self.length = 0
self.rms = 0.0
self.rmsdB = 0.0
def _sourcePadAddedCb(self, source, pad):
self._source.link(self._level)
def do_handle_message(self, message):
self.debug("got message %r" % message)
if (message.type == gst.MESSAGE_ELEMENT) and (message.src == self._level):
struc = message.structure
endtime = struc["endtime"]
rmss = struc["rms"]
peaks = struc["peak"]
decays = struc["decay"]
infos = zip(rmss, peaks, decays)
channid = 0
for rms,peak,decay in infos:
self._level_cb(message.src, endtime, channid, rms, peak, decay)
channid += 1
elif message.type == gst.MESSAGE_EOS:
self._eos_cb(message.src)
# chaining up
gst.Pipeline.do_handle_message(self, message)
def _level_cb(self, element, time, channel, rmsdB, peakdB, decaydB):
# rms is being signalled in dB
# FIXME: maybe level should have a way of signalling actual values
# signals are received in order, so I should get each channel one
# by one
if time > self._lasttime and self._lasttime > 0:
# we have a new time point, so calculate stuff for the old block
meansquaresum = 0.0
for i in self._rmsdB.keys():
meansquaresum += math.pow(10, self._rmsdB[i] / 10.0)
# average over channels
meansquaresum /= len(self._rmsdB.keys())
try:
rmsdBstr = str(10 * math.log10(meansquaresum))
except OverflowError:
rmsdBstr = "(-inf)"
gst.log("meansquaresum %f (%s dB)" % (meansquaresum, rmsdBstr))
# update values
self._peaksdB.append((self._lasttime, peakdB))
self._meansquaresums.append((self._lasttime, meansquaresum))
self._rmsdB = {}
self._peakdB = 0.0
# store the current values for later processing
gst.log("time %s, channel %d, rmsdB %f" % (gst.TIME_ARGS(time), channel, rmsdB))
self._lasttime = time
self._rmsdB[channel] = rmsdB
if peakdB > self._peakdB:
self._peakdB = peakdB
def _done_cb(self, source, reason):
gst.debug("done, reason %s" % reason)
# we ignore eos because we want the whole pipeline to eos
if reason == EOS:
return
self.emit('done', reason)
def _eos_cb(self, source):
gst.debug("eos, start calcing")
# get the highest peak RMS for this track
highestdB = self._peaksdB[0][1]
for (time, peakdB) in self._peaksdB:
if peakdB > highestdB:
highestdB = peakdB
gst.debug("highest peak(dB): %f" % highestdB)
# get the length
(self.length, peakdB) = self._peaksdB[-1]
# find the mix in point
for (time, peakdB) in self._peaksdB:
gst.log("time %s, peakdB %f" % (gst.TIME_ARGS(time), peakdB))
if peakdB > self._thresholddB + highestdB:
gst.debug("found mix-in point of %f dB at %s" % (
peakdB, gst.TIME_ARGS(time)))
self.mixin = time
break
# reverse and find out point
self._peaksdB.reverse()
found = None
for (time, peakdB) in self._peaksdB:
if found:
self.mixout = time
gst.debug("found mix-out point of %f dB right before %s" % (
found, gst.TIME_ARGS(time)))
break
if peakdB > self._thresholddB + highestdB:
found = peakdB
# now calculate RMS between these two points
weightedsquaresums = 0.0
lasttime = self.mixin
for (time, meansquaresum) in self._meansquaresums:
if time <= self.mixin:
continue
delta = time - lasttime
weightedsquaresums += meansquaresum * delta
gst.log("added MSS %f over time %s at time %s, now %f" % (
meansquaresum, gst.TIME_ARGS(delta),
gst.TIME_ARGS(time), weightedsquaresums))
lasttime = time
if time > self.mixout:
break
# calculate
try:
ms = weightedsquaresums / (self.mixout - self.mixin)
except ZeroDivisionError:
# this is possible when, for example, the whole sound file is
# empty
gst.warning('ZeroDivisionError on %s, mixin %s, mixout %s' % (
self._filename, gst.TIME_ARGS(self.mixin),
gst.TIME_ARGS(self.mixout)))
self.emit('done', WRONG_TYPE)
return
self.rms = math.sqrt(ms)
self.rmsdB = 10 * math.log10(ms)
self.emit('done', EOS)
def start(self):
gst.debug("Setting to PLAYING")
self.set_state(gst.STATE_PLAYING)
gst.debug("Set to PLAYING")
# FIXME: we might want to do this ourselves automatically ?
def stop(self):
"""
Stop the leveller, freeing all resources.
Call after the leveller emitted 'done' to clean up.
"""
gst.debug("Setting to NULL")
self.set_state(gst.STATE_NULL)
gst.debug("Set to NULL")
utils.gc_collect('Leveller.stop()')
def clean(self):
# clean ourselves up completely
self.stop()
# let's be ghetto and clean out our bin manually
self.remove(self._source)
self.remove(self._level)
self.remove(self._fakesink)
gst.debug("Emptied myself")
self._source.clean()
utils.gc_collect('Leveller.clean() cleaned up source')
self._source = None
self._fakesink = None
self._level = None
utils.gc_collect('Leveller.clean() done')
gobject.type_register(Leveller)
if __name__ == "__main__":
main = gobject.MainLoop()
try:
leveller = Leveller(sys.argv[1])
except IndexError:
sys.stderr.write("Please give a file to calculate level of\n")
sys.exit(1)
print "Starting"
bus = leveller.get_bus()
bus.add_signal_watch()
dontstop = True
leveller.set_state(gst.STATE_PLAYING)
while dontstop:
message = bus.poll(gst.MESSAGE_ANY, gst.SECOND)
if message:
gst.debug("got message from poll:%s/%r" % (message.type, message))
else:
gst.debug("got NOTHING from poll")
if message:
if message.type == gst.MESSAGE_EOS:
print "in: %s, out: %s, length: %s" % (gst.TIME_ARGS(leveller.mixin),
gst.TIME_ARGS(leveller.mixout),
gst.TIME_ARGS(leveller.length))
print "rms: %f, %f dB" % (leveller.rms, leveller.rmsdB)
dontstop = False
elif message.type == gst.MESSAGE_ERROR:
error,debug = message.parse_error()
print "ERROR[%s] %s" % (error.domain, error.message)
dontstop = False
leveller.stop()
leveller.clean()
gst.debug('deleting leveller, verify objects are freed')
utils.gc_collect('quit main loop')
del leveller
utils.gc_collect('deleted leveller')
gst.debug('stopping forever')
```
#### File: gtk-2.0/gtk/_lazyutils.py
```python
import sys
from types import ModuleType
class LazyModule(object):
def __init__(self, name, locals):
self._name = name
self._locals = locals
self._modname = '%s.%s' % (self._locals.get('__name__'), self._name)
def __getattr__(self, attr):
module = __import__(self._name, self._locals, {}, ' ')
sys.modules[self._modname] = module
if attr == '__members__':
return dir(module)
return getattr(module, attr)
class _NotLoadedMarker:
pass
_marker = _NotLoadedMarker()
class LazyDict(dict):
def __init__(self, module):
self._module = module
dict.__init__(self)
def __getitem__(self, name):
print name
return getattr(self._module, name)
class LazyNamespace(ModuleType):
def __init__(self, module, locals):
ModuleType.__init__(self, locals['__name__'])
self._imports = {}
ns = self.__dict__
ns.update(locals)
ns['__module__'] = self
lazy_symbols = {}
for symbol in module._get_symbol_names():
lazy_symbols[symbol] = ns[symbol] = _marker
ns.update(__dict__=LazyDict(self),
__bases__=(ModuleType,),
add_submodule=self.add_submodule)
def __getattribute__(_, name):
v = ns.get(name, _marker)
if v is not _marker:
return v
if name in lazy_symbols:
s = module._get_symbol(ns, name)
return s
elif name in self._imports:
m = __import__(self._imports[name], {}, {}, ' ')
ns[name] = m
return m
raise AttributeError(name)
LazyNamespace.__getattribute__ = __getattribute__
def add_submodule(self, name, importname):
self._imports[name] = importname
```
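To make the lazy-import helper concrete, the sketch below wraps the standard-library `json` module; this is purely illustrative (the gtk package itself wraps its own submodules and passes `locals()`), and it assumes the gtk-2.0 bindings are on `sys.path` so `gtk._lazyutils` is importable.

```python
# Sketch only: LazyModule defers the real import until first attribute access.
from gtk._lazyutils import LazyModule

# gtk/__init__.py does roughly:  keysyms = LazyModule('keysyms', locals())
# Here the stdlib json module stands in, just to show the mechanics.
lazy_json = LazyModule('json', {'__name__': '__main__'})

# Nothing has been imported yet; this attribute access triggers __getattr__,
# which imports json, caches it in sys.modules, and forwards the lookup.
print(lazy_json.dumps({'a': 1}))
```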
#### File: dist-packages/ibus/bus.py
```python
__all__ = (
"Bus",
)
import dbus
import dbus.lowlevel
import dbus.connection
import dbus.mainloop.glib
import gobject
import common
import object
import serializable
import config
dbus.mainloop.glib.DBusGMainLoop(set_as_default = True)
class Bus(object.Object):
__gtype_name__ = "PYIBusBus"
__gsignals__ = {
"disconnected" : (
gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
()
),
"config-reloaded" : (
gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
()
),
"registry-changed" : (
gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
()
),
}
def __init__(self):
super(Bus, self).__init__()
self.__dbusconn = dbus.connection.Connection(common.get_address())
_dbus = self.__dbusconn.get_object(dbus.BUS_DAEMON_NAME,
dbus.BUS_DAEMON_PATH)
self.__dbus = dbus.Interface (_dbus, dbus_interface="org.freedesktop.DBus")
self.__unique_name = self.hello()
_ibus = self.__dbusconn.get_object(common.IBUS_SERVICE_IBUS,
common.IBUS_PATH_IBUS)
self.__ibus = dbus.Interface (_ibus, dbus_interface='org.freedesktop.IBus')
self.__ibus.connect_to_signal("RegistryChanged", self.__registry_changed_cb)
self.__dbusconn.call_on_disconnection(self.__dbusconn_disconnected_cb)
# self.__dbusconn.add_message_filter(self.__filter_cb)
def __filter_cb(self, conn, message):
if message.get_type() == 4:
print "Signal %s" % message.get_member()
print " sender = %s" % message.get_sender()
print " path = %s" % message.get_path()
return dbus.lowlevel.HANDLER_RESULT_NOT_YET_HANDLED
def __dbusconn_disconnected_cb(self, dbusconn):
assert self.__dbusconn == dbusconn
self.__dbusconn = None
self.emit("disconnected")
def __registry_changed_cb(self):
self.emit("registry-changed")
def get_name(self):
return self.__unique_name
def get_is_connected(self):
if self.__dbusconn == None:
return False
return self.__dbusconn.get_is_connected()
# define dbus methods
def get_dbus(self):
return self.__dbus
def hello(self):
return self.__dbus.Hello()
def request_name(self, name, flags):
return self.__dbus.RequestName(name, dbus.UInt32 (flags))
def release_name(self, name):
return self.__dbus.ReleaseName(name)
def start_service_by_name(self, name, flags):
return self.__dbus.StartServiceByName(name, dbus.UInt32 (flags))
def list_queued_owners(self, name):
return self.__dbus.ListQueuedOwners(name)
def get_name_owner(self, name):
return self.__dbus.GetNameOwner(name)
def add_match(self, rule):
return self.__dbus.AddMatch(rule)
def remove_match(self, rule):
return self.__dbus.RemoveMatch(rule)
def get_dbusconn(self):
return self.__dbusconn
def get_address(self):
return common.get_address()
# define ibus methods
def register_component(self, component):
component = serializable.serialize_object(component)
return self.__ibus.RegisterComponent(component)
def list_engines(self):
engines = self.__ibus.ListEngines()
return map(serializable.deserialize_object, engines)
def get_engines_by_names(self, names):
engines = self.__ibus.GetEnginesByNames(names)
return map(serializable.deserialize_object, engines)
def list_active_engines(self):
engines = self.__ibus.ListActiveEngines()
return map(serializable.deserialize_object, engines)
def set_global_engine(self, name):
return self.__ibus.SetGlobalEngine(name)
def create_input_context(self, client_name):
return self.__ibus.CreateInputContext(client_name)
def current_input_contxt(self):
return self.__ibus.CurrentInputContext()
def exit(self, restart):
return self.__ibus.Exit(restart)
def ping(self, data):
flag = isinstance(data, serializable.Serializable)
if flag:
data = serializable.serialize_object(data)
data = self.__ibus.Ping(data, dbus_interface="org.freedesktop.IBus")
if flag:
data = serializable.deserialize_object(data)
return data
def introspect_ibus(self):
return self.__ibus.Introspect()
def introspect_dbus(self):
return self.__dbus.Introspect()
def get_config(self):
try:
return self.__config
except:
self.__config = config.Config(self)
return self.__config
def test():
import glib
import factory
import text
mainloop = glib.MainLoop()
def __disconnected_cb(*args):
print "Disconnected", args
mainloop.quit()
b = Bus()
b.connect("disconnected", __disconnected_cb)
print "unique_name =", b.get_name()
    for i in b.list_engines():
print i.name
mainloop.run()
print "Exit"
if __name__ == "__main__":
test()
```
#### File: dist-packages/ibus/_config.py
```python
__all__ = (
"get_version",
"get_copyright",
"get_license",
"get_ICON_KEYBOARD",
"LIBIBUS_SONAME",
"ISOCODES_PREFIX",
"_"
)
import gettext
_ = lambda a: gettext.dgettext("ibus10", a)
def get_version():
return '1.5.5'
def get_copyright():
return _('''Copyright (c) 2007-2010 Peng Huang
Copyright (c) 2007-2010 Red Hat, Inc.''')
def get_license():
return 'LGPL'
def get_ICON_KEYBOARD():
import gtk
icon = 'ibus-keyboard'
fallback_icon = 'ibus-keyboard'
settings = gtk.settings_get_default()
if settings.get_property('gtk-icon-theme-name') != 'gnome':
return fallback_icon
theme = gtk.icon_theme_get_default()
if not theme.lookup_icon(icon, 18, 0):
return fallback_icon
return icon
LIBIBUS_SONAME='libibus-1.0.so.5'
ISOCODES_PREFIX='/usr'
```
#### File: dist-packages/ibus/notifications.py
```python
__all__ = (
"NotificationsBase",
"IBUS_SERVICE_NOTIFICATIONS",
"IBUS_PATH_NOTIFICATIONS"
)
IBUS_SERVICE_NOTIFICATIONS = "org.freedesktop.IBus.Notifications"
IBUS_PATH_NOTIFICATIONS = "/org/freedesktop/IBus/Notifications"
import ibus
from ibus import interface
class NotificationsBase(ibus.Object):
def __init__(self, bus):
super(NotificationsBase, self).__init__()
self.__proxy = NotificationsProxy(self, bus.get_dbusconn())
def notify(self, replaces_id, app_icon, summary, body, actions, expire_timeout):
pass
def close_notification(self, id):
pass
def notification_closed(self, id, reason):
self.__proxy.NotificationClosed(id, reason)
def action_invoked(self, id, action_key):
self.__proxy.ActionInvoked(id, action_key)
class NotificationsProxy(interface.INotifications):
def __init__ (self, notify, dbusconn):
super(NotificationsProxy, self).__init__(dbusconn, IBUS_PATH_NOTIFICATIONS)
self.__dbusconn = dbusconn
self.__notify = notify
def Notify(self, replaces_id, app_icon, summary, body, actions, expire_timeout):
return self.__notify.notify(replaces_id, app_icon, summary, body, actions, expire_timeout)
def CloseNotification(self, id):
return self.__notify.close_notification(id)
```
#### File: dist-packages/ibus/observedpath.py
```python
__all__ = (
"ObservedPath",
)
import dbus
from exception import IBusException
from serializable import *
class ObservedPath(Serializable):
__gtype_name__ = "PYIBusObservedPath"
__NAME__ = "IBusObservedPath"
def __init__ (self, path="", mtime=0):
super(ObservedPath, self).__init__()
self.__path = path
self.__mtime = mtime
def get_path(self):
return self.__path
def get_mtime(self):
return self.__mtime
path = property(get_path)
mtime = property(get_mtime)
def serialize(self, struct):
super(ObservedPath, self).serialize(struct)
struct.append (dbus.String(self.__path))
struct.append (dbus.Int64(self.__mtime))
def deserialize(self, struct):
super(ObservedPath, self).deserialize(struct)
self.__path = struct.pop(0)
self.__mtime = struct.pop(0)
def test():
op = ObservedPath("/tmp", 111)
value = serialize_object(op)
    op = deserialize_object(value)
if __name__ == "__main__":
test()
```
#### File: numpy/core/_methods.py
```python
from __future__ import division, absolute_import, print_function
import warnings
from numpy.core import multiarray as mu
from numpy.core import umath as um
from numpy.core.numeric import asanyarray
from numpy.core import numerictypes as nt
def _amax(a, axis=None, out=None, keepdims=False):
return um.maximum.reduce(a, axis=axis,
out=out, keepdims=keepdims)
def _amin(a, axis=None, out=None, keepdims=False):
return um.minimum.reduce(a, axis=axis,
out=out, keepdims=keepdims)
def _sum(a, axis=None, dtype=None, out=None, keepdims=False):
return um.add.reduce(a, axis=axis, dtype=dtype,
out=out, keepdims=keepdims)
def _prod(a, axis=None, dtype=None, out=None, keepdims=False):
return um.multiply.reduce(a, axis=axis, dtype=dtype,
out=out, keepdims=keepdims)
def _any(a, axis=None, dtype=None, out=None, keepdims=False):
return um.logical_or.reduce(a, axis=axis, dtype=dtype, out=out,
keepdims=keepdims)
def _all(a, axis=None, dtype=None, out=None, keepdims=False):
return um.logical_and.reduce(a, axis=axis, dtype=dtype, out=out,
keepdims=keepdims)
def _count_reduce_items(arr, axis):
if axis is None:
axis = tuple(range(arr.ndim))
if not isinstance(axis, tuple):
axis = (axis,)
items = 1
for ax in axis:
items *= arr.shape[ax]
return items
def _mean(a, axis=None, dtype=None, out=None, keepdims=False):
arr = asanyarray(a)
rcount = _count_reduce_items(arr, axis)
# Make this warning show up first
if rcount == 0:
warnings.warn("Mean of empty slice.", RuntimeWarning)
# Cast bool, unsigned int, and int to float64 by default
if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool_)):
dtype = mu.dtype('f8')
ret = um.add.reduce(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
if isinstance(ret, mu.ndarray):
ret = um.true_divide(
ret, rcount, out=ret, casting='unsafe', subok=False)
elif hasattr(ret, 'dtype'):
ret = ret.dtype.type(ret / rcount)
else:
ret = ret / rcount
return ret
def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
arr = asanyarray(a)
rcount = _count_reduce_items(arr, axis)
# Make this warning show up on top.
if ddof >= rcount:
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
# Cast bool, unsigned int, and int to float64 by default
if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool_)):
dtype = mu.dtype('f8')
# Compute the mean.
# Note that if dtype is not of inexact type then arraymean will
# not be either.
arrmean = um.add.reduce(arr, axis=axis, dtype=dtype, keepdims=True)
if isinstance(arrmean, mu.ndarray):
arrmean = um.true_divide(
arrmean, rcount, out=arrmean, casting='unsafe', subok=False)
else:
arrmean = arrmean.dtype.type(arrmean / rcount)
# Compute sum of squared deviations from mean
# Note that x may not be inexact and that we need it to be an array,
# not a scalar.
x = asanyarray(arr - arrmean)
if issubclass(arr.dtype.type, nt.complexfloating):
x = um.multiply(x, um.conjugate(x), out=x).real
else:
x = um.multiply(x, x, out=x)
ret = um.add.reduce(x, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
# Compute degrees of freedom and make sure it is not negative.
rcount = max([rcount - ddof, 0])
# divide by degrees of freedom
if isinstance(ret, mu.ndarray):
ret = um.true_divide(
ret, rcount, out=ret, casting='unsafe', subok=False)
elif hasattr(ret, 'dtype'):
ret = ret.dtype.type(ret / rcount)
else:
ret = ret / rcount
return ret
def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
keepdims=keepdims)
if isinstance(ret, mu.ndarray):
ret = um.sqrt(ret, out=ret)
elif hasattr(ret, 'dtype'):
ret = ret.dtype.type(um.sqrt(ret))
else:
ret = um.sqrt(ret)
return ret
```
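As an aside (not part of the file): these private helpers are the implementations behind `ndarray.mean`, `ndarray.var` and `ndarray.std`, so for the NumPy version shown they should agree with the public methods.

```python
# Sketch only: the private reductions above should match the public ndarray methods.
import numpy as np
from numpy.core import _methods

a = np.arange(12.0).reshape(3, 4)

print(np.allclose(_methods._mean(a, axis=0), a.mean(axis=0)))
print(np.allclose(_methods._var(a, axis=1, ddof=1), a.var(axis=1, ddof=1)))
print(np.allclose(_methods._std(a), a.std()))
```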
#### File: dist-packages/oneconf/hosts.py
```python
import hashlib
import json
import logging
import os
import platform
import sys
from gi.repository import Gio
from gettext import gettext as _
LOG = logging.getLogger(__name__)
from oneconf.paths import (
FAKE_WALLPAPER, FAKE_WALLPAPER_MTIME, HOST_DATA_FILENAME,
LAST_SYNC_DATE_FILENAME, LOGO_BASE_FILENAME, LOGO_PREFIX,
ONECONF_CACHE_DIR, OTHER_HOST_FILENAME, PACKAGE_LIST_PREFIX,
PENDING_UPLOAD_FILENAME)
from oneconf import utils
class HostError(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
class Hosts(object):
"""
Class to get hosts
"""
def __init__(self):
'''initialize database
This will register/update this host if not already done.
'''
# create cache dir if doesn't exist
if not os.path.isdir(ONECONF_CACHE_DIR):
os.makedirs(ONECONF_CACHE_DIR)
(logo_checksum, logo_path) = self._get_current_wallpaper_data()
LOG.debug('LOGO %s: %s' % (logo_checksum, logo_path))
try:
# faking this id for testing purpose. Format is hostid:hostname
hostid, hostname = os.environ["ONECONF_HOST"].split(':')
LOG.debug("Fake current hostid to %s and hostname to %s" %
(hostid, hostname))
except KeyError:
with open('/var/lib/dbus/machine-id') as fp:
hostid = fp.read()[:-1]
hostname = platform.node()
self._host_file_dir = os.path.join(ONECONF_CACHE_DIR, hostid)
try:
file_path = os.path.join(self._host_file_dir, HOST_DATA_FILENAME)
with open(file_path, 'r') as f:
self.current_host = json.load(f)
has_changed = False
if hostname != self.current_host['hostname']:
self.current_host['hostname'] = hostname
has_changed = True
if hostid != self.current_host['hostid']:
self.current_host['hostid'] = hostid
has_changed = True
if logo_checksum != self.current_host['logo_checksum']:
if self._create_logo(logo_path):
self.current_host['logo_checksum'] = logo_checksum
has_changed = True
if has_changed:
self.save_current_host()
except (IOError, ValueError):
self.current_host = {
'hostid': hostid,
'hostname': hostname,
'share_inventory': False,
'logo_checksum': logo_checksum,
'packages_checksum': None,
}
if not os.path.isdir(self._host_file_dir):
os.mkdir(self._host_file_dir)
if not self._create_logo(logo_path):
self.current_host['logo_checksum'] = None
self.save_current_host()
self.other_hosts = None
self.update_other_hosts()
def _get_current_wallpaper_data(self):
        '''Get current wallpaper metadata from the store'''
# TODO: add fake objects instead of introducing logic into the code
# for testing.
file_path = FAKE_WALLPAPER
file_mtime = FAKE_WALLPAPER_MTIME
if not file_path:
settings = Gio.Settings.new("org.gnome.desktop.background")
file_path = settings.get_string("picture-uri")
if not file_path:
return ('', '')
file_path = file_path.replace("file://", "")
try:
if not file_mtime:
file_mtime = str(os.stat(file_path).st_mtime)
file_path_bytes = file_path.encode(sys.getfilesystemencoding())
logo_checksum = "%s%s" % (
hashlib.sha224(file_path_bytes).hexdigest(), file_mtime)
except OSError:
logo_checksum = None
file_path = None
return (logo_checksum, file_path)
def _create_logo(self, wallpaper_path):
'''create a logo from a wallpaper
return True if succeeded'''
# 2012-12-20 BAW: There is as yet no PIL for Python 3. This means we
# actually can't enable PIL for Python 2 either because otherwise, we
# can't write a test suite that succeeds for both versions. Currently
# oneconf must be bilingual because Software Center imports oneconf,
# and it is Python 2. (Getting Xapian ported, or switched to some
# other Python 3 friendly search engine would solve *that* probably,
# so I guess it's a race between Xapian and PIL.)
return False
## if not wallpaper_path:
## return False
## try:
## # 2012-11-21 BAW: There is as yet no PIL for Python 3.
## from PIL import Image
## except ImportError:
## return False
## try:
## im = Image.open(LOGO_BASE_FILENAME)
## im2 = Image.open(wallpaper_path)
## im3 = im2.resize((42, 26), Image.BICUBIC)
## im.paste(im3, (3,3))
## im.save(os.path.join(self._host_file_dir, "%s_%s.png" % (LOGO_PREFIX, self.current_host['hostid'])))
## return True
## except IOError as e:
## LOG.warning ("Cant create logo for %s: %s" % (wallpaper_path, e))
## return False
def update_other_hosts(self):
'''Update all the other hosts from local store'''
new_other_hosts = self._load_other_hosts()
if self.other_hosts:
for old_hostid in self.other_hosts:
if old_hostid not in new_other_hosts:
try:
os.remove(os.path.join(self.get_currenthost_dir(), '%s_%s' % (PACKAGE_LIST_PREFIX, old_hostid)))
except OSError:
pass
try:
os.remove(os.path.join(self.get_currenthost_dir(), '%s_%s.png' % (LOGO_PREFIX, old_hostid)))
except OSError:
pass
                    # TODO: remove with a regexp instead, in case of a crash during upgrade; do not keep cruft
self.other_hosts = new_other_hosts
def _load_other_hosts(self):
'''Load all other hosts from local store'''
try:
with open(os.path.join(self._host_file_dir, OTHER_HOST_FILENAME), 'r') as f:
return json.load(f)
except (IOError, TypeError, ValueError) as e:
LOG.warning("Error in loading %s file: %s" % (OTHER_HOST_FILENAME, e))
return {}
def save_current_host(self, arg=None):
'''Save current host on disk'''
LOG.debug("Save current host to disk")
utils.save_json_file_update(os.path.join(self._host_file_dir, HOST_DATA_FILENAME), self.current_host)
def add_hostid_pending_change(self, change):
'''Pend a scheduled change for another host on disk
change has a {hostid: {key: value, key2: value2}} format'''
LOG.debug("Pend a change for another host on disk")
try:
with open(os.path.join(self._host_file_dir, PENDING_UPLOAD_FILENAME), 'r') as f:
pending_changes = json.load(f)
except (IOError, ValueError):
pending_changes = {}
# merge existing changes with new ones
for hostid in change:
if not hostid in pending_changes:
pending_changes[hostid] = {}
pending_changes[hostid].update(change[hostid])
utils.save_json_file_update(os.path.join(self._host_file_dir, PENDING_UPLOAD_FILENAME), pending_changes)
def get_hostid_pending_change(self, hostid, attribute):
        '''Get the status of a pending change in progress for a host
        Return None if nothing is in progress'''
try:
with open(os.path.join(self._host_file_dir, PENDING_UPLOAD_FILENAME), 'r') as f:
return json.load(f)[hostid][attribute]
except (IOError, KeyError, ValueError):
return None
def gethost_by_id(self, hostid):
        '''Get host dictionary by id
        Return: host dictionary
        can raise a HostError exception if no host is registered for this id
'''
if hostid == self.current_host['hostid']:
return self.current_host
try:
return self.other_hosts[hostid]
except KeyError:
raise HostError(_("No hostname registered for this id"))
def _gethostid_by_name(self, hostname):
'''Get hostid by hostname
Return: hostid
        can raise a HostError exception for an unknown hostname
        or if multiple hostids are registered for this hostname
'''
LOG.debug("Get a hostid for %s", hostname)
result_hostid = None
if hostname == self.current_host['hostname']:
result_hostid = self.current_host['hostid']
for hostid in self.other_hosts:
if hostname == self.other_hosts[hostid]['hostname']:
if not result_hostid:
result_hostid = hostid
else:
raise HostError(_("Multiple hostid registered for this "\
"hostname. Use --list --host to get the hostid and "\
"use the --hostid option."))
if not result_hostid:
raise HostError(_("No hostid registered for this hostname"))
return result_hostid
def get_hostid_from_context(self, hostid=None, hostname=None):
'''get and check hostid
        if hostid and hostname are None, hostid is the current one
        Return: the corresponding hostid; raise an error if multiple hostids
        exist for a hostname
'''
if not hostid and not hostname:
hostid = self.current_host['hostid']
if hostid:
# just checking if it exists
self.gethost_by_id(hostid)
hostid = hostid
else:
hostid = self._gethostid_by_name(hostname)
return hostid
def get_currenthost_dir(self):
'''Get the oneconf current host directory'''
return self._host_file_dir
def get_all_hosts(self):
        '''Return a dictionary of all hosts
        packed as dict -> tuple for the dbus connection'''
        LOG.debug("Request to compute a list of all hosts")
result = {
self.current_host['hostid']: (
True, self.current_host['hostname'],
self.current_host['share_inventory']),
}
for hostid in self.other_hosts:
result[hostid] = (
False, self.other_hosts[hostid]['hostname'], True)
return result
def set_share_inventory(self, share_inventory, hostid=None, hostname=None):
'''Change if we share the current inventory to other hosts'''
if hostid or hostname:
hostid = self.get_hostid_from_context(hostid, hostname)
if hostid and (hostid != self.current_host['hostid']):
            # do not update if this pending change is already registered
pending_change_scheduled = self.get_hostid_pending_change(hostid, 'share_inventory')
if pending_change_scheduled != None:
if share_inventory == pending_change_scheduled:
return
save_function = self.add_hostid_pending_change
arg = {hostid: {'share_inventory': share_inventory}}
msg = "Update share_inventory state for %s to %s" % (hostid, share_inventory)
else:
save_function = self.save_current_host
arg = None
msg = "Update current share_inventory state to %s" % share_inventory
if self.current_host['share_inventory'] == share_inventory:
return
self.current_host['share_inventory'] = share_inventory
LOG.debug(msg)
save_function(arg)
def get_last_sync_date(self):
'''Get last sync date, if already synced, with remote server'''
LOG.debug("Getting last sync date with remote server")
try:
with open(os.path.join(self._host_file_dir, LAST_SYNC_DATE_FILENAME), 'r') as f:
content = json.load(f)
last_sync = content['last_sync']
#last_sync = datetime.datetime.fromtimestamp(content['last_sync']).strftime("%X %x")
except IOError:
last_sync = _("Was never synced")
# FIXME: give a better sentence like "Last sync not completed successfully", but let's not add a translation right now
except ValueError:
last_sync = _("Was never synced")
return last_sync
```
#### File: oneconf/networksync/fake_webcatalog_silo.py
```python
import time
import logging
import os
import pickle
LOG = logging.getLogger(__name__)
# decorator to add a fake network delay if set
# in FakeReviewSettings.fake_network_delay
def network_delay(fn):
def slp(self, *args, **kwargs):
#FIXME: CHECK how a decorator can take parameters
#delay = fake_settings.get_setting('fake_network_delay')
delay = 2
if delay:
time.sleep(delay)
return fn(self, *args, **kwargs)
return slp
class FakeWebCatalogSilo(object):
"""An object that simply holds settings and data which are used by
WebCatalogAPI in the infraclient_fake module. Using this module allows a
developer to test the oneconf functionality without any interaction with a
webcatalog server. Each setting here provides complete control over how
the 'server' will respond. Changes to these settings should be made to the
class attributes directly without creating an instance of this class. The
intended usage is for unit tests where a predictable response is required
and where the application should THINK it has spoken to a server.
The unit test would make changes to settings in this class before running
the unit test.
    It also contains some data for integration tests, faking an in-memory
    WebCatalog server.
"""
_FAKE_SETTINGS = {}
# Default stored data
#_FAKE_SETTINGS['hosts_metadata'] = {
# 'AAAAA': {'hostname': 'aaaaa', 'logo_checksum': 'logoAAAAA', 'packages_checksum': 'packageAAAAAA'},
# 'BBBBB': {'hostname': 'bbbbb', 'logo_checksum': 'logoBBBBB', 'packages_checksum': 'packageBBBBBB'},}
#_FAKE_SETTINGS['packages_metadata'] = {
# 'AAAAA': {'kiki': {'auto': False}, 'unity': {'auto': False},
# 'libFoo': {'auto': True}, 'libFool': {'auto': True}},
# 'BBBBB': {'kiki': {'auto': False}, 'gnome-panel': {'auto': False},
# 'libBar': {'auto': True}, 'libFool': {'auto': False}},}
_FAKE_SETTINGS['hosts_metadata'] = {}
_FAKE_SETTINGS['packages_metadata'] = {}
# general settings
# *****************************
# delay (in seconds) before returning from any of the fake cat methods
# useful for emulating real network timings (use None for no delays)
_FAKE_SETTINGS['fake_network_delay'] = 2
# server status
# *****************************
# can be env variables as well like: ONECONF_server_response_error
# raises APIError if True
_FAKE_SETTINGS['server_response_error'] = False
# list machines
# *****************************
# raises APIError if True
_FAKE_SETTINGS['list_machines_error'] = False
# update machine
# *****************************
# raises APIError if True
_FAKE_SETTINGS['update_machine_error'] = False
# delete machine
# *****************************
# raises APIError if True
_FAKE_SETTINGS['delete_machine_error'] = False
# get machine logo
# *****************************
# raises APIError if True
_FAKE_SETTINGS['get_machine_logo_error'] = False
# update machine logo
# *****************************
# raises APIError if True
_FAKE_SETTINGS['update_machine_logo_error'] = False
# list packages
# *****************************
# raises APIError if True
_FAKE_SETTINGS['list_packages_error'] = False
# update package list
# *****************************
# raises APIError if True
_FAKE_SETTINGS['update_packages_error'] = False
def __init__(self, silo_filepath=None):
"""Initialises the object and loads the settings into the
        _FAKE_SETTINGS dict. If silo_filepath is not provided, any existing
settings in the cache file are ignored and the cache file is
overwritten with the defaults set in the class.
"""
if silo_filepath:
self._update_from_file(silo_filepath)
def get_setting(self, key_name):
"""Takes a string (key_name) which corresponds to a setting in this
object.
Raises a NameError if the setting name doesn't exist
"""
if 'error' in key_name:
value = os.getenv('ONECONF_' + key_name)
# The value should be the string True or False, but it can be None.
if value is not None:
if value.lower() == 'true':
return True
elif value.lower() == 'false':
return False
else:
raise RuntimeError('unexpected value %s' % value)
if not key_name in self._FAKE_SETTINGS:
raise NameError('Setting %s does not exist' % key_name)
return self._FAKE_SETTINGS[key_name]
def get_host_silo(self):
""" return a reference to the host list silo"""
return self._FAKE_SETTINGS['hosts_metadata']
def get_package_silo(self):
""" return a reference to the package list silo"""
return self._FAKE_SETTINGS['packages_metadata']
def _update_from_file(self, filepath):
'''Loads existing settings from cache file into _FAKE_SETTINGS dict'''
if os.path.exists(filepath):
with open(filepath, 'rb') as fp:
self._FAKE_SETTINGS = pickle.load(fp)
else:
LOG.warning("Settings file %s doesn't exist. "
'Will run with the default' % filepath)
return
def save_settings(self, filepath):
"""write the dict out to cache file, for generating new cases"""
try:
if not os.path.exists(os.path.dirname(filepath)):
os.makedirs(os.path.dirname(filepath))
# File must be open in binary mode since pickle will write bytes.
with open(filepath, 'wb') as fp:
pickle.dump(self._FAKE_SETTINGS, fp)
return True
except:
return False
```
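A hedged sketch of how a test might drive the fake silo above directly; the cache path is a placeholder and the environment-variable override follows the `ONECONF_` prefix convention used in `get_setting()`.

```python
# Sketch only: poking the fake silo the way a oneconf unit test might.
import os

from oneconf.networksync.fake_webcatalog_silo import FakeWebCatalogSilo

silo = FakeWebCatalogSilo()                        # defaults, no cache file given
print(silo.get_setting('fake_network_delay'))      # -> 2

# "error" settings can be forced through the environment (see get_setting()).
os.environ['ONECONF_server_response_error'] = 'True'
print(silo.get_setting('server_response_error'))   # -> True

# The host/package silos are plain dicts that tests can populate and persist.
silo.get_host_silo()['AAAAA'] = {'hostname': 'aaaaa'}
print(silo.save_settings('/tmp/oneconf_fake_silo'))  # -> True, pickled to disk
```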
#### File: dist-packages/PIL/ImageSequence.py
```python
class Iterator:
"""
This class implements an iterator object that can be used to loop
over an image sequence.
You can use the ``[]`` operator to access elements by index. This operator
will raise an :py:exc:`IndexError` if you try to access a nonexistent
frame.
:param im: An image object.
"""
def __init__(self, im):
if not hasattr(im, "seek"):
raise AttributeError("im must have seek method")
self.im = im
def __getitem__(self, ix):
try:
if ix:
self.im.seek(ix)
return self.im
except EOFError:
raise IndexError # end of sequence
```
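A minimal usage sketch for the iterator above, looping over the frames of a multi-frame image; the file name is a placeholder.

```python
# Sketch only: save every frame of an animated GIF via ImageSequence.Iterator.
from PIL import Image, ImageSequence

im = Image.open("animation.gif")                 # placeholder multi-frame image
for index, frame in enumerate(ImageSequence.Iterator(im)):
    # each "frame" is the same Image object seek()'d to a new position,
    # so convert (or copy) it before keeping it around
    frame.convert("RGB").save("frame%03d.png" % index)
```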
#### File: dist-packages/PIL/OleFileIO.py
```python
from __future__ import print_function
import io
import sys
from PIL import _binary
from PIL._util import isPath
if str is not bytes:
long = int
i8 = _binary.i8
i16 = _binary.i16le
i32 = _binary.i32le
MAGIC = b'\320\317\021\340\241\261\032\341'
#
# --------------------------------------------------------------------
# property types
VT_EMPTY=0; VT_NULL=1; VT_I2=2; VT_I4=3; VT_R4=4; VT_R8=5; VT_CY=6;
VT_DATE=7; VT_BSTR=8; VT_DISPATCH=9; VT_ERROR=10; VT_BOOL=11;
VT_VARIANT=12; VT_UNKNOWN=13; VT_DECIMAL=14; VT_I1=16; VT_UI1=17;
VT_UI2=18; VT_UI4=19; VT_I8=20; VT_UI8=21; VT_INT=22; VT_UINT=23;
VT_VOID=24; VT_HRESULT=25; VT_PTR=26; VT_SAFEARRAY=27; VT_CARRAY=28;
VT_USERDEFINED=29; VT_LPSTR=30; VT_LPWSTR=31; VT_FILETIME=64;
VT_BLOB=65; VT_STREAM=66; VT_STORAGE=67; VT_STREAMED_OBJECT=68;
VT_STORED_OBJECT=69; VT_BLOB_OBJECT=70; VT_CF=71; VT_CLSID=72;
VT_VECTOR=0x1000;
# map property id to name (for debugging purposes)
VT = {}
for k, v in list(vars().items()):
if k[:3] == "VT_":
VT[v] = k
#
# --------------------------------------------------------------------
# Some common document types (root.clsid fields)
WORD_CLSID = "00020900-0000-0000-C000-000000000046"
#
# --------------------------------------------------------------------
class _OleStream(io.BytesIO):
"""OLE2 Stream
Returns a read-only file object which can be used to read
the contents of a OLE stream. To open a stream, use the
openstream method in the OleFile class.
This function can be used with either ordinary streams,
or ministreams, depending on the offset, sectorsize, and
fat table arguments.
"""
# FIXME: should store the list of sects obtained by following
# the fat chain, and load new sectors on demand instead of
# loading it all in one go.
def __init__(self, fp, sect, size, offset, sectorsize, fat):
data = []
while sect != -2: # 0xFFFFFFFEL:
fp.seek(offset + sectorsize * sect)
data.append(fp.read(sectorsize))
sect = fat[sect]
data = b"".join(data)
# print len(data), size
io.BytesIO.__init__(self, data[:size])
#
# --------------------------------------------------------------------
# FIXME: should add a counter in here to avoid looping forever
# if the tree is broken.
class _OleDirectoryEntry:
"""OLE2 Directory Entry
Encapsulates a stream directory entry. Note that the
constructor builds a tree of all subentries, so we only
have to call it with the root object.
"""
def __init__(self, sidlist, sid):
# store directory parameters. the caller provides
# a complete list of directory entries, as read from
# the directory stream.
name, type, sect, size, sids, clsid = sidlist[sid]
self.sid = sid
self.name = name
self.type = type # 1=storage 2=stream
self.sect = sect
self.size = size
self.clsid = clsid
# process child nodes, if any
self.kids = []
sid = sidlist[sid][4][2]
if sid != -1:
# the directory entries are organized as a red-black tree.
# the following piece of code does an ordered traversal of
# such a tree (at least that's what I hope ;-)
stack = [self.sid]
# start at leftmost position
left, right, child = sidlist[sid][4]
while left != -1: # 0xFFFFFFFFL:
stack.append(sid)
sid = left
left, right, child = sidlist[sid][4]
while sid != self.sid:
self.kids.append(_OleDirectoryEntry(sidlist, sid))
# try to move right
left, right, child = sidlist[sid][4]
if right != -1: # 0xFFFFFFFFL:
# and then back to the left
sid = right
while True:
left, right, child = sidlist[sid][4]
if left == -1: # 0xFFFFFFFFL:
break
stack.append(sid)
sid = left
else:
# couldn't move right; move up instead
while True:
ptr = stack[-1]
del stack[-1]
left, right, child = sidlist[ptr][4]
if right != sid:
break
sid = right
left, right, child = sidlist[sid][4]
if right != ptr:
sid = ptr
# in the OLE file, entries are sorted on (length, name).
# for convenience, we sort them on name instead.
self.kids.sort()
def __cmp__(self, other):
"Compare entries by name"
return cmp(self.name, other.name)
def dump(self, tab = 0):
"Dump this entry, and all its subentries (for debug purposes only)"
TYPES = ["(invalid)", "(storage)", "(stream)", "(lockbytes)",
"(property)", "(root)"]
print(" "*tab + repr(self.name), TYPES[self.type], end=' ')
if self.type in (2, 5):
print(self.size, "bytes", end=' ')
print()
if self.type in (1, 5) and self.clsid:
print(" "*tab + "{%s}" % self.clsid)
for kid in self.kids:
kid.dump(tab + 2)
#
# --------------------------------------------------------------------
##
# This class encapsulates the interface to an OLE 2 structured
# storage file. Use the {@link listdir} and {@link openstream}
# methods to access the contents of this file.
class OleFileIO:
"""OLE container object
This class encapsulates the interface to an OLE 2 structured
storage file. Use the listdir and openstream methods to access
the contents of this file.
Object names are given as a list of strings, one for each subentry
level. The root entry should be omitted. For example, the following
code extracts all image streams from a Microsoft Image Composer file::
ole = OleFileIO("fan.mic")
for entry in ole.listdir():
if entry[1:2] == "Image":
fin = ole.openstream(entry)
fout = open(entry[0:1], "wb")
while 1:
s = fin.read(8192)
if not s:
break
fout.write(s)
You can use the viewer application provided with the Python Imaging
    Library to view the resulting files (which happen to be standard
    TIFF files).
"""
def __init__(self, filename = None):
if filename:
self.open(filename)
##
# Open an OLE2 file.
def open(self, filename):
"""Open an OLE2 file"""
if isPath(filename):
self.fp = open(filename, "rb")
else:
self.fp = filename
header = self.fp.read(512)
if len(header) != 512 or header[:8] != MAGIC:
raise IOError("not an OLE2 structured storage file")
# file clsid (probably never used, so we don't store it)
clsid = self._clsid(header[8:24])
# FIXME: could check version and byte order fields
self.sectorsize = 1 << i16(header, 30)
self.minisectorsize = 1 << i16(header, 32)
self.minisectorcutoff = i32(header, 56)
# Load file allocation tables
self.loadfat(header)
        # Load directory. This sets both the sidlist (ordered by id)
        # and the root (ordered by hierarchy) members.
self.loaddirectory(i32(header, 48))
self.ministream = None
self.minifatsect = i32(header, 60)
def loadfat(self, header):
        # Load the FAT table. The header contains the sector numbers
        # for the first 109 FAT sectors. Additional sectors are
# described by DIF blocks (FIXME: not yet implemented)
sect = header[76:512]
fat = []
for i in range(0, len(sect), 4):
ix = i32(sect, i)
if ix == -2 or ix == -1: # ix == 0xFFFFFFFEL or ix == 0xFFFFFFFFL:
break
s = self.getsect(ix)
fat = fat + [i32(s, i) for i in range(0, len(s), 4)]
self.fat = fat
def loadminifat(self):
# Load the MINIFAT table. This is stored in a standard sub-
# stream, pointed to by a header field.
s = self._open(self.minifatsect).read()
self.minifat = [i32(s, i) for i in range(0, len(s), 4)]
def getsect(self, sect):
# Read given sector
self.fp.seek(512 + self.sectorsize * sect)
return self.fp.read(self.sectorsize)
def _unicode(self, s):
# Map unicode string to Latin 1
if bytes is str:
# Old version tried to produce a Latin-1 str
return s.decode('utf-16').encode('latin-1', 'replace')
else:
# Provide actual Unicode string
return s.decode('utf-16')
def loaddirectory(self, sect):
# Load the directory. The directory is stored in a standard
# substream, independent of its size.
# read directory stream
fp = self._open(sect)
# create list of sid entries
self.sidlist = []
while True:
entry = fp.read(128)
if not entry:
break
type = i8(entry[66])
name = self._unicode(entry[0:0+i16(entry, 64)])
ptrs = i32(entry, 68), i32(entry, 72), i32(entry, 76)
sect, size = i32(entry, 116), i32(entry, 120)
clsid = self._clsid(entry[80:96])
self.sidlist.append((name, type, sect, size, ptrs, clsid))
# create hierarchical list of directory entries
self.root = _OleDirectoryEntry(self.sidlist, 0)
def dumpdirectory(self):
# Dump directory (for debugging only)
self.root.dump()
def _clsid(self, clsid):
        if clsid == b"\0" * len(clsid):
            return ""
return (("%08X-%04X-%04X-%02X%02X-" + "%02X" * 6) %
((i32(clsid, 0), i16(clsid, 4), i16(clsid, 6)) +
tuple(map(i8, clsid[8:16]))))
def _list(self, files, prefix, node):
# listdir helper
prefix = prefix + [node.name]
for entry in node.kids:
if entry.kids:
self._list(files, prefix, entry)
else:
files.append(prefix[1:] + [entry.name])
def _find(self, filename):
# openstream helper
node = self.root
for name in filename:
for kid in node.kids:
if kid.name == name:
break
else:
raise IOError("file not found")
node = kid
return node.sid
def _open(self, start, size = 0x7FFFFFFF):
# openstream helper.
if size < self.minisectorcutoff:
# ministream object
if not self.ministream:
self.loadminifat()
self.ministream = self._open(self.sidlist[0][2])
return _OleStream(self.ministream, start, size, 0,
self.minisectorsize, self.minifat)
# standard stream
return _OleStream(self.fp, start, size, 512,
self.sectorsize, self.fat)
##
# Returns a list of streams stored in this file.
def listdir(self):
"""Return a list of streams stored in this file"""
files = []
self._list(files, [], self.root)
return files
##
# Opens a stream as a read-only file object.
def openstream(self, filename):
"""Open a stream as a read-only file object"""
slot = self._find(filename)
name, type, sect, size, sids, clsid = self.sidlist[slot]
if type != 2:
raise IOError("this file is not a stream")
return self._open(sect, size)
##
# Gets a list of properties described in substream.
def getproperties(self, filename):
"""Return properties described in substream"""
fp = self.openstream(filename)
data = {}
# header
s = fp.read(28)
clsid = self._clsid(s[8:24])
# format id
s = fp.read(20)
fmtid = self._clsid(s[:16])
fp.seek(i32(s, 16))
# get section
s = "****" + fp.read(i32(fp.read(4))-4)
for i in range(i32(s, 4)):
id = i32(s, 8+i*8)
offset = i32(s, 12+i*8)
type = i32(s, offset)
# test for common types first (should perhaps use
# a dictionary instead?)
if type == VT_I2:
value = i16(s, offset+4)
if value >= 32768:
value = value - 65536
elif type == VT_UI2:
value = i16(s, offset+4)
elif type in (VT_I4, VT_ERROR):
value = i32(s, offset+4)
elif type == VT_UI4:
value = i32(s, offset+4) # FIXME
elif type in (VT_BSTR, VT_LPSTR):
count = i32(s, offset+4)
value = s[offset+8:offset+8+count-1]
elif type == VT_BLOB:
count = i32(s, offset+4)
value = s[offset+8:offset+8+count]
elif type == VT_LPWSTR:
count = i32(s, offset+4)
value = self._unicode(s[offset+8:offset+8+count*2])
elif type == VT_FILETIME:
                value = int(i32(s, offset+4)) + (int(i32(s, offset+8)) << 32)
# FIXME: this is a 64-bit int: "number of 100ns periods
# since Jan 1,1601". Should map this to Python time
value = value // 10000000 # seconds
elif type == VT_UI1:
value = i8(s[offset+4])
elif type == VT_CLSID:
value = self._clsid(s[offset+4:offset+20])
elif type == VT_CF:
count = i32(s, offset+4)
value = s[offset+8:offset+8+count]
else:
value = None # everything else yields "None"
# FIXME: add support for VT_VECTOR
#print "%08x" % id, repr(value),
#print "(%s)" % VT[i32(s, offset) & 0xFFF]
data[id] = value
return data
#
# --------------------------------------------------------------------
# This script can be used to dump the directory of any OLE2 structured
# storage file.
if __name__ == "__main__":
import sys
for file in sys.argv[1:]:
try:
ole = OleFileIO(file)
print("-" * 68)
print(file)
print("-" * 68)
ole.dumpdirectory()
for file in ole.listdir():
if file[-1][0] == "\005":
print(file)
props = ole.getproperties(file)
props = sorted(props.items())
for k, v in props:
print(" ", k, v)
except IOError as v:
print("***", "cannot read", file, "-", v)
```
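A minimal usage sketch for the OLE reader above. It assumes the code has been saved as a module named `olefile_min` together with the `MAGIC`, `isPath`, `i8`/`i16`/`i32` and `VT_*` helpers defined earlier in the file; `example.doc` is only a placeholder path.
```python
# Sketch: walk an OLE2 container with the OleFileIO class defined above.
from olefile_min import OleFileIO   # hypothetical module name for the code above

ole = OleFileIO("example.doc")
ole.dumpdirectory()                          # print the directory tree

for entry in ole.listdir():                  # entry is a list of name components
    print("/".join(entry))
    if entry[-1].startswith("\005"):         # property-set streams start with \005
        for prop_id, value in sorted(ole.getproperties(entry).items()):
            print("  property 0x%08x = %r" % (prop_id, value))
    else:
        stream = ole.openstream(entry)       # read-only file-like object
        print("  %d bytes" % len(stream.read()))
```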
#### File: reportlab/lib/pygments2xpre.py
```python
__all__ = ('pygments2xpre',)
def _2xpre(s,styles):
"Helper to transform Pygments HTML output to ReportLab markup"
s = s.replace('<div class="highlight">','')
s = s.replace('</div>','')
s = s.replace('<pre>','')
s = s.replace('</pre>','')
for k,c in styles+[('p','#000000'),('n','#000000'),('err','#000000')]:
s = s.replace('<span class="%s">' % k,'<span color="%s">' % c)
return s
def pygments2xpre(s, language="python"):
"Return markup suitable for XPreformatted"
try:
from pygments import highlight
from pygments.formatters import HtmlFormatter
except ImportError:
return s
from pygments.lexers import get_lexer_by_name
l = get_lexer_by_name(language)
h = HtmlFormatter()
from io import StringIO
out = StringIO()
highlight(s,l,h,out)
styles = [(cls, style.split(';')[0].split(':')[1].strip())
for cls, (style, ttype, level) in h.class2style.items()
if cls and style and style.startswith('color:')]
return _2xpre(out.getvalue(),styles)
def convertSourceFiles(filenames):
"Helper function - makes minimal PDF document"
from reportlab.platypus import Paragraph, SimpleDocTemplate, Spacer, XPreformatted
from reportlab.lib.styles import getSampleStyleSheet
styT=getSampleStyleSheet()["Title"]
styC=getSampleStyleSheet()["Code"]
doc = SimpleDocTemplate("pygments2xpre.pdf")
S = [].append
for filename in filenames:
S(Paragraph(filename,style=styT))
src = open(filename, 'r').read()
fmt = pygments2xpre(src)
S(XPreformatted(fmt, style=styC))
doc.build(S.__self__)
print('saved pygments2xpre.pdf')
if __name__=='__main__':
import sys
filenames = sys.argv[1:]
if not filenames:
print('usage: pygments2xpre.py file1.py [file2.py] [...]')
sys.exit(0)
convertSourceFiles(filenames)
```
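A short usage sketch for `pygments2xpre` above; it assumes the module is importable as `reportlab.lib.pygments2xpre` (its path in the ReportLab tree) and that Pygments is installed, otherwise the source text is returned unchanged.
```python
# Sketch: turn a code snippet into XPreformatted colour markup.
from reportlab.lib.pygments2xpre import pygments2xpre

src = "def greet(name):\n    return 'hello ' + name\n"
marked_up = pygments2xpre(src, language="python")
print(marked_up)   # <span color="..."> markup, or the raw source without Pygments
```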
#### File: reportlab/pdfgen/pathobject.py
```python
__version__=''' $Id$ '''
__doc__="""
PDFPathObject is an efficient way to draw paths on a Canvas. Do not
instantiate directly, obtain one from the Canvas instead.
Progress Reports:
8.83, 2000-01-13, gmcm: created from pdfgen.py
"""
from reportlab.pdfgen import pdfgeom
from reportlab.lib.rl_accel import fp_str
class PDFPathObject:
"""Represents a graphic path. There are certain 'modes' to PDF
drawing, and making a separate object to expose Path operations
ensures they are completed with no run-time overhead. Ask
the Canvas for a PDFPath with getNewPathObject(); moveto/lineto/
curveto wherever you want; add whole shapes; and then add it back
into the canvas with one of the relevant operators.
    Path objects are probably not long, so we pack them onto one line.
    The code argument allows a canvas to get the operations appended directly,
    avoiding the final getCode call.
"""
def __init__(self,code=None):
self._code = (code,[])[code is None]
self._code_append = self._init_code_append
def _init_code_append(self,c):
assert c.endswith(' m') or c.endswith(' re'), 'path must start with a moveto or rect'
code_append = self._code.append
code_append('n')
code_append(c)
self._code_append = code_append
def getCode(self):
"pack onto one line; used internally"
return ' '.join(self._code)
def moveTo(self, x, y):
self._code_append('%s m' % fp_str(x,y))
def lineTo(self, x, y):
self._code_append('%s l' % fp_str(x,y))
def curveTo(self, x1, y1, x2, y2, x3, y3):
self._code_append('%s c' % fp_str(x1, y1, x2, y2, x3, y3))
def arc(self, x1,y1, x2,y2, startAng=0, extent=90):
"""Contributed to piddlePDF by <NAME>, 28/7/99.
Draw a partial ellipse inscribed within the rectangle x1,y1,x2,y2,
starting at startAng degrees and covering extent degrees. Angles
start with 0 to the right (+x) and increase counter-clockwise.
These should have x1<x2 and y1<y2.
The algorithm is an elliptical generalization of the formulae in
<NAME>'s TeX tutorial <URL: http://www.tinaja.com/bezarc1.pdf>."""
self._curves(pdfgeom.bezierArc(x1,y1, x2,y2, startAng, extent))
def arcTo(self, x1,y1, x2,y2, startAng=0, extent=90):
"""Like arc, but draws a line from the current point to
the start if the start is not the current point."""
self._curves(pdfgeom.bezierArc(x1,y1, x2,y2, startAng, extent),'lineTo')
def rect(self, x, y, width, height):
"""Adds a rectangle to the path"""
self._code_append('%s re' % fp_str((x, y, width, height)))
def ellipse(self, x, y, width, height):
"""adds an ellipse to the path"""
self._curves(pdfgeom.bezierArc(x, y, x + width,y + height, 0, 360))
def _curves(self,curves,initial='moveTo'):
getattr(self,initial)(*curves[0][:2])
for curve in curves:
self.curveTo(*curve[2:])
def circle(self, x_cen, y_cen, r):
"""adds a circle to the path"""
x1 = x_cen - r
y1 = y_cen - r
width = height = 2*r
self.ellipse(x1, y1, width, height)
def roundRect(self, x, y, width, height, radius):
"""Draws a rectangle with rounded corners. The corners are
approximately quadrants of a circle, with the given radius."""
#use a precomputed set of factors for the bezier approximation
#to a circle. There are six relevant points on the x axis and y axis.
#sketch them and it should all make sense!
t = 0.4472 * radius
x0 = x
x1 = x0 + t
x2 = x0 + radius
x3 = x0 + width - radius
x4 = x0 + width - t
x5 = x0 + width
y0 = y
y1 = y0 + t
y2 = y0 + radius
y3 = y0 + height - radius
y4 = y0 + height - t
y5 = y0 + height
self.moveTo(x2, y0)
self.lineTo(x3, y0) #bottom row
self.curveTo(x4, y0, x5, y1, x5, y2) #bottom right
self.lineTo(x5, y3) #right edge
self.curveTo(x5, y4, x4, y5, x3, y5) #top right
self.lineTo(x2, y5) #top row
self.curveTo(x1, y5, x0, y4, x0, y3) #top left
self.lineTo(x0, y2) #left edge
self.curveTo(x0, y1, x1, y0, x2, y0) #bottom left
self.close()
def close(self):
"draws a line back to where it started"
self._code_append('h')
```
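A small sketch of the operator stream a `PDFPathObject` produces. The class docstring says to obtain paths from a Canvas rather than construct them directly; instantiating one here is purely to show the generated code, and the expected output string is indicative only.
```python
# Sketch: build a path directly and inspect the PDF operators it emits.
from reportlab.pdfgen.pathobject import PDFPathObject

p = PDFPathObject()
p.moveTo(100, 100)            # a path must start with a moveto or rect
p.lineTo(200, 100)
p.curveTo(250, 100, 250, 150, 200, 150)
p.close()
print(p.getCode())            # e.g. "n 100 100 m 200 100 l 250 100 250 150 200 150 c h"
```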
#### File: samba/netcmd/dns.py
```python
import samba.getopt as options
from struct import pack
from socket import inet_ntoa
import shlex
from samba.netcmd import (
Command,
CommandError,
Option,
SuperCommand,
)
from samba.dcerpc import dnsp, dnsserver
def dns_connect(server, lp, creds):
if server.lower() == 'localhost':
server = '127.0.0.1'
binding_str = "ncacn_ip_tcp:%s[sign]" % server
dns_conn = dnsserver.dnsserver(binding_str, lp, creds)
return dns_conn
def bool_string(flag):
if flag == 0:
ret = 'FALSE'
elif flag == 1:
ret = 'TRUE'
else:
ret = 'UNKNOWN (0x%x)' % flag
return ret
def enum_string(module, enum_defs, value):
ret = None
for e in enum_defs:
if value == getattr(module, e):
ret = e
break
if not ret:
ret = 'UNKNOWN (0x%x)' % value
return ret
def bitmap_string(module, bitmap_defs, value):
ret = ''
for b in bitmap_defs:
if value & getattr(module, b):
ret += '%s ' % b
if not ret:
ret = 'NONE'
return ret
def boot_method_string(boot_method):
enum_defs = [ 'DNS_BOOT_METHOD_UNINITIALIZED', 'DNS_BOOT_METHOD_FILE',
'DNS_BOOT_METHOD_REGISTRY', 'DNS_BOOT_METHOD_DIRECTORY' ]
return enum_string(dnsserver, enum_defs, boot_method)
def name_check_flag_string(check_flag):
enum_defs = [ 'DNS_ALLOW_RFC_NAMES_ONLY', 'DNS_ALLOW_NONRFC_NAMES',
'DNS_ALLOW_MULTIBYTE_NAMES', 'DNS_ALLOW_ALL_NAMES' ]
return enum_string(dnsserver, enum_defs, check_flag)
def zone_type_string(zone_type):
enum_defs = [ 'DNS_ZONE_TYPE_CACHE', 'DNS_ZONE_TYPE_PRIMARY',
'DNS_ZONE_TYPE_SECONDARY', 'DNS_ZONE_TYPE_STUB',
'DNS_ZONE_TYPE_FORWARDER', 'DNS_ZONE_TYPE_SECONDARY_CACHE' ]
return enum_string(dnsp, enum_defs, zone_type)
def zone_update_string(zone_update):
    enum_defs = [ 'DNS_ZONE_UPDATE_OFF', 'DNS_ZONE_UPDATE_UNSECURE',
                  'DNS_ZONE_UPDATE_SECURE' ]
return enum_string(dnsp, enum_defs, zone_update)
def zone_secondary_security_string(security):
enum_defs = [ 'DNS_ZONE_SECSECURE_NO_SECURITY', 'DNS_ZONE_SECSECURE_NS_ONLY',
'DNS_ZONE_SECSECURE_LIST_ONLY', 'DNS_ZONE_SECSECURE_NO_XFER' ]
return enum_string(dnsserver, enum_defs, security)
def zone_notify_level_string(notify_level):
enum_defs = [ 'DNS_ZONE_NOTIFY_OFF', 'DNS_ZONE_NOTIFY_ALL_SECONDARIES',
'DNS_ZONE_NOTIFY_LIST_ONLY' ]
return enum_string(dnsserver, enum_defs, notify_level)
def dp_flags_string(dp_flags):
bitmap_defs = [ 'DNS_DP_AUTOCREATED', 'DNS_DP_LEGACY', 'DNS_DP_DOMAIN_DEFAULT',
'DNS_DP_FOREST_DEFAULT', 'DNS_DP_ENLISTED', 'DNS_DP_DELETED' ]
return bitmap_string(dnsserver, bitmap_defs, dp_flags)
def zone_flags_string(flags):
bitmap_defs = [ 'DNS_RPC_ZONE_PAUSED', 'DNS_RPC_ZONE_SHUTDOWN',
'DNS_RPC_ZONE_REVERSE', 'DNS_RPC_ZONE_AUTOCREATED',
'DNS_RPC_ZONE_DSINTEGRATED', 'DNS_RPC_ZONE_AGING',
'DNS_RPC_ZONE_UPDATE_UNSECURE', 'DNS_RPC_ZONE_UPDATE_SECURE',
'DNS_RPC_ZONE_READONLY']
return bitmap_string(dnsserver, bitmap_defs, flags)
def ip4_array_string(array):
ret = []
if not array:
return ret
for i in xrange(array.AddrCount):
addr = '%s' % inet_ntoa(pack('i', array.AddrArray[i]))
ret.append(addr)
return ret
def dns_addr_array_string(array):
ret = []
if not array:
return ret
for i in xrange(array.AddrCount):
if array.AddrArray[i].MaxSa[0] == 0x02:
addr = '%d.%d.%d.%d (%d)' % \
tuple(array.AddrArray[i].MaxSa[4:8] + [array.AddrArray[i].MaxSa[3]])
elif array.AddrArray[i].MaxSa[0] == 0x17:
addr = '%x%x:%x%x:%x%x:%x%x:%x%x:%x%x:%x%x:%x%x (%d)' % \
tuple(array.AddrArray[i].MaxSa[4:20] + [array.AddrArray[i].MaxSa[3]])
else:
addr = 'UNKNOWN'
ret.append(addr)
return ret
def dns_type_flag(rec_type):
rtype = rec_type.upper()
if rtype == 'A':
record_type = dnsp.DNS_TYPE_A
elif rtype == 'AAAA':
record_type = dnsp.DNS_TYPE_AAAA
elif rtype == 'PTR':
record_type = dnsp.DNS_TYPE_PTR
elif rtype == 'NS':
record_type = dnsp.DNS_TYPE_NS
elif rtype == 'CNAME':
record_type = dnsp.DNS_TYPE_CNAME
elif rtype == 'SOA':
record_type = dnsp.DNS_TYPE_SOA
elif rtype == 'MX':
record_type = dnsp.DNS_TYPE_MX
elif rtype == 'SRV':
record_type = dnsp.DNS_TYPE_SRV
elif rtype == 'TXT':
record_type = dnsp.DNS_TYPE_TXT
elif rtype == 'ALL':
record_type = dnsp.DNS_TYPE_ALL
else:
raise CommandError('Unknown type of DNS record %s' % rec_type)
return record_type
def dns_client_version(cli_version):
version = cli_version.upper()
if version == 'W2K':
client_version = dnsserver.DNS_CLIENT_VERSION_W2K
elif version == 'DOTNET':
client_version = dnsserver.DNS_CLIENT_VERSION_DOTNET
elif version == 'LONGHORN':
client_version = dnsserver.DNS_CLIENT_VERSION_LONGHORN
else:
raise CommandError('Unknown client version %s' % cli_version)
return client_version
def print_serverinfo(outf, typeid, serverinfo):
outf.write(' dwVersion : 0x%x\n' % serverinfo.dwVersion)
outf.write(' fBootMethod : %s\n' % boot_method_string(serverinfo.fBootMethod))
outf.write(' fAdminConfigured : %s\n' % bool_string(serverinfo.fAdminConfigured))
outf.write(' fAllowUpdate : %s\n' % bool_string(serverinfo.fAllowUpdate))
outf.write(' fDsAvailable : %s\n' % bool_string(serverinfo.fDsAvailable))
outf.write(' pszServerName : %s\n' % serverinfo.pszServerName)
outf.write(' pszDsContainer : %s\n' % serverinfo.pszDsContainer)
if typeid != dnsserver.DNSSRV_TYPEID_SERVER_INFO:
outf.write(' aipServerAddrs : %s\n' %
ip4_array_string(serverinfo.aipServerAddrs))
outf.write(' aipListenAddrs : %s\n' %
ip4_array_string(serverinfo.aipListenAddrs))
outf.write(' aipForwarders : %s\n' %
ip4_array_string(serverinfo.aipForwarders))
else:
outf.write(' aipServerAddrs : %s\n' %
dns_addr_array_string(serverinfo.aipServerAddrs))
outf.write(' aipListenAddrs : %s\n' %
dns_addr_array_string(serverinfo.aipListenAddrs))
outf.write(' aipForwarders : %s\n' %
dns_addr_array_string(serverinfo.aipForwarders))
outf.write(' dwLogLevel : %d\n' % serverinfo.dwLogLevel)
outf.write(' dwDebugLevel : %d\n' % serverinfo.dwDebugLevel)
outf.write(' dwForwardTimeout : %d\n' % serverinfo.dwForwardTimeout)
    outf.write(' dwRpcProtocol : 0x%x\n' % serverinfo.dwRpcProtocol)
outf.write(' dwNameCheckFlag : %s\n' % name_check_flag_string(serverinfo.dwNameCheckFlag))
outf.write(' cAddressAnswerLimit : %d\n' % serverinfo.cAddressAnswerLimit)
outf.write(' dwRecursionRetry : %d\n' % serverinfo.dwRecursionRetry)
outf.write(' dwRecursionTimeout : %d\n' % serverinfo.dwRecursionTimeout)
outf.write(' dwMaxCacheTtl : %d\n' % serverinfo.dwMaxCacheTtl)
outf.write(' dwDsPollingInterval : %d\n' % serverinfo.dwDsPollingInterval)
outf.write(' dwScavengingInterval : %d\n' % serverinfo.dwScavengingInterval)
outf.write(' dwDefaultRefreshInterval : %d\n' % serverinfo.dwDefaultRefreshInterval)
outf.write(' dwDefaultNoRefreshInterval : %d\n' % serverinfo.dwDefaultNoRefreshInterval)
outf.write(' fAutoReverseZones : %s\n' % bool_string(serverinfo.fAutoReverseZones))
outf.write(' fAutoCacheUpdate : %s\n' % bool_string(serverinfo.fAutoCacheUpdate))
outf.write(' fRecurseAfterForwarding : %s\n' % bool_string(serverinfo.fRecurseAfterForwarding))
outf.write(' fForwardDelegations : %s\n' % bool_string(serverinfo.fForwardDelegations))
outf.write(' fNoRecursion : %s\n' % bool_string(serverinfo.fNoRecursion))
outf.write(' fSecureResponses : %s\n' % bool_string(serverinfo.fSecureResponses))
outf.write(' fRoundRobin : %s\n' % bool_string(serverinfo.fRoundRobin))
outf.write(' fLocalNetPriority : %s\n' % bool_string(serverinfo.fLocalNetPriority))
outf.write(' fBindSecondaries : %s\n' % bool_string(serverinfo.fBindSecondaries))
outf.write(' fWriteAuthorityNs : %s\n' % bool_string(serverinfo.fWriteAuthorityNs))
outf.write(' fStrictFileParsing : %s\n' % bool_string(serverinfo.fStrictFileParsing))
outf.write(' fLooseWildcarding : %s\n' % bool_string(serverinfo.fLooseWildcarding))
outf.write(' fDefaultAgingState : %s\n' % bool_string(serverinfo.fDefaultAgingState))
if typeid != dnsserver.DNSSRV_TYPEID_SERVER_INFO_W2K:
outf.write(' dwRpcStructureVersion : 0x%x\n' % serverinfo.dwRpcStructureVersion)
outf.write(' aipLogFilter : %s\n' % dns_addr_array_string(serverinfo.aipLogFilter))
outf.write(' pwszLogFilePath : %s\n' % serverinfo.pwszLogFilePath)
outf.write(' pszDomainName : %s\n' % serverinfo.pszDomainName)
outf.write(' pszForestName : %s\n' % serverinfo.pszForestName)
outf.write(' pszDomainDirectoryPartition : %s\n' % serverinfo.pszDomainDirectoryPartition)
outf.write(' pszForestDirectoryPartition : %s\n' % serverinfo.pszForestDirectoryPartition)
outf.write(' dwLocalNetPriorityNetMask : 0x%x\n' % serverinfo.dwLocalNetPriorityNetMask)
outf.write(' dwLastScavengeTime : %d\n' % serverinfo.dwLastScavengeTime)
outf.write(' dwEventLogLevel : %d\n' % serverinfo.dwEventLogLevel)
outf.write(' dwLogFileMaxSize : %d\n' % serverinfo.dwLogFileMaxSize)
outf.write(' dwDsForestVersion : %d\n' % serverinfo.dwDsForestVersion)
outf.write(' dwDsDomainVersion : %d\n' % serverinfo.dwDsDomainVersion)
outf.write(' dwDsDsaVersion : %d\n' % serverinfo.dwDsDsaVersion)
if typeid == dnsserver.DNSSRV_TYPEID_SERVER_INFO:
outf.write(' fReadOnlyDC : %s\n' % bool_string(serverinfo.fReadOnlyDC))
def print_zoneinfo(outf, typeid, zoneinfo):
outf.write(' pszZoneName : %s\n' % zoneinfo.pszZoneName)
outf.write(' dwZoneType : %s\n' % zone_type_string(zoneinfo.dwZoneType))
outf.write(' fReverse : %s\n' % bool_string(zoneinfo.fReverse))
outf.write(' fAllowUpdate : %s\n' % zone_update_string(zoneinfo.fAllowUpdate))
outf.write(' fPaused : %s\n' % bool_string(zoneinfo.fPaused))
outf.write(' fShutdown : %s\n' % bool_string(zoneinfo.fShutdown))
outf.write(' fAutoCreated : %s\n' % bool_string(zoneinfo.fAutoCreated))
outf.write(' fUseDatabase : %s\n' % bool_string(zoneinfo.fUseDatabase))
outf.write(' pszDataFile : %s\n' % zoneinfo.pszDataFile)
if typeid != dnsserver.DNSSRV_TYPEID_ZONE_INFO:
outf.write(' aipMasters : %s\n' %
ip4_array_string(zoneinfo.aipMasters))
else:
outf.write(' aipMasters : %s\n' %
dns_addr_array_string(zoneinfo.aipMasters))
outf.write(' fSecureSecondaries : %s\n' % zone_secondary_security_string(zoneinfo.fSecureSecondaries))
outf.write(' fNotifyLevel : %s\n' % zone_notify_level_string(zoneinfo.fNotifyLevel))
if typeid != dnsserver.DNSSRV_TYPEID_ZONE_INFO:
outf.write(' aipSecondaries : %s\n' %
ip4_array_string(zoneinfo.aipSecondaries))
outf.write(' aipNotify : %s\n' %
ip4_array_string(zoneinfo.aipNotify))
else:
outf.write(' aipSecondaries : %s\n' %
dns_addr_array_string(zoneinfo.aipSecondaries))
outf.write(' aipNotify : %s\n' %
dns_addr_array_string(zoneinfo.aipNotify))
outf.write(' fUseWins : %s\n' % bool_string(zoneinfo.fUseWins))
outf.write(' fUseNbstat : %s\n' % bool_string(zoneinfo.fUseNbstat))
outf.write(' fAging : %s\n' % bool_string(zoneinfo.fAging))
outf.write(' dwNoRefreshInterval : %d\n' % zoneinfo.dwNoRefreshInterval)
outf.write(' dwRefreshInterval : %d\n' % zoneinfo.dwRefreshInterval)
outf.write(' dwAvailForScavengeTime : %d\n' % zoneinfo.dwAvailForScavengeTime)
if typeid != dnsserver.DNSSRV_TYPEID_ZONE_INFO:
outf.write(' aipScavengeServers : %s\n' %
ip4_array_string(zoneinfo.aipScavengeServers))
else:
outf.write(' aipScavengeServers : %s\n' %
dns_addr_array_string(zoneinfo.aipScavengeServers))
if typeid != dnsserver.DNSSRV_TYPEID_ZONE_INFO_W2K:
outf.write(' dwRpcStructureVersion : 0x%x\n' % zoneinfo.dwRpcStructureVersion)
outf.write(' dwForwarderTimeout : %d\n' % zoneinfo.dwForwarderTimeout)
outf.write(' fForwarderSlave : %d\n' % zoneinfo.fForwarderSlave)
if typeid != dnsserver.DNSSRV_TYPEID_ZONE_INFO:
outf.write(' aipLocalMasters : %s\n' %
ip4_array_string(zoneinfo.aipLocalMasters))
else:
outf.write(' aipLocalMasters : %s\n' %
dns_addr_array_string(zoneinfo.aipLocalMasters))
outf.write(' dwDpFlags : %s\n' % dp_flags_string(zoneinfo.dwDpFlags))
outf.write(' pszDpFqdn : %s\n' % zoneinfo.pszDpFqdn)
outf.write(' pwszZoneDn : %s\n' % zoneinfo.pwszZoneDn)
outf.write(' dwLastSuccessfulSoaCheck : %d\n' % zoneinfo.dwLastSuccessfulSoaCheck)
outf.write(' dwLastSuccessfulXfr : %d\n' % zoneinfo.dwLastSuccessfulXfr)
if typeid == dnsserver.DNSSRV_TYPEID_ZONE_INFO:
outf.write(' fQueuedForBackgroundLoad : %s\n' % bool_string(zoneinfo.fQueuedForBackgroundLoad))
outf.write(' fBackgroundLoadInProgress : %s\n' % bool_string(zoneinfo.fBackgroundLoadInProgress))
outf.write(' fReadOnlyZone : %s\n' % bool_string(zoneinfo.fReadOnlyZone))
outf.write(' dwLastXfrAttempt : %d\n' % zoneinfo.dwLastXfrAttempt)
outf.write(' dwLastXfrResult : %d\n' % zoneinfo.dwLastXfrResult)
def print_zone(outf, typeid, zone):
outf.write(' pszZoneName : %s\n' % zone.pszZoneName)
outf.write(' Flags : %s\n' % zone_flags_string(zone.Flags))
outf.write(' ZoneType : %s\n' % zone_type_string(zone.ZoneType))
outf.write(' Version : %s\n' % zone.Version)
if typeid != dnsserver.DNSSRV_TYPEID_ZONE_W2K:
outf.write(' dwDpFlags : %s\n' % dp_flags_string(zone.dwDpFlags))
outf.write(' pszDpFqdn : %s\n' % zone.pszDpFqdn)
def print_enumzones(outf, typeid, zones):
outf.write(' %d zone(s) found\n' % zones.dwZoneCount)
for zone in zones.ZoneArray:
outf.write('\n')
print_zone(outf, typeid, zone)
def print_dns_record(outf, rec):
if rec.wType == dnsp.DNS_TYPE_A:
mesg = 'A: %s' % (rec.data)
elif rec.wType == dnsp.DNS_TYPE_AAAA:
mesg = 'AAAA: %s' % (rec.data)
elif rec.wType == dnsp.DNS_TYPE_PTR:
mesg = 'PTR: %s' % (rec.data.str)
elif rec.wType == dnsp.DNS_TYPE_NS:
mesg = 'NS: %s' % (rec.data.str)
elif rec.wType == dnsp.DNS_TYPE_CNAME:
mesg = 'CNAME: %s' % (rec.data.str)
elif rec.wType == dnsp.DNS_TYPE_SOA:
mesg = 'SOA: serial=%d, refresh=%d, retry=%d, expire=%d, minttl=%d, ns=%s, email=%s' % (
rec.data.dwSerialNo,
rec.data.dwRefresh,
rec.data.dwRetry,
rec.data.dwExpire,
rec.data.dwMinimumTtl,
rec.data.NamePrimaryServer.str,
rec.data.ZoneAdministratorEmail.str)
elif rec.wType == dnsp.DNS_TYPE_MX:
mesg = 'MX: %s (%d)' % (rec.data.nameExchange.str, rec.data.wPreference)
elif rec.wType == dnsp.DNS_TYPE_SRV:
mesg = 'SRV: %s (%d, %d, %d)' % (rec.data.nameTarget.str, rec.data.wPort,
rec.data.wPriority, rec.data.wWeight)
elif rec.wType == dnsp.DNS_TYPE_TXT:
slist = ['"%s"' % name.str for name in rec.data.str]
mesg = 'TXT: %s' % ','.join(slist)
else:
mesg = 'Unknown: '
outf.write(' %s (flags=%x, serial=%d, ttl=%d)\n' % (
mesg, rec.dwFlags, rec.dwSerial, rec.dwTtlSeconds))
def print_dnsrecords(outf, records):
for rec in records.rec:
outf.write(' Name=%s, Records=%d, Children=%d\n' % (
rec.dnsNodeName.str,
rec.wRecordCount,
rec.dwChildCount))
for dns_rec in rec.records:
print_dns_record(outf, dns_rec)
#
# Always create a copy of strings when creating DNS_RPC_RECORDs
# to overcome the bug in pidl-generated Python bindings.
#
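# Illustrative note (not in the original): this is why the record classes below
# keep their own copies (e.g. self._ip_addr = ip_addr[:] in ARecord) and assign
# those to self.data -- the copy stays referenced by the record object, so the
# buffer the generated binding points at cannot be garbage collected early.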
class ARecord(dnsserver.DNS_RPC_RECORD):
def __init__(self, ip_addr, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE,
node_flag=0):
super(ARecord, self).__init__()
self.wType = dnsp.DNS_TYPE_A
self.dwFlags = rank | node_flag
self.dwSerial = serial
self.dwTtlSeconds = ttl
self._ip_addr = ip_addr[:]
self.data = self._ip_addr
class AAAARecord(dnsserver.DNS_RPC_RECORD):
def __init__(self, ip6_addr, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE,
node_flag=0):
super(AAAARecord, self).__init__()
self.wType = dnsp.DNS_TYPE_AAAA
self.dwFlags = rank | node_flag
self.dwSerial = serial
self.dwTtlSeconds = ttl
self._ip6_addr = ip6_addr[:]
self.data = self._ip6_addr
class PTRRecord(dnsserver.DNS_RPC_RECORD):
def __init__(self, ptr, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE,
node_flag=0):
super(PTRRecord, self).__init__()
self.wType = dnsp.DNS_TYPE_PTR
self.dwFlags = rank | node_flag
self.dwSerial = serial
self.dwTtlSeconds = ttl
self._ptr = ptr[:]
ptr_name = dnsserver.DNS_RPC_NAME()
ptr_name.str = self._ptr
ptr_name.len = len(ptr)
self.data = ptr_name
class CNameRecord(dnsserver.DNS_RPC_RECORD):
def __init__(self, cname, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE,
node_flag=0):
super(CNameRecord, self).__init__()
self.wType = dnsp.DNS_TYPE_CNAME
self.dwFlags = rank | node_flag
self.dwSerial = serial
self.dwTtlSeconds = ttl
self._cname = cname[:]
cname_name = dnsserver.DNS_RPC_NAME()
cname_name.str = self._cname
cname_name.len = len(cname)
self.data = cname_name
class NSRecord(dnsserver.DNS_RPC_RECORD):
def __init__(self, dns_server, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE,
node_flag=0):
super(NSRecord, self).__init__()
self.wType = dnsp.DNS_TYPE_NS
self.dwFlags = rank | node_flag
self.dwSerial = serial
self.dwTtlSeconds = ttl
self._dns_server = dns_server[:]
ns = dnsserver.DNS_RPC_NAME()
ns.str = self._dns_server
ns.len = len(dns_server)
self.data = ns
class MXRecord(dnsserver.DNS_RPC_RECORD):
def __init__(self, mail_server, preference, serial=1, ttl=900,
rank=dnsp.DNS_RANK_ZONE, node_flag=0):
super(MXRecord, self).__init__()
self.wType = dnsp.DNS_TYPE_MX
self.dwFlags = rank | node_flag
self.dwSerial = serial
self.dwTtlSeconds = ttl
self._mail_server = mail_server[:]
mx = dnsserver.DNS_RPC_RECORD_NAME_PREFERENCE()
mx.wPreference = preference
mx.nameExchange.str = self._mail_server
mx.nameExchange.len = len(mail_server)
self.data = mx
class SOARecord(dnsserver.DNS_RPC_RECORD):
def __init__(self, mname, rname, serial=1, refresh=900, retry=600,
expire=86400, minimum=3600, ttl=3600, rank=dnsp.DNS_RANK_ZONE,
node_flag=dnsp.DNS_RPC_FLAG_AUTH_ZONE_ROOT):
super(SOARecord, self).__init__()
self.wType = dnsp.DNS_TYPE_SOA
self.dwFlags = rank | node_flag
self.dwSerial = serial
self.dwTtlSeconds = ttl
self._mname = mname[:]
self._rname = rname[:]
soa = dnsserver.DNS_RPC_RECORD_SOA()
soa.dwSerialNo = serial
soa.dwRefresh = refresh
soa.dwRetry = retry
soa.dwExpire = expire
soa.dwMinimumTtl = minimum
soa.NamePrimaryServer.str = self._mname
soa.NamePrimaryServer.len = len(mname)
soa.ZoneAdministratorEmail.str = self._rname
soa.ZoneAdministratorEmail.len = len(rname)
self.data = soa
class SRVRecord(dnsserver.DNS_RPC_RECORD):
def __init__(self, target, port, priority=0, weight=100, serial=1, ttl=900,
rank=dnsp.DNS_RANK_ZONE, node_flag=0):
super(SRVRecord, self).__init__()
self.wType = dnsp.DNS_TYPE_SRV
self.dwFlags = rank | node_flag
self.dwSerial = serial
self.dwTtlSeconds = ttl
self._target = target[:]
srv = dnsserver.DNS_RPC_RECORD_SRV()
srv.wPriority = priority
srv.wWeight = weight
srv.wPort = port
srv.nameTarget.str = self._target
srv.nameTarget.len = len(target)
self.data = srv
class TXTRecord(dnsserver.DNS_RPC_RECORD):
def __init__(self, slist, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE,
node_flag=0):
super(TXTRecord, self).__init__()
self.wType = dnsp.DNS_TYPE_TXT
self.dwFlags = rank | node_flag
self.dwSerial = serial
self.dwTtlSeconds = ttl
self._slist = []
for s in slist:
self._slist.append(s[:])
names = []
for s in self._slist:
name = dnsserver.DNS_RPC_NAME()
name.str = s
name.len = len(s)
names.append(name)
txt = dnsserver.DNS_RPC_RECORD_STRING()
txt.count = len(slist)
txt.str = names
self.data = txt
# Convert data into a dns record
def data_to_dns_record(record_type, data):
if record_type == dnsp.DNS_TYPE_A:
rec = ARecord(data)
elif record_type == dnsp.DNS_TYPE_AAAA:
rec = AAAARecord(data)
elif record_type == dnsp.DNS_TYPE_PTR:
rec = PTRRecord(data)
elif record_type == dnsp.DNS_TYPE_CNAME:
rec = CNameRecord(data)
elif record_type == dnsp.DNS_TYPE_NS:
rec = NSRecord(data)
elif record_type == dnsp.DNS_TYPE_MX:
tmp = data.split(' ')
if len(tmp) != 2:
raise CommandError('Data requires 2 elements - mail_server, preference')
mail_server = tmp[0]
preference = int(tmp[1])
rec = MXRecord(mail_server, preference)
elif record_type == dnsp.DNS_TYPE_SRV:
tmp = data.split(' ')
if len(tmp) != 4:
raise CommandError('Data requires 4 elements - server, port, priority, weight')
server = tmp[0]
port = int(tmp[1])
priority = int(tmp[2])
weight = int(tmp[3])
rec = SRVRecord(server, port, priority=priority, weight=weight)
elif record_type == dnsp.DNS_TYPE_SOA:
tmp = data.split(' ')
if len(tmp) != 7:
raise CommandError('Data requires 7 elements - nameserver, email, serial, '
'refresh, retry, expire, minimumttl')
nameserver = tmp[0]
email = tmp[1]
serial = int(tmp[2])
refresh = int(tmp[3])
retry = int(tmp[4])
expire = int(tmp[5])
minimum = int(tmp[6])
rec = SOARecord(nameserver, email, serial=serial, refresh=refresh,
retry=retry, expire=expire, minimum=minimum)
elif record_type == dnsp.DNS_TYPE_TXT:
slist = shlex.split(data)
rec = TXTRecord(slist)
else:
raise CommandError('Unsupported record type')
return rec
# Match dns name (of type DNS_RPC_NAME)
def dns_name_equal(n1, n2):
return n1.str.rstrip('.').lower() == n2.str.rstrip('.').lower()
# Match a dns record with specified data
def dns_record_match(dns_conn, server, zone, name, record_type, data):
urec = data_to_dns_record(record_type, data)
select_flags = dnsserver.DNS_RPC_VIEW_AUTHORITY_DATA
try:
buflen, res = dns_conn.DnssrvEnumRecords2(
dnsserver.DNS_CLIENT_VERSION_LONGHORN, 0, server, zone, name, None,
record_type, select_flags, None, None)
except RuntimeError, e:
return None
if not res or res.count == 0:
return None
rec_match = None
for rec in res.rec[0].records:
if rec.wType != record_type:
continue
found = False
if record_type == dnsp.DNS_TYPE_A:
if rec.data == urec.data:
found = True
elif record_type == dnsp.DNS_TYPE_AAAA:
if rec.data == urec.data:
found = True
elif record_type == dnsp.DNS_TYPE_PTR:
if dns_name_equal(rec.data, urec.data):
found = True
elif record_type == dnsp.DNS_TYPE_CNAME:
if dns_name_equal(rec.data, urec.data):
found = True
elif record_type == dnsp.DNS_TYPE_NS:
if dns_name_equal(rec.data, urec.data):
found = True
elif record_type == dnsp.DNS_TYPE_MX:
if dns_name_equal(rec.data.nameExchange, urec.data.nameExchange) and \
rec.data.wPreference == urec.data.wPreference:
found = True
elif record_type == dnsp.DNS_TYPE_SRV:
if rec.data.wPriority == urec.data.wPriority and \
rec.data.wWeight == urec.data.wWeight and \
rec.data.wPort == urec.data.wPort and \
dns_name_equal(rec.data.nameTarget, urec.data.nameTarget):
found = True
elif record_type == dnsp.DNS_TYPE_SOA:
if rec.data.dwSerialNo == urec.data.dwSerialNo and \
rec.data.dwRefresh == urec.data.dwRefresh and \
rec.data.dwRetry == urec.data.dwRetry and \
rec.data.dwExpire == urec.data.dwExpire and \
rec.data.dwMinimumTtl == urec.data.dwMinimumTtl and \
dns_name_equal(rec.data.NamePrimaryServer,
urec.data.NamePrimaryServer) and \
dns_name_equal(rec.data.ZoneAdministratorEmail,
urec.data.ZoneAdministratorEmail):
found = True
elif record_type == dnsp.DNS_TYPE_TXT:
if rec.data.count == urec.data.count:
found = True
for i in xrange(rec.data.count):
found = found and \
(rec.data.str[i].str == urec.data.str[i].str)
if found:
rec_match = rec
break
return rec_match
class cmd_serverinfo(Command):
"""Query for Server information."""
synopsis = '%prog <server> [options]'
takes_args = [ 'server' ]
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_options = [
Option('--client-version', help='Client Version',
default='longhorn', metavar='w2k|dotnet|longhorn',
choices=['w2k','dotnet','longhorn'], dest='cli_ver'),
]
def run(self, server, cli_ver, sambaopts=None, credopts=None,
versionopts=None):
self.lp = sambaopts.get_loadparm()
self.creds = credopts.get_credentials(self.lp)
dns_conn = dns_connect(server, self.lp, self.creds)
client_version = dns_client_version(cli_ver)
typeid, res = dns_conn.DnssrvQuery2(client_version, 0, server,
None, 'ServerInfo')
print_serverinfo(self.outf, typeid, res)
class cmd_zoneinfo(Command):
"""Query for zone information."""
synopsis = '%prog <server> <zone> [options]'
takes_args = [ 'server', 'zone' ]
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_options = [
Option('--client-version', help='Client Version',
default='longhorn', metavar='w2k|dotnet|longhorn',
choices=['w2k','dotnet','longhorn'], dest='cli_ver'),
]
def run(self, server, zone, cli_ver, sambaopts=None, credopts=None,
versionopts=None):
self.lp = sambaopts.get_loadparm()
self.creds = credopts.get_credentials(self.lp)
dns_conn = dns_connect(server, self.lp, self.creds)
client_version = dns_client_version(cli_ver)
typeid, res = dns_conn.DnssrvQuery2(client_version, 0, server, zone,
'ZoneInfo')
print_zoneinfo(self.outf, typeid, res)
class cmd_zonelist(Command):
"""Query for zones."""
synopsis = '%prog <server> [options]'
takes_args = [ 'server' ]
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_options = [
Option('--client-version', help='Client Version',
default='longhorn', metavar='w2k|dotnet|longhorn',
choices=['w2k','dotnet','longhorn'], dest='cli_ver'),
Option('--primary', help='List primary zones (default)',
action='store_true', dest='primary'),
Option('--secondary', help='List secondary zones',
action='store_true', dest='secondary'),
Option('--cache', help='List cached zones',
action='store_true', dest='cache'),
Option('--auto', help='List automatically created zones',
action='store_true', dest='auto'),
Option('--forward', help='List forward zones',
action='store_true', dest='forward'),
Option('--reverse', help='List reverse zones',
action='store_true', dest='reverse'),
Option('--ds', help='List directory integrated zones',
action='store_true', dest='ds'),
Option('--non-ds', help='List non-directory zones',
action='store_true', dest='nonds')
]
def run(self, server, cli_ver, primary=False, secondary=False, cache=False,
auto=False, forward=False, reverse=False, ds=False, nonds=False,
sambaopts=None, credopts=None, versionopts=None):
request_filter = 0
if primary:
request_filter |= dnsserver.DNS_ZONE_REQUEST_PRIMARY
if secondary:
request_filter |= dnsserver.DNS_ZONE_REQUEST_SECONDARY
if cache:
request_filter |= dnsserver.DNS_ZONE_REQUEST_CACHE
if auto:
request_filter |= dnsserver.DNS_ZONE_REQUEST_AUTO
if forward:
request_filter |= dnsserver.DNS_ZONE_REQUEST_FORWARD
if reverse:
request_filter |= dnsserver.DNS_ZONE_REQUEST_REVERSE
if ds:
request_filter |= dnsserver.DNS_ZONE_REQUEST_DS
if nonds:
request_filter |= dnsserver.DNS_ZONE_REQUEST_NON_DS
if request_filter == 0:
request_filter = dnsserver.DNS_ZONE_REQUEST_PRIMARY
self.lp = sambaopts.get_loadparm()
self.creds = credopts.get_credentials(self.lp)
dns_conn = dns_connect(server, self.lp, self.creds)
client_version = dns_client_version(cli_ver)
typeid, res = dns_conn.DnssrvComplexOperation2(client_version,
0, server, None,
'EnumZones',
dnsserver.DNSSRV_TYPEID_DWORD,
request_filter)
if client_version == dnsserver.DNS_CLIENT_VERSION_W2K:
typeid = dnsserver.DNSSRV_TYPEID_ZONE_W2K
else:
typeid = dnsserver.DNSSRV_TYPEID_ZONE
print_enumzones(self.outf, typeid, res)
class cmd_zonecreate(Command):
"""Create a zone."""
synopsis = '%prog <server> <zone> [options]'
takes_args = [ 'server', 'zone' ]
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_options = [
Option('--client-version', help='Client Version',
default='longhorn', metavar='w2k|dotnet|longhorn',
choices=['w2k','dotnet','longhorn'], dest='cli_ver')
]
def run(self, server, zone, cli_ver, sambaopts=None, credopts=None,
versionopts=None):
self.lp = sambaopts.get_loadparm()
self.creds = credopts.get_credentials(self.lp)
dns_conn = dns_connect(server, self.lp, self.creds)
zone = zone.lower()
client_version = dns_client_version(cli_ver)
if client_version == dnsserver.DNS_CLIENT_VERSION_W2K:
typeid = dnsserver.DNSSRV_TYPEID_ZONE_CREATE_W2K
zone_create_info = dnsserver.DNS_RPC_ZONE_CREATE_INFO_W2K()
zone_create_info.pszZoneName = zone
zone_create_info.dwZoneType = dnsp.DNS_ZONE_TYPE_PRIMARY
zone_create_info.fAging = 0
zone_create_info.fDsIntegrated = 1
zone_create_info.fLoadExisting = 1
elif client_version == dnsserver.DNS_CLIENT_VERSION_DOTNET:
typeid = dnsserver.DNSSRV_TYPEID_ZONE_CREATE_DOTNET
zone_create_info = dnsserver.DNS_RPC_ZONE_CREATE_INFO_DOTNET()
zone_create_info.pszZoneName = zone
zone_create_info.dwZoneType = dnsp.DNS_ZONE_TYPE_PRIMARY
zone_create_info.fAging = 0
zone_create_info.fDsIntegrated = 1
zone_create_info.fLoadExisting = 1
zone_create_info.dwDpFlags = dnsserver.DNS_DP_DOMAIN_DEFAULT
else:
typeid = dnsserver.DNSSRV_TYPEID_ZONE_CREATE
zone_create_info = dnsserver.DNS_RPC_ZONE_CREATE_INFO_LONGHORN()
zone_create_info.pszZoneName = zone
zone_create_info.dwZoneType = dnsp.DNS_ZONE_TYPE_PRIMARY
zone_create_info.fAging = 0
zone_create_info.fDsIntegrated = 1
zone_create_info.fLoadExisting = 1
zone_create_info.dwDpFlags = dnsserver.DNS_DP_DOMAIN_DEFAULT
res = dns_conn.DnssrvOperation2(client_version, 0, server, None,
0, 'ZoneCreate', typeid,
zone_create_info)
typeid = dnsserver.DNSSRV_TYPEID_NAME_AND_PARAM
name_and_param = dnsserver.DNS_RPC_NAME_AND_PARAM()
name_and_param.pszNodeName = 'AllowUpdate'
name_and_param.dwParam = dnsp.DNS_ZONE_UPDATE_SECURE
res = dns_conn.DnssrvOperation2(client_version, 0, server, zone,
0, 'ResetDwordProperty', typeid,
name_and_param)
self.outf.write('Zone %s created successfully\n' % zone)
class cmd_zonedelete(Command):
"""Delete a zone."""
synopsis = '%prog <server> <zone> [options]'
takes_args = [ 'server', 'zone' ]
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
def run(self, server, zone, sambaopts=None, credopts=None,
versionopts=None):
self.lp = sambaopts.get_loadparm()
self.creds = credopts.get_credentials(self.lp)
dns_conn = dns_connect(server, self.lp, self.creds)
zone = zone.lower()
res = dns_conn.DnssrvOperation2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
0, server, zone, 0, 'DeleteZoneFromDs',
dnsserver.DNSSRV_TYPEID_NULL,
None)
        self.outf.write('Zone %s deleted successfully\n' % zone)
class cmd_query(Command):
"""Query a name."""
synopsis = '%prog <server> <zone> <name> <A|AAAA|CNAME|MX|NS|SOA|SRV|TXT|ALL> [options]'
takes_args = [ 'server', 'zone', 'name', 'rtype' ]
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_options = [
Option('--authority', help='Search authoritative records (default)',
action='store_true', dest='authority'),
Option('--cache', help='Search cached records',
action='store_true', dest='cache'),
Option('--glue', help='Search glue records',
action='store_true', dest='glue'),
Option('--root', help='Search root hints',
action='store_true', dest='root'),
Option('--additional', help='List additional records',
action='store_true', dest='additional'),
Option('--no-children', help='Do not list children',
action='store_true', dest='no_children'),
Option('--only-children', help='List only children',
action='store_true', dest='only_children')
]
def run(self, server, zone, name, rtype, authority=False, cache=False,
glue=False, root=False, additional=False, no_children=False,
only_children=False, sambaopts=None, credopts=None,
versionopts=None):
record_type = dns_type_flag(rtype)
select_flags = 0
if authority:
select_flags |= dnsserver.DNS_RPC_VIEW_AUTHORITY_DATA
if cache:
select_flags |= dnsserver.DNS_RPC_VIEW_CACHE_DATA
if glue:
select_flags |= dnsserver.DNS_RPC_VIEW_GLUE_DATA
if root:
select_flags |= dnsserver.DNS_RPC_VIEW_ROOT_HINT_DATA
if additional:
select_flags |= dnsserver.DNS_RPC_VIEW_ADDITIONAL_DATA
if no_children:
select_flags |= dnsserver.DNS_RPC_VIEW_NO_CHILDREN
if only_children:
select_flags |= dnsserver.DNS_RPC_VIEW_ONLY_CHILDREN
if select_flags == 0:
select_flags = dnsserver.DNS_RPC_VIEW_AUTHORITY_DATA
if select_flags == dnsserver.DNS_RPC_VIEW_ADDITIONAL_DATA:
self.outf.write('Specify either --authority or --root along with --additional.\n')
self.outf.write('Assuming --authority.\n')
select_flags |= dnsserver.DNS_RPC_VIEW_AUTHORITY_DATA
self.lp = sambaopts.get_loadparm()
self.creds = credopts.get_credentials(self.lp)
dns_conn = dns_connect(server, self.lp, self.creds)
buflen, res = dns_conn.DnssrvEnumRecords2(
dnsserver.DNS_CLIENT_VERSION_LONGHORN, 0, server, zone, name,
None, record_type, select_flags, None, None)
print_dnsrecords(self.outf, res)
class cmd_roothints(Command):
"""Query root hints."""
synopsis = '%prog <server> [<name>] [options]'
takes_args = [ 'server', 'name?' ]
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
def run(self, server, name='.', sambaopts=None, credopts=None,
versionopts=None):
record_type = dnsp.DNS_TYPE_NS
select_flags = (dnsserver.DNS_RPC_VIEW_ROOT_HINT_DATA |
dnsserver.DNS_RPC_VIEW_ADDITIONAL_DATA)
self.lp = sambaopts.get_loadparm()
self.creds = credopts.get_credentials(self.lp)
dns_conn = dns_connect(server, self.lp, self.creds)
buflen, res = dns_conn.DnssrvEnumRecords2(
dnsserver.DNS_CLIENT_VERSION_LONGHORN, 0, server, '..RootHints',
name, None, record_type, select_flags, None, None)
print_dnsrecords(self.outf, res)
class cmd_add_record(Command):
"""Add a DNS record
For each type data contents are as follows:
A ipv4_address_string
AAAA ipv6_address_string
PTR fqdn_string
CNAME fqdn_string
NS fqdn_string
MX "fqdn_string preference"
SRV "fqdn_string port priority weight"
TXT "'string1' 'string2' ..."
"""
synopsis = '%prog <server> <zone> <name> <A|AAAA|PTR|CNAME|NS|MX|SRV|TXT> <data>'
takes_args = [ 'server', 'zone', 'name', 'rtype', 'data' ]
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
def run(self, server, zone, name, rtype, data, sambaopts=None,
credopts=None, versionopts=None):
if rtype.upper() not in ('A','AAAA','PTR','CNAME','NS','MX','SRV','TXT'):
raise CommandError('Adding record of type %s is not supported' % rtype)
record_type = dns_type_flag(rtype)
rec = data_to_dns_record(record_type, data)
self.lp = sambaopts.get_loadparm()
self.creds = credopts.get_credentials(self.lp)
dns_conn = dns_connect(server, self.lp, self.creds)
rec_match = dns_record_match(dns_conn, server, zone, name, record_type,
data)
if rec_match is not None:
raise CommandError('Record already exists')
add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
add_rec_buf.rec = rec
dns_conn.DnssrvUpdateRecord2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
0, server, zone, name, add_rec_buf, None)
self.outf.write('Record added successfully\n')
class cmd_update_record(Command):
"""Update a DNS record
For each type data contents are as follows:
A ipv4_address_string
AAAA ipv6_address_string
PTR fqdn_string
CNAME fqdn_string
NS fqdn_string
MX "fqdn_string preference"
SOA "fqdn_dns fqdn_email serial refresh retry expire minimumttl"
SRV "fqdn_string port priority weight"
TXT "'string1' 'string2' ..."
"""
synopsis = '%prog <server> <zone> <name> <A|AAAA|PTR|CNAME|NS|MX|SOA|SRV|TXT> <olddata> <newdata>'
takes_args = [ 'server', 'zone', 'name', 'rtype', 'olddata', 'newdata' ]
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
def run(self, server, zone, name, rtype, olddata, newdata,
sambaopts=None, credopts=None, versionopts=None):
if rtype.upper() not in ('A','AAAA','PTR','CNAME','NS','MX','SOA','SRV','TXT'):
raise CommandError('Updating record of type %s is not supported' % rtype)
record_type = dns_type_flag(rtype)
rec = data_to_dns_record(record_type, newdata)
self.lp = sambaopts.get_loadparm()
self.creds = credopts.get_credentials(self.lp)
dns_conn = dns_connect(server, self.lp, self.creds)
rec_match = dns_record_match(dns_conn, server, zone, name, record_type,
olddata)
if not rec_match:
raise CommandError('Record does not exist')
# Copy properties from existing record to new record
rec.dwFlags = rec_match.dwFlags
rec.dwSerial = rec_match.dwSerial
rec.dwTtlSeconds = rec_match.dwTtlSeconds
rec.dwTimeStamp = rec_match.dwTimeStamp
add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
add_rec_buf.rec = rec
del_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
del_rec_buf.rec = rec_match
dns_conn.DnssrvUpdateRecord2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
0,
server,
zone,
name,
add_rec_buf,
del_rec_buf)
self.outf.write('Record updated successfully\n')
class cmd_delete_record(Command):
"""Delete a DNS record
For each type data contents are as follows:
A ipv4_address_string
AAAA ipv6_address_string
PTR fqdn_string
CNAME fqdn_string
NS fqdn_string
MX "fqdn_string preference"
SRV "fqdn_string port priority weight"
TXT "'string1' 'string2' ..."
"""
synopsis = '%prog <server> <zone> <name> <A|AAAA|PTR|CNAME|NS|MX|SRV|TXT> <data>'
takes_args = [ 'server', 'zone', 'name', 'rtype', 'data' ]
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
def run(self, server, zone, name, rtype, data, sambaopts=None, credopts=None, versionopts=None):
if rtype.upper() not in ('A','AAAA','PTR','CNAME','NS','MX','SRV','TXT'):
raise CommandError('Deleting record of type %s is not supported' % rtype)
record_type = dns_type_flag(rtype)
self.lp = sambaopts.get_loadparm()
self.creds = credopts.get_credentials(self.lp)
dns_conn = dns_connect(server, self.lp, self.creds)
rec_match = dns_record_match(dns_conn, server, zone, name, record_type, data)
if not rec_match:
raise CommandError('Record does not exist')
del_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
del_rec_buf.rec = rec_match
dns_conn.DnssrvUpdateRecord2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
0,
server,
zone,
name,
None,
del_rec_buf)
self.outf.write('Record deleted successfully\n')
class cmd_dns(SuperCommand):
"""Domain Name Service (DNS) management."""
subcommands = {}
subcommands['serverinfo'] = cmd_serverinfo()
subcommands['zoneinfo'] = cmd_zoneinfo()
subcommands['zonelist'] = cmd_zonelist()
subcommands['zonecreate'] = cmd_zonecreate()
subcommands['zonedelete'] = cmd_zonedelete()
subcommands['query'] = cmd_query()
subcommands['roothints'] = cmd_roothints()
subcommands['add'] = cmd_add_record()
subcommands['update'] = cmd_update_record()
subcommands['delete'] = cmd_delete_record()
```
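The record-data strings accepted by `cmd_add_record`, `cmd_update_record` and `data_to_dns_record` above use simple space-separated layouts (documented in the command docstrings). A self-contained sketch of that parsing, returning plain tuples instead of `dnsserver` record objects so it runs without the Samba bindings:
```python
# Sketch: mirror the space-separated data layouts parsed by data_to_dns_record.
def parse_record_data(rtype, data):
    rtype = rtype.upper()
    if rtype in ('A', 'AAAA', 'PTR', 'CNAME', 'NS'):
        return (rtype, data)                            # single address / fqdn
    if rtype == 'MX':                                   # "fqdn_string preference"
        server, preference = data.split(' ')
        return (rtype, server, int(preference))
    if rtype == 'SRV':                                  # "fqdn_string port priority weight"
        target, port, priority, weight = data.split(' ')
        return (rtype, target, int(port), int(priority), int(weight))
    raise ValueError('unsupported record type %s' % rtype)

print(parse_record_data('MX', 'mail.example.com 10'))
print(parse_record_data('SRV', 'ldap.example.com 389 0 100'))
```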
#### File: dist-packages/samba/ntacls.py
```python
import os
import samba.xattr_native, samba.xattr_tdb, samba.posix_eadb
from samba.dcerpc import security, xattr, idmap
from samba.ndr import ndr_pack, ndr_unpack
from samba.samba3 import smbd
class XattrBackendError(Exception):
"""A generic xattr backend error."""
def checkset_backend(lp, backend, eadbfile):
    '''Return a (backend module, database path) tuple, or (None, None) for native xattrs'''
if backend is None:
xattr_tdb = lp.get("xattr_tdb:file")
if xattr_tdb is not None:
return (samba.xattr_tdb, lp.get("xattr_tdb:file"))
posix_eadb = lp.get("posix:eadb")
if posix_eadb is not None:
return (samba.posix_eadb, lp.get("posix:eadb"))
return (None, None)
elif backend == "native":
return (None, None)
elif backend == "eadb":
if eadbfile is not None:
return (samba.posix_eadb, eadbfile)
else:
return (samba.posix_eadb, os.path.abspath(os.path.join(lp.get("private dir"), "eadb.tdb")))
elif backend == "tdb":
if eadbfile is not None:
return (samba.xattr_tdb, eadbfile)
else:
return (samba.xattr_tdb, os.path.abspath(os.path.join(lp.get("state dir"), "xattr.tdb")))
else:
raise XattrBackendError("Invalid xattr backend choice %s"%backend)
def getntacl(lp, file, backend=None, eadbfile=None, direct_db_access=True, service=None):
if direct_db_access:
(backend_obj, dbname) = checkset_backend(lp, backend, eadbfile)
if dbname is not None:
try:
attribute = backend_obj.wrap_getxattr(dbname, file,
xattr.XATTR_NTACL_NAME)
except Exception:
# FIXME: Don't catch all exceptions, just those related to opening
# xattrdb
print "Fail to open %s" % dbname
attribute = samba.xattr_native.wrap_getxattr(file,
xattr.XATTR_NTACL_NAME)
else:
attribute = samba.xattr_native.wrap_getxattr(file,
xattr.XATTR_NTACL_NAME)
ntacl = ndr_unpack(xattr.NTACL, attribute)
if ntacl.version == 1:
return ntacl.info
elif ntacl.version == 2:
return ntacl.info.sd
elif ntacl.version == 3:
return ntacl.info.sd
elif ntacl.version == 4:
return ntacl.info.sd
else:
return smbd.get_nt_acl(file, security.SECINFO_OWNER | security.SECINFO_GROUP | security.SECINFO_DACL | security.SECINFO_SACL, service=service)
def setntacl(lp, file, sddl, domsid, backend=None, eadbfile=None, use_ntvfs=True, skip_invalid_chown=False, passdb=None, service=None):
assert(isinstance(domsid, str) or isinstance(domsid, security.dom_sid))
if isinstance(domsid, str):
sid = security.dom_sid(domsid)
elif isinstance(domsid, security.dom_sid):
sid = domsid
domsid = str(sid)
assert(isinstance(sddl, str) or isinstance(sddl, security.descriptor))
if isinstance(sddl, str):
sd = security.descriptor.from_sddl(sddl, sid)
elif isinstance(sddl, security.descriptor):
sd = sddl
sddl = sd.as_sddl(sid)
if not use_ntvfs and skip_invalid_chown:
# Check if the owner can be resolved as a UID
(owner_id, owner_type) = passdb.sid_to_id(sd.owner_sid)
if ((owner_type != idmap.ID_TYPE_UID) and (owner_type != idmap.ID_TYPE_BOTH)):
# Check if this particular owner SID was domain admins,
# because we special-case this as mapping to
# 'administrator' instead.
if sd.owner_sid == security.dom_sid("%s-%d" % (domsid, security.DOMAIN_RID_ADMINS)):
administrator = security.dom_sid("%s-%d" % (domsid, security.DOMAIN_RID_ADMINISTRATOR))
(admin_id, admin_type) = passdb.sid_to_id(administrator)
# Confirm we have a UID for administrator
if ((admin_type == idmap.ID_TYPE_UID) or (admin_type == idmap.ID_TYPE_BOTH)):
# Set it, changing the owner to 'administrator' rather than domain admins
sd2 = sd
sd2.owner_sid = administrator
smbd.set_nt_acl(file, security.SECINFO_OWNER |security.SECINFO_GROUP | security.SECINFO_DACL | security.SECINFO_SACL, sd2, service=service)
# and then set an NTVFS ACL (which does not set the posix ACL) to pretend the owner really was set
use_ntvfs = True
else:
raise XattrBackendError("Unable to find UID for domain administrator %s, got id %d of type %d" % (administrator, admin_id, admin_type))
else:
# For all other owning users, reset the owner to root
# and then set the ACL without changing the owner
#
# This won't work in test environments, as it tries a real (rather than xattr-based fake) chown
os.chown(file, 0, 0)
smbd.set_nt_acl(file, security.SECINFO_GROUP | security.SECINFO_DACL | security.SECINFO_SACL, sd, service=service)
if use_ntvfs:
(backend_obj, dbname) = checkset_backend(lp, backend, eadbfile)
ntacl = xattr.NTACL()
ntacl.version = 1
ntacl.info = sd
if dbname is not None:
try:
backend_obj.wrap_setxattr(dbname,
file, xattr.XATTR_NTACL_NAME, ndr_pack(ntacl))
except Exception:
# FIXME: Don't catch all exceptions, just those related to opening
# xattrdb
print "Fail to open %s" % dbname
samba.xattr_native.wrap_setxattr(file, xattr.XATTR_NTACL_NAME,
ndr_pack(ntacl))
else:
samba.xattr_native.wrap_setxattr(file, xattr.XATTR_NTACL_NAME,
ndr_pack(ntacl))
else:
smbd.set_nt_acl(file, security.SECINFO_OWNER | security.SECINFO_GROUP | security.SECINFO_DACL | security.SECINFO_SACL, sd, service=service)
def ldapmask2filemask(ldm):
"""Takes the access mask of a DS ACE and transform them in a File ACE mask.
"""
RIGHT_DS_CREATE_CHILD = 0x00000001
RIGHT_DS_DELETE_CHILD = 0x00000002
RIGHT_DS_LIST_CONTENTS = 0x00000004
ACTRL_DS_SELF = 0x00000008
RIGHT_DS_READ_PROPERTY = 0x00000010
RIGHT_DS_WRITE_PROPERTY = 0x00000020
RIGHT_DS_DELETE_TREE = 0x00000040
RIGHT_DS_LIST_OBJECT = 0x00000080
RIGHT_DS_CONTROL_ACCESS = 0x00000100
FILE_READ_DATA = 0x0001
FILE_LIST_DIRECTORY = 0x0001
FILE_WRITE_DATA = 0x0002
FILE_ADD_FILE = 0x0002
FILE_APPEND_DATA = 0x0004
FILE_ADD_SUBDIRECTORY = 0x0004
FILE_CREATE_PIPE_INSTANCE = 0x0004
FILE_READ_EA = 0x0008
FILE_WRITE_EA = 0x0010
FILE_EXECUTE = 0x0020
FILE_TRAVERSE = 0x0020
FILE_DELETE_CHILD = 0x0040
FILE_READ_ATTRIBUTES = 0x0080
FILE_WRITE_ATTRIBUTES = 0x0100
DELETE = 0x00010000
READ_CONTROL = 0x00020000
WRITE_DAC = 0x00040000
WRITE_OWNER = 0x00080000
SYNCHRONIZE = 0x00100000
STANDARD_RIGHTS_ALL = 0x001F0000
filemask = ldm & STANDARD_RIGHTS_ALL
if (ldm & RIGHT_DS_READ_PROPERTY) and (ldm & RIGHT_DS_LIST_CONTENTS):
filemask = filemask | (SYNCHRONIZE | FILE_LIST_DIRECTORY |
FILE_READ_ATTRIBUTES | FILE_READ_EA |
FILE_READ_DATA | FILE_EXECUTE)
if ldm & RIGHT_DS_WRITE_PROPERTY:
filemask = filemask | (SYNCHRONIZE | FILE_WRITE_DATA |
FILE_APPEND_DATA | FILE_WRITE_EA |
FILE_WRITE_ATTRIBUTES | FILE_ADD_FILE |
FILE_ADD_SUBDIRECTORY)
if ldm & RIGHT_DS_CREATE_CHILD:
filemask = filemask | (FILE_ADD_SUBDIRECTORY | FILE_ADD_FILE)
if ldm & RIGHT_DS_DELETE_CHILD:
filemask = filemask | FILE_DELETE_CHILD
return filemask
def dsacl2fsacl(dssddl, sid, as_sddl=True):
"""
    This function takes the SDDL representation of a DS
    ACL and returns the SDDL representation of this ACL adapted
    for files. It's used for Policy object provisioning.
"""
ref = security.descriptor.from_sddl(dssddl, sid)
fdescr = security.descriptor()
fdescr.owner_sid = ref.owner_sid
fdescr.group_sid = ref.group_sid
fdescr.type = ref.type
fdescr.revision = ref.revision
aces = ref.dacl.aces
for i in range(0, len(aces)):
ace = aces[i]
if not ace.type & security.SEC_ACE_TYPE_ACCESS_ALLOWED_OBJECT and str(ace.trustee) != security.SID_BUILTIN_PREW2K:
# if fdescr.type & security.SEC_DESC_DACL_AUTO_INHERITED:
ace.flags = ace.flags | security.SEC_ACE_FLAG_OBJECT_INHERIT | security.SEC_ACE_FLAG_CONTAINER_INHERIT
if str(ace.trustee) == security.SID_CREATOR_OWNER:
# For Creator/Owner the IO flag is set as this ACE has only a sense for child objects
ace.flags = ace.flags | security.SEC_ACE_FLAG_INHERIT_ONLY
ace.access_mask = ldapmask2filemask(ace.access_mask)
fdescr.dacl_add(ace)
if not as_sddl:
return fdescr
return fdescr.as_sddl(sid)
```
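The two helpers above map directory-service access masks and ACLs onto their file-system equivalents. Below is a minimal, hypothetical sketch of how they might be exercised; the SDDL string and SID are illustrative placeholders, and it assumes the `samba.dcerpc.security` bindings are importable alongside the functions defined above.

```python
from samba.dcerpc import security

# RIGHT_DS_READ_PROPERTY (0x10) | RIGHT_DS_LIST_CONTENTS (0x04):
# expands to the read/list/execute file bits plus SYNCHRONIZE
print hex(ldapmask2filemask(0x00000014))

# Convert a DS ACL (SDDL) into its file-system form; values are examples only
domain_sid = security.dom_sid("S-1-5-21-1-2-3")
ds_sddl = "O:DAG:DAD:(A;;RPLCLORC;;;AU)"
print dsacl2fsacl(ds_sddl, domain_sid)
```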
#### File: samba/provision/sambadns.py
```python
import os
import uuid
import shutil
import time
import ldb
from base64 import b64encode
import samba
from samba.tdb_util import tdb_copy
from samba.ndr import ndr_pack, ndr_unpack
from samba import setup_file
from samba.dcerpc import dnsp, misc, security
from samba.dsdb import (
DS_DOMAIN_FUNCTION_2000,
DS_DOMAIN_FUNCTION_2003,
DS_DOMAIN_FUNCTION_2008_R2
)
from samba.descriptor import (
get_domain_descriptor,
get_domain_delete_protected1_descriptor,
get_domain_delete_protected2_descriptor,
get_dns_partition_descriptor,
get_dns_forest_microsoft_dns_descriptor,
get_dns_domain_microsoft_dns_descriptor
)
from samba.provision.common import (
setup_path,
setup_add_ldif,
setup_modify_ldif,
setup_ldb
)
def get_domainguid(samdb, domaindn):
res = samdb.search(base=domaindn, scope=ldb.SCOPE_BASE, attrs=["objectGUID"])
domainguid = str(ndr_unpack(misc.GUID, res[0]["objectGUID"][0]))
return domainguid
def get_dnsadmins_sid(samdb, domaindn):
res = samdb.search(base="CN=DnsAdmins,CN=Users,%s" % domaindn, scope=ldb.SCOPE_BASE,
attrs=["objectSid"])
dnsadmins_sid = ndr_unpack(security.dom_sid, res[0]["objectSid"][0])
return dnsadmins_sid
class ARecord(dnsp.DnssrvRpcRecord):
def __init__(self, ip_addr, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE):
super(ARecord, self).__init__()
self.wType = dnsp.DNS_TYPE_A
self.rank = rank
self.dwSerial = serial
self.dwTtlSeconds = ttl
self.data = ip_addr
class AAAARecord(dnsp.DnssrvRpcRecord):
def __init__(self, ip6_addr, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE):
super(AAAARecord, self).__init__()
self.wType = dnsp.DNS_TYPE_AAAA
self.rank = rank
self.dwSerial = serial
self.dwTtlSeconds = ttl
self.data = ip6_addr
class CNameRecord(dnsp.DnssrvRpcRecord):
def __init__(self, cname, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE):
super(CNameRecord, self).__init__()
self.wType = dnsp.DNS_TYPE_CNAME
self.rank = rank
self.dwSerial = serial
self.dwTtlSeconds = ttl
self.data = cname
class NSRecord(dnsp.DnssrvRpcRecord):
def __init__(self, dns_server, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE):
super(NSRecord, self).__init__()
self.wType = dnsp.DNS_TYPE_NS
self.rank = rank
self.dwSerial = serial
self.dwTtlSeconds = ttl
self.data = dns_server
class SOARecord(dnsp.DnssrvRpcRecord):
def __init__(self, mname, rname, serial=1, refresh=900, retry=600,
expire=86400, minimum=3600, ttl=3600, rank=dnsp.DNS_RANK_ZONE):
super(SOARecord, self).__init__()
self.wType = dnsp.DNS_TYPE_SOA
self.rank = rank
self.dwSerial = serial
self.dwTtlSeconds = ttl
soa = dnsp.soa()
soa.serial = serial
soa.refresh = refresh
soa.retry = retry
soa.expire = expire
soa.mname = mname
soa.rname = rname
self.data = soa
class SRVRecord(dnsp.DnssrvRpcRecord):
def __init__(self, target, port, priority=0, weight=100, serial=1, ttl=900,
rank=dnsp.DNS_RANK_ZONE):
super(SRVRecord, self).__init__()
self.wType = dnsp.DNS_TYPE_SRV
self.rank = rank
self.dwSerial = serial
self.dwTtlSeconds = ttl
srv = dnsp.srv()
srv.nameTarget = target
srv.wPort = port
srv.wPriority = priority
srv.wWeight = weight
self.data = srv
class TXTRecord(dnsp.DnssrvRpcRecord):
def __init__(self, slist, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE):
super(TXTRecord, self).__init__()
self.wType = dnsp.DNS_TYPE_TXT
self.rank = rank
self.dwSerial = serial
self.dwTtlSeconds = ttl
stringlist = dnsp.string_list()
stringlist.count = len(slist)
stringlist.str = slist
self.data = stringlist
class TypeProperty(dnsp.DnsProperty):
def __init__(self, zone_type=dnsp.DNS_ZONE_TYPE_PRIMARY):
super(TypeProperty, self).__init__()
self.wDataLength = 1
self.version = 1
self.id = dnsp.DSPROPERTY_ZONE_TYPE
self.data = zone_type
class AllowUpdateProperty(dnsp.DnsProperty):
def __init__(self, allow_update=dnsp.DNS_ZONE_UPDATE_SECURE):
super(AllowUpdateProperty, self).__init__()
self.wDataLength = 1
self.version = 1
self.id = dnsp.DSPROPERTY_ZONE_ALLOW_UPDATE
self.data = allow_update
class SecureTimeProperty(dnsp.DnsProperty):
def __init__(self, secure_time=0):
super(SecureTimeProperty, self).__init__()
self.wDataLength = 1
self.version = 1
self.id = dnsp.DSPROPERTY_ZONE_SECURE_TIME
self.data = secure_time
class NorefreshIntervalProperty(dnsp.DnsProperty):
def __init__(self, norefresh_interval=0):
super(NorefreshIntervalProperty, self).__init__()
self.wDataLength = 1
self.version = 1
self.id = dnsp.DSPROPERTY_ZONE_NOREFRESH_INTERVAL
self.data = norefresh_interval
class RefreshIntervalProperty(dnsp.DnsProperty):
def __init__(self, refresh_interval=0):
super(RefreshIntervalProperty, self).__init__()
self.wDataLength = 1
self.version = 1
self.id = dnsp.DSPROPERTY_ZONE_REFRESH_INTERVAL
self.data = refresh_interval
class AgingStateProperty(dnsp.DnsProperty):
def __init__(self, aging_enabled=0):
super(AgingStateProperty, self).__init__()
self.wDataLength = 1
self.version = 1
self.id = dnsp.DSPROPERTY_ZONE_AGING_STATE
self.data = aging_enabled
class AgingEnabledTimeProperty(dnsp.DnsProperty):
def __init__(self, next_cycle_hours=0):
super(AgingEnabledTimeProperty, self).__init__()
self.wDataLength = 1
        self.version = 1
self.id = dnsp.DSPROPERTY_ZONE_AGING_ENABLED_TIME
self.data = next_cycle_hours
def setup_dns_partitions(samdb, domainsid, domaindn, forestdn, configdn,
serverdn):
domainzone_dn = "DC=DomainDnsZones,%s" % domaindn
forestzone_dn = "DC=ForestDnsZones,%s" % forestdn
descriptor = get_dns_partition_descriptor(domainsid)
setup_add_ldif(samdb, setup_path("provision_dnszones_partitions.ldif"), {
"DOMAINZONE_DN": domainzone_dn,
"FORESTZONE_DN": forestzone_dn,
"SECDESC" : b64encode(descriptor)
})
    # The zone GUIDs are freshly generated for the new naming contexts
    domainzone_guid = str(uuid.uuid4())
    forestzone_guid = str(uuid.uuid4())
domainzone_dns = ldb.Dn(samdb, domainzone_dn).canonical_ex_str().strip()
forestzone_dns = ldb.Dn(samdb, forestzone_dn).canonical_ex_str().strip()
protected1_desc = get_domain_delete_protected1_descriptor(domainsid)
protected2_desc = get_domain_delete_protected2_descriptor(domainsid)
setup_add_ldif(samdb, setup_path("provision_dnszones_add.ldif"), {
"DOMAINZONE_DN": domainzone_dn,
"FORESTZONE_DN": forestzone_dn,
"DOMAINZONE_GUID": domainzone_guid,
"FORESTZONE_GUID": forestzone_guid,
"DOMAINZONE_DNS": domainzone_dns,
"FORESTZONE_DNS": forestzone_dns,
"CONFIGDN": configdn,
"SERVERDN": serverdn,
"LOSTANDFOUND_DESCRIPTOR": b64encode(protected2_desc),
"INFRASTRUCTURE_DESCRIPTOR": b64encode(protected1_desc),
})
setup_modify_ldif(samdb, setup_path("provision_dnszones_modify.ldif"), {
"CONFIGDN": configdn,
"SERVERDN": serverdn,
"DOMAINZONE_DN": domainzone_dn,
"FORESTZONE_DN": forestzone_dn,
})
def add_dns_accounts(samdb, domaindn):
setup_add_ldif(samdb, setup_path("provision_dns_accounts_add.ldif"), {
"DOMAINDN": domaindn,
})
def add_dns_container(samdb, domaindn, prefix, domain_sid, dnsadmins_sid, forest=False):
name_map = {'DnsAdmins': str(dnsadmins_sid)}
if forest is True:
sd_val = get_dns_forest_microsoft_dns_descriptor(domain_sid,
name_map=name_map)
else:
sd_val = get_dns_domain_microsoft_dns_descriptor(domain_sid,
name_map=name_map)
# CN=MicrosoftDNS,<PREFIX>,<DOMAINDN>
msg = ldb.Message(ldb.Dn(samdb, "CN=MicrosoftDNS,%s,%s" % (prefix, domaindn)))
msg["objectClass"] = ["top", "container"]
msg["nTSecurityDescriptor"] = ldb.MessageElement(sd_val, ldb.FLAG_MOD_ADD,
"nTSecurityDescriptor")
samdb.add(msg)
def add_rootservers(samdb, domaindn, prefix):
rootservers = {}
rootservers["a.root-servers.net"] = "172.16.58.3"
rootservers["b.root-servers.net"] = "192.168.3.11"
rootservers["c.root-servers.net"] = "192.168.127.12"
rootservers["d.root-servers.net"] = "172.16.17.32"
rootservers["e.root-servers.net"] = "172.16.31.10"
rootservers["f.root-servers.net"] = "172.16.31.10"
rootservers["g.root-servers.net"] = "192.168.3.11"
rootservers["h.root-servers.net"] = "172.16.58.3"
rootservers["i.root-servers.net"] = "172.16.17.32"
rootservers["j.root-servers.net"] = "172.16.31.10"
rootservers["k.root-servers.net"] = "172.16.58.3"
rootservers["l.root-servers.net"] = "192.168.3.11"
rootservers["m.root-servers.net"] = "192.168.3.11"
rootservers_v6 = {}
rootservers_v6["a.root-servers.net"] = "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:30"
rootservers_v6["f.root-servers.net"] = "fc00:db20:35b:7399::5"
rootservers_v6["h.root-servers.net"] = "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:235"
rootservers_v6["j.root-servers.net"] = "fc00:e968:6179::de52:7100:30"
rootservers_v6["k.root-servers.net"] = "fdf8:f53e:61e4::18"
rootservers_v6["m.root-servers.net"] = "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b"
container_dn = "DC=RootDNSServers,CN=MicrosoftDNS,%s,%s" % (prefix, domaindn)
# Add DC=RootDNSServers,CN=MicrosoftDNS,<PREFIX>,<DOMAINDN>
msg = ldb.Message(ldb.Dn(samdb, container_dn))
props = []
props.append(ndr_pack(TypeProperty(zone_type=dnsp.DNS_ZONE_TYPE_CACHE)))
props.append(ndr_pack(AllowUpdateProperty(allow_update=dnsp.DNS_ZONE_UPDATE_OFF)))
props.append(ndr_pack(SecureTimeProperty()))
props.append(ndr_pack(NorefreshIntervalProperty()))
props.append(ndr_pack(RefreshIntervalProperty()))
props.append(ndr_pack(AgingStateProperty()))
props.append(ndr_pack(AgingEnabledTimeProperty()))
msg["objectClass"] = ["top", "dnsZone"]
msg["cn"] = ldb.MessageElement("Zone", ldb.FLAG_MOD_ADD, "cn")
msg["dNSProperty"] = ldb.MessageElement(props, ldb.FLAG_MOD_ADD, "dNSProperty")
samdb.add(msg)
# Add DC=@,DC=RootDNSServers,CN=MicrosoftDNS,<PREFIX>,<DOMAINDN>
record = []
for rserver in rootservers:
record.append(ndr_pack(NSRecord(rserver, serial=0, ttl=0, rank=dnsp.DNS_RANK_ROOT_HINT)))
msg = ldb.Message(ldb.Dn(samdb, "DC=@,%s" % container_dn))
msg["objectClass"] = ["top", "dnsNode"]
msg["dnsRecord"] = ldb.MessageElement(record, ldb.FLAG_MOD_ADD, "dnsRecord")
samdb.add(msg)
# Add DC=<rootserver>,DC=RootDNSServers,CN=MicrosoftDNS,<PREFIX>,<DOMAINDN>
for rserver in rootservers:
record = [ndr_pack(ARecord(rootservers[rserver], serial=0, ttl=0, rank=dnsp.DNS_RANK_ROOT_HINT))]
# Add AAAA record as well (How does W2K* add IPv6 records?)
#if rserver in rootservers_v6:
# record.append(ndr_pack(AAAARecord(rootservers_v6[rserver], serial=0, ttl=0)))
msg = ldb.Message(ldb.Dn(samdb, "DC=%s,%s" % (rserver, container_dn)))
msg["objectClass"] = ["top", "dnsNode"]
msg["dnsRecord"] = ldb.MessageElement(record, ldb.FLAG_MOD_ADD, "dnsRecord")
samdb.add(msg)
def add_at_record(samdb, container_dn, prefix, hostname, dnsdomain, hostip, hostip6):
fqdn_hostname = "%s.%s" % (hostname, dnsdomain)
at_records = []
# SOA record
at_soa_record = SOARecord(fqdn_hostname, "hostmaster.%s" % dnsdomain)
at_records.append(ndr_pack(at_soa_record))
# NS record
at_ns_record = NSRecord(fqdn_hostname)
at_records.append(ndr_pack(at_ns_record))
if hostip is not None:
# A record
at_a_record = ARecord(hostip)
at_records.append(ndr_pack(at_a_record))
if hostip6 is not None:
# AAAA record
at_aaaa_record = AAAARecord(hostip6)
at_records.append(ndr_pack(at_aaaa_record))
msg = ldb.Message(ldb.Dn(samdb, "DC=@,%s" % container_dn))
msg["objectClass"] = ["top", "dnsNode"]
msg["dnsRecord"] = ldb.MessageElement(at_records, ldb.FLAG_MOD_ADD, "dnsRecord")
samdb.add(msg)
def add_srv_record(samdb, container_dn, prefix, host, port):
srv_record = SRVRecord(host, port)
msg = ldb.Message(ldb.Dn(samdb, "%s,%s" % (prefix, container_dn)))
msg["objectClass"] = ["top", "dnsNode"]
msg["dnsRecord"] = ldb.MessageElement(ndr_pack(srv_record), ldb.FLAG_MOD_ADD, "dnsRecord")
samdb.add(msg)
def add_ns_record(samdb, container_dn, prefix, host):
ns_record = NSRecord(host)
msg = ldb.Message(ldb.Dn(samdb, "%s,%s" % (prefix, container_dn)))
msg["objectClass"] = ["top", "dnsNode"]
msg["dnsRecord"] = ldb.MessageElement(ndr_pack(ns_record), ldb.FLAG_MOD_ADD, "dnsRecord")
samdb.add(msg)
def add_ns_glue_record(samdb, container_dn, prefix, host):
ns_record = NSRecord(host, rank=dnsp.DNS_RANK_NS_GLUE)
msg = ldb.Message(ldb.Dn(samdb, "%s,%s" % (prefix, container_dn)))
msg["objectClass"] = ["top", "dnsNode"]
msg["dnsRecord"] = ldb.MessageElement(ndr_pack(ns_record), ldb.FLAG_MOD_ADD, "dnsRecord")
samdb.add(msg)
def add_cname_record(samdb, container_dn, prefix, host):
cname_record = CNameRecord(host)
msg = ldb.Message(ldb.Dn(samdb, "%s,%s" % (prefix, container_dn)))
msg["objectClass"] = ["top", "dnsNode"]
msg["dnsRecord"] = ldb.MessageElement(ndr_pack(cname_record), ldb.FLAG_MOD_ADD, "dnsRecord")
samdb.add(msg)
def add_host_record(samdb, container_dn, prefix, hostip, hostip6):
host_records = []
if hostip:
a_record = ARecord(hostip)
host_records.append(ndr_pack(a_record))
if hostip6:
aaaa_record = AAAARecord(hostip6)
host_records.append(ndr_pack(aaaa_record))
if host_records:
msg = ldb.Message(ldb.Dn(samdb, "%s,%s" % (prefix, container_dn)))
msg["objectClass"] = ["top", "dnsNode"]
msg["dnsRecord"] = ldb.MessageElement(host_records, ldb.FLAG_MOD_ADD, "dnsRecord")
samdb.add(msg)
def add_domain_record(samdb, domaindn, prefix, dnsdomain, domainsid, dnsadmins_sid):
# DC=<DNSDOMAIN>,CN=MicrosoftDNS,<PREFIX>,<DOMAINDN>
sddl = "O:SYG:BAD:AI" \
"(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;DA)" \
"(A;;CC;;;AU)" \
"(A;;RPLCLORC;;;WD)" \
"(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)" \
"(A;CI;RPWPCRCCDCLCRCWOWDSDDTSW;;;ED)" \
"(A;CIID;RPWPCRCCDCLCRCWOWDSDDTSW;;;%s)" \
"(A;CIID;RPWPCRCCDCLCRCWOWDSDDTSW;;;ED)" \
"(OA;CIID;RPWPCR;91e647de-d96f-4b70-9557-d63ff4f3ccd8;;PS)" \
"(A;CIID;RPWPCRCCDCLCLORCWOWDSDDTSW;;;EA)" \
"(A;CIID;LC;;;RU)" \
"(A;CIID;RPWPCRCCLCLORCWOWDSDSW;;;BA)" \
"S:AI" % dnsadmins_sid
sec = security.descriptor.from_sddl(sddl, domainsid)
props = []
props.append(ndr_pack(TypeProperty()))
props.append(ndr_pack(AllowUpdateProperty()))
props.append(ndr_pack(SecureTimeProperty()))
props.append(ndr_pack(NorefreshIntervalProperty(norefresh_interval=168)))
props.append(ndr_pack(RefreshIntervalProperty(refresh_interval=168)))
props.append(ndr_pack(AgingStateProperty()))
props.append(ndr_pack(AgingEnabledTimeProperty()))
msg = ldb.Message(ldb.Dn(samdb, "DC=%s,CN=MicrosoftDNS,%s,%s" % (dnsdomain, prefix, domaindn)))
msg["objectClass"] = ["top", "dnsZone"]
msg["ntSecurityDescriptor"] = ldb.MessageElement(ndr_pack(sec), ldb.FLAG_MOD_ADD,
"nTSecurityDescriptor")
msg["dNSProperty"] = ldb.MessageElement(props, ldb.FLAG_MOD_ADD, "dNSProperty")
samdb.add(msg)
def add_msdcs_record(samdb, forestdn, prefix, dnsforest):
# DC=_msdcs.<DNSFOREST>,CN=MicrosoftDNS,<PREFIX>,<FORESTDN>
msg = ldb.Message(ldb.Dn(samdb, "DC=_msdcs.%s,CN=MicrosoftDNS,%s,%s" %
(dnsforest, prefix, forestdn)))
msg["objectClass"] = ["top", "dnsZone"]
samdb.add(msg)
def add_dc_domain_records(samdb, domaindn, prefix, site, dnsdomain, hostname,
hostip, hostip6):
fqdn_hostname = "%s.%s" % (hostname, dnsdomain)
# Set up domain container - DC=<DNSDOMAIN>,CN=MicrosoftDNS,<PREFIX>,<DOMAINDN>
domain_container_dn = ldb.Dn(samdb, "DC=%s,CN=MicrosoftDNS,%s,%s" %
(dnsdomain, prefix, domaindn))
# DC=@ record
add_at_record(samdb, domain_container_dn, "DC=@", hostname, dnsdomain,
hostip, hostip6)
# DC=<HOSTNAME> record
add_host_record(samdb, domain_container_dn, "DC=%s" % hostname, hostip,
hostip6)
# DC=_kerberos._tcp record
add_srv_record(samdb, domain_container_dn, "DC=_kerberos._tcp",
fqdn_hostname, 88)
# DC=_kerberos._tcp.<SITENAME>._sites record
add_srv_record(samdb, domain_container_dn, "DC=_kerberos._tcp.%s._sites" %
site, fqdn_hostname, 88)
# DC=_kerberos._udp record
add_srv_record(samdb, domain_container_dn, "DC=_kerberos._udp",
fqdn_hostname, 88)
# DC=_kpasswd._tcp record
add_srv_record(samdb, domain_container_dn, "DC=_kpasswd._tcp",
fqdn_hostname, 464)
# DC=_kpasswd._udp record
add_srv_record(samdb, domain_container_dn, "DC=_kpasswd._udp",
fqdn_hostname, 464)
# DC=_ldap._tcp record
add_srv_record(samdb, domain_container_dn, "DC=_ldap._tcp", fqdn_hostname,
389)
# DC=_ldap._tcp.<SITENAME>._sites record
add_srv_record(samdb, domain_container_dn, "DC=_ldap._tcp.%s._sites" %
site, fqdn_hostname, 389)
    # FIXME: The number of SRV records depends on the various roles this DC has.
    #        _gc and _msdcs records are added only if we are the forest DC, not a subdomain DC
#
# Assumption: current DC is GC and add all the entries
# DC=_gc._tcp record
add_srv_record(samdb, domain_container_dn, "DC=_gc._tcp", fqdn_hostname,
3268)
    # DC=_gc._tcp.<SITENAME>._sites record
add_srv_record(samdb, domain_container_dn, "DC=_gc._tcp.%s._sites" % site,
fqdn_hostname, 3268)
# DC=_msdcs record
add_ns_glue_record(samdb, domain_container_dn, "DC=_msdcs", fqdn_hostname)
# FIXME: Following entries are added only if DomainDnsZones and ForestDnsZones partitions
# are created
#
# Assumption: Additional entries won't hurt on os_level = 2000
# DC=_ldap._tcp.<SITENAME>._sites.DomainDnsZones
add_srv_record(samdb, domain_container_dn,
"DC=_ldap._tcp.%s._sites.DomainDnsZones" % site, fqdn_hostname,
389)
# DC=_ldap._tcp.<SITENAME>._sites.ForestDnsZones
add_srv_record(samdb, domain_container_dn,
"DC=_ldap._tcp.%s._sites.ForestDnsZones" % site, fqdn_hostname,
389)
# DC=_ldap._tcp.DomainDnsZones
add_srv_record(samdb, domain_container_dn, "DC=_ldap._tcp.DomainDnsZones",
fqdn_hostname, 389)
# DC=_ldap._tcp.ForestDnsZones
add_srv_record(samdb, domain_container_dn, "DC=_ldap._tcp.ForestDnsZones",
fqdn_hostname, 389)
# DC=DomainDnsZones
add_host_record(samdb, domain_container_dn, "DC=DomainDnsZones", hostip,
hostip6)
# DC=ForestDnsZones
add_host_record(samdb, domain_container_dn, "DC=ForestDnsZones", hostip,
hostip6)
def add_dc_msdcs_records(samdb, forestdn, prefix, site, dnsforest, hostname,
hostip, hostip6, domainguid, ntdsguid):
fqdn_hostname = "%s.%s" % (hostname, dnsforest)
    # Set up forest container - DC=_msdcs.<DNSFOREST>,CN=MicrosoftDNS,<PREFIX>,<FORESTDN>
forest_container_dn = ldb.Dn(samdb, "DC=_msdcs.%s,CN=MicrosoftDNS,%s,%s" %
(dnsforest, prefix, forestdn))
# DC=@ record
add_at_record(samdb, forest_container_dn, "DC=@", hostname, dnsforest,
None, None)
# DC=_kerberos._tcp.dc record
add_srv_record(samdb, forest_container_dn, "DC=_kerberos._tcp.dc",
fqdn_hostname, 88)
# DC=_kerberos._tcp.<SITENAME>._sites.dc record
add_srv_record(samdb, forest_container_dn,
"DC=_kerberos._tcp.%s._sites.dc" % site, fqdn_hostname, 88)
# DC=_ldap._tcp.dc record
add_srv_record(samdb, forest_container_dn, "DC=_ldap._tcp.dc",
fqdn_hostname, 389)
# DC=_ldap._tcp.<SITENAME>._sites.dc record
add_srv_record(samdb, forest_container_dn, "DC=_ldap._tcp.%s._sites.dc" %
site, fqdn_hostname, 389)
# DC=_ldap._tcp.<SITENAME>._sites.gc record
add_srv_record(samdb, forest_container_dn, "DC=_ldap._tcp.%s._sites.gc" %
site, fqdn_hostname, 3268)
# DC=_ldap._tcp.gc record
add_srv_record(samdb, forest_container_dn, "DC=_ldap._tcp.gc",
fqdn_hostname, 3268)
# DC=_ldap._tcp.pdc record
add_srv_record(samdb, forest_container_dn, "DC=_ldap._tcp.pdc",
fqdn_hostname, 389)
# DC=gc record
add_host_record(samdb, forest_container_dn, "DC=gc", hostip, hostip6)
# DC=_ldap._tcp.<DOMAINGUID>.domains record
add_srv_record(samdb, forest_container_dn,
"DC=_ldap._tcp.%s.domains" % domainguid, fqdn_hostname, 389)
# DC=<NTDSGUID>
add_cname_record(samdb, forest_container_dn, "DC=%s" % ntdsguid,
fqdn_hostname)
def secretsdb_setup_dns(secretsdb, names, private_dir, realm,
dnsdomain, dns_keytab_path, dnspass, key_version_number):
"""Add DNS specific bits to a secrets database.
:param secretsdb: Ldb Handle to the secrets database
:param names: Names shortcut
    :param dnspass: Password for the BIND DNS account
"""
try:
os.unlink(os.path.join(private_dir, dns_keytab_path))
except OSError:
pass
if key_version_number is None:
key_version_number = 1
setup_ldb(secretsdb, setup_path("secrets_dns.ldif"), {
"REALM": realm,
"DNSDOMAIN": dnsdomain,
"DNS_KEYTAB": dns_keytab_path,
"DNSPASS_B64": b64encode(dnspass),
"KEY_VERSION_NUMBER": str(key_version_number),
"HOSTNAME": names.hostname,
"DNSNAME" : '%s.%s' % (
names.netbiosname.lower(), names.dnsdomain.lower())
})
def create_dns_dir(logger, paths):
"""Write out a DNS zone file, from the info in the current database.
:param logger: Logger object
:param paths: paths object
"""
dns_dir = os.path.dirname(paths.dns)
try:
shutil.rmtree(dns_dir, True)
except OSError:
pass
os.mkdir(dns_dir, 0770)
if paths.bind_gid is not None:
try:
os.chown(dns_dir, -1, paths.bind_gid)
# chmod needed to cope with umask
os.chmod(dns_dir, 0770)
except OSError:
if not os.environ.has_key('SAMBA_SELFTEST'):
logger.error("Failed to chown %s to bind gid %u" % (
dns_dir, paths.bind_gid))
def create_zone_file(lp, logger, paths, targetdir, dnsdomain,
hostip, hostip6, hostname, realm, domainguid,
ntdsguid, site):
"""Write out a DNS zone file, from the info in the current database.
:param paths: paths object
:param dnsdomain: DNS Domain name
    :param site: Default site name
:param hostip: Local IPv4 IP
:param hostip6: Local IPv6 IP
:param hostname: Local hostname
:param realm: Realm name
:param domainguid: GUID of the domain.
:param ntdsguid: GUID of the hosts nTDSDSA record.
"""
assert isinstance(domainguid, str)
if hostip6 is not None:
hostip6_base_line = " IN AAAA " + hostip6
hostip6_host_line = hostname + " IN AAAA " + hostip6
gc_msdcs_ip6_line = "gc._msdcs IN AAAA " + hostip6
else:
hostip6_base_line = ""
hostip6_host_line = ""
gc_msdcs_ip6_line = ""
if hostip is not None:
hostip_base_line = " IN A " + hostip
hostip_host_line = hostname + " IN A " + hostip
gc_msdcs_ip_line = "gc._msdcs IN A " + hostip
else:
hostip_base_line = ""
hostip_host_line = ""
gc_msdcs_ip_line = ""
# we need to freeze the zone while we update the contents
if targetdir is None:
rndc = ' '.join(lp.get("rndc command"))
os.system(rndc + " freeze " + lp.get("realm"))
setup_file(setup_path("provision.zone"), paths.dns, {
"HOSTNAME": hostname,
"DNSDOMAIN": dnsdomain,
"REALM": realm,
"HOSTIP_BASE_LINE": hostip_base_line,
"HOSTIP_HOST_LINE": hostip_host_line,
"DOMAINGUID": domainguid,
"DATESTRING": time.strftime("%Y%m%d%H"),
"DEFAULTSITE": site,
"NTDSGUID": ntdsguid,
"HOSTIP6_BASE_LINE": hostip6_base_line,
"HOSTIP6_HOST_LINE": hostip6_host_line,
"GC_MSDCS_IP_LINE": gc_msdcs_ip_line,
"GC_MSDCS_IP6_LINE": gc_msdcs_ip6_line,
})
if paths.bind_gid is not None:
try:
os.chown(paths.dns, -1, paths.bind_gid)
# chmod needed to cope with umask
os.chmod(paths.dns, 0664)
except OSError:
if not os.environ.has_key('SAMBA_SELFTEST'):
logger.error("Failed to chown %s to bind gid %u" % (
paths.dns, paths.bind_gid))
if targetdir is None:
os.system(rndc + " unfreeze " + lp.get("realm"))
def create_samdb_copy(samdb, logger, paths, names, domainsid, domainguid):
"""Create a copy of samdb and give write permissions to named for dns partitions
"""
private_dir = paths.private_dir
samldb_dir = os.path.join(private_dir, "sam.ldb.d")
dns_dir = os.path.dirname(paths.dns)
dns_samldb_dir = os.path.join(dns_dir, "sam.ldb.d")
# Find the partitions and corresponding filenames
partfile = {}
res = samdb.search(base="@PARTITION", scope=ldb.SCOPE_BASE, attrs=["partition"])
for tmp in res[0]["partition"]:
(nc, fname) = tmp.split(':')
partfile[nc.upper()] = fname
# Create empty domain partition
domaindn = names.domaindn.upper()
domainpart_file = os.path.join(dns_dir, partfile[domaindn])
try:
os.mkdir(dns_samldb_dir)
file(domainpart_file, 'w').close()
# Fill the basedn and @OPTION records in domain partition
dom_ldb = samba.Ldb(domainpart_file)
domainguid_line = "objectGUID: %s\n-" % domainguid
descr = b64encode(get_domain_descriptor(domainsid))
setup_add_ldif(dom_ldb, setup_path("provision_basedn.ldif"), {
"DOMAINDN" : names.domaindn,
"DOMAINGUID" : domainguid_line,
"DOMAINSID" : str(domainsid),
"DESCRIPTOR" : descr})
setup_add_ldif(dom_ldb,
setup_path("provision_basedn_options.ldif"), None)
except:
logger.error(
"Failed to setup database for BIND, AD based DNS cannot be used")
raise
del partfile[domaindn]
# Link dns partitions and metadata
domainzonedn = "DC=DOMAINDNSZONES,%s" % names.domaindn.upper()
forestzonedn = "DC=FORESTDNSZONES,%s" % names.rootdn.upper()
domainzone_file = partfile[domainzonedn]
forestzone_file = partfile[forestzonedn]
metadata_file = "metadata.tdb"
try:
os.link(os.path.join(samldb_dir, metadata_file),
os.path.join(dns_samldb_dir, metadata_file))
os.link(os.path.join(private_dir, domainzone_file),
os.path.join(dns_dir, domainzone_file))
os.link(os.path.join(private_dir, forestzone_file),
os.path.join(dns_dir, forestzone_file))
except OSError:
logger.error(
"Failed to setup database for BIND, AD based DNS cannot be used")
raise
del partfile[domainzonedn]
del partfile[forestzonedn]
# Copy root, config, schema partitions (and any other if any)
# Since samdb is open in the current process, copy them in a child process
try:
tdb_copy(os.path.join(private_dir, "sam.ldb"),
os.path.join(dns_dir, "sam.ldb"))
for nc in partfile:
pfile = partfile[nc]
tdb_copy(os.path.join(private_dir, pfile),
os.path.join(dns_dir, pfile))
except:
logger.error(
"Failed to setup database for BIND, AD based DNS cannot be used")
raise
    # Give bind read/write permissions to the dns partitions
if paths.bind_gid is not None:
try:
os.chown(samldb_dir, -1, paths.bind_gid)
os.chmod(samldb_dir, 0750)
for dirname, dirs, files in os.walk(dns_dir):
for d in dirs:
dpath = os.path.join(dirname, d)
os.chown(dpath, -1, paths.bind_gid)
os.chmod(dpath, 0770)
for f in files:
if f.endswith('.ldb') or f.endswith('.tdb'):
fpath = os.path.join(dirname, f)
os.chown(fpath, -1, paths.bind_gid)
os.chmod(fpath, 0660)
except OSError:
if not os.environ.has_key('SAMBA_SELFTEST'):
logger.error(
"Failed to set permissions to sam.ldb* files, fix manually")
else:
if not os.environ.has_key('SAMBA_SELFTEST'):
logger.warning("""Unable to find group id for BIND,
set permissions to sam.ldb* files manually""")
def create_dns_update_list(lp, logger, paths):
"""Write out a dns_update_list file"""
# note that we use no variable substitution on this file
# the substitution is done at runtime by samba_dnsupdate, samba_spnupdate
setup_file(setup_path("dns_update_list"), paths.dns_update_list, None)
setup_file(setup_path("spn_update_list"), paths.spn_update_list, None)
def create_named_conf(paths, realm, dnsdomain, dns_backend):
"""Write out a file containing zone statements suitable for inclusion in a
named.conf file (including GSS-TSIG configuration).
:param paths: all paths
:param realm: Realm name
:param dnsdomain: DNS Domain name
:param dns_backend: DNS backend type
"""
if dns_backend == "BIND9_FLATFILE":
setup_file(setup_path("named.conf"), paths.namedconf, {
"DNSDOMAIN": dnsdomain,
"REALM": realm,
"ZONE_FILE": paths.dns,
"REALM_WC": "*." + ".".join(realm.split(".")[1:]),
"NAMED_CONF": paths.namedconf,
"NAMED_CONF_UPDATE": paths.namedconf_update
})
setup_file(setup_path("named.conf.update"), paths.namedconf_update)
elif dns_backend == "BIND9_DLZ":
setup_file(setup_path("named.conf.dlz"), paths.namedconf, {
"NAMED_CONF": paths.namedconf,
"MODULESDIR" : samba.param.modules_dir(),
})
def create_named_txt(path, realm, dnsdomain, dnsname, private_dir,
keytab_name):
"""Write out a file containing zone statements suitable for inclusion in a
named.conf file (including GSS-TSIG configuration).
:param path: Path of the new named.conf file.
:param realm: Realm name
    :param dnsdomain: DNS Domain name
    :param dnsname: DNS FQDN of this host
    :param private_dir: Path to private directory
:param keytab_name: File name of DNS keytab file
"""
setup_file(setup_path("named.txt"), path, {
"DNSDOMAIN": dnsdomain,
"DNSNAME" : dnsname,
"REALM": realm,
"DNS_KEYTAB": keytab_name,
"DNS_KEYTAB_ABS": os.path.join(private_dir, keytab_name),
"PRIVATE_DIR": private_dir
})
def is_valid_dns_backend(dns_backend):
return dns_backend in ("BIND9_FLATFILE", "BIND9_DLZ", "SAMBA_INTERNAL", "NONE")
def is_valid_os_level(os_level):
return DS_DOMAIN_FUNCTION_2000 <= os_level <= DS_DOMAIN_FUNCTION_2008_R2
def create_dns_legacy(samdb, domainsid, forestdn, dnsadmins_sid):
# Set up MicrosoftDNS container
add_dns_container(samdb, forestdn, "CN=System", domainsid, dnsadmins_sid)
# Add root servers
add_rootservers(samdb, forestdn, "CN=System")
def fill_dns_data_legacy(samdb, domainsid, forestdn, dnsdomain, site, hostname,
hostip, hostip6, dnsadmins_sid):
# Add domain record
add_domain_record(samdb, forestdn, "CN=System", dnsdomain, domainsid,
dnsadmins_sid)
# Add DNS records for a DC in domain
add_dc_domain_records(samdb, forestdn, "CN=System", site, dnsdomain,
hostname, hostip, hostip6)
def create_dns_partitions(samdb, domainsid, names, domaindn, forestdn,
dnsadmins_sid):
    # Set up additional partitions (DomainDnsZones, ForestDnsZones)
setup_dns_partitions(samdb, domainsid, domaindn, forestdn,
names.configdn, names.serverdn)
# Set up MicrosoftDNS containers
add_dns_container(samdb, domaindn, "DC=DomainDnsZones", domainsid,
dnsadmins_sid)
add_dns_container(samdb, forestdn, "DC=ForestDnsZones", domainsid,
dnsadmins_sid, forest=True)
def fill_dns_data_partitions(samdb, domainsid, site, domaindn, forestdn,
dnsdomain, dnsforest, hostname, hostip, hostip6,
domainguid, ntdsguid, dnsadmins_sid, autofill=True):
"""Fill data in various AD partitions
:param samdb: LDB object connected to sam.ldb file
:param domainsid: Domain SID (as dom_sid object)
:param site: Site name to create hostnames in
:param domaindn: DN of the domain
:param forestdn: DN of the forest
:param dnsdomain: DNS name of the domain
:param dnsforest: DNS name of the forest
:param hostname: Host name of this DC
:param hostip: IPv4 addresses
:param hostip6: IPv6 addresses
:param domainguid: Domain GUID
:param ntdsguid: NTDS GUID
:param dnsadmins_sid: SID for DnsAdmins group
:param autofill: Create DNS records (using fixed template)
"""
##### Set up DC=DomainDnsZones,<DOMAINDN>
# Add rootserver records
add_rootservers(samdb, domaindn, "DC=DomainDnsZones")
# Add domain record
add_domain_record(samdb, domaindn, "DC=DomainDnsZones", dnsdomain,
domainsid, dnsadmins_sid)
# Add DNS records for a DC in domain
if autofill:
add_dc_domain_records(samdb, domaindn, "DC=DomainDnsZones", site,
dnsdomain, hostname, hostip, hostip6)
##### Set up DC=ForestDnsZones,<DOMAINDN>
# Add _msdcs record
add_msdcs_record(samdb, forestdn, "DC=ForestDnsZones", dnsforest)
# Add DNS records for a DC in forest
if autofill:
add_dc_msdcs_records(samdb, forestdn, "DC=ForestDnsZones", site,
dnsforest, hostname, hostip, hostip6,
domainguid, ntdsguid)
def setup_ad_dns(samdb, secretsdb, domainsid, names, paths, lp, logger,
dns_backend, os_level, site, dnspass=None, hostip=None, hostip6=None,
targetdir=None):
"""Provision DNS information (assuming GC role)
:param samdb: LDB object connected to sam.ldb file
:param secretsdb: LDB object connected to secrets.ldb file
:param domainsid: Domain SID (as dom_sid object)
:param names: Names shortcut
:param paths: Paths shortcut
:param lp: Loadparm object
:param logger: Logger object
:param dns_backend: Type of DNS backend
:param os_level: Functional level (treated as os level)
:param site: Site to create hostnames in
:param dnspass: Password for bind's DNS account
:param hostip: IPv4 address
:param hostip6: IPv6 address
:param targetdir: Target directory for creating DNS-related files for BIND9
"""
if not is_valid_dns_backend(dns_backend):
raise Exception("Invalid dns backend: %r" % dns_backend)
if not is_valid_os_level(os_level):
raise Exception("Invalid os level: %r" % os_level)
if dns_backend == "NONE":
logger.info("No DNS backend set, not configuring DNS")
return
# Add dns accounts (DnsAdmins, DnsUpdateProxy) in domain
logger.info("Adding DNS accounts")
add_dns_accounts(samdb, names.domaindn)
# If dns_backend is BIND9_FLATFILE
# Populate only CN=MicrosoftDNS,CN=System,<FORESTDN>
#
# If dns_backend is SAMBA_INTERNAL or BIND9_DLZ
# Populate DNS partitions
# If os_level < 2003 (DS_DOMAIN_FUNCTION_2000)
# All dns records are in CN=MicrosoftDNS,CN=System,<FORESTDN>
#
# If os_level >= 2003 (DS_DOMAIN_FUNCTION_2003, DS_DOMAIN_FUNCTION_2008,
# DS_DOMAIN_FUNCTION_2008_R2)
# Root server records are in CN=MicrosoftDNS,CN=System,<FORESTDN>
# Domain records are in CN=MicrosoftDNS,CN=System,<FORESTDN>
# Domain records are in CN=MicrosoftDNS,DC=DomainDnsZones,<DOMAINDN>
# Forest records are in CN=MicrosoftDNS,DC=ForestDnsZones,<FORESTDN>
domaindn = names.domaindn
forestdn = samdb.get_root_basedn().get_linearized()
dnsdomain = names.dnsdomain.lower()
dnsforest = dnsdomain
hostname = names.netbiosname.lower()
dnsadmins_sid = get_dnsadmins_sid(samdb, domaindn)
domainguid = get_domainguid(samdb, domaindn)
# Create CN=System
logger.info("Creating CN=MicrosoftDNS,CN=System,%s" % forestdn)
create_dns_legacy(samdb, domainsid, forestdn, dnsadmins_sid)
if os_level == DS_DOMAIN_FUNCTION_2000:
# Populating legacy dns
logger.info("Populating CN=MicrosoftDNS,CN=System,%s" % forestdn)
fill_dns_data_legacy(samdb, domainsid, forestdn, dnsdomain, site,
hostname, hostip, hostip6, dnsadmins_sid)
elif dns_backend in ("SAMBA_INTERNAL", "BIND9_DLZ") and \
os_level >= DS_DOMAIN_FUNCTION_2003:
# Create DNS partitions
logger.info("Creating DomainDnsZones and ForestDnsZones partitions")
create_dns_partitions(samdb, domainsid, names, domaindn, forestdn,
dnsadmins_sid)
# Populating dns partitions
logger.info("Populating DomainDnsZones and ForestDnsZones partitions")
fill_dns_data_partitions(samdb, domainsid, site, domaindn, forestdn,
dnsdomain, dnsforest, hostname, hostip, hostip6,
domainguid, names.ntdsguid, dnsadmins_sid)
if dns_backend.startswith("BIND9_"):
setup_bind9_dns(samdb, secretsdb, domainsid, names, paths, lp, logger,
dns_backend, os_level, site=site, dnspass=dnspass, hostip=hostip,
hostip6=hostip6, targetdir=targetdir)
def setup_bind9_dns(samdb, secretsdb, domainsid, names, paths, lp, logger,
dns_backend, os_level, site=None, dnspass=None, hostip=None,
hostip6=None, targetdir=None, key_version_number=None):
"""Provision DNS information (assuming BIND9 backend in DC role)
:param samdb: LDB object connected to sam.ldb file
:param secretsdb: LDB object connected to secrets.ldb file
:param domainsid: Domain SID (as dom_sid object)
:param names: Names shortcut
:param paths: Paths shortcut
:param lp: Loadparm object
:param logger: Logger object
:param dns_backend: Type of DNS backend
:param os_level: Functional level (treated as os level)
:param site: Site to create hostnames in
:param dnspass: Password for bind's DNS account
:param hostip: IPv4 address
:param hostip6: IPv6 address
:param targetdir: Target directory for creating DNS-related files for BIND9
"""
if (not is_valid_dns_backend(dns_backend) or
not dns_backend.startswith("BIND9_")):
raise Exception("Invalid dns backend: %r" % dns_backend)
if not is_valid_os_level(os_level):
raise Exception("Invalid os level: %r" % os_level)
domaindn = names.domaindn
domainguid = get_domainguid(samdb, domaindn)
secretsdb_setup_dns(secretsdb, names,
paths.private_dir, realm=names.realm,
dnsdomain=names.dnsdomain,
dns_keytab_path=paths.dns_keytab, dnspass=dnspass,
key_version_number=key_version_number)
create_dns_dir(logger, paths)
if dns_backend == "BIND9_FLATFILE":
create_zone_file(lp, logger, paths, targetdir, site=site,
dnsdomain=names.dnsdomain, hostip=hostip,
hostip6=hostip6, hostname=names.hostname,
realm=names.realm, domainguid=domainguid,
ntdsguid=names.ntdsguid)
if dns_backend == "BIND9_DLZ" and os_level >= DS_DOMAIN_FUNCTION_2003:
create_samdb_copy(samdb, logger, paths, names, domainsid, domainguid)
create_named_conf(paths, realm=names.realm,
dnsdomain=names.dnsdomain, dns_backend=dns_backend)
create_named_txt(paths.namedtxt,
realm=names.realm, dnsdomain=names.dnsdomain,
dnsname = "%s.%s" % (names.hostname, names.dnsdomain),
private_dir=paths.private_dir,
keytab_name=paths.dns_keytab)
logger.info("See %s for an example configuration include file for BIND",
paths.namedconf)
logger.info("and %s for further documentation required for secure DNS "
"updates", paths.namedtxt)
```
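A small, hypothetical sketch of how the `dnsp` record wrappers defined above are used: build a record, NDR-pack it the way `add_host_record`/`add_srv_record` do, and round-trip it back. It assumes the Samba Python bindings are importable; the addresses and host names are placeholders.

```python
from samba.dcerpc import dnsp
from samba.ndr import ndr_pack, ndr_unpack

# Pack an A record and an SRV record into the wire format stored in dnsRecord
a_blob = ndr_pack(ARecord("192.0.2.10"))
srv_blob = ndr_pack(SRVRecord("dc1.example.com", 389))

# The blobs round-trip back into dnsp.DnssrvRpcRecord structures
a_rec = ndr_unpack(dnsp.DnssrvRpcRecord, a_blob)
print a_rec.wType == dnsp.DNS_TYPE_A, a_rec.data        # True 192.0.2.10
srv_rec = ndr_unpack(dnsp.DnssrvRpcRecord, srv_blob)
print srv_rec.data.nameTarget, srv_rec.data.wPort       # dc1.example.com 389
```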
#### File: samba/tests/libsmb_samba_internal.py
```python
from samba.samba3 import libsmb_samba_internal
from samba.dcerpc import security
from samba.samba3 import param as s3param
from samba import credentials
import samba.tests
import threading
import sys
import os
class LibsmbTestCase(samba.tests.TestCase):
class OpenClose(threading.Thread):
def __init__(self, conn, filename, num_ops):
threading.Thread.__init__(self)
self.conn = conn
self.filename = filename
self.num_ops = num_ops
self.exc = False
def run(self):
c = self.conn
try:
for i in range(self.num_ops):
f = c.create(self.filename, CreateDisposition=3,
DesiredAccess=security.SEC_STD_DELETE)
c.delete_on_close(f, True)
c.close(f)
except Exception:
self.exc = sys.exc_info()
def test_OpenClose(self):
lp = s3param.get_context()
lp.load(os.getenv("SMB_CONF_PATH"))
creds = credentials.Credentials()
creds.set_username(os.getenv("USERNAME"))
creds.set_password(os.getenv("PASSWORD"))
c = libsmb_samba_internal.Conn(os.getenv("SERVER_IP"), "tmp", creds)
mythreads = []
for i in range(3):
t = LibsmbTestCase.OpenClose(c, "test" + str(i), 10)
mythreads.append(t)
for t in mythreads:
t.start()
for t in mythreads:
t.join()
if t.exc:
raise t.exc[0](t.exc[1])
if __name__ == "__main__":
import unittest
unittest.main()
```
#### File: samba/tests/upgradeprovision.py
```python
import os
from samba.upgradehelpers import (usn_in_range, dn_sort,
update_secrets,
construct_existor_expr)
from samba.descriptor import get_diff_sds
from samba.tests.provision import create_dummy_secretsdb
from samba.tests import TestCaseInTempDir
from samba import Ldb
from ldb import SCOPE_BASE
import samba.tests
from samba.dcerpc import security
def dummymessage(a=None, b=None):
pass
class UpgradeProvisionTestCase(TestCaseInTempDir):
"""Some simple tests for individual functions in the provisioning code.
"""
def test_usn_in_range(self):
range = [5, 25, 35, 55]
vals = [3, 26, 56]
for v in vals:
self.assertFalse(usn_in_range(v, range))
vals = [5, 20, 25, 35, 36]
for v in vals:
self.assertTrue(usn_in_range(v, range))
def test_dn_sort(self):
        # a DN with more levels sorts after one with fewer, even if it is
        # lexicographically closer: dc=tata,dc=toto (2 levels) comes after
        # dc=toto, even though dc=toto is lexicographically after dc=tata,dc=toto
self.assertEquals(dn_sort("dc=tata,dc=toto", "dc=toto"), 1)
self.assertEquals(dn_sort("dc=zata", "dc=tata"), 1)
self.assertEquals(dn_sort("dc=toto,dc=tata",
"cn=foo,dc=toto,dc=tata"), -1)
self.assertEquals(dn_sort("cn=bar, dc=toto,dc=tata",
"cn=foo, dc=toto,dc=tata"), -1)
def test_get_diff_sds(self):
domsid = security.dom_sid('S-1-5-21')
sddl = "O:SAG:DUD:AI(A;CI;RPWPCRCCLCLORCWOWDSW;;;SA)\
(A;CI;RP LCLORC;;;AU)(A;CI;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)S:AI(AU;CISA;WP;;;WD)"
sddl1 = "O:SAG:DUD:AI(A;CI;RPWPCRCCLCLORCWOWDSW;;;SA)\
(A;CI;RP LCLORC;;;AU)(A;CI;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)S:AI(AU;CISA;WP;;;WD)"
sddl2 = "O:BAG:DUD:AI(A;CI;RPWPCRCCLCLORCWOWDSW;;;SA)\
(A;CI;RP LCLORC;;;AU)(A;CI;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)S:AI(AU;CISA;WP;;;WD)"
sddl3 = "O:SAG:BAD:AI(A;CI;RPWPCRCCLCLORCWOWDSW;;;SA)\
(A;CI;RP LCLORC;;;AU)(A;CI;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)S:AI(AU;CISA;WP;;;WD)"
sddl4 = "O:SAG:DUD:AI(A;CI;RPWPCRCCLCLORCWOWDSW;;;BA)\
(A;CI;RP LCLORC;;;AU)(A;CI;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)S:AI(AU;CISA;WP;;;WD)"
sddl5 = "O:SAG:DUD:AI(A;CI;RPWPCRCCLCLORCWOWDSW;;;SA)\
(A;CI;RP LCLORC;;;AU)(A;CI;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)"
sddl6 = "O:SAG:DUD:AI(A;CIID;RPWPCRCCLCLORCWOWDSW;;;SA)\
(A;CIID;RP LCLORC;;;AU)(A;CIID;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)\
(A;CI;RPWPCRCCLCLORCWOWDSW;;;SA)\
(A;CI;RP LCLORC;;;AU)(A;CI;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)S:AI(AU;CISA;WP;;;WD)(AU;CIIDSA;WP;;;WD)"
self.assertEquals(get_diff_sds(security.descriptor.from_sddl(sddl, domsid),
security.descriptor.from_sddl(sddl1, domsid),
domsid), "")
txt = get_diff_sds(security.descriptor.from_sddl(sddl, domsid),
security.descriptor.from_sddl(sddl2, domsid),
domsid)
self.assertEquals(txt, "\tOwner mismatch: SA (in ref) BA(in current)\n")
txt = get_diff_sds(security.descriptor.from_sddl(sddl, domsid),
security.descriptor.from_sddl(sddl3, domsid),
domsid)
self.assertEquals(txt, "\tGroup mismatch: DU (in ref) BA(in current)\n")
txt = get_diff_sds(security.descriptor.from_sddl(sddl, domsid),
security.descriptor.from_sddl(sddl4, domsid),
domsid)
txtmsg = "\tPart dacl is different between reference and current here\
is the detail:\n\t\t(A;CI;RPWPCRCCLCLORCWOWDSW;;;BA) ACE is not present in\
the reference\n\t\t(A;CI;RPWPCRCCLCLORCWOWDSW;;;SA) ACE is not present in\
the current\n"
self.assertEquals(txt, txtmsg)
txt = get_diff_sds(security.descriptor.from_sddl(sddl, domsid),
security.descriptor.from_sddl(sddl5, domsid),
domsid)
self.assertEquals(txt, "\tCurrent ACL hasn't a sacl part\n")
self.assertEquals(get_diff_sds(security.descriptor.from_sddl(sddl, domsid),
security.descriptor.from_sddl(sddl6, domsid),
domsid), "")
def test_construct_existor_expr(self):
res = construct_existor_expr([])
self.assertEquals(res, "")
res = construct_existor_expr(["foo"])
self.assertEquals(res, "(|(foo=*))")
res = construct_existor_expr(["foo", "bar"])
self.assertEquals(res, "(|(foo=*)(bar=*))")
class UpdateSecretsTests(samba.tests.TestCaseInTempDir):
def setUp(self):
super(UpdateSecretsTests, self).setUp()
self.referencedb = create_dummy_secretsdb(
os.path.join(self.tempdir, "ref.ldb"))
def _getEmptyDb(self):
return Ldb(os.path.join(self.tempdir, "secrets.ldb"))
def _getCurrentFormatDb(self):
return create_dummy_secretsdb(
os.path.join(self.tempdir, "secrets.ldb"))
def test_trivial(self):
# Test that updating an already up-to-date secretsdb works fine
self.secretsdb = self._getCurrentFormatDb()
self.assertEquals(None,
update_secrets(self.referencedb, self.secretsdb, dummymessage))
def test_update_modules(self):
empty_db = self._getEmptyDb()
update_secrets(self.referencedb, empty_db, dummymessage)
newmodules = empty_db.search(base="@MODULES", scope=SCOPE_BASE)
refmodules = self.referencedb.search(base="@MODULES", scope=SCOPE_BASE)
self.assertEquals(newmodules.msgs, refmodules.msgs)
def tearDown(self):
for name in ["ref.ldb", "secrets.ldb", "secrets.tdb", "secrets.tdb.bak", "secrets.ntdb"]:
path = os.path.join(self.tempdir, name)
if os.path.exists(path):
os.unlink(path)
super(UpdateSecretsTests, self).tearDown()
```
#### File: dist-packages/xdg/IniFile.py
```python
import re, os, stat, io
from xdg.Exceptions import (ParsingError, DuplicateGroupError, NoGroupError,
NoKeyError, DuplicateKeyError, ValidationError,
debug)
import xdg.Locale
from xdg.util import u
import gettext
def is_ascii(s):
"""Return True if a string consists entirely of ASCII characters."""
try:
s.encode('ascii', 'strict')
return True
except UnicodeError:
return False
class IniFile:
defaultGroup = ''
fileExtension = ''
filename = ''
gettext_domain = None
tainted = False
def __init__(self, filename=None):
self.content = dict()
if filename:
self.parse(filename)
def __cmp__(self, other):
return cmp(self.content, other.content)
def parse(self, filename, headers=None):
'''Parse an INI file.
headers -- list of headers the parser will try to select as a default header
'''
# for performance reasons
content = self.content
if not os.path.isfile(filename):
raise ParsingError("File not found", filename)
try:
# The content should be UTF-8, but legacy files can have other
# encodings, including mixed encodings in one file. We don't attempt
# to decode them, but we silence the errors.
fd = io.open(filename, 'r', encoding='utf-8', errors='replace')
except IOError as e:
if debug:
raise e
else:
return
# parse file
for line in fd:
line = line.strip()
# empty line
if not line:
continue
# comment
elif line[0] == '#':
continue
# new group
elif line[0] == '[':
currentGroup = line.lstrip("[").rstrip("]")
if debug and self.hasGroup(currentGroup):
raise DuplicateGroupError(currentGroup, filename)
else:
content[currentGroup] = {}
# key
else:
try:
key, value = line.split("=", 1)
except ValueError:
raise ParsingError("Invalid line: " + line, filename)
key = key.strip() # Spaces before/after '=' should be ignored
try:
if debug and self.hasKey(key, currentGroup):
raise DuplicateKeyError(key, currentGroup, filename)
else:
content[currentGroup][key] = value.strip()
except (IndexError, UnboundLocalError):
raise ParsingError("Parsing error on key, group missing", filename)
fd.close()
self.filename = filename
self.tainted = False
# check header
if headers:
for header in headers:
if header in content:
self.defaultGroup = header
break
else:
raise ParsingError("[%s]-Header missing" % headers[0], filename)
# check for gettext domain
e = self.content.get('Desktop Entry', {})
self.gettext_domain = e.get('X-GNOME-Gettext-Domain',
e.get('X-Ubuntu-Gettext-Domain', None))
# start stuff to access the keys
def get(self, key, group=None, locale=False, type="string", list=False):
# set default group
if not group:
group = self.defaultGroup
# return key (with locale)
if (group in self.content) and (key in self.content[group]):
if locale:
key = self.__addLocale(key, group)
if key.endswith(']') or not self.gettext_domain:
# inline translations
value = self.content[group][key]
else:
value = gettext.dgettext(self.gettext_domain, self.content[group][key])
else:
value = self.content[group][key]
else:
if debug:
if group not in self.content:
raise NoGroupError(group, self.filename)
elif key not in self.content[group]:
raise NoKeyError(key, group, self.filename)
else:
value = ""
if list == True:
values = self.getList(value)
result = []
else:
values = [value]
for value in values:
if type == "boolean":
value = self.__getBoolean(value)
elif type == "integer":
try:
value = int(value)
except ValueError:
value = 0
elif type == "numeric":
try:
value = float(value)
except ValueError:
value = 0.0
elif type == "regex":
value = re.compile(value)
elif type == "point":
x, y = value.split(",")
value = int(x), int(y)
if list == True:
result.append(value)
else:
result = value
return result
# end stuff to access the keys
# start subget
def getList(self, string):
if re.search(r"(?<!\\)\;", string):
list = re.split(r"(?<!\\);", string)
elif re.search(r"(?<!\\)\|", string):
list = re.split(r"(?<!\\)\|", string)
elif re.search(r"(?<!\\),", string):
list = re.split(r"(?<!\\),", string)
else:
list = [string]
if list[-1] == "":
list.pop()
return list
def __getBoolean(self, boolean):
if boolean == 1 or boolean == "true" or boolean == "True":
return True
elif boolean == 0 or boolean == "false" or boolean == "False":
return False
return False
# end subget
def __addLocale(self, key, group=None):
"add locale to key according the current lc_messages"
# set default group
if not group:
group = self.defaultGroup
for lang in xdg.Locale.langs:
langkey = "%s[%s]" % (key, lang)
if langkey in self.content[group]:
return langkey
return key
# start validation stuff
def validate(self, report="All"):
"""Validate the contents, raising ``ValidationError`` if there
is anything amiss.
report can be 'All' / 'Warnings' / 'Errors'
"""
self.warnings = []
self.errors = []
# get file extension
self.fileExtension = os.path.splitext(self.filename)[1]
        # override this in subclasses to add your own checks
self.checkExtras()
# check all keys
for group in self.content:
self.checkGroup(group)
for key in self.content[group]:
self.checkKey(key, self.content[group][key], group)
# check if value is empty
if self.content[group][key] == "":
self.warnings.append("Value of Key '%s' is empty" % key)
# raise Warnings / Errors
msg = ""
if report == "All" or report == "Warnings":
for line in self.warnings:
msg += "\n- " + line
if report == "All" or report == "Errors":
for line in self.errors:
msg += "\n- " + line
if msg:
raise ValidationError(msg, self.filename)
# check if group header is valid
def checkGroup(self, group):
pass
# check if key is valid
def checkKey(self, key, value, group):
pass
# check random stuff
def checkValue(self, key, value, type="string", list=False):
if list == True:
values = self.getList(value)
else:
values = [value]
for value in values:
if type == "string":
code = self.checkString(value)
if type == "localestring":
continue
elif type == "boolean":
code = self.checkBoolean(value)
elif type == "numeric":
code = self.checkNumber(value)
elif type == "integer":
code = self.checkInteger(value)
elif type == "regex":
code = self.checkRegex(value)
elif type == "point":
code = self.checkPoint(value)
if code == 1:
self.errors.append("'%s' is not a valid %s" % (value, type))
elif code == 2:
self.warnings.append("Value of key '%s' is deprecated" % key)
def checkExtras(self):
pass
def checkBoolean(self, value):
# 1 or 0 : deprecated
if (value == "1" or value == "0"):
return 2
# true or false: ok
elif not (value == "true" or value == "false"):
return 1
def checkNumber(self, value):
# float() ValueError
try:
float(value)
        except ValueError:
return 1
def checkInteger(self, value):
# int() ValueError
try:
int(value)
        except ValueError:
return 1
def checkPoint(self, value):
if not re.match("^[0-9]+,[0-9]+$", value):
return 1
def checkString(self, value):
return 0 if is_ascii(value) else 1
def checkRegex(self, value):
try:
re.compile(value)
        except re.error:
return 1
# write support
def write(self, filename=None, trusted=False):
if not filename and not self.filename:
raise ParsingError("File not found", "")
if filename:
self.filename = filename
else:
filename = self.filename
if os.path.dirname(filename) and not os.path.isdir(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with io.open(filename, 'w', encoding='utf-8') as fp:
# An executable bit signifies that the desktop file is
# trusted, but then the file can be executed. Add hashbang to
# make sure that the file is opened by something that
# understands desktop files.
if trusted:
fp.write(u("#!/usr/bin/env xdg-open\n"))
if self.defaultGroup:
fp.write(u("[%s]\n") % self.defaultGroup)
for (key, value) in self.content[self.defaultGroup].items():
fp.write(u("%s=%s\n") % (key, value))
fp.write(u("\n"))
for (name, group) in self.content.items():
if name != self.defaultGroup:
fp.write(u("[%s]\n") % name)
for (key, value) in group.items():
fp.write(u("%s=%s\n") % (key, value))
fp.write(u("\n"))
# Add executable bits to the file to show that it's trusted.
if trusted:
oldmode = os.stat(filename).st_mode
mode = oldmode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
os.chmod(filename, mode)
self.tainted = False
def set(self, key, value, group=None, locale=False):
# set default group
if not group:
group = self.defaultGroup
if locale == True and len(xdg.Locale.langs) > 0:
key = key + "[" + xdg.Locale.langs[0] + "]"
try:
self.content[group][key] = value
except KeyError:
raise NoGroupError(group, self.filename)
self.tainted = (value == self.get(key, group))
def addGroup(self, group):
if self.hasGroup(group):
if debug:
raise DuplicateGroupError(group, self.filename)
else:
self.content[group] = {}
self.tainted = True
def removeGroup(self, group):
existed = group in self.content
if existed:
del self.content[group]
self.tainted = True
else:
if debug:
raise NoGroupError(group, self.filename)
return existed
def removeKey(self, key, group=None, locales=True):
# set default group
if not group:
group = self.defaultGroup
try:
if locales:
for name in list(self.content[group]):
if re.match("^" + key + xdg.Locale.regex + "$", name) and name != key:
del self.content[group][name]
value = self.content[group].pop(key)
self.tainted = True
return value
except KeyError as e:
if debug:
                if e.args[0] == group:
raise NoGroupError(group, self.filename)
else:
raise NoKeyError(key, group, self.filename)
else:
return ""
# misc
def groups(self):
return self.content.keys()
def hasGroup(self, group):
return group in self.content
def hasKey(self, key, group=None):
# set default group
if not group:
group = self.defaultGroup
return key in self.content[group]
def getFileName(self):
return self.filename
```
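An illustrative usage sketch for the `IniFile` class above; the desktop-file path and key names are examples only, not values the module itself requires.

```python
from xdg.IniFile import IniFile

ini = IniFile()
# parse() selects the first matching header as the default group
ini.parse("/usr/share/applications/firefox.desktop", headers=["Desktop Entry"])

name = ini.get("Name", locale=True)               # locale-aware lookup
in_terminal = ini.get("Terminal", type="boolean")
categories = ini.get("Categories", list=True)     # split on unescaped ';'

ini.set("Comment", "Edited copy")
ini.write("/tmp/firefox-copy.desktop")
```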
#### File: dist-packages/xdiagnose/application.py
```python
from __future__ import absolute_import, print_function, unicode_literals
import gtk
class Application(object):
def __init__(self):
self.pages = {}
self.current_page = None
# Top level UI containers
self.window = self.create_top_window()
self.frame = self.create_top_frame()
# Create UI elements
self.accelerators = gtk.AccelGroup()
# Assemble the UI
self.window.add(self.frame)
# Positions
self.window.set_default_size(400, 300)
self.window.set_size_request(400, 300)
def create_top_window(self):
win = gtk.Window(gtk.WINDOW_TOPLEVEL)
win.connect('delete_event', self.on_delete)
win.set_border_width(10)
return win
def create_top_frame(self):
frame = gtk.Frame("")
self.page_title = gtk.Label("")
frame.set_label_widget(self.page_title)
frame.set_label_align(0.0, 0.0)
return frame
def create_button(self, primary_text, secondary_text):
button = gtk.Button(primary_text)
button_label = button.get_children()[0]
button_label.set_markup(
"<b>%s</b>\n<small>%s</small>"
%(primary_text, secondary_text))
button_label.set_alignment(0.0, 0.0)
button.set_relief(gtk.RELIEF_NONE)
button.show()
return button
def create_nav_bar(self, next_page=None, prev_page=None):
hbox_navigation = gtk.HButtonBox()
if prev_page is not None:
hbox_navigation.set_property("layout-style", gtk.BUTTONBOX_START)
prev_button = gtk.Button("Back")
prev_button.connect("clicked", self.on_page, prev_page)
hbox_navigation.pack_start(prev_button)
if next_page is not None:
hbox_navigation.set_property("layout-style", gtk.BUTTONBOX_END)
next_button = gtk.Button("Next")
next_button.connect("clicked", self.on_page, next_page)
hbox_navigation.pack_end(next_button)
return hbox_navigation
def create_page(self, text):
page = gtk.VBox(spacing=5)
page.set_border_width(10)
label = gtk.Label(text)
label.set_line_wrap(True)
label.set_use_markup(True)
label.set_alignment(0.0, 0.0)
page.pack_start(label, expand=False)
return page
def on_delete(self, widget, event):
gtk.main_quit()
def on_close(self, widget):
gtk.main_quit()
```
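A hypothetical snippet showing how the `Application` skeleton above could be driven; it assumes the legacy PyGTK 2 bindings that this module imports are installed, and the page text is a placeholder.

```python
import gtk
from xdiagnose.application import Application

app = Application()
app.page_title.set_markup("<b>Welcome</b>")
app.frame.add(app.create_page("Select a diagnostic step to begin."))
app.window.show_all()
gtk.main()   # on_delete() quits the main loop when the window is closed
```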
#### File: dist-packages/xdiagnose/info.py
```python
from __future__ import absolute_import, print_function, unicode_literals
'''High level package information'''
PROGNAME = 'xdiagnose'
VERSION = '3.0'
URL = 'http://launchpad.net/xdiagnose'
EMAIL = '<EMAIL>'
DATE_STARTED = '2010-11-04'
DATE_COPYRIGHT = '2011'
LICENSE_URL = 'http://www.gnu.org/copyleft/gpl.html'
SHORT_DESCRIPTION = 'Analysis tools for troubleshooting X.org problems'
DESCRIPTION = """
This package is a friendly GUI application for diagnosing several
common X.org problems.
"""
class _contributor:
'''Information about a person contributing to this project'''
def __init__(self, name, email, started=None, roles=None, translating=None):
self.name = name
self.email = email
self.started = started
if roles is None:
self.roles = []
elif type(roles) is not list:
self.roles = [roles]
else:
self.roles = roles
self.translation_languages = translating
return
def to_dict(self):
'''Returns the object in a dict suitable for json'''
return self.__dict__
@property
def display_email(self):
'''Formatted string version of email address'''
if self.email:
return '<%s>' % self.email
else:
return ''
@property
def display_roles(self):
'''Formatted string version of roles list'''
if self.roles:
return '[%s]' % ','.join(self.roles)
else:
return ''
LEAD_DEVELOPER = _contributor(
'<NAME>', '<EMAIL>', started='2010-11-04',
roles=['lead', 'developer'], translating=None,
)
CONTRIBUTORS = [
_contributor(
'<NAME>', '<EMAIL>', started='2012-01-21',
roles=['developer', 'translator'], translating=None),
_contributor(
'<NAME>', '<EMAIL>', started='2012-08-10',
roles=['developer'], translating=None),
]
if __name__ == "__main__":
print(PROGNAME, VERSION, URL)
print("Copyright (C) %s %s <%s>" % (
DATE_COPYRIGHT, LEAD_DEVELOPER.name, LEAD_DEVELOPER.email))
print()
for contributor in CONTRIBUTORS:
print("%s %s %s" % (
contributor.name,
contributor.display_email,
contributor.display_roles))
```
#### File: xdiagnose/utils/config.py
```python
import os
from file_io import load_file
from lists import to_list
from text import to_bool
class InvalidConfig(Exception):
def __init__(self, item):
self.item = item
def __str__(self):
return "Invalid configuration: %s" %(self.item)
class Config(object):
'''Yet another INI-style config file parser.
Assumes a "key = value" style file, with # anywhere on line to
indicate a comment.
Lines can be continued with either backslash (\) or a trailing
comma, if the subsequent line is space-indented.
All keys from the file are loaded as data members of this class so
can be easily referenced as "config.key".
If a key name includes one or more periods (.) it is converted into
a dict. So, "foo.bar.baz = doh.blah, 42.1" in the config file would be
referenced in code as "foo['bar']['baz'] = 'doh.blah, 42.1'.
The 'include' keyword is supported as a way to import the contents
of another file, which is then parsed and handled as above, with all
elements brought into the current namespace.
'''
def __init__(self, filename=None, lines=None):
if filename is not None:
self._filename = os.path.expanduser(filename)
if os.path.exists(self._filename):
self.load(load_file(self._filename))
if lines is not None:
self.load(lines=lines)
@property
def filename(self):
"""The name of the file the config was loaded from.
Returns None if config was provided directly during
initialization.
"""
try:
return self._filename
        except AttributeError:
return None
def clear(self):
"""Deletes all config data from object"""
        # iterate over a snapshot of the keys so entries can be deleted safely
        for key in list(self.__dict__.keys()):
            if key.startswith('_'):
                continue
            del self.__dict__[key]
def copy(self, obj):
"""Copies contents of another config object"""
if obj is None or len(obj.__dict__.keys()) < 1:
return
for key, value in obj.__dict__.items():
if key.startswith('_'):
continue
old_value = self.__dict__.get(key, None)
if old_value is not None and type(old_value) != type(value):
if type(old_value) is list and type(value) is str:
value = to_list(value)
else:
raise InvalidConfig("key %s (type %s) given for a type %s" %(
key, type(value), type(old_value)))
self.__dict__[key] = value
def set(self, option, value):
"""Sets an option, handling dots as a path of dicts"""
def _recurse_set(parent_dict, fields, value):
field = fields[0]
if len(fields) == 1:
if type(parent_dict.get(field,None)) is list:
parent_dict[field] = to_list(value)
elif type(parent_dict.get(field,None)) is bool:
parent_dict[field] = to_bool(value)
else:
parent_dict[field] = value
return
if field not in parent_dict:
parent_dict[field] = {}
elif type(parent_dict[field]) is not dict:
buf = parent_dict[field]
parent_dict[field] = {'': buf}
_recurse_set(parent_dict[field], fields[1:], value)
_recurse_set(self.__dict__, option.split('.'), value)
def get(self, option, default=None):
"""Retrieves an option, with dots navigating dict tree"""
def _recurse_get(parent_dict, fields):
field = fields[0]
if field not in parent_dict:
parent_dict[field] = ''
if type(parent_dict[field]) is not dict:
return parent_dict[field]
return _recurse_get(parent_dict[field], fields[1:])
return _recurse_get(self.__dict__, option.split('.'))
def load(self, lines=None):
"""Parses given lines into the config"""
if not lines:
return
if type(lines) is not list:
lines = lines.split("\n")
assert(type(lines) is list)
possible_continuation = False
last_option = None
for line in lines:
if len(line.strip()) == 0:
last_option = None
possible_continuation = False
continue
# We can continue only if this line starts with space and
# the prior line ended in a continuation character (\ or ,)
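            #   e.g. (hypothetical input illustrating a continued value):
            #       packages = foo, \
            #           bar,
            #           baz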
if possible_continuation and not line[0].isspace():
possible_continuation = False
last_option = None
line = line.split('#')[0].strip()
# Check for possible continuation to the next line
if line.endswith('\\'):
possible_continuation = True
line = line[:-1].strip()
elif line.endswith(','):
possible_continuation = True
# Add the option to ourself
if '=' in line:
option, value = line.split('=', 1)
option = option.strip()
if option:
last_option = option
self.set(option, value.strip())
# Line continues from previous, just append to prior value
elif possible_continuation and last_option:
old_value = self.get(last_option)
if type(old_value) is list:
old_value.extend(to_list(line))
else:
old_value += " " + line
self.set(last_option, old_value)
# Import another config file
elif line[:8] == 'include ':
filename = line[8:].strip()
lines = load_file(filename)
self.load(lines)
possible_continuation = False
last_option = None
def __str__(self):
def _value_to_str(key, value):
assert(type(value) is not dict)
if value is None:
return "%s=\n" %(key)
elif type(value) is list:
return "%s=%s\n" %(key, ', '.join(str(x) for x in value))
else:
return "%s=%s\n" %(key, str(value))
def _items_to_str(parent, items):
text = ''
for key, value in items:
if parent is not None:
param = "%s.%s" %(parent, key)
else:
param = key
if type(value) is dict:
text += _items_to_str(param,value.items())
else:
text += _value_to_str(param,value)
return text
return _items_to_str(None, self.__iter__())
def __len__(self):
return len(list(self.__iter__()))
def __getitem__(self, key):
return self.__dict__.get(key, None)
def __iter__(self):
        keys = sorted(self.__dict__.items())
for key,value in keys:
if key.startswith('_') or key=='':
continue
yield (key,value)
def __contains__(self, item):
if item.startswith('_'):
return False
return item in self.__dict__.keys()
if __name__ == "__main__":
#config = Config("~/.config/user-dirs.dirs")
#config = Config("~/.taskhelmrc")
config = Config("~/.taskrc")
print(config)
print()
print(config.shell['prompt'])
print(config.color)
config.clear()
assert( str(config) == '')
```
#### File: xdiagnose/utils/json_io.py
```python
import os
import json
import jsonpickle
from debug import ERR
from file_io import load_file
# TODO: Tests
class JsonIO(object):
def __init__(self, filename):
self.filename = filename
jsonpickle.set_encoder_options('simplejson', sort_keys=True, indent=4)
def convert_from_dict(self):
'''Provides handling of post-processing of data.
By default, this just passes through the data unchanged.
Subclasses can override this routine to define their own
custom conversion logic.
This routine must return a function which takes a data dict
and return a class object.
'''
def converter(data):
return data
return converter
def convert_to_dict(self):
'''Handles conversion of an object to a serializable dict.
By default, this just passes through the data unchanged.
Subclasses can override this routine to define their own
custom conversion logic.
This routine must return a function that converts a data
object into a plain dict.
'''
def converter(data):
return data
return converter
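    # Hypothetical subclass sketch (assumed, for illustration only): the two
    # hooks above let a subclass map between plain dicts and its own objects.
    #   class RecordIO(JsonIO):
    #       def convert_from_dict(self):
    #           return lambda data: Record(**data)      # Record is hypothetical
    #       def convert_to_dict(self):
    #           return lambda record: record.__dict__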
def read(self):
lines = load_file(self.filename)
if not lines:
return None
json_data = jsonpickle.decode('\n'.join(lines))
return json_data
def write(self, data):
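        # write to a temporary file first and rename it into place, so an
        # interrupted write never clobbers the existing file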
ftmp = self.filename+'.tmp'
pathname = os.path.dirname(self.filename)
if pathname and not os.path.exists(pathname):
os.makedirs(pathname)
try:
if os.path.exists(ftmp):
os.unlink(ftmp)
file = open(ftmp, 'w')
text = jsonpickle.encode(data)
file.write(text + "\n")
file.close()
except IOError:
ERR("Failed to save %s to file %s" %(type(data), ftmp))
raise
return
try:
os.rename(ftmp, self.filename)
except IOError:
os.unlink(self.filename)
os.rename(ftmp, self.filename)
# vi:set ts=4 sw=4 expandtab:
```
#### File: xdiagnose/utils/readurl.py
```python
import urllib2
def readurl(url):
try:
fin = urllib2.urlopen(url)
content = fin.read()
fin.close()
return content
except:
return None
```
#### File: xdiagnose/utils/text.py
```python
from __future__ import absolute_import, print_function, unicode_literals
from decimal import Decimal
import sys  # used by o2float() below when reporting conversion errors
def quote(msg):
"""
Similar to urllib.quote but for glibs GMarkup
@param msg: string to quote
@returns: quoted string
"""
    msg = msg.replace('&', '&amp;')
    msg = msg.replace('<', '&lt;')
    msg = msg.replace('>', '&gt;')
return msg
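# For example, with the GMarkup escaping above:
#   quote('<b>Fish & Chips</b>')  ->  '&lt;b&gt;Fish &amp; Chips&lt;/b&gt;'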
def o2str(obj):
"""
Convert a unicode, decimal.Decimal, datetime object, etc. to a str.
Converts lists and tuples of objects into lists of strings.
"""
retval = None
if type(obj) == str:
return obj
# Type 'unicode' no longer exists in python3
# elif type(obj) == unicode:
# return obj.encode('ascii', 'ignore')
elif type(obj) == Decimal:
return str(obj)
elif type(obj) == list or type(obj) is tuple:
new_list = []
for item in obj:
new_list.append(o2str(item))
return new_list
elif str(type(obj)) == "<type 'datetime.datetime'>":
return obj.ctime()
else:
#print str(type(obj))
return obj
def to_bool(value):
"""
Converts 'something' to boolean. Raises exception for invalid formats
Possible True values: 1, True, '1', 'TRue', 'yes', 'y', 't'
Possible False values: 0, False, None, [], {}, '', '0', 'faLse', 'no', 'n', 'f', 0.0
"""
if type(value) == type(''):
if value.lower() in ("yes", "y", "true", "t", "1"):
return True
if value.lower() in ("no", "n", "false", "f", "0", "none", ""):
return False
raise Exception('Invalid value for boolean conversion: ' + value)
return bool(value)
def o2float(value):
'''Converts strings like 42%, 123M, 1.2B into floating point numbers
Returned values are in millions, so '1.2B' returns 1200
'''
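    # Worked examples of the rules described above (illustrative only):
    #   o2float('123M')   -> 123.0      # millions pass through
    #   o2float('1.2B')   -> 1200.0     # billions become thousands of millions
    #   o2float('42%')    -> 0.42       # percentages become fractions
    #   o2float('(1.5M)') -> -1.5       # parenthesized values are negated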
if value is None:
return 0.0
elif type(value) is float:
return value
elif type(value) is int:
return float(value)
elif value == '--':
return 0.0
value = value.replace(',','')
last = value[len(value)-1]
if last == 'M':
return float(value[:-1])
elif last == 'B':
return float(value[:-1]) * 1000
elif last == '%':
return float(value[:-1])/100.0
elif last == ')' and value[0] == '(':
return -1 * o2float(value[1:-1])
try:
return float(value)
except ValueError:
        sys.stderr.write("o2float: Could not convert '%s' to float\n" %(value))
raise
if __name__ == "__main__":
test_cases = [
('true', True),
('t', True),
('yes', True),
('y', True),
('1', True),
('false', False),
('f', False),
('no', False),
('n', False),
('0', False),
('', False),
(1, True),
(0, False),
(1.0, True),
(0.0, False),
([], False),
({}, False),
((), False),
([1], True),
({1:2}, True),
((1,), True),
(None, False),
(object(), True),
]
for test, expected in test_cases:
        assert to_bool(test) == expected, "to_bool(%r) failed to return %r" % (test, expected)
```
#### File: xdiagnose/utils/url_io.py
```python
import urllib
import lxml.html
from text import o2str
def tables_from_url(url):
data = urllib.urlopen(url).read().decode('utf-8', 'replace')
tree = lxml.html.fromstring(o2str(data))
tables = []
for tbl in tree.iterfind('.//table'):
tele = []
for tr in tbl.iterfind('.//tr'):
try:
text = [e.strip() for e in tr.xpath('.//text()') if
len(e.strip()) > 0]
tele.append(text)
except:
print(tr)
raise
yield tele
def data_from_url(url):
'''Looks up first non-trivial data table from url'''
for t in tables_from_url(url):
if len(t) >= 5:
return t
raise Exception("No usable data returned from %s" %(url))
# vi:set ts=4 sw=4 expandtab:
```
#### File: dist-packages/xdiagnose/x_pkg_names.py
```python
import re
def compile_table(table):
for regex in table:
regex['rc'] = re.compile(regex['re'], re.IGNORECASE)
def invert_dict(d):
return dict([[v,k] for k,v in d.items()])
def group_match_capitalize(match):
return match.group(1).capitalize()
def group_match_lower(match):
return match.group(1).lower()
deb_to_fdo_mapping = {
# Identically named
'libdmx': 'libdmx',
'libfontenc': 'libfontenc',
'liblbxutil': 'liblbxutil',
'libpciaccess': 'libpciaccess',
'libxkbcommon': 'libxkbcommon',
'libxkbui': 'libxkbui',
'libxkbfile': 'libxkbfile',
'libxtrans': 'libxtrans',
'wayland': 'wayland',
# Changed names that don't follow the standard rules
'wayland-demos': 'wayland',
'libdrm': 'drm',
'drm-snapshot': 'drm',
'xorg-server': 'xserver',
'x11proto-core': 'x11proto',
'xfonts-encodings': 'encodings',
'libfs': 'libFS',
'libice': 'libICE',
'libsm': 'libSM',
'libxcalibrate': 'libXCalibrate',
'libxres': 'libXRes',
'libxscrnsaver': 'libXScrnSaver',
'libxtrap': 'libXTrap',
'libxprintapputil': 'libXprintAppUtil',
'libxvmc': 'libXvMC',
'libxprintutil': 'libXprintUtil',
}
# Note: Duplicate values will get mapped to just one key
fdo_to_deb_mapping = invert_dict(deb_to_fdo_mapping)
# "Standard" Debian-X renaming rules
fdo_to_deb_rename_rules = [
{ 're': r'^xf86-(.*)$', 'sub': r'xserver-xorg-\1', },
{ 're': r'^(.*)proto$', 'sub': r'x11proto-\1', },
]
compile_table(fdo_to_deb_rename_rules)
# Inverse of Debian-X renaming rules
deb_to_fdo_rename_rules = [
{ 're': r'^lib([a-z])', 'sub': r'lib\1', 'func': group_match_capitalize},
{ 're': r'^lib(.*)wm$', 'sub': r'lib\1WM', },
{ 're': r'^xtrans(.*)', 'sub': r'libxtrans\1', },
{ 're': r'^x11proto-(.*)$', 'sub': r'\1proto', },
{ 're': r'^xserver-xorg-(.*)$', 'sub': r'xf86-\1', },
]
compile_table(deb_to_fdo_rename_rules)
def lookup(name, mapping, rules):
if name is None:
return None
# Lookup the package name
pkg = mapping.get(name, None)
if pkg is not None:
return pkg
# Use standard rename rules
for rule in rules:
m = rule['rc'].search(name)
if not m:
continue
if 'func' in rule:
text = re.sub(rule['re'], rule['func'], name)
pat = str(rule.get('sub',None))
return str.replace(pat, '\\1', text)
else:
return rule['rc'].sub(rule['sub'], name)
# Not found; assume the same package name applies
return name
def debpkg_to_fdopkg(name):
return lookup(name, deb_to_fdo_mapping, deb_to_fdo_rename_rules)
def fdopkg_to_debpkg(name):
return lookup(name, fdo_to_deb_mapping, fdo_to_deb_rename_rules)
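# Example conversions implied by the tables and rules above (illustrative):
#   debpkg_to_fdopkg('xserver-xorg-video-intel') -> 'xf86-video-intel'
#   debpkg_to_fdopkg('x11proto-core')            -> 'x11proto'
#   fdopkg_to_debpkg('xf86-input-synaptics')     -> 'xserver-xorg-input-synaptics'
#   fdopkg_to_debpkg('randrproto')               -> 'x11proto-randr'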
if __name__ == "__main__":
import sys
if len(sys.argv) < 2:
print("Usage: %prog <package-name>")
sys.exit(1)
package = sys.argv[1]
print(lookup(package,
deb_to_fdo_mapping,
deb_to_fdo_rename_rules))
``` |
{
"source": "jianwen-xie/3DDescriptorNet",
"score": 2
} |
#### File: 3DDescriptorNet/util/custom_ops.py
```python
import numpy as np
import tensorflow as tf
def leaky_relu(input_, leakiness=0.2):
assert leakiness <= 1
return tf.maximum(input_, leakiness * input_)
def fully_connected(input_, output_dim, name="fc"):
shape = input_.shape
return conv3d(input_, output_dim, kernal=list(shape[1:4]), strides=(1, 1, 1), padding="VALID", name=name)
def up_sample(input_, scale=4, name="up_sample"):
with tf.variable_scope(name):
w = tf.Variable(tf.constant(1, shape=(1, 1, 1, 1, 1)), name="w")
return tf.nn.conv3d_transpose(input_, w, output_shape=(), strides=scale, padding="VALID")
def convt3d(input_, output_shape, kernal=(5, 5, 5), strides=(2, 2, 2), padding='SAME', activation_fn=None,
name="convt3d"):
assert type(kernal) in [list, tuple, int]
assert type(strides) in [list, tuple, int]
assert type(padding) in [list, tuple, int, str]
if type(kernal) == list or type(kernal) == tuple:
[k_d, k_h, k_w] = list(kernal)
else:
k_d = k_h = k_w = kernal
if type(strides) == list or type(strides) == tuple:
[d_d, d_h, d_w] = list(strides)
else:
d_d = d_h = d_w = strides
output_shape = list(output_shape)
output_shape[0] = tf.shape(input_)[0]
with tf.variable_scope(name):
if type(padding) in [tuple, list, int]:
if type(padding) == int:
p_d = p_h = p_w = padding
else:
[p_d, p_h, p_w] = list(padding)
pad_ = [0, p_d, p_h, p_w, 0]
input_ = tf.pad(input_, [[p, p] for p in pad_], "CONSTANT")
padding = 'VALID'
w = tf.get_variable('w', [k_d, k_h, k_w, output_shape[-1], input_.get_shape()[-1]],
initializer=tf.random_normal_initializer(stddev=0.001))
convt = tf.nn.conv3d_transpose(input_, w, output_shape=tf.stack(output_shape, axis=0),
strides=[1, d_d, d_h, d_w, 1], padding=padding)
biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0))
convt = tf.nn.bias_add(convt, biases)
        if activation_fn is not None:
convt = activation_fn(convt)
return convt
def conv3d(input_, output_dim, kernal=(5, 5, 5), strides=(2, 2, 2), padding='SAME', activation_fn=None, name="conv3d"):
if type(kernal) == list or type(kernal) == tuple:
[k_d, k_h, k_w] = list(kernal)
else:
k_d = k_h = k_w = kernal
if type(strides) == list or type(strides) == tuple:
[d_d, d_h, d_w] = list(strides)
else:
d_d = d_h = d_w = strides
with tf.variable_scope(name):
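        # when padding is given as a list/tuple, pad the input explicitly
        # (symmetrically per dimension) and then convolve with VALID padding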
if type(padding) == list or type(padding) == tuple:
padding = [0] + list(padding) + [0]
input_ = tf.pad(input_, [[p, p] for p in padding], "CONSTANT")
padding = 'VALID'
w = tf.get_variable('w', [k_d, k_h, k_w, input_.get_shape()[-1], output_dim],
initializer=tf.random_normal_initializer(stddev=0.001))
conv = tf.nn.conv3d(input_, w, strides=[1, d_d, d_h, d_w, 1], padding=padding)
biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))
conv = tf.nn.bias_add(conv, biases)
        if activation_fn is not None:
conv = activation_fn(conv)
return conv
``` |
{
"source": "jianwen-xie/GridCell-3D",
"score": 2
} |
#### File: GridCell-3D/code/main.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import time
import os
from gridcell_multidir_3d import GridCell_multidir_3d
from data_io import Data_Generator
from utils import *
from path_planning import Path_planning_3D, perform_path_planning
from matplotlib import pyplot as plt
from matplotlib import cm
import math
import argparse
from scipy.io import savemat
from mayavi.mlab import *
parser = argparse.ArgumentParser()
# training parameters
parser.add_argument('--batch_size', type=int, default=200000, help='Batch size of training images')
parser.add_argument('--num_epochs', type=int, default=8000, help='Number of epochs to train')
parser.add_argument('--lr', type=float, default=0.03, help='Initial learning rate for descriptor')
parser.add_argument('--beta1', type=float, default=0.9, help='Beta1 in Adam optimizer')
# simulated data parameters
parser.add_argument('--place_size', type=float, default=1.0, help='Size of the square place')
parser.add_argument('--max_vel1', type=float, default=39, help='maximum of velocity in loss1')
parser.add_argument('--min_vel1', type=float, default=1, help='minimum of velocity in loss1')
parser.add_argument('--max_vel2', type=float, default=3, help='maximum of velocity in loss2')
parser.add_argument('--min_vel2', type=float, default=1, help='minimum of velocity in loss2')
parser.add_argument('--sigma', metavar='N', type=float, nargs='+', default=[0.1], help='sd of gaussian kernel')
parser.add_argument('--dtype1', type=int, default=1, help='type of loss1')
# model parameters
parser.add_argument('--place_dim', type=int, default=64000, help='Dimensions of place, should be N^3')
parser.add_argument('--num_group', type=int, default=8, help='Number of groups of grid cells') # 16
parser.add_argument('--block_size', type=int, default=8, help='Size of each block')
parser.add_argument('--iter', type=int, default=0, help='Number of iter')
parser.add_argument('--lamda', type=float, default=0.1, help='Hyper parameter to balance two loss terms') # 0.1
parser.add_argument('--GandE', type=float, default=1, help='1: Gaussian kernel; 0: Exponential kernel')
parser.add_argument('--lamda2', type=float, default=5000, help='Hyper parameter to balance two loss terms')
parser.add_argument('--motion_type', type=str, default='continuous', help='True if in testing mode')
parser.add_argument('--num_step', type=int, default=1, help='Number of steps in path integral')
parser.add_argument('--save_memory', type=bool, default=False, help='True if in testing mode')
# utils train
parser.add_argument('--training_output_dir', type=str, default='training_result', help='The output directory for saving training results')
parser.add_argument('--testing_output_dir', type=str, default='testing_result', help='The output directory for saving testing results')
parser.add_argument('--log_step', type=int, default=200, help='Number of mini batches to save output results') # 500
# utils test
parser.add_argument('--mode', type=str, default='2', help='0: training / 1: visualizing / 2: path integral')
parser.add_argument('--test_num', type=int, default=20, help='Number of testing steps used in path integral')
parser.add_argument('--project_to_point', type=bool, default=False, help='True if in testing path integral mode')
parser.add_argument('--ckpt', type=str, default='model.ckpt-7999', help='Checkpoint path to load')
parser.add_argument('--num_testing_path_integral', type=int, default=1000, help='Number of testing cases for path integral')
parser.add_argument('--gpu', type=str, default='0', help='Which gpu to use')
FLAGS = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu
def train(model, sess, output_dir):
log_dir = os.path.join(output_dir, 'log')
if FLAGS.GandE == 1:
model_dir = os.path.join(output_dir, 'gau_model')
elif FLAGS.GandE == 0:
model_dir = os.path.join(output_dir, 'exp_model')
syn_dir = os.path.join(output_dir, 'learned_patterns')
syn_path_dir = os.path.join(output_dir, 'path_integral')
log_file = os.path.join(output_dir, 'testing_error.txt')
if tf.gfile.Exists(log_dir):
tf.gfile.DeleteRecursively(log_dir)
tf.gfile.MakeDirs(log_dir)
if not tf.gfile.Exists(model_dir):
tf.gfile.MakeDirs(model_dir)
if not tf.gfile.Exists(syn_dir):
tf.gfile.MakeDirs(syn_dir)
if not tf.gfile.Exists(syn_path_dir):
tf.gfile.MakeDirs(syn_path_dir)
# build model
model.build_model()
model.path_integral(FLAGS.test_num)
lamda_list = np.linspace(FLAGS.lamda, FLAGS.lamda, FLAGS.num_epochs)
# initialize training
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
saver = tf.train.Saver(max_to_keep=20)
writer = tf.summary.FileWriter(log_dir, sess.graph)
# make graph immutable
tf.get_default_graph().finalize()
# store graph in protobuf
with open(model_dir + '/graph.proto', 'w') as f:
f.write(str(tf.get_default_graph().as_graph_def()))
data_generator = Data_Generator(max=FLAGS.place_size, num_interval=model.num_interval,
to_use_3D_map=True)
place_pair_val1 = data_generator.generate(1000, dtype=FLAGS.dtype1)
place_seq_val2 = data_generator.generate(1000, velocity=model.velocity2, num_step=model.num_step, dtype=2)
# train
start_time = time.time()
for epoch in range(FLAGS.num_epochs):
if epoch < FLAGS.iter:
lamda_list[epoch] = 0
place_pair1 = data_generator.generate(FLAGS.batch_size, dtype=FLAGS.dtype1)
place_seq2 = data_generator.generate(FLAGS.batch_size, velocity=model.velocity2, num_step=model.num_step, dtype=2)
loss1_avg, loss2_avg, reg_avg, loss3_avg, loss4_avg = [], [], [], [], []
# update weights
feed_dict = dict()
feed_dict.update({model.place_before1: place_pair1['before'],
model.place_after1: place_pair1['after'],
model.vel1: place_pair1['vel'],
model.place_seq2: place_seq2['seq'],
model.lamda: lamda_list[epoch]})
feed_dict[model.vel2] = place_seq2['vel'] if model.motion_type == 'continuous' \
else place_seq2['vel_idx']
summary, loss1, loss2, reg, loss3, loss4, dp1, dp2 = sess.run([model.summary_op, model.loss1,
model.loss2, model.reg, model.loss3,
model.loss4,
model.dp1, model.dp2, model.loss_update,
model.apply_grads], feed_dict=feed_dict)[:8]
loss1_avg.append(loss1)
loss2_avg.append(loss2)
reg_avg.append(reg)
loss3_avg.append(loss3)
loss4_avg.append(loss4)
writer.add_summary(summary, epoch)
writer.flush()
if epoch % 10 == 0:
loss1_avg, loss2_avg, loss3_avg, loss4_avg, reg_avg = np.mean(np.asarray(loss1_avg)), np.mean(
np.asarray(loss2_avg)), \
np.mean(np.asarray(loss3_avg)), np.mean(
np.asarray(loss4_avg)), \
np.mean(np.asarray(reg_avg))
feed_dict = dict()
feed_dict.update({model.place_before1: place_pair_val1['before'],
model.place_after1: place_pair_val1['after'],
model.vel1: place_pair_val1['vel'],
model.place_seq2: place_seq_val2['seq'],
model.lamda: lamda_list[epoch]})
feed_dict[model.vel2] = place_seq_val2['vel'] if model.motion_type == 'continuous' \
else place_seq_val2['vel_idx']
loss_val = sess.run(model.loss, feed_dict=feed_dict)
end_time = time.time()
print(
'#{:s} Epoch #{:d}, train loss1: {:.4f}, train loss2: {:.4f}, reg: {:.4f}, val loss: {:.4f} time: {:.2f}s'
.format(output_dir, epoch, loss1_avg, loss2_avg, reg_avg, loss_val, end_time - start_time))
start_time = time.time()
# report a testing error in the task of path integral and record it in a file
if epoch + 1 == FLAGS.num_epochs or (epoch + 1) % FLAGS.log_step == 0:
print("****************** saving check point and computing testing error in path integral ****************")
# save check point
saver.save(sess, "%s/%s" % (model_dir, 'model.ckpt'), global_step=epoch)
# store learned patterns
visualize_3D_grid_cell(model, sess, syn_dir, epoch)
# show one case of testing
place_seq_test_single = data_generator.generate(1, velocity=model.velocity2, num_step=FLAGS.test_num,
dtype=2, test=True)
test_path_integral(model, sess, place_seq_test_single, visualize=True, test_dir=syn_path_dir, epoch=epoch)
# compute a testing error on a number of testing cases
place_seq_test = data_generator.generate(FLAGS.num_testing_path_integral, velocity=model.velocity2, num_step=FLAGS.test_num, dtype=2,
test=True)
err = test_path_integral(model, sess, place_seq_test)
print("****************** (epoch %s) error of path integral in %s testing cases: %s" % (str(epoch), str(FLAGS.num_testing_path_integral), str(err)))
if log_file is not None:
with open(log_file, "a") as f:
print('epoch = %d , error = %02f' % (epoch, err), file=f)
def test_path_integral(model, sess, place_seq_test, visualize=False, test_dir=None, epoch=None):
err = np.zeros(shape=len(place_seq_test['seq']))
place_min, place_max = 100, -100
for i in range(len(place_seq_test['seq'])):
feed_dict = {model.place_init_test: place_seq_test['seq'][i, 0],
model.vel2_test: place_seq_test['vel'][i]} if model.motion_type == 'continuous' \
else {model.place_init_test: place_seq_test['seq'][i, 0],
model.vel2_test: place_seq_test['vel_idx'][i]}
place_seq_pd_pt_value, place_seq_predict_value, place_seq_predict_gp_value = \
sess.run([model.place_seq_pd_pt, model.place_seq_pd, model.place_seq_pd_gp], feed_dict=feed_dict)
place_seq_gt = place_seq_test['seq'][i, 1:]
err[i] = np.mean(np.sqrt(np.sum((place_seq_gt - place_seq_pd_pt_value) ** 2, axis=1)))
if place_seq_predict_value.min() < place_min:
place_min = place_seq_predict_value.min()
if place_seq_predict_value.max() > place_max:
place_max = place_seq_predict_value.max()
if visualize:
if not tf.gfile.Exists(test_dir):
tf.gfile.MakeDirs(test_dir)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot(place_seq_gt[:, 0], place_seq_gt[:, 1], place_seq_gt[:, 2], color="blue", label='ground truth')
ax.scatter(place_seq_gt[0, 0], place_seq_gt[0, 1], place_seq_gt[0, 2], color="blue", marker='o')
ax.scatter(place_seq_gt[-1, 0], place_seq_gt[-1, 1], place_seq_gt[-1, 2], color="blue", marker='x')
ax.plot(place_seq_pd_pt_value[:, 0], place_seq_pd_pt_value[:, 1], place_seq_pd_pt_value[:, 2],
linestyle='dashed', color="red", label='predicted')
ax.scatter(place_seq_pd_pt_value[0, 0], place_seq_pd_pt_value[0, 1], place_seq_pd_pt_value[0, 2], color="red",
marker='o')
ax.scatter(place_seq_pd_pt_value[-1, 0], place_seq_pd_pt_value[-1, 1], place_seq_pd_pt_value[-1, 2], color="red",
marker='x')
ax.legend()
ax.set_xlabel('x axis')
ax.set_ylabel('y axis')
ax.set_zlabel('z axis')
#ax.grid(False)
plt.savefig(os.path.join(test_dir, str(epoch) + '_id_' + str(i) + '.png'))
plt.close()
err = np.mean(err)
return err
def visualize_3D_grid_cell(model, sess, test_dir, epoch=0, slice_to_show=20):
# only showing one 2D slice of the 3D grid patterns
weights_A_value = sess.run(model.weights_A)
if not tf.gfile.Exists(test_dir):
tf.gfile.MakeDirs(test_dir)
np.save(os.path.join(test_dir, 'weights.npy'), weights_A_value)
# print out A
weights_A_value_transform = weights_A_value.transpose(3, 0, 1, 2)
# fig_sz = np.ceil(np.sqrt(len(weights_A_value_transform)))
plt.figure(figsize=(model.block_size, model.num_group))
for i in range(len(weights_A_value_transform)):
weight_to_draw = weights_A_value_transform[i]
plt.subplot(model.num_group, model.block_size, i + 1)
# showing one slice (2D) of 3D grid patterns
weight_to_draw_all = weight_to_draw[slice_to_show, :, :]
draw_heatmap_2D(weight_to_draw_all, vmin=weight_to_draw_all.min(), vmax=weight_to_draw_all.max())
plt.savefig(os.path.join(test_dir, '3D_patterns_epoch_' + str(epoch) + '.png'))
def main(_):
model = GridCell_multidir_3d(FLAGS)
with tf.Session() as sess:
if FLAGS.mode == "1": # visualize learned patterns
# load model
assert FLAGS.ckpt is not None, 'no checkpoint provided.'
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
saver.restore(sess, os.path.join(FLAGS.training_output_dir, 'gau_model', FLAGS.ckpt))
print('Loading checkpoint {}.'.format(os.path.join(FLAGS.training_output_dir, 'gau_model', FLAGS.ckpt)))
test_dir = os.path.join(FLAGS.testing_output_dir, 'test_for_patterns_visualization')
print("Testing.... please check folder %s " % test_dir)
visualize_3D_grid_cell(model, sess, test_dir)
elif FLAGS.mode == "2": # test path integral
model.path_integral(FLAGS.test_num, project_to_point=FLAGS.project_to_point)
# load model
assert FLAGS.ckpt is not None, 'no checkpoint provided.'
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
saver.restore(sess, os.path.join(FLAGS.training_output_dir, 'gau_model', FLAGS.ckpt))
print('Loading checkpoint {}.'.format(os.path.join(FLAGS.training_output_dir, 'gau_model', FLAGS.ckpt)))
data_generator_test = Data_Generator(max=FLAGS.place_size, to_use_3D_map=True, num_interval=model.num_interval)
place_pair_test = data_generator_test.generate(FLAGS.num_testing_path_integral, velocity=model.velocity2, num_step=FLAGS.test_num, dtype=2, test=True)
syn_path_dir_testing = os.path.join(FLAGS.testing_output_dir, 'testing_path_integral')
tf.gfile.MakeDirs(syn_path_dir_testing)
print("Testing.... please check folder %s " % syn_path_dir_testing)
err = test_path_integral(model, sess, place_pair_test, test_dir=syn_path_dir_testing, visualize=True)
print("error of path integral in %s testing cases: %s" % (str(FLAGS.num_testing_path_integral), str(err)))
elif FLAGS.mode == "0":
print('Start training 3D grid cells')
train(model, sess, FLAGS.training_output_dir)
else:
return NotImplementedError
if __name__ == '__main__':
tf.app.run()
```
#### File: GridCell-3D/code/path_planning.py
```python
import tensorflow as tf
import numpy as np
import os
import argparse
import math
from gridcell_multidir_3d import GridCell_multidir_3d
from custom_ops import block_diagonal
from data_io import Data_Generator
from matplotlib import pyplot as plt
from utils import draw_heatmap_2D, draw_3D_path_to_target, draw_path_to_target_gif
import itertools
class Path_planning_3D():
def __init__(self, grid_cell_model, max_step=80, max_err=1):
self.model = grid_cell_model
# build model
self.start = tf.placeholder(shape=[3], dtype=tf.float32)
self.target = tf.placeholder(shape=[3], dtype=tf.float32)
self.max_step, self.max_err = max_step, max_err
# self.path_planning(max_step, max_err)
def path_planning(self, num_step, num_dir):
step = tf.constant(0)
grid_start = self.model.get_grid_code(self.start)
grid_target = self.model.get_grid_code(self.target)
place_seq, _ = self.model.localization_model(self.model.weights_A, grid_start, self.model.grid_cell_dim)
place_seq = tf.expand_dims(place_seq, axis=0)
place_seq_point = tf.expand_dims(self.start, axis=0)
# velocity = self.model.velocity2
theta = np.linspace(0, np.pi, num_dir + 1)[:num_dir]
omega = np.linspace(0, 2*np.pi, num_dir + 1)[:num_dir]
r = 2.0
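        # candidate velocities: directions sampled over a sphere of radius r
        # using spherical coordinates (i_theta = polar angle, i_omega = azimuth)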
velocity = np.zeros(shape=(num_dir ** 2, 3), dtype=np.float32)
index = 0
for i_theta in theta:
for i_omega in omega:
velocity[index, 0] = r * np.sin(i_theta) * np.cos(i_omega)
velocity[index, 1] = r * np.sin(i_theta) * np.sin(i_omega)
velocity[index, 2] = r * np.cos(i_theta)
index = index + 1
num_vel = len(velocity)
vel_list = []
interval_length = 1.0 / (self.model.num_interval - 1)
for t in range(num_step):
vel_list.append(velocity * (t + 1))
r = 1.0
velocity2 = np.zeros(shape=(num_dir ** 2, 3), dtype=np.float32)
index = 0
for i_theta in theta:
for i_omega in omega:
velocity2[index, 0] = r * np.sin(i_theta) * np.cos(i_omega)
velocity2[index, 1] = r * np.sin(i_theta) * np.sin(i_omega)
velocity2[index, 2] = r * np.cos(i_theta)
index = index + 1
vel_list.append(velocity2)
vel_list = np.concatenate(vel_list, axis=0)
M = self.model.construct_motion_matrix(tf.cast(velocity * interval_length, tf.float32), reuse=tf.AUTO_REUSE)
M2 = self.model.construct_motion_matrix(tf.cast(velocity2 * interval_length, tf.float32), reuse=tf.AUTO_REUSE)
place_max = tf.zeros(shape=(1, len(vel_list)))
grid_code = tf.tile(tf.expand_dims(grid_start, axis=0), [num_vel, 1])
grid_next_pool = []
for t in range(num_step):
grid_code = self.model.motion_model(M, grid_code)
grid_next_pool.append(grid_code)
grid_code = tf.tile(tf.expand_dims(grid_start, axis=0), [num_vel, 1])
grid_code = self.model.motion_model(M2, grid_code)
grid_next_pool.append(grid_code)
self.grid_next_pool = tf.concat(grid_next_pool, axis=0)
grid_code_list = tf.expand_dims(self.grid_next_pool, axis=0)
def cond(step, grid_current, place_seq, place_seq_point, place_max, grid_code_list):
return tf.logical_and(step < self.max_step,
tf.sqrt(tf.reduce_sum((tf.to_float(place_seq_point[-1] - self.target)) ** 2)) > self.max_err)
def body(step, grid_current, place_seq, place_seq_point, place_max, grid_code_list):
# grid_current = self.model.get_grid_code(place_seq_point[-1])
grid_code = tf.tile(tf.expand_dims(grid_current, axis=0), [num_vel, 1])
grid_next_pool = []
for t in range(num_step):
grid_code = self.model.motion_model(M, grid_code)
grid_next_pool.append(grid_code)
grid_code = tf.tile(tf.expand_dims(grid_current, axis=0), [num_vel, 1])
grid_code = self.model.motion_model(M2, grid_code)
grid_next_pool.append(grid_code)
grid_next_pool = tf.concat(grid_next_pool, axis=0)
grid_code_list = tf.concat((grid_code_list, tf.expand_dims(grid_next_pool, axis=0)), axis=0)
direction_pool = tf.reduce_sum(grid_target * grid_next_pool, axis=1)
place_next_pool, _ = self.model.localization_model(self.model.weights_A, grid_next_pool, self.model.grid_cell_dim)
p_max = tf.reduce_max(tf.reshape(place_next_pool, [-1, self.model.place_dim]), axis=1)
g_max = tf.reduce_max(grid_next_pool, axis=1)
mask = p_max > 0.5
place_max = tf.concat([place_max, tf.expand_dims(p_max, axis=0)], axis=0)
grid_next_pool, direction_pool = tf.boolean_mask(grid_next_pool, mask), tf.boolean_mask(direction_pool, mask)
vel_pool = tf.boolean_mask(vel_list, mask)
pick_idx = tf.argmax(direction_pool)
grid_current = grid_next_pool[pick_idx]
place_predict, _ = self.model.localization_model(self.model.weights_A, grid_current, self.model.grid_cell_dim)
# place_point_predict = tf.cast(place_point_predict, tf.float32)
place_pt = place_seq_point[-1] + tf.cast(vel_pool[pick_idx], tf.float32)
place_seq = tf.concat([place_seq, tf.expand_dims(place_predict, axis=0)], axis=0)
place_seq_point = tf.concat([place_seq_point, tf.expand_dims(place_pt, axis=0)], axis=0)
return tf.add(step, 1), grid_current, place_seq, place_seq_point, place_max, grid_code_list
_, self.grid_current, place_seq, place_seq_point, self.place_max, self.grid_code_list = tf.while_loop(cond, body, [step, grid_start, place_seq, place_seq_point, place_max, grid_code_list],
shape_invariants=[step.get_shape(), grid_start.get_shape(),
tf.TensorShape([None, self.model.num_interval, self.model.num_interval, self.model.num_interval]),
tf.TensorShape([None, 3]),
tf.TensorShape([None, num_vel * (num_step + 1)]),
tf.TensorShape([None, num_vel * (num_step + 1), self.model.grid_cell_dim])])
self.place_seq, self.place_seq_point = place_seq, place_seq_point
def perform_path_planning(planning_model, sess, num_test=1000, max_step=40,
output_dir=None, test_dir_name='test_path_planning'):
output_dir = os.path.join(output_dir, test_dir_name)
if tf.gfile.Exists(output_dir):
tf.gfile.DeleteRecursively(output_dir)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
success = 0
success_step = 0
nbin = 4
nvel = np.zeros(shape=nbin+1)
count = np.zeros(shape=nbin+1)
a = 0.1
for tt in range(num_test):
# Sample destination and starting point
target_value = np.random.choice(planning_model.model.num_interval - 4, [100, 3]) + 2
start_value = np.random.choice(planning_model.model.num_interval - 4, [100, 3]) + 2
select_idx = np.where(np.sqrt(np.sum((target_value - start_value) ** 2, axis=1)) > 20)
target_value, start_value = target_value[select_idx[0][0]], start_value[select_idx[0][0]]
# Do path planning
feed_dict = {planning_model.start: start_value, planning_model.target: target_value}
place_seq_value, place_seq_point_value, grid_next_pool, grid_list = sess.run([planning_model.place_seq, planning_model.place_seq_point, planning_model.grid_next_pool, planning_model.grid_code_list], feed_dict=feed_dict)
if len(place_seq_value) < max_step:
success = success + 1
success_step = success_step + len(place_seq_value)
vel_seq = np.diff(place_seq_point_value, axis=0)
vel_seq = np.sqrt(np.sum(np.square(vel_seq), axis=1))
nseq = len(vel_seq)
bin_sz = int(np.floor(nseq / nbin))
for i in range(nbin):
nvel[i] = nvel[i] + np.sum(vel_seq[i * bin_sz: max((i+1) * bin_sz, nseq)])
count[i] = count[i] + max((i+1) * bin_sz, nseq) - i * bin_sz
nvel[-1] = nvel[-1] + vel_seq[nseq-1]
count[-1] = count[-1] + 1
if tt < 100:
draw_3D_path_to_target(planning_model.model.num_interval, place_seq_point_value, target_value)
plt.savefig(os.path.join(output_dir, 'test%02d.png' % tt))
plt.close()
nvel = nvel / count
success_pro = success / float(num_test)
success_step = success_step / float(success)
print(nvel)
print('Proportion of success %02f, average success step %02f' % (success_pro, success_step))
return success_pro, success_step
def main(_):
parser = argparse.ArgumentParser()
# training parameters
parser.add_argument('--lr', type=float, default=0.05, help='Initial learning rate for descriptor')
parser.add_argument('--beta1', type=float, default=0.9, help='Beta1 in Adam optimizer')
# simulated data parameters
parser.add_argument('--to_use_3D_map', type=bool, default=True, help='to use 3D map or not')
parser.add_argument('--place_size', type=float, default=1.0, help='Size of the square place')
parser.add_argument('--max_vel1', type=float, default=39, help='maximum of velocity in loss1')
parser.add_argument('--min_vel1', type=float, default=1, help='minimum of velocity in loss1')
parser.add_argument('--max_vel2', type=float, default=3, help='maximum of velocity in loss2')
parser.add_argument('--min_vel2', type=float, default=1, help='minimum of velocity in loss2')
parser.add_argument('--sigma', metavar='N', type=float, nargs='+', default=[0.3], help='sd of gaussian kernel')
parser.add_argument('--num_data', type=int, default=30000, help='Number of simulated data points')
# model parameters
parser.add_argument('--place_dim', type=int, default=64000, help='Dimensions of place, should be N^3 or N^2')
parser.add_argument('--num_group', type=int, default=8, help='Number of groups of grid cells')
parser.add_argument('--block_size', type=int, default=8, help='Size of each block')
parser.add_argument('--lamda', type=float, default=0.1, help='Hyper parameter to balance two loss terms')
parser.add_argument('--lamda2', type=float, default=1, help='Hyper parameter to balance two loss terms')
parser.add_argument('--motion_type', type=str, default='continuous', help='True if in testing mode')
parser.add_argument('--num_step', type=int, default=1, help='Number of steps in path integral')
parser.add_argument('--GandE', type=float, default=0, help='1: Gaussian kernel; 0: Exponential kernel')
parser.add_argument('--save_memory', type=bool, default=True, help='True if in testing mode')
# planning parameters
parser.add_argument('--num_test', type=int, default=30, help='Maximum number of steps')
parser.add_argument('--num_dir', type=int, default=90, help='number of directions to search')
parser.add_argument('--planning_step', type=int, default=1, help='Maximum number of steps') #10
parser.add_argument('--max_step', type=int, default=80, help='Maximum number of steps')
parser.add_argument('--max_err', type=float, default=None, help='')
# utils
parser.add_argument('--training_output_dir', type=str, default='training_result',
help='Checkpoint path to load the model')
parser.add_argument('--testing_output_dir', type=str, default='testing_result',
help='The output directory for saving testing results')
parser.add_argument('--ckpt', type=str, default='model.ckpt-7999', help='Checkpoint name to load')
parser.add_argument('--M_file', type=str, default='M.npy', help='Estimated M DILE')
parser.add_argument('--test_dir_name', type=str, default='test_path_planning', help='name of folder for output')
parser.add_argument('--gpu', type=str, default='0', help='Which gpu to use')
FLAGS = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu
model = GridCell_multidir_3d(FLAGS)
planning_model = Path_planning_3D(model, FLAGS.max_step)
planning_model.path_planning(FLAGS.planning_step, FLAGS.num_dir)
with tf.Session() as sess:
ckpt_file = os.path.join(FLAGS.training_output_dir, 'exp_model', FLAGS.ckpt)
# Load checkpoint
assert FLAGS.ckpt is not None, 'no checkpoint provided.'
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
print('Loading checkpoint {}.'.format(ckpt_file))
saver.restore(sess, ckpt_file)
print("Testing.... please check folder %s " % os.path.join(FLAGS.testing_output_dir, FLAGS.test_dir_name))
perform_path_planning(planning_model, sess, num_test=FLAGS.num_test, max_step=FLAGS.max_step,
output_dir=FLAGS.testing_output_dir, test_dir_name=FLAGS.test_dir_name)
if __name__ == '__main__':
tf.app.run()
``` |
{
"source": "Jianwen-Xu/NLoN-PY",
"score": 3
} |
#### File: NLoN-PY/nlon_py/model.py
```python
from time import time
from joblib.logger import PrintTime
import matplotlib.pyplot as plt
import numpy as np
from numpy.core.fromnumeric import shape
import pandas as pd
import seaborn as sns
# explicitly require this experimental feature
from sklearn.experimental import enable_halving_search_cv
from sklearn.feature_selection import SelectKBest, chi2, f_classif
from sklearn.metrics import (ConfusionMatrixDisplay, auc,
classification_report, confusion_matrix, f1_score,
plot_roc_curve, roc_auc_score, roc_curve)
from sklearn.model_selection import (HalvingGridSearchCV,
StratifiedShuffleSplit, cross_val_score,
cross_validate, train_test_split)
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from glmnet import LogitNet
from xgboost import XGBClassifier
from nlon_py.data.make_data import get_category_dict
from nlon_py.features import NLoNFeatures
names = ["Naive Bayes", "Nearest Neighbors", "SVM", "glmnet", "XGB"]
classifiers = [GaussianNB(),
KNeighborsClassifier(),
SVC(kernel='rbf', gamma=0.01, C=10, probability=True, random_state=0),
LogitNet(),
XGBClassifier()]
dict_name_classifier = dict(zip(names, classifiers))
def NLoNModel(X, y, features='C3_FE', model_name='SVM', stand=True, kbest=True, n_classes=7):
if model_name in dict_name_classifier:
clf = dict_name_classifier[model_name]
else:
raise RuntimeError('Param model_name should be in ' + names.__str__())
# X_train, X_test, y_train, y_test = train_test_split(
# X, y, test_size=0.4, random_state=0, stratify=y)
pipeline_steps = []
if stand:
stand_scaler = StandardScaler()
pipeline_steps.append(('standardscaler', stand_scaler))
if kbest:
anova_filter = SelectKBest(f_classif, k=100)
pipeline_steps.append(('selectkbest', anova_filter))
pipeline_steps.append(('clf', clf))
nlon_clf = Pipeline(steps=pipeline_steps)
# print(nlon_clf)
# X_train = NLoNFeatures.fit_transform(X_train, feature_type=features)
# nlon_clf.fit(X_train, y_train)
# X_test = NLoNFeatures.transform(X_test, feature_type=features)
# score = nlon_clf.score(X_test, y_test)
# y_pred = nlon_clf.predict(X_test)
# y_score = nlon_clf.predict_proba(X_test)
# f1 = f1_score(y_test, y_pred, average='macro')
# if n_classes > 2:
# auc = roc_auc_score(y_test, y_score, multi_class='ovr')
# else:
# auc = roc_auc_score(y_test,y_score[:,1])
# print(f'{model_name}: {score:.2f} accuracy')
# print(f'F1: {f1:.3f}, AUC: {auc:.3f}')
# if n_classes > 2:
# plot_multiclass_roc(nlon_clf, X_test, y_test, n_classes)
# else:
# X = NLoNFeatures.transform(X, feature_type=features)
# plot_twoclass_roc(nlon_clf, X, y, cv=10)
# plot_confusion_matrix(y_test, y_pred, n_classes)
return nlon_clf
def SearchParams_SVM(X, y):
C_range = [0.01, 0.1, 1, 10, 100]
Gamma = [1e-2, 1e-3, 1e-4]
param_grid = [{'svc__kernel': ['rbf'], 'svc__gamma': Gamma, 'svc__C': C_range},
{'svc__kernel': ['linear'], 'svc__C': C_range}]
stand_scaler = StandardScaler()
anova_filter = SelectKBest(f_classif, k=100)
clf = SVC(probability=True)
svm_pipeline = make_pipeline(stand_scaler, anova_filter, clf)
search = HalvingGridSearchCV(
svm_pipeline, param_grid, random_state=0, scoring='roc_auc_ovr')
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.4, random_state=0)
X_train = NLoNFeatures.fit_transform(X_train)
search.fit(X_train, y_train)
print("Best parameters set found on development set:")
print(search.best_params_)
print()
print("Details of parameters:")
means = search.cv_results_['mean_test_score']
stds = search.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, search.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
X_test = NLoNFeatures.transform(X_test)
y_pred = search.predict(X_test)
y_score = search.predict_proba(X_test)
f1 = f1_score(y_test, y_pred, average='weighted')
auc = roc_auc_score(y_test, y_score, multi_class='ovr')
print(
f'Best parameters {search.best_params_} with AUC {auc:.3f}, F1 {f1:.3f}')
def CompareModels(X, y, cv=None):
anova_filter = SelectKBest(f_classif, k=100)
if cv is None:
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.4, random_state=0)
X_train = NLoNFeatures.fit_transform(X_train)
X_test = NLoNFeatures.transform(X_test)
for key, clf in dict_name_classifier.items():
nlon_clf = make_pipeline(anova_filter, clf)
t0 = time()
print(f'{key} start...')
nlon_clf.fit(X_train, y_train)
print(f'{key} train done in {(time() - t0):0.3f}s')
y_pred = nlon_clf.predict(X_test)
y_score = nlon_clf.predict_proba(X_test)
f1 = f1_score(y_test, y_pred, average='weighted')
auc = roc_auc_score(y_test, y_score, multi_class='ovr')
print(f"{key} test done in {(time() - t0):0.3f}s")
print(f'{key}: AUC {auc:.3f}, F1 {f1:.3f}')
print()
else:
X = NLoNFeatures.transform(X)
for key, clf in dict_name_classifier.items():
nlon_clf = make_pipeline(anova_filter, clf)
scores = cross_validate(
nlon_clf, X, y, cv=cv, scoring=('roc_auc_ovr', 'f1_micro'))
auc = scores['test_roc_auc_ovr']
f1 = scores['test_f1_micro']
fit_time = scores['fit_time']
score_time = scores['score_time']
print()
print(f'{key}')
print(f'AUC: mean {auc.mean():.3f}, std {auc.std():.3f}')
print(f'F1: mean {f1.mean():.3f}, std {f1.std():.3f}')
print(
f'Fit time: {fit_time.mean():.3f}, Score time: {score_time.mean():.3f}')
print()
def ValidateModel(model, X, y, isOri=False, feature_type='C3_FE', cv=10):
X = NLoNFeatures.transform(X, feature_type)
if isOri:
scores = cross_validate(
model, X, y, cv=cv, scoring=('roc_auc','precision','recall','f1_macro'))
auc = scores['test_roc_auc']
prc=scores['test_precision']
rec=scores['test_recall']
f1 = scores['test_f1_macro']
else:
scores = cross_validate(
model, X, y, cv=cv, scoring=('roc_auc_ovr','precision_macro','recall_macro','f1_macro'))
auc = scores['test_roc_auc_ovr']
prc=scores['test_precision_macro']
rec=scores['test_recall_macro']
f1 = scores['test_f1_macro']
# print('10-fold cross-validation')
print(f'AUC: mean {auc.mean():.3f}, std {auc.std():.3f}')
print(f'F1: mean {f1.mean():.3f}, std {f1.std():.3f}')
# print(f'precision: mean {prc.mean():.3f}, std {prc.std():.3f}')
# print(f'recall: mean {rec.mean():.3f}, std {rec.std():.3f}')
def NLoNPredict(clf, X, features='C3_FE'):
result = clf.predict(NLoNFeatures.transform(X, feature_type=features))
category_dict = get_category_dict()
result = np.vectorize(category_dict.get)(result)
return list(result)
def plot_multiclass_roc(clf, X_test, y_test, n_classes, figsize=(11, 7)):
y_score = clf.decision_function(X_test)
# structures
fpr = dict()
tpr = dict()
roc_auc = dict()
# calculate dummies once
y_test_dummies = pd.get_dummies(y_test, drop_first=False).values
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test_dummies[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# roc for each class
fig, ax = plt.subplots(figsize=figsize)
ax.plot([0, 1], [0, 1], 'k--')
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.05])
ax.set_xlabel('False Positive Rate')
ax.set_ylabel('True Positive Rate')
ax.set_title('Receiver operating characteristic (ROC)')
category_dict = get_category_dict()
for i in range(n_classes):
ax.plot(
fpr[i], tpr[i], label=f'ROC curve (AUC = {roc_auc[i]:.2f}) for label {category_dict[i+1]}')
ax.legend(loc="best")
ax.grid(alpha=.4)
sns.despine()
plt.show()
plt.savefig('roc_curve_multi.png')
def plot_twoclass_roc(clf, X, y, cv=None):
if cv is None:
plot_roc_curve(clf, X, y, name='NLoN for two class')
plt.show()
plt.savefig('roc_curve.png')
else:
cv = StratifiedShuffleSplit(n_splits=cv, random_state=0)
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
fig, ax = plt.subplots()
for i, (train_index, test_index) in enumerate(cv.split(X, y)):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
clf.fit(X_train, y_train)
viz = plot_roc_curve(
clf, X_test, y_test, name='ROC fold {}'.format(i), alpha=0.3, lw=1, ax=ax)
interp_tpr = np.interp(mean_fpr, viz.fpr, viz.tpr)
interp_tpr[0] = 0.0
tprs.append(interp_tpr)
aucs.append(viz.roc_auc)
ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Chance', alpha=.8)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
ax.plot(mean_fpr, mean_tpr, color='b',
label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (
mean_auc, std_auc),
lw=2, alpha=.8)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
label=r'$\pm$ 1 std. dev.')
ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05],
title="Receiver operating characteristic (ROC)")
ax.legend(loc="lower right")
plt.show()
plt.savefig('roc_curve_cv.png')
def plot_confusion_matrix(y_test, y_pred, n_classes):
category_dict = get_category_dict()
labels_name = [v for k, v in category_dict.items() if k <= n_classes]
cm = confusion_matrix(y_test, y_pred)
cm_display = ConfusionMatrixDisplay(cm,display_labels=labels_name)
cm_display.plot()
plt.show()
plt.savefig('confusion_matrix_multi.png')
print('confusion_matrix printed.')
``` |
{
"source": "Jianx-Gao/OpenMMLab-Edu",
"score": 2
} |
#### File: OpenMMLab-Edu/demo/det_demo.py
```python
from base import *
from MMEdu import MMDetection
def only_infer_demo():
img = 'car_plate.png'
model = MMDetection(backbone="FasterRCNN", dataset_path='../dataset/det/coco')
model.inference(infer_data=img, show=True, rpn_threshold=0.7, rcnn_threshold=0.7)
def continue_train_demo():
model = MMDetection(backbone='FasterRCNN')
model.num_classes = 1
model.load_dataset(path='../dataset/det/coco')
model.save_fold = "../checkpoints/det_model/plate"
model.train(epochs=3, checkpoint='../checkpoints/det_model/plate/latest.pth', validate=True, Frozen_stages=1)
def normal_train_demo():
model = MMDetection(backbone='FasterRCNN')
model.num_classes = 1
model.load_dataset(path='../dataset/det/coco')
model.save_fold = "../checkpoints/det_model/plate"
model.train(epochs=100, validate=True, Frozen_stages=1)
if __name__ == "__main__":
only_infer_demo()
# continue_train_demo()
# normal_train_demo()
```
#### File: OpenMMLab-Edu/demo/gen_singan.py
```python
from base import *
from MMEdu import MMGeneration
def only_infer_demo():
img = 'demo/balloons.png'
model = MMGeneration(backbone="SinGAN")
model.inference(is_trained=False, infer_data=img, save_path = "../results/gen_result.jpg")
def normal_train_demo():
model = MMGeneration(backbone='SinGAN')
model.load_dataset(path='../dataset/gen/balloons.png')
model.save_fold = "../checkpoints/gen"
model.train(epoch=90, validate=True, inverse=True)
# model.inference(is_trained=True,
# pretrain_model = 'checkpoints/gen_model/ckpt/shoes2edges/latest.pth',
# infer_data= 'demo/184_AB.jpg',
# save_path = "results/gen_result.jpg")
def continue_train_demo():
model = MMGeneration(backbone='SinGAN')
model.load_dataset(path='balloons.png')
model.save_fold = "../checkpoints/gen"
model.train(epoch=15, checkpoint='../checkpoints/gen/singan_balloons_20210406_191047-8fcd94cf.pth', validate=True, inverse=True)
if __name__ == "__main__":
only_infer_demo()
# normal_train_demo()
# continue_train_demo()
```
#### File: OpenMMLab-Edu/demo/seg_demo.py
```python
from MMEdu import MMSegmentation
def only_infer_demo():
img = r'Image_11L.png'
model = MMSegmentation(backbone='UNet')
result = model.inference(image=img)
print(result)
def simple_train_demo():
model = MMSegmentation()
model.load_dataset(path='data/')
model.train(epochs=60000, validate=False)
    # The following code can be used to test the performance of the trained model
#model.inference(is_trained=True, image=r'Image_11L.png')
def normal_train_demo():
model = MMSegmentation(backbone='UNet')
model.num_classes = 19
model.save = 'new_checkpoints/'
model.load_dataset(path='data/')
model.train(epochs=40000, validate=False)
    # The following code can be used to test the performance of the trained model
# model.inference(is_trained=True, pretrain_model='new_checkpoints/latest.pth', image='fruit_dataset/test/banana_test_1.jpg')
def continue_train_demo():
model = MMSegmentation(backbone='UNet')
model.num_classes = 19
model.save = 'new_checkpoints/'
model.load_dataset(path='')
model.train(epochs=10, validate=False, checkpoint='checkpoints/latest.pth')
if __name__ == "__main__":
# only_infer_demo()
simple_train_demo()
# normal_train_demo()
# continue_train_demo()
``` |
{
"source": "Jianxiang-Wang/Pytorch-code-for-classification-of-public-dataset-or-custom-dataset",
"score": 2
} |
#### File: Jianxiang-Wang/Pytorch-code-for-classification-of-public-dataset-or-custom-dataset/dataset.py
```python
import torch
import torchvision
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader
#using own dataset or not
own_dataset = False
train_data_folder = './Dataset/train/'
test_data_folder = './Dataset/test/'
val_data_folder = './Dataset/val/'
resize_weight = 32
resize_height = 32
# Data_transform
print('==> Preparing data..')
transform_train = transforms.Compose([
transforms.Resize((resize_weight,resize_height)),
#transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.Grayscale(1), #for grayscale
transforms.ToTensor(),
#transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
transforms.Normalize((0.4914,), (0.2023,))
])
transform_test = transforms.Compose([
transforms.Resize((resize_weight,resize_height)),
transforms.Grayscale(1), #for grayscale
transforms.ToTensor(),
#transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
transforms.Normalize((0.4914, ), (0.2023,))
])
transform_val = transforms.Compose([
transforms.Resize((resize_weight,resize_height)),
transforms.Grayscale(1), #for grayscale
transforms.ToTensor(),
#transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
transforms.Normalize((0.4914, ), (0.2023,))
])
def load():
if (own_dataset):
print('using custom dataset')
trainset = ImageFolder(train_data_folder, transform_train)
testset = ImageFolder(test_data_folder, transform_test)
valset = ImageFolder(val_data_folder, transform_val)
trainloader = DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)
testloader = DataLoader(testset, batch_size=128, shuffle=False, num_workers=2)
valloader = DataLoader(valset, batch_size=128, shuffle=False, num_workers=2)
else:
print('using public dataset')
trainset = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=transform_train)
testset = torchvision.datasets.CIFAR100(root='./data', train=False, download=True, transform=transform_test)
valset = torchvision.datasets.CIFAR100(root='./data', train=False, download=True, transform=transform_val)
trainloader = DataLoader(trainset, batch_size=256, shuffle=True, num_workers=2)
testloader = DataLoader(testset, batch_size=256, shuffle=False, num_workers=2)
valloader = DataLoader(valset, batch_size=256, shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
return trainloader, testloader, valloader, classes
``` |
{
"source": "Jianxiang-Wang/Pytorch-code-for-time-series-classification",
"score": 3
} |
#### File: Jianxiang-Wang/Pytorch-code-for-time-series-classification/validation.py
```python
from sklearn import metrics
import torch
from models import *
import torch.backends.cudnn as cudnn
import seaborn as sns
import matplotlib.pyplot as plt
from dataset import load
#define the net
device = 'cuda' if torch.cuda.is_available() else 'cpu'
net = LSTM(3, 10, 2, 3)
net = net.to(device)
if device == 'cuda':
net = torch.nn.DataParallel(net)
cudnn.benchmark = True
net.load_state_dict(torch.load('./checkpoint/ckpt.pth'))
net = net.module
#loading data
_, _, valloader, classes = load()
def validation():
print(net.classifier)
#print(net)
net.eval()
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(valloader):
inputs, targets = inputs.to(device).float(), targets.to(device)
inputs = inputs.view(-1,300,3)
outputs = net(inputs)
# Confusion Matrix
print("Confusion Matrix...")
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
Accuracy = 100.*correct/total
predicted = predicted.cpu().numpy()
targets = targets.data.cpu().numpy()
cm = metrics.confusion_matrix(targets, predicted)
print(cm)
print('Accuracy=',Accuracy,"%")
figure = plt.figure(figsize=(8, 8))
sns.heatmap(cm, annot=True, cmap='Blues')
plt.ylim(0, 10)
plt.xlabel('Predicted labels')
plt.ylabel('True labels')
plt.show()
if __name__=='__main__':
validation()
``` |
{
"source": "jianxiaoguo/controller",
"score": 2
} |
#### File: apps_extra/social_core/actions.py
```python
from urllib.parse import quote
from social_core.utils import sanitize_redirect, user_is_authenticated, \
user_is_active, partial_pipeline_data, setting_url
def do_auth(backend, redirect_name='next'):
# Save any defined next value into session
data = backend.strategy.request_data(merge=False)
# Save extra data into session.
for field_name in backend.setting('FIELDS_STORED_IN_SESSION', []):
if field_name in data:
backend.strategy.session_set(field_name, data[field_name])
else:
backend.strategy.session_set(field_name, None)
# uri = None
if redirect_name in data:
# Check and sanitize a user-defined GET/POST next field value
redirect_uri = data[redirect_name]
if backend.setting('SANITIZE_REDIRECTS', True):
allowed_hosts = backend.setting('ALLOWED_REDIRECT_HOSTS', []) + \
[backend.strategy.request_host()]
redirect_uri = sanitize_redirect(allowed_hosts, redirect_uri)
backend.strategy.session_set(
redirect_name,
redirect_uri or backend.setting('LOGIN_REDIRECT_URL')
)
response = backend.start()
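    # backend.start() returns a redirect to the provider; parse its query string and cache the
    # OAuth `state` under the client-supplied `key` for 10 minutes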
url = response.url.split('?')[1]
def form2json(form_data):
from urllib.parse import parse_qs, urlparse
query = urlparse('?' + form_data).query
params = parse_qs(query)
return {key: params[key][0] for key in params}
from django.core.cache import cache
cache.set("oidc_key_" + data.get('key', ''), form2json(url).get('state'), 60 * 10)
return response
def do_complete(backend, login, user=None, redirect_name='next',
*args, **kwargs):
data = backend.strategy.request_data()
is_authenticated = user_is_authenticated(user)
user = user if is_authenticated else None
partial = partial_pipeline_data(backend, user, *args, **kwargs)
if partial:
user = backend.continue_pipeline(partial)
# clean partial data after usage
backend.strategy.clean_partial_pipeline(partial.token)
else:
user = backend.complete(user=user, *args, **kwargs)
# pop redirect value before the session is trashed on login(), but after
# the pipeline so that the pipeline can change the redirect if needed
redirect_value = backend.strategy.session_get(redirect_name, '') or \
data.get(redirect_name, '')
# check if the output value is something else than a user and just
# return it to the client
user_model = backend.strategy.storage.user.user_model()
if user and not isinstance(user, user_model):
return user
if is_authenticated:
if not user:
url = setting_url(backend, redirect_value, 'LOGIN_REDIRECT_URL')
else:
url = setting_url(backend, redirect_value,
'NEW_ASSOCIATION_REDIRECT_URL',
'LOGIN_REDIRECT_URL')
elif user:
if user_is_active(user):
# catch is_new/social_user in case login() resets the instance
is_new = getattr(user, 'is_new', False)
social_user = user.social_user
login(backend, user, social_user)
# store last login backend name in session
backend.strategy.session_set('social_auth_last_login_backend',
social_user.provider)
if is_new:
url = setting_url(backend,
'NEW_USER_REDIRECT_URL',
redirect_value,
'LOGIN_REDIRECT_URL')
else:
url = setting_url(backend, redirect_value,
'LOGIN_REDIRECT_URL')
else:
if backend.setting('INACTIVE_USER_LOGIN', False):
social_user = user.social_user
login(backend, user, social_user)
url = setting_url(backend, 'INACTIVE_USER_URL', 'LOGIN_ERROR_URL',
'LOGIN_URL')
else:
url = setting_url(backend, 'LOGIN_ERROR_URL', 'LOGIN_URL')
if redirect_value and redirect_value != url:
redirect_value = quote(redirect_value)
url += ('&' if '?' in url else '?') + \
'{0}={1}'.format(redirect_name, redirect_value)
if backend.setting('SANITIZE_REDIRECTS', True):
allowed_hosts = backend.setting('ALLOWED_REDIRECT_HOSTS', []) + \
[backend.strategy.request_host()]
url = sanitize_redirect(allowed_hosts, url) or \
backend.setting('LOGIN_REDIRECT_URL')
response = backend.strategy.redirect(url)
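    # expose the username and OIDC id_token of the latest 'drycc' social login as cookies
    # (expiring with the token) and cache them under the request's OAuth state for 10 minutes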
social_auth = user.social_auth.filter(provider='drycc').\
order_by('-modified').last()
response.set_cookie("name", user.username,
max_age=social_auth.extra_data.get('expires_in'))
response.set_cookie("id_token", social_auth.extra_data.get('id_token'),
max_age=social_auth.extra_data.get('expires_in'))
from django.core.cache import cache
cache.set("oidc_state_" + data.get('state'),
{'token': social_auth.extra_data.get('id_token', 'fail'),
'username': user.username},
60 * 10)
return response
```
#### File: rootfs/api/authentication.py
```python
import logging
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.cache import cache
from django.utils.translation import gettext_lazy as _
from rest_framework import authentication
from rest_framework.authentication import TokenAuthentication, \
get_authorization_header
from rest_framework import exceptions
from api.oauth import OAuthManager
logger = logging.getLogger(__name__)
class AnonymousAuthentication(authentication.BaseAuthentication):
def authenticate(self, request):
"""
Authenticate the request for anyone!
"""
return AnonymousUser(), None
class DryccAuthentication(TokenAuthentication):
def authenticate(self, request):
if 'Drycc' in request.META.get('HTTP_USER_AGENT', ''):
auth = get_authorization_header(request).split()
if not auth or auth[0].lower() != self.keyword.lower().encode():
return None
if len(auth) == 1:
msg = _('Invalid token header. No credentials provided.')
raise exceptions.AuthenticationFailed(msg)
elif len(auth) > 2:
msg = _('Invalid token header. Token string should not contain spaces.') # noqa
raise exceptions.AuthenticationFailed(msg)
try:
token = auth[1].decode()
except UnicodeError:
msg = _('Invalid token header. Token string should not contain invalid characters.') # noqa
raise exceptions.AuthenticationFailed(msg)
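            # cache the resolved user keyed by the raw token so repeated requests skip the OAuth provider lookup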
return cache.get_or_set(
token, lambda: self._get_user(token), settings.OAUTH_CACHE_USER_TIME), None # noqa
return super(DryccAuthentication, self).authenticate(request) # noqa
@staticmethod
def _get_user(key):
from api import serializers
try:
user_info = OAuthManager().get_user_by_token(key)
if not user_info.get('email'):
user_info['email'] = OAuthManager().get_email_by_token(key)
return serializers.UserSerializer.update_or_create(user_info)
except Exception as e:
logger.info(e)
raise exceptions.AuthenticationFailed(_('Verify token fail.'))
```
#### File: rootfs/api/permissions.py
```python
import base64
from rest_framework import permissions
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from api import manager
from api.models import Blocklist, App
def get_app_status(app):
blocklist = Blocklist.get_blocklist(app)
if blocklist:
return False, blocklist.remark
if settings.WORKFLOW_MANAGER_URL is not None:
status = manager.User().get_status(app.owner.pk)
if not status["is_active"]:
return False, status["message"]
return True, None
def has_app_permission(request, obj):
if isinstance(obj, App) or hasattr(obj, 'app'):
app = obj if isinstance(obj, App) else obj.app
is_ok, message = get_app_status(app)
if is_ok:
if request.user.is_superuser:
return True, None
elif app.owner == request.user:
return True, None
elif request.user.is_staff or request.user.has_perm('use_app', app):
if request.method != 'DELETE':
return True, None
else:
return False, "User does not have permission to delete"
else:
return is_ok, message
return False, "App object does not exist or does not have permission."
class IsAnonymous(permissions.BasePermission):
"""
View permission to allow anonymous users.
"""
def has_permission(self, request, view):
"""
Return `True` if permission is granted, `False` otherwise.
"""
return type(request.user) is AnonymousUser
class IsOwner(permissions.BasePermission):
"""
Object-level permission to allow only owners of an object to access it.
Assumes the model instance has an `owner` attribute.
"""
def has_object_permission(self, request, view, obj):
if hasattr(obj, 'owner'):
return obj.owner == request.user
else:
return False
class IsOwnerOrAdmin(permissions.BasePermission):
"""
Object-level permission to allow only owners of an object or administrators to access it.
Assumes the model instance has an `owner` attribute.
"""
def has_object_permission(self, request, view, obj):
if request.user.is_superuser:
return True
if hasattr(obj, 'owner'):
return obj.owner == request.user
else:
return False
class IsAppUser(permissions.BasePermission):
"""
Object-level permission to allow owners or collaborators to access
an app-related model.
"""
def has_object_permission(self, request, view, obj):
return has_app_permission(request, obj)[0]
class IsAdmin(permissions.BasePermission):
"""
View permission to allow only admins.
"""
def has_permission(self, request, view):
"""
Return `True` if permission is granted, `False` otherwise.
"""
return request.user.is_superuser
class IsAdminOrSafeMethod(permissions.BasePermission):
"""
View permission to allow only admins to use unsafe methods
including POST, PUT, DELETE.
    This allows read-only (safe) methods for any user.
"""
def has_permission(self, request, view):
"""
Return `True` if permission is granted, `False` otherwise.
"""
return request.method in permissions.SAFE_METHODS or request.user.is_superuser
class HasBuilderAuth(permissions.BasePermission):
"""
View permission to allow builder to perform actions
with a special HTTP header
"""
def has_permission(self, request, view):
"""
Return `True` if permission is granted, `False` otherwise.
"""
auth_header = request.environ.get('HTTP_X_DRYCC_BUILDER_AUTH')
if not auth_header:
return False
return auth_header == settings.BUILDER_KEY
class IsWorkflowManager(permissions.BasePermission):
"""
View permission to allow workflow manager to perform actions
with a special HTTP header
"""
def has_permission(self, request, view):
if request.META.get("HTTP_AUTHORIZATION"):
token = request.META.get(
"HTTP_AUTHORIZATION").split(" ")[1].encode("utf8")
access_key, secret_key = base64.b85decode(token).decode("utf8").split(":")
if settings.WORKFLOW_MANAGER_ACCESS_KEY == access_key:
if settings.WORKFLOW_MANAGER_SECRET_KEY == secret_key:
return True
return False
```
#### File: api/tests/test_domain.py
```python
from unittest import mock
from django.contrib.auth import get_user_model
from django.core.cache import cache
from django.conf import settings
from rest_framework.authtoken.models import Token
from api.models import Domain
from api.tests import DryccTestCase
import idna
User = get_user_model()
class DomainTest(DryccTestCase):
"""Tests creation of domains"""
fixtures = ['tests.json']
def setUp(self):
self.user = User.objects.get(username='autotest')
self.token = Token.objects.get(user=self.user).key
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
self.app_id = self.create_app()
def tearDown(self):
# make sure every test has a clean slate for k8s mocking
cache.clear()
def test_response_data(self):
"""Test that the serialized response contains only relevant data."""
app_id = self.create_app()
response = self.client.post(
'/v2/apps/{}/domains'.format(app_id),
{'domain': 'test-domain.example.com'}
)
self.assertEqual(response.status_code, 201, response.data)
for key in response.data:
self.assertIn(key, ['uuid', 'owner', 'created', 'updated', 'app', 'domain'])
expected = {
'owner': self.user.username,
'app': app_id,
'domain': 'test-domain.example.com'
}
self.assertDictContainsSubset(expected, response.data)
def test_strip_dot(self):
"""Test that a dot on the right side of the domain gets stripped"""
domain = 'autotest.127.0.0.1.xip.io.'
msg = "failed on '{}'".format(domain)
url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
# Create
response = self.client.post(url, {'domain': domain})
self.assertEqual(response.status_code, 201, msg)
# Fetch
domain = 'autotest.127.0.0.1.xip.io' # stripped version
url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
response = self.client.get(url)
expected = [data['domain'] for data in response.data['results']]
self.assertEqual(
sorted(["%s.%s" % (self.app_id, settings.PLATFORM_DOMAIN), domain]),
expected, msg)
def test_manage_idn_domain(self):
url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
test_domains = [
'ドメイン.テスト',
'xn--eckwd4c7c.xn--zckzah',
'xn--80ahd1agd.ru',
'домена.ru',
'*.домена.испытание',
'täst.königsgäßchen.de',
'xn--tst-qla.xn--knigsgsschen-lcb0w.de',
'ドメイン.xn--zckzah',
'xn--eckwd4c7c.テスト',
'täst.xn--knigsgsschen-lcb0w.de',
'*.xn--tst-qla.königsgäßchen.de'
]
for domain in test_domains:
msg = "failed on '{}'".format(domain)
# Generate ACE and Unicode variant for domain
if domain.startswith("*."):
ace_domain = "*." + idna.encode(domain[2:]).decode("utf-8", "strict")
unicode_domain = "*." + idna.decode(ace_domain[2:])
else:
ace_domain = idna.encode(domain).decode("utf-8", "strict")
unicode_domain = idna.decode(ace_domain)
# Create
response = self.client.post(url, {'domain': domain})
self.assertEqual(response.status_code, 201, msg)
# Fetch
url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
response = self.client.get(url)
expected = [data['domain'] for data in response.data['results']]
self.assertEqual(
sorted(["%s.%s" % (self.app_id, settings.PLATFORM_DOMAIN), ace_domain]),
sorted(expected), msg)
# Verify creation failure for same domain with different encoding
if ace_domain != domain:
response = self.client.post(url, {'domain': ace_domain})
self.assertEqual(response.status_code, 400, msg)
# Verify creation failure for same domain with different encoding
if unicode_domain != domain:
response = self.client.post(url, {'domain': unicode_domain})
self.assertEqual(response.status_code, 400, msg)
# Delete
url = '/v2/apps/{app_id}/domains/{hostname}'.format(hostname=domain,
app_id=self.app_id)
response = self.client.delete(url)
self.assertEqual(response.status_code, 204, msg)
# Verify removal
url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
response = self.client.get(url)
self.assertEqual(1, response.data['count'], msg)
# verify only app domain is left
expected = [data['domain'] for data in response.data['results']]
self.assertEqual(
["%s.%s" % (self.app_id, settings.PLATFORM_DOMAIN)],
expected, msg)
# Use different encoding for creating and deleting (ACE)
if ace_domain != domain:
# Create
response = self.client.post(url, {'domain': domain})
self.assertEqual(response.status_code, 201, msg)
# Fetch
url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
response = self.client.get(url)
expected = [data['domain'] for data in response.data['results']]
self.assertEqual(
sorted(["%s.%s" % (self.app_id, settings.PLATFORM_DOMAIN), ace_domain]),
sorted(expected), msg)
# Delete
url = '/v2/apps/{app_id}/domains/{hostname}'.format(hostname=ace_domain,
app_id=self.app_id)
response = self.client.delete(url)
self.assertEqual(response.status_code, 204, msg)
# Verify removal
url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
response = self.client.get(url)
self.assertEqual(1, response.data['count'], msg)
# verify only app domain is left
expected = [data['domain'] for data in response.data['results']]
self.assertEqual(
["%s.%s" % (self.app_id, settings.PLATFORM_DOMAIN)],
expected, msg)
# Use different encoding for creating and deleting (Unicode)
if unicode_domain != domain:
# Create
response = self.client.post(url, {'domain': domain})
self.assertEqual(response.status_code, 201, msg)
# Fetch
url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
response = self.client.get(url)
expected = [data['domain'] for data in response.data['results']]
self.assertEqual(
sorted(["%s.%s" % (self.app_id, settings.PLATFORM_DOMAIN), ace_domain]),
sorted(expected), msg)
# Delete
url = '/v2/apps/{app_id}/domains/{hostname}'.format(hostname=unicode_domain,
app_id=self.app_id)
response = self.client.delete(url)
self.assertEqual(response.status_code, 204, msg)
# Verify removal
url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
response = self.client.get(url)
self.assertEqual(1, response.data['count'], msg)
# verify only app domain is left
expected = [data['domain'] for data in response.data['results']]
self.assertEqual(
["%s.%s" % (self.app_id, settings.PLATFORM_DOMAIN)],
expected, msg)
def test_manage_domain(self):
url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
test_domains = [
'test-domain.example.com',
'django.paas-sandbox',
'django.paas--sandbox',
'domain',
'not.too.loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong',
'3com.com',
'domain1',
'3333.xyz',
'w3.example.com',
'MYDOMAIN.NET',
'autotest.127.0.0.1.xip.io',
'*.drycc.example.com'
]
for domain in test_domains:
msg = "failed on '{}'".format(domain)
# Create
response = self.client.post(url, {'domain': domain})
self.assertEqual(response.status_code, 201, msg)
# Fetch
url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
response = self.client.get(url)
expected = [data['domain'] for data in response.data['results']]
self.assertEqual(
sorted(["%s.%s" % (self.app_id, settings.PLATFORM_DOMAIN), domain]),
sorted(expected), msg)
# Delete
url = '/v2/apps/{app_id}/domains/{hostname}'.format(hostname=domain,
app_id=self.app_id)
response = self.client.delete(url)
self.assertEqual(response.status_code, 204, msg)
# Verify removal
url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
response = self.client.get(url)
self.assertEqual(1, response.data['count'], msg)
# verify only app domain is left
expected = [data['domain'] for data in response.data['results']]
self.assertEqual(
["%s.%s" % (self.app_id, settings.PLATFORM_DOMAIN)],
expected, msg)
def test_delete_domain_does_not_exist(self):
"""Remove a domain that does not exist"""
url = '/v2/apps/{app_id}/domains/{domain}'.format(domain='test-domain.example.com',
app_id=self.app_id)
response = self.client.delete(url)
self.assertEqual(response.status_code, 404)
def test_delete_domain_does_not_remove_latest(self):
"""https://github.com/drycc/drycc/issues/3239"""
url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
test_domains = [
'test-domain.example.com',
'django.paas-sandbox',
]
for domain in test_domains:
response = self.client.post(url, {'domain': domain})
self.assertEqual(response.status_code, 201, response.data)
url = '/v2/apps/{app_id}/domains/{domain}'.format(domain=test_domains[0],
app_id=self.app_id)
response = self.client.delete(url)
self.assertEqual(response.status_code, 204, response.data)
with self.assertRaises(Domain.DoesNotExist):
Domain.objects.get(domain=test_domains[0])
def test_delete_domain_does_not_remove_default(self):
domain = "%s.%s" % (self.app_id, settings.PLATFORM_DOMAIN)
url = '/v2/apps/{app_id}/domains/{domain}'.format(domain=domain,
app_id=self.app_id)
response = self.client.delete(url)
self.assertEqual(response.status_code, 403, response.data)
def test_delete_domain_does_not_remove_others(self):
"""https://github.com/drycc/drycc/issues/3475"""
self.test_delete_domain_does_not_remove_latest()
self.assertEqual(Domain.objects.all().count(), 2)
def test_manage_domain_invalid_app(self):
# Create domain
url = '/v2/apps/{app_id}/domains'.format(app_id="this-app-does-not-exist")
response = self.client.post(url, {'domain': 'test-domain.example.com'})
self.assertEqual(response.status_code, 404)
# verify
url = '/v2/apps/{app_id}/domains'.format(app_id='this-app-does-not-exist')
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_manage_domain_invalid_domain(self):
url = '/v2/apps/{app_id}/domains'.format(app_id=self.app_id)
test_domains = [
'this_is_an.invalid.domain',
'this-is-an.invalid.1',
'django.pa--assandbox',
'too.looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong',
'foo.*.bar.com',
'test.local.drycc.cc',
'*',
'a' * 300,
'.'.join(['a'] * 128)
]
for domain in test_domains:
msg = "failed on \"{}\"".format(domain)
response = self.client.post(url, {'domain': domain})
self.assertEqual(response.status_code, 400, msg)
def test_admin_can_add_domains_to_other_apps(self):
"""If a non-admin user creates an app, an administrator should be able to add
domains to it.
"""
user = User.objects.get(username='autotest2')
token = Token.objects.get(user=user).key
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token)
app_id = self.create_app()
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
url = '/v2/apps/{}/domains'.format(app_id)
response = self.client.post(url, {'domain': 'example.drycc.example.com'})
self.assertEqual(response.status_code, 201, response.data)
def test_unauthorized_user_cannot_modify_domain(self):
"""
An unauthorized user should not be able to modify other domains.
Since an unauthorized user should not know about the application at all, these
requests should return a 404.
"""
app_id = self.create_app()
unauthorized_user = User.objects.get(username='autotest2')
unauthorized_token = Token.objects.get(user=unauthorized_user).key
self.client.credentials(HTTP_AUTHORIZATION='Token ' + unauthorized_token)
url = '/v2/apps/{}/domains'.format(app_id)
response = self.client.post(url, {'domain': 'example.com'})
self.assertEqual(response.status_code, 403)
def test_kubernetes_service_failure(self):
"""
Cause an Exception in kubernetes services
"""
app_id = self.create_app()
# scheduler.svc.update exception
with mock.patch('scheduler.resources.service.Service.update'):
domain = 'foo.com'
url = '/v2/apps/{}/domains'.format(app_id)
response = self.client.post(url, {'domain': domain})
self.assertEqual(response.status_code, 201, response.data)
```
#### File: api/tests/test_tags.py
```python
import json
import requests_mock
from django.core.cache import cache
from django.contrib.auth import get_user_model
from rest_framework.authtoken.models import Token
from api.tests import adapter, DryccTransactionTestCase
User = get_user_model()
@requests_mock.Mocker(real_http=True, adapter=adapter)
class TestTags(DryccTransactionTestCase):
"""Tests setting and updating config values"""
fixtures = ['tests.json']
def setUp(self):
self.user = User.objects.get(username='autotest')
self.token = Token.objects.get(user=self.user).key
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
def tearDown(self):
# make sure every test has a clean slate for k8s mocking
cache.clear()
def test_tags(self, mock_requests):
"""
Test that tags can be set on an application
"""
app_id = self.create_app()
# check default
url = '/v2/apps/{app_id}/config'.format(**locals())
response = self.client.get(url)
self.assertEqual(response.status_code, 200, response.data)
self.assertIn('tags', response.data)
self.assertEqual(response.data['tags'], {})
# set some tags
body = {'tags': json.dumps({'environ': 'dev'})}
response = self.client.post(url, body)
self.assertEqual(response.status_code, 201, response.data)
tags1 = response.data
# check tags again
response = self.client.get(url)
self.assertEqual(response.status_code, 200, response.data)
self.assertIn('tags', response.data)
tags = response.data['tags']
self.assertIn('environ', tags)
self.assertEqual(tags['environ'], 'dev')
# set an additional value
body = {'tags': json.dumps({'rack': '1'})}
response = self.client.post(url, body)
self.assertEqual(response.status_code, 201, response.data)
tags2 = response.data
self.assertNotEqual(tags1['uuid'], tags2['uuid'])
tags = response.data['tags']
self.assertIn('rack', tags)
self.assertEqual(tags['rack'], '1')
self.assertIn('environ', tags)
self.assertEqual(tags['environ'], 'dev')
# read the limit again
response = self.client.get(url)
self.assertEqual(response.status_code, 200, response.data)
tags3 = response.data
self.assertEqual(tags2, tags3)
tags = response.data['tags']
self.assertIn('rack', tags)
self.assertEqual(tags['rack'], '1')
self.assertIn('environ', tags)
self.assertEqual(tags['environ'], 'dev')
# unset a value
body = {'tags': json.dumps({'rack': None})}
response = self.client.post(url, body)
self.assertEqual(response.status_code, 201, response.data)
tags4 = response.data
self.assertNotEqual(tags3['uuid'], tags4['uuid'])
self.assertNotIn('rack', json.dumps(response.data['tags']))
# set valid values
body = {'tags': json.dumps({'kubernetes.io/hostname': '172.17.8.100'})}
response = self.client.post(url, body)
self.assertEqual(response.status_code, 201, response.data)
body = {'tags': json.dumps({'is.valid': 'is-also_valid'})}
response = self.client.post(url, body)
self.assertEqual(response.status_code, 201, response.data)
body = {'tags': json.dumps({'host.the-name.com/is.valid': 'valid'})}
response = self.client.post(url, body)
self.assertEqual(response.status_code, 201, response.data)
body = {'tags': json.dumps({'host.the-name.com/does.no.exist': 'valid'})}
response = self.client.post(url, body)
self.assertContains(
response,
'Addition of host.the-name.com/does.no.exist=valid is the cause',
status_code=400
)
# set invalid values
body = {'tags': json.dumps({'valid': 'in\nvalid'})}
response = self.client.post(url, body)
self.assertEqual(response.status_code, 400, response.data)
body = {'tags': json.dumps({'host.name.com/notvalid-': 'valid'})}
response = self.client.post(url, body)
self.assertEqual(response.status_code, 400, response.data)
body = {'tags': json.dumps({'valid': 'invalid.'})}
response = self.client.post(url, body)
self.assertEqual(response.status_code, 400, response.data)
body = {'tags': json.dumps({'host.name.com/,not.valid': 'valid'})}
response = self.client.post(url, body)
self.assertEqual(response.status_code, 400, response.data)
long_tag = 'a' * 300
body = {'tags': json.dumps({'{}/not.valid'.format(long_tag): 'valid'})}
response = self.client.post(url, body)
self.assertEqual(response.status_code, 400, response.data)
body = {'tags': json.dumps({'this&foo.com/not.valid': 'valid'})}
response = self.client.post(url, body)
self.assertEqual(response.status_code, 400, response.data)
# disallow put/patch/delete
response = self.client.put(url)
self.assertEqual(response.status_code, 405, response.data)
response = self.client.patch(url)
self.assertEqual(response.status_code, 405, response.data)
response = self.client.delete(url)
self.assertEqual(response.status_code, 405, response.data)
```
#### File: api/tests/test_workflow_manager.py
```python
import base64
from django.core.cache import cache
from django.conf import settings
from django.contrib.auth import get_user_model
from rest_framework.authtoken.models import Token
from api.tests import adapter, DryccTransactionTestCase
import requests_mock
User = get_user_model()
@requests_mock.Mocker(real_http=True, adapter=adapter)
class ManagerTest(DryccTransactionTestCase):
"""Tests setting and updating config values"""
fixtures = ['tests.json']
def setUp(self):
self.user = User.objects.get(username='autotest')
self.token = Token.objects.get(user=self.user).key
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
self.app_id = self.create_app()
self.user_id = 7
# workflow manager token
token = base64.b85encode(b"%s:%s" % (
settings.WORKFLOW_MANAGER_ACCESS_KEY.encode("utf8"),
settings.WORKFLOW_MANAGER_SECRET_KEY.encode("utf8"),
)).decode("utf8")
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token)
def tearDown(self):
# Restore default tags to empty string
settings.DRYCC_DEFAULT_CONFIG_TAGS = ''
# make sure every test has a clean slate for k8s mocking
cache.clear()
def test_block(self, mock_requests):
response = self.client.post(
'/v2/manager/{}/{}/block/'.format("users", 7),
data={'remark': 'Arrears blockade'},
)
self.assertEqual(response.status_code, 201)
def test_unblock(self, mock_requests):
response = self.client.post(
'/v2/manager/{}/{}/block/'.format("users", 7),
data={'remark': 'Arrears blockade'},
)
self.assertEqual(response.status_code, 201)
response = self.client.delete(
'/v2/manager/{}/{}/unblock/'.format("users", 7),
)
self.assertEqual(response.status_code, 204)
```
#### File: scheduler/tests/test_pod_resources.py
```python
import unittest
from scheduler.resources.pod import Pod
class TestSchedulerPodResources(unittest.TestCase):
def test_manifest_limits(self):
cpu_cases = [
{"app_type": "web", "cpu": {"cmd": "2"},
"expected": None},
{"app_type": "web", "cpu": {"web": "2"},
"expected": {"limits": {"cpu": "2"}}},
{"app_type": "web", "cpu": {"web": "0/3"},
"expected": {"requests": {"cpu": "0"}, "limits": {"cpu": "3"}}},
{"app_type": "web", "cpu": {"web": "4/5"},
"expected": {"requests": {"cpu": "4"}, "limits": {"cpu": "5"}}},
{"app_type": "web", "cpu": {"web": "400m/500m"},
"expected": {"requests": {"cpu": "400m"}, "limits": {"cpu": "500m"}}},
{"app_type": "web", "cpu": {"web": "0.6/0.7"},
"expected": {"requests": {"cpu": "0.6"}, "limits": {"cpu": "0.7"}}},
]
mem_cases = [
{"app_type": "web", "memory": {"cmd": "2G"},
"expected": None},
{"app_type": "web", "memory": {"web": "200M"},
"expected": {"limits": {"memory": "200Mi"}}},
{"app_type": "web", "memory": {"web": "0/3G"},
"expected": {"requests": {"memory": "0"}, "limits": {"memory": "3Gi"}}},
{"app_type": "web", "memory": {"web": "400M/500MB"},
"expected": {"requests": {"memory": "400Mi"}, "limits": {"memory": "500Mi"}}},
]
for caze in cpu_cases:
manifest = Pod("").manifest("",
"",
"",
app_type=caze["app_type"],
cpu=caze["cpu"])
self._assert_resources(caze, manifest)
for caze in mem_cases:
manifest = Pod("").manifest("",
"",
"",
app_type=caze["app_type"],
memory=caze["memory"])
self._assert_resources(caze, manifest)
def _assert_resources(self, caze, manifest):
resources_parent = manifest["spec"]["containers"][0]
expected = caze["expected"]
if expected:
self.assertEqual(resources_parent["resources"], expected, caze)
else:
self.assertTrue(resources_parent["resources"] == {}, caze)
```
#### File: scheduler/tests/test_pvc.py
```python
from scheduler import KubeHTTPException
from scheduler.tests import TestCase
from scheduler.utils import generate_random_name
class PVCTest(TestCase):
"""Tests scheduler pod calls"""
    def create(self, namespace=None, name=None, **kwargs):
        """
        Helper function to create and verify a pvc on the namespace
        """
        # evaluate the random name per call instead of once at function definition time
        name = generate_random_name() if name is None else name
        namespace = self.namespace if namespace is None else namespace
# these are all required even if it is kwargs...
kwargs = {
'size': '500M',
'storage_class': 'default'
}
pvc = self.scheduler.pvc.create(namespace, name, **kwargs)
self.assertEqual(pvc.status_code, 201, pvc.json())
return name
def test_create_failure(self):
with self.assertRaises(
KubeHTTPException,
msg='failed to create pvc doesnotexist in Namespace {}: 404 Not Found'.format(self.namespace) # noqa
):
self.create('doesnotexist', 'doesnotexist')
def test_create(self):
self.scheduler.ns.create("test-pvc")
self.create(namespace="test-pvc")
def test_delete_failure(self):
# test failure
with self.assertRaises(
KubeHTTPException,
msg='failed to delete pvc foo in Namespace {}: 404 Not Found'.format(self.namespace) # noqa
):
self.scheduler.pvc.delete(self.namespace, 'foo')
def test_delete(self):
# test success
name = self.create()
response = self.scheduler.pvc.delete(self.namespace, name)
data = response.json()
self.assertEqual(response.status_code, 200, data)
def test_get_pvcs(self):
# test success
name = self.create()
response = self.scheduler.pvc.get(self.namespace)
data = response.json()
self.assertEqual(response.status_code, 200, data)
self.assertIn('items', data)
self.assertEqual(1, len(data['items']), data['items'])
# simple verify of data
self.assertEqual(data['items'][0]['metadata']['name'], name, data)
def test_get_pvc(self):
# test success
name = self.create()
response = self.scheduler.pvc.get(self.namespace, name)
data = response.json()
self.assertEqual(response.status_code, 200, data)
# simple verify of data
self.assertEqual(data['metadata']['name'], name, data)
def test_get_pvcs_failure(self):
# test failure
with self.assertRaises(
KubeHTTPException,
msg='failed to get Pod doesnotexist in Namespace {}: 404 Not Found'.format(self.namespace) # noqa
):
self.scheduler.pvc.get(self.namespace, 'doesnotexist')
``` |
{
"source": "jianxiaoguo/helmbroker",
"score": 2
} |
#### File: rootfs/helmbroker/broker.py
```python
import os
import time
import shutil
from typing import Union, List, Optional
from openbrokerapi.catalog import ServicePlan
from openbrokerapi.errors import ErrInstanceAlreadyExists, ErrAsyncRequired, \
ErrBindingAlreadyExists, ErrBadRequest, ErrInstanceDoesNotExist, \
ServiceException
from openbrokerapi.service_broker import ServiceBroker, Service, \
ProvisionDetails, ProvisionedServiceSpec, ProvisionState, GetBindingSpec, \
BindDetails, Binding, BindState, UnbindDetails, UnbindSpec, \
UpdateDetails, UpdateServiceSpec, DeprovisionDetails, \
DeprovisionServiceSpec, LastOperation, OperationState
from .utils import get_instance_path, get_chart_path, get_plan_path, \
get_addon_path, get_addon_updateable, get_addon_bindable, InstanceLock, \
load_instance_meta, load_binding_meta, dump_instance_meta, \
load_addons_meta
from .tasks import provision, bind, deprovision, update
class HelmServiceBroker(ServiceBroker):
def catalog(self) -> Union[Service, List[Service]]:
services = load_addons_meta()
service_objs = []
for _, addons in services.items():
plans_objs = []
for plan in addons['plans']:
plans_objs.append(ServicePlan(**plan))
addons['plans'] = plans_objs
service_objs.append(Service(**addons))
return service_objs
def provision(self,
instance_id: str,
details: ProvisionDetails,
async_allowed: bool,
**kwargs) -> ProvisionedServiceSpec:
instance_path = get_instance_path(instance_id)
if os.path.exists(instance_path):
raise ErrInstanceAlreadyExists()
if not async_allowed:
raise ErrAsyncRequired()
os.makedirs(instance_path, exist_ok=True)
chart_path, plan_path = (
get_chart_path(instance_id), get_plan_path(instance_id))
addon_chart_path, addon_plan_path = (
get_addon_path(details.service_id, details.plan_id))
shutil.copytree(addon_chart_path, chart_path)
shutil.copytree(addon_plan_path, plan_path)
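        # record the request and mark the asynchronous provision as in progress before queueing the background task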
data = {
"id": instance_id,
"details": {
"service_id": details.service_id,
"plan_id": details.plan_id,
"context": details.context,
"parameters": details.parameters,
},
"last_operation": {
"state": OperationState.IN_PROGRESS.value,
"operation": "provision",
"description": (
"provision %s in progress at %s" % (
instance_id, time.time()))
}
}
with InstanceLock(instance_id):
dump_instance_meta(instance_id, data)
provision.delay(instance_id, details)
return ProvisionedServiceSpec(state=ProvisionState.IS_ASYNC)
def get_binding(self,
instance_id: str,
binding_id: str,
**kwargs
) -> GetBindingSpec:
data = load_binding_meta(instance_id)
return GetBindingSpec(
data["credentials"],
)
def bind(self,
instance_id: str,
binding_id: str,
details: BindDetails,
async_allowed: bool,
**kwargs
) -> Binding:
is_addon_bindable = get_addon_bindable(details.service_id)
if not is_addon_bindable:
raise ErrBadRequest(
msg="Instance %s does not bindable" % instance_id)
instance_meta = load_instance_meta(instance_id)
if not (instance_meta and
instance_meta['last_operation']['state'] == 'succeeded'):
raise ErrBadRequest(
msg="This instance %s is not ready" % instance_id)
instance_path = get_instance_path(instance_id)
if os.path.exists(f'{instance_path}/bind.json'):
raise ErrBindingAlreadyExists()
chart_path, plan_path = (
get_chart_path(instance_id), get_plan_path(instance_id))
shutil.copy(f'{plan_path}/bind.yaml', f'{chart_path}/templates')
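        # unlike provision, bind runs synchronously; its result is read back from the binding metadata below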
bind(instance_id, binding_id, details, async_allowed, **kwargs)
data = load_binding_meta(instance_id)
if data["last_operation"]["state"] == OperationState.SUCCEEDED.value:
return Binding(state=BindState.SUCCESSFUL_BOUND,
credentials=data["credentials"])
else:
raise ServiceException(data["last_operation"]["description"])
def unbind(self,
instance_id: str,
binding_id: str,
details: UnbindDetails,
async_allowed: bool,
**kwargs
) -> UnbindSpec:
instance_path = get_instance_path(instance_id)
binding_info = f'{instance_path}/binding.json'
if os.path.exists(binding_info):
os.remove(binding_info)
return UnbindSpec(is_async=False)
def update(self,
instance_id: str,
details: UpdateDetails,
async_allowed: bool,
**kwargs
) -> UpdateServiceSpec:
instance_path = get_instance_path(instance_id)
if not os.path.exists(instance_path):
raise ErrBadRequest(msg="Instance %s does not exist" % instance_id)
is_plan_updateable = get_addon_updateable(details.service_id)
if not is_plan_updateable:
raise ErrBadRequest(
msg="Instance %s does not updateable" % instance_id)
if not async_allowed:
raise ErrAsyncRequired()
if details.plan_id is not None:
plan_path = get_plan_path(instance_id)
# delete the pre plan
shutil.rmtree(plan_path, ignore_errors=True)
_, addon_plan_path = get_addon_path(
details.service_id, details.plan_id)
# add the new plan
shutil.copytree(addon_plan_path, plan_path)
update.delay(instance_id, details)
return UpdateServiceSpec(is_async=True)
def deprovision(self,
instance_id: str,
details: DeprovisionDetails,
async_allowed: bool,
**kwargs) -> DeprovisionServiceSpec:
if not os.path.exists(get_instance_path(instance_id)):
raise ErrInstanceDoesNotExist()
with InstanceLock(instance_id):
data = load_instance_meta(instance_id)
operation = data["last_operation"]["operation"]
if operation == "provision":
if not async_allowed:
raise ErrAsyncRequired()
deprovision.delay(instance_id)
elif operation == "deprovision":
return DeprovisionServiceSpec(
is_async=True, operation=operation)
return DeprovisionServiceSpec(is_async=True)
def last_operation(self,
instance_id: str,
operation_data: Optional[str],
**kwargs
) -> LastOperation:
data = load_instance_meta(instance_id)
return LastOperation(
OperationState(data["last_operation"]["state"]),
data["last_operation"]["description"]
)
def last_binding_operation(self,
instance_id: str,
binding_id: str,
operation_data: Optional[str],
**kwargs
) -> LastOperation:
data = load_binding_meta(instance_id)
return LastOperation(
OperationState(data["last_operation"]["state"]),
data["last_operation"]["description"]
)
```
#### File: rootfs/helmbroker/loader.py
```python
import os
import shutil
import tarfile
import requests
import yaml
from .config import ADDONS_PATH, CONFIG_PATH
from .utils import dump_addons_meta
def download_file(url, dest):
    os.makedirs(dest, exist_ok=True)
filename = url.split('/')[-1]
file = requests.get(url)
with open(f"{dest}/{filename}", 'wb') as f:
f.write(file.content)
if filename.endswith(".yaml") or filename.endswith(".yml"):
return yaml.load(file.content.decode(encoding="utf-8"),
Loader=yaml.Loader)
def read_file(filename):
if not os.path.exists(filename):
return
with open(filename, 'r') as f:
file_content = f.read()
return file_content
def save_file(content, dest, filename):
    os.makedirs(dest, exist_ok=True)
with open(f"{dest}/{filename}", 'w') as f:
f.write(content)
def extract_tgz(tgz_file, dest):
if not os.path.exists(tgz_file):
return
    os.makedirs(dest, exist_ok=True)
tarobj = tarfile.open(tgz_file, "r:gz")
for tarinfo in tarobj:
tarobj.extract(tarinfo.name, dest)
tarobj.close()
def addons_meta_file():
meta_files = []
# get meta.yaml
for root, dirnames, filenames in os.walk(ADDONS_PATH):
for filename in filenames:
if filename == 'meta.yaml':
meta_files.append(os.path.join(root, filename))
meta_files = [meta_file.split(ADDONS_PATH)[1] for meta_file in meta_files]
addons_meta = []
plans_meta = []
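    # a meta.yaml directly under an addon directory describes the addon itself; deeper meta.yaml files describe its plans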
for meta_file in meta_files:
if len(meta_file.split('/')) == 3:
addons_meta.append(meta_file.split('/')[1:])
else:
plans_meta.append(meta_file.split('/')[1:])
addons_dict = {}
for addon_meta in addons_meta:
with open(f'{ADDONS_PATH}/{"/".join(addon_meta)}', 'r') as f:
meta = yaml.load(f.read(), Loader=yaml.Loader)
meta['tags'] = meta.get('tags').split(', ') if meta.get('tags') else [] # noqa
meta['plans'] = []
addons_dict[meta['name']] = meta
for plan_meta in plans_meta:
with open(f'{ADDONS_PATH}/{"/".join(plan_meta)}', 'r') as f:
            plan_data = yaml.load(f.read(), Loader=yaml.Loader)
            addons_dict[f'{"-".join(plan_meta[0].split("-")[0:-1])}']['plans'].append(plan_data)  # noqa
dump_addons_meta(addons_dict)
def load_addons(repository):
if not repository:
return
index_name = repository['url'].split('/')[-1]
local_index_file = f'{ADDONS_PATH}/{index_name}'
# download index.yaml
remote_index = requests.get(repository['url']).content.decode(
encoding="utf-8")
# compare index.yaml, is update
local_index = read_file(local_index_file)
if local_index and remote_index == local_index:
return
# delete old repository catalog
if os.path.exists(ADDONS_PATH):
shutil.rmtree(ADDONS_PATH, ignore_errors=True)
else:
os.makedirs(ADDONS_PATH, exist_ok=True)
# new index
save_file(remote_index, ADDONS_PATH, index_name)
remote_index = yaml.load(remote_index, Loader=yaml.Loader)
# save index.yaml addons
for k, v in remote_index.get('entries', {}).items():
for _ in v:
url = "/".join(repository["url"].split("/")[0:-1])
tgz_name = f'{_["name"]}-{_["version"]}'
addon_tgz_url = f'{url}/{tgz_name}.tgz'
download_file(addon_tgz_url, ADDONS_PATH)
extract_tgz(f'{ADDONS_PATH}/{tgz_name}.tgz',
f'{ADDONS_PATH}/{tgz_name}')
addons_meta_file()
if __name__ == '__main__':
with open(f'{CONFIG_PATH}/repositories', 'r') as f:
repositories = yaml.load(f.read(), Loader=yaml.Loader)
load_addons(repositories[0])
``` |
{
"source": "jianxiaoguo/manager",
"score": 2
} |
#### File: management/commands/fee.py
```python
import logging
from django.core.management.base import BaseCommand
from django.db.models import Count
from django.utils.timezone import now
from api.models import Instance, Funding, Message
from api.models.measurement import config_fee, volume_fee, network_fee
from api.utils import date2timestamp
logger = logging.getLogger(__name__)
class Command(BaseCommand):
def handle(self, *args, **options):
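        # bill the previous full day: `end` is today's date as a timestamp, `start` is 24h (86400 s) earlier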
end = date2timestamp(now().date())
start = end - 86400
instances = Instance.objects.values_list('cluster_id', 'owner_id', 'app_id'). \
filter(timestamp__gte=start, timestamp__lt=end).order_by('owner_id'). \
annotate(Count('app_id'))
for instance in instances:
config_fee(instance, start, end)
volume_fee(instance, start, end)
network_fee(instance, start, end)
fs = Funding.objects.distinct('owner_id').filter(credit__lt=0).order_by(
'owner_id', '-created').values_list('owner_id', 'credit')
msgs = []
for f in fs:
msg_data = {
'owner_id': f[0],
'code': 2,
'sender': 'system',
'body': 'You are in arrears, please recharge in time.',
}
msg = Message(
**msg_data
)
msgs.append(msg)
Message.objects.bulk_create(msgs)
self.stdout.write("done")
```
#### File: rootfs/api/middleware.py
```python
from api import __version__
class APIVersionMiddleware(object):
"""
Include that REST API version with each response.
"""
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
"""
Include the manager's REST API major and minor version in
a response header.
"""
response = self.get_response(request)
# clients shouldn't care about the patch release
version = __version__.rsplit('.', 1)[0]
response['DRYCC_API_VERSION'] = version
response['DRYCC_PLATFORM_VERSION'] = __version__
return response
```
#### File: rootfs/api/pipeline.py
```python
def update_user(backend, user, response, *args, **kwargs):
user.username = response.get('username')
user.email = response.get('email')
user.first_name = response.get('first_name')
user.last_name = response.get('last_name')
user.is_superuser = response.get('is_superuser')
user.is_staff = response.get('is_staff')
user.is_active = response.get('is_active')
user.save()
def load_extra_data(backend, details, response, uid, user, *args, **kwargs):
social = kwargs.get('social') or \
backend.strategy.storage.user.get_social_auth(backend.name, uid)
if social:
extra_data = backend.extra_data(user, uid, response, details,
*args, **kwargs)
social.set_extra_data(extra_data)
```
#### File: api/settings/testing.py
```python
import random
import string
import os
from api.settings.production import * # noqa
from api.settings.production import DATABASES
# A boolean that turns on/off debug mode.
# https://docs.djangoproject.com/en/2.2/ref/settings/#debug
DEBUG = True
# If set to True, Django's normal exception handling of view functions
# will be suppressed, and exceptions will propagate upwards
# https://docs.djangoproject.com/en/2.2/ref/settings/#debug-propagate-exceptions
DEBUG_PROPAGATE_EXCEPTIONS = True
# router information
ROUTER_HOST = 'drycc-router.example.com'
ROUTER_PORT = 80
# randomize test database name so we can run multiple unit tests simultaneously
DATABASES['default']['NAME'] = "unittest-{}".format(''.join(
random.choice(string.ascii_letters + string.digits) for _ in range(8)))
DATABASES['default']['USER'] = 'postgres'
# use DB name to isolate the data for each test run
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': DATABASES['default']['NAME'],
'KEY_PREFIX': DATABASES['default']['NAME'],
}
}
DRYCC_DEFAULT_CONFIG_TAGS = os.environ.get('DRYCC_DEFAULT_CONFIG_TAGS', '')
DRYCC_APP_STORAGE_CLASS = os.environ.get('DRYCC_APP_STORAGE_CLASS', '')
class DisableMigrations(object):
def __contains__(self, item):
return True
def __getitem__(self, item):
return None
MIGRATION_MODULES = DisableMigrations()
```
#### File: rootfs/api/views.py
```python
import logging
import django
from django.db.models import Q, Sum
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.views.generic import View
from rest_framework import status
from rest_framework.response import Response
from api import models, serializers
from api.exceptions import ServiceUnavailable
from api.viewset import NormalUserViewSet, DryccViewSet
from api.workflow_proxy import WorkflowProxy
logger = logging.getLogger(__name__)
class ReadinessCheckView(View):
"""
Simple readiness check view to determine DB connection / query.
"""
def get(self, request):
try:
import django.db
with django.db.connection.cursor() as c:
c.execute("SELECT 0")
except django.db.Error as e:
raise ServiceUnavailable("Database health check failed") from e
return HttpResponse("OK")
head = get
class LivenessCheckView(View):
"""
Simple liveness check view to determine if the server
is responding to HTTP requests.
"""
def get(self, request):
return HttpResponse("OK")
head = get
# drycc manager request
class MeasurementsViewSet(DryccViewSet):
def create(self, request, *args, **kwargs):
for _ in request.data:
_["cluster_id"] = request.cluster.pk
return super(MeasurementsViewSet, self).create(request, **kwargs)
class MeasurementsConfigViewSet(MeasurementsViewSet):
serializer_class = serializers.ConfigListSerializer
class MeasurementsVolumeViewSet(MeasurementsViewSet):
serializer_class = serializers.VolumeListSerializer
class MeasurementsNetworksViewSet(MeasurementsViewSet):
serializer_class = serializers.NetworkListSerializer
class MeasurementsInstancesViewSet(MeasurementsViewSet):
serializer_class = serializers.InstanceListSerializer
class MeasurementsResourcesViewSet(MeasurementsViewSet):
serializer_class = serializers.ResourceListSerializer
# UI request
class UserCsrfViewSet(NormalUserViewSet):
def get(self, request, *args, **kwargs):
import re
token = re.findall(r'csrftoken=(.*?);', request.headers['Cookie']+';')[0]
return Response({'token': token})
# TODO debug
# token = [_ for _ in request.headers['Cookie'].split('; ') if 'csrftoken' in _][0].split('=')[-1]
# token = request.headers['Cookie'].split('csrftoken=')[-1]
# res = Response({'token': token})
# res.set_cookie('csrftoken', token, samesite=None, secure=True)
# return res
class UserManagementViewSet(NormalUserViewSet):
serializer_class = serializers.UserSerializer
def retrieve(self, request, *args, **kwargs):
serializer = self.get_serializer(request.user, many=False)
return Response(serializer.data)
class ClustersViewSet(NormalUserViewSet):
model = models.Cluster
serializer_class = serializers.ClustersSerializer
def get_queryset(self, *args, **kwargs):
return self.model.objects.all(*args, **kwargs)
def get_object(self, **kwargs):
cluster = get_object_or_404(self.model, cluster_id=kwargs['cluster_id'])
return cluster
class ListViewSet(NormalUserViewSet):
def get_queryset(self, **kwargs):
serializer = self.serializer_class(data=self.request.query_params)
serializer.is_valid(raise_exception=True)
serializerlist = serializers.ListSerializer(
data=self.request.query_params)
serializerlist.is_valid(raise_exception=True)
q = Q(owner=self.request.user)
if serializerlist.validated_data.get('section'):
q &= Q(created__range=serializerlist.validated_data.get('section'))
return self.model.objects.filter(q, **serializer.validated_data)
class BillsViewSet(ListViewSet):
model = models.Bill
serializer_class = serializers.BillsSerializer
class BillsProductViewSet(NormalUserViewSet):
model = models.Bill
serializer_class = serializers.BillsProductSerializer
def get_queryset(self, **kwargs):
serializer = serializers.BillsProductSerializer(
data=self.request.query_params)
serializer.is_valid(raise_exception=True)
serializerlist = serializers.ListSerializer(
data=self.request.query_params)
serializerlist.is_valid(raise_exception=True)
q = Q(owner=self.request.user)
if serializerlist.validated_data.get('section'):
q &= Q(created__range=serializerlist.validated_data.get('section'))
return self.model.objects.filter(q, **serializer.validated_data).\
order_by('resource_type', 'created').\
extra(select={'created':"TO_CHAR(api_bill.created, 'YYYY-MM')"}).\
values('resource_type', 'created').\
annotate(sum_total_price=Sum('total_price'))
class BillsAppViewSet(NormalUserViewSet):
model = models.Bill
serializer_class = serializers.BillsProductSerializer
def get_queryset(self, **kwargs):
serializer = serializers.BillsProductSerializer(
data=self.request.query_params)
serializer.is_valid(raise_exception=True)
serializerlist = serializers.ListSerializer(
data=self.request.query_params)
serializerlist.is_valid(raise_exception=True)
q = Q(owner=self.request.user)
if serializerlist.validated_data.get('section'):
q &= Q(created__range=serializerlist.validated_data.get('section'))
return self.model.objects.filter(q, **serializer.validated_data).\
order_by('cluster__name', 'app_id', 'created').\
extra(select={'created':"TO_CHAR(api_bill.created, 'YYYY-MM')"}).\
values('created','cluster__name', 'app_id').\
annotate(sum_total_price=Sum('total_price'))
class FundingsViewSet(ListViewSet):
model = models.Funding
serializer_class = serializers.FundingsSerializer
class MessagesViewSet(ListViewSet):
model = models.Message
serializer_class = serializers.MessagesSerializer
class MessageViewSet(NormalUserViewSet):
model = models.Message
serializer_class = serializers.MessagesSerializer
def get_object(self):
return get_object_or_404(self.model, uuid=self.kwargs['pk'])
def update(self, request, *args, **kwargs):
msg = self.get_object()
msg = serializers.MessagesSerializer(data=request.data,
instance=msg,
partial=True)
msg.is_valid(raise_exception=True)
msg.save()
return Response(status=status.HTTP_204_NO_CONTENT)
class ClusterProxyViewSet(NormalUserViewSet):
def get_cluster(self):
cluster = get_object_or_404(models.Cluster,
name=self.kwargs['name'])
return cluster
def list(self, request, *args, **kwargs):
try:
token = request.user.social_auth.filter(provider='drycc').last(). \
extra_data.get('id_token')
except AttributeError:
return Response(status=401)
cluster = self.get_cluster()
wfp = WorkflowProxy(token).get(
url=cluster.ingress + '/v2/' + kwargs.get('proxy_url'),
**request.query_params)
if wfp.status_code == 200:
res = wfp.json()
if not isinstance(res, dict):
return Response(res)
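            # rewrite the paginator's previous/next links so they point at this proxy endpoint
            # rather than at the target cluster's ingress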
if res.get('previous'):
res['previous'] = request.build_absolute_uri().split('?')[0] + \
'?' + res['previous'].split('?')[1]
if res.get('next'):
res['next'] = request.build_absolute_uri().split('?')[0] + '?' \
+ res['next'].split('?')[1]
return Response(res)
elif wfp.status_code in [401, 403, 404]:
return Response(status=wfp.status_code)
else:
return Response(wfp.content, status=wfp.status_code)
def delete(self, request, *args, **kwargs):
try:
token = request.user.social_auth.filter(provider='drycc').last(). \
extra_data.get('id_token')
except AttributeError:
return Response(status=401)
cluster = self.get_cluster()
wfp = WorkflowProxy(token).delete(
url=cluster.ingress + '/v2/' + kwargs.get('proxy_url'),
**request.data)
if wfp.status_code == 204:
return Response(status=wfp.status_code)
else:
return Response(data=wfp.content, status=wfp.status_code)
def post(self, request, *args, **kwargs):
try:
token = request.user.social_auth.filter(provider='drycc').last(). \
extra_data.get('id_token')
except AttributeError:
return Response(status=401)
cluster = self.get_cluster()
wfp = WorkflowProxy(token).post(
url=cluster.ingress + '/v2/' + kwargs.get('proxy_url'),
**request.data)
if wfp.status_code in [200]:
return Response(wfp.json(), status=wfp.status_code)
elif wfp.status_code in [201, 204]:
return Response(status=wfp.status_code)
else:
return Response(data=wfp.content, status=wfp.status_code)
``` |
{
"source": "jianxiaoguo/smartdns",
"score": 2
} |
#### File: smartdns/smartdns/monitor.py
```python
from functools import partial
from twisted.internet import task, ssl, reactor
from twisted.web.client import readBody, Agent, ResponseFailed, BrowserLikePolicyForHTTPS
from twisted.web.http_headers import Headers
from twisted.web.iweb import IPolicyForHTTPS
from twisted.internet.error import TimeoutError, ConnectError, TCPTimedOutError
from urllib.parse import urlparse
from zope.interface import implementer
@implementer(IPolicyForHTTPS)
class SmartClientContextFactory(object):
def __init__(self):
self.default_policy = BrowserLikePolicyForHTTPS
def creatorForNetloc(self, hostname, port):
return ssl.CertificateOptions(verify=False)
class Monitor(object):
def __init__(self, ip_set, monitor):
self.ip_set = ip_set
self.monitor = monitor
self.black_mapping = {}
def _check(self, host, ip):
url = self.monitor['url'].replace(host, ip, 1).encode("utf8")
agent = Agent(reactor, contextFactory=SmartClientContextFactory(), connectTimeout=30)
agent.request(b'GET', url, headers=Headers({"host": [host, ]})).addCallbacks(
BlackMappingChecker(
ip, self.black_mapping), BlackMappingAdder(ip, self.black_mapping))
def check(self, ip):
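        # an ip counts as healthy until it has accumulated `frequency` failed probes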
return self.black_mapping[ip] < self.monitor["frequency"]
def start(self):
host = urlparse(self.monitor["url"]).netloc.split(":")[0]
for ip in self.ip_set:
self.black_mapping[ip] = 0
task.LoopingCall(partial(self._check, host=host, ip=ip)).start(
self.monitor["interval"])
class BlackMappingAdder(object):
def __init__(self, ip, black_mapping):
self.ip = ip
self.black_mapping = black_mapping
def __call__(self, failure):
self.black_mapping[self.ip] += 1
failure.trap(ResponseFailed, TimeoutError, ConnectError, TCPTimedOutError)
class BlackMappingChecker(object):
def __init__(self, ip, black_mapping):
self.ip = ip
self.black_mapping = black_mapping
def __call__(self, response):
if response.code < 500:
self.black_mapping[self.ip] = 0
else:
self.black_mapping[self.ip] += 1
finished = readBody(response)
finished.addCallback(lambda body: None) # ignore
return finished
class MonitorMapping(object):
def __init__(self, config, amapping):
self.config = config
self.monitor_mapping = self._make_monitor_mapping(amapping)
def _make_monitor_mapping(self, amapping):
monitor_mapping = {}
for name, item in amapping.items():
if name in self.config:
ip_set = set()
monitor = Monitor(ip_set, self.config[name])
for key, value in item.items():
if key == 'ttl':
continue
else:
ip_set.update(value.split(' '))
monitor.start()
monitor_mapping[name] = monitor
return monitor_mapping
def check(self, name, ip):
if name in self.monitor_mapping:
return self.monitor_mapping[name].check(ip)
return True
``` |
{
"source": "jianxingdong/classifier",
"score": 2
} |
#### File: classifier/common/tcseg.py
```python
import os
import sys
sys.path.append('../common')
import TCWordSeg
OUT_WORD = TCWordSeg.OUT_WORD
OUT_PHRASE = TCWordSeg.OUT_PHRASE
OUT_SUBPHRASE = TCWordSeg.OUT_SUBPHRASE
TC_ENGU = TCWordSeg.TC_ENGU
TC_GU = TCWordSeg.TC_GU
TC_POS = TCWordSeg.TC_POS
TC_USR = TCWordSeg.TC_USR
TC_S2D = TCWordSeg.TC_S2D
TC_U2L = TCWordSeg.TC_U2L
TC_CLS = TCWordSeg.TC_CLS
TC_RUL = TCWordSeg.TC_RUL
TC_CN = TCWordSeg.TC_CN
TC_T2S = TCWordSeg.TC_T2S
TC_PGU = TCWordSeg.TC_PGU
TC_LGU = TCWordSeg.TC_LGU
TC_SGU = TCWordSeg.TC_SGU
TC_CUT = TCWordSeg.TC_CUT
TC_TEXT = TCWordSeg.TC_TEXT
TC_CONV = TCWordSeg.TC_CONV
TC_WMUL = TCWordSeg.TC_WMUL
TC_PMUL = TCWordSeg.TC_PMUL
TC_ASC = TCWordSeg.TC_ASC
TC_SECPOS = TCWordSeg.TC_SECPOS
TC_GBK = TCWordSeg.TC_GBK
TC_UTF8 = TCWordSeg.TC_UTF8
TC_NEW_RES = TCWordSeg.TC_NEW_RES
TC_SYN = TCWordSeg.TC_SYN
TC_LN = TCWordSeg.TC_LN
TC_WGU = TCWordSeg.TC_WGU
TC_A = TCWordSeg.TC_A
TC_AD = TCWordSeg.TC_AD
TC_AN = TCWordSeg.TC_AN
TC_B = TCWordSeg.TC_B
TC_C = TCWordSeg.TC_C
TC_D = TCWordSeg.TC_D
TC_E = TCWordSeg.TC_E
TC_F = TCWordSeg.TC_F
TC_G = TCWordSeg.TC_G
TC_H = TCWordSeg.TC_H
TC_I = TCWordSeg.TC_I
TC_J = TCWordSeg.TC_J
TC_K = TCWordSeg.TC_K
TC_L = TCWordSeg.TC_L
TC_M = TCWordSeg.TC_M
TC_N = TCWordSeg.TC_N
TC_NR = TCWordSeg.TC_NR
TC_NRF = TCWordSeg.TC_NRF
TC_NRG = TCWordSeg.TC_NRG
TC_NS = TCWordSeg.TC_NS
TC_NT = TCWordSeg.TC_NT
TC_NZ = TCWordSeg.TC_NZ
TC_NX = TCWordSeg.TC_NX
TC_O = TCWordSeg.TC_O
TC_P = TCWordSeg.TC_P
TC_Q = TCWordSeg.TC_Q
TC_R = TCWordSeg.TC_R
TC_S = TCWordSeg.TC_S
TC_T = TCWordSeg.TC_T
TC_U = TCWordSeg.TC_U
TC_V = TCWordSeg.TC_V
TC_VD = TCWordSeg.TC_VD
TC_VN = TCWordSeg.TC_VN
TC_W = TCWordSeg.TC_W
TC_X = TCWordSeg.TC_X
TC_Y = TCWordSeg.TC_Y
TC_Z = TCWordSeg.TC_Z
TC_AG = TCWordSeg.TC_AG
TC_BG = TCWordSeg.TC_BG
TC_DG = TCWordSeg.TC_DG
TC_MG = TCWordSeg.TC_MG
TC_NG = TCWordSeg.TC_NG
TC_QG = TCWordSeg.TC_QG
TC_RG = TCWordSeg.TC_RG
TC_TG = TCWordSeg.TC_TG
TC_VG = TCWordSeg.TC_VG
TC_YG = TCWordSeg.TC_YG
TC_ZG = TCWordSeg.TC_ZG
TC_SOS = TCWordSeg.TC_SOS
TC_EOS = TCWordSeg.TC_EOS
TC_UNK = TCWordSeg.TC_UNK
TC_WWW = TCWordSeg.TC_WWW
TC_TELE = TCWordSeg.TC_TELE
TC_EMAIL = TCWordSeg.TC_EMAIL
g_seghandle = None
def tcinit(SEG_MODE=TC_ENGU|TC_U2L|TC_POS|TC_S2D|TC_T2S|TC_CN|TC_TEXT|TC_CUT|TC_USR|TC_T2S):
global g_seghandle
if g_seghandle:
return False
TCWordSeg.TCInitSeg('../data/tc_seg')
g_seghandle = TCWordSeg.TCCreateSegHandle(SEG_MODE)
return g_seghandle
def tcuninit():
global g_seghandle
if not g_seghandle:
return False
TCWordSeg.TCCloseSegHandle(g_seghandle)
TCWordSeg.TCUnInitSeg()
return True
def tcsegment(string):
ret = []
global g_seghandle
TCWordSeg.TCSegment(g_seghandle, string.encode('gbk'))
rescount = TCWordSeg.TCGetResultCnt(g_seghandle)
last_pos = 0
for i in range(rescount):
        wordpos = TCWordSeg.TCGetAt(g_seghandle, i)
word = wordpos.word.decode('gbk')
pos = wordpos.pos
bcw = wordpos.bcw
cls = wordpos.cls
idx = last_pos
last_pos = idx + len(word)
ret.append((word, pos, bcw, cls, idx))
return ret
if __name__ == '__main__':
tcinit()
for item in tcsegment(sys.argv[1].decode('utf8')):
print item[0].encode('utf8'), item[1], item[4]
tcuninit()
``` |
{
"source": "JianxingHuang/ecint",
"score": 2
} |
#### File: ecint/preprocessor/kind.py
```python
from abc import ABCMeta, abstractmethod
import yaml
__all__ = ['SetsFromYaml', 'TZV2PSets', 'DZVPSets']
_E_WITH_Q = {'H': '1', 'He': '2', 'Li': '3', 'Be': '4', 'B': '3', 'C': '4', 'N': '5', 'O': '6', 'F': '7', 'Ne': '8',
'Na': '9', 'Mg': '2', 'Al': '3', 'Si': '4', 'P': '5', 'S': '6', 'Cl': '7', 'Ar': '8', 'K': '9', 'Ca': '10',
'Sc': '11', 'Ti': '12', 'V': '13', 'Cr': '14', 'Mn': '15', 'Fe': '16', 'Co': '17', 'Ni': '18', 'Cu': '11',
'Zn': '12', 'Ga': '3', 'Ge': '4', 'As': '5', 'Se': '6', 'Br': '7', 'Kr': '8',
'Rb': '9', 'Sr': '10', 'Y': '11', 'Zr': '12', 'Nb': '13', 'Mo': '14', 'Tc': '15', 'Ru': '8', 'Rh': '9',
'Pd': '18', 'Ag': '11', 'Cd': '12', 'In': '3', 'Sn': '4', 'Sb': '5', 'Te': '6', 'I': '7', 'Xe': '8',
'Cs': '9', 'Ba': '10', 'La': '11', 'Hf': '12', 'Ta': '5', 'W': '6', 'Re': '7', 'Os': '8', 'Ir': '9',
'Pt': '18', 'Au': '19', 'Hg': '12', 'Tl': '3', 'Pb': '4', 'Bi': '5', 'Po': '6', 'At': '7', 'Rn': '8'}
class BaseSets(metaclass=ABCMeta):
def __init__(self, structure):
self.structure = structure
self.elements = set(self.structure.symbols)
@property
@abstractmethod
def kind_section(self):
pass
class SetsFromYaml(BaseSets):
def __init__(self, structure, kind_section_config_path):
super(SetsFromYaml, self).__init__(structure)
self.kind_section_config = self.load_kind_section_config_file(kind_section_config_path)
@property
def kind_section(self):
if self.kind_section_config.keys() == self.elements:
kind_section_list = []
for k, v in self.kind_section_config.items():
one_kind_section = {'_': k}
one_kind_section.update(v)
kind_section_list.append(one_kind_section)
else:
raise ValueError('Elements in input structure and configuration file do not match')
return kind_section_list
@staticmethod
def load_kind_section_config_file(kind_section_config_path):
try:
with open(kind_section_config_path, 'r') as f:
                kind_section_config = yaml.safe_load(f)
except IOError:
print('Can not find file {}'.format(kind_section_config_path))
return kind_section_config
class TZV2PSets(BaseSets):
@property
def kind_section(self):
kind_section_list = []
for e in self.elements:
one_kind_section = {'_': e, 'BASIS_SET': 'TZV2P-GTH',
'POTENTIAL': 'GTH-BLYP-q{}'.format(_E_WITH_Q[e])}
kind_section_list.append(one_kind_section)
return kind_section_list
class DZVPSets(BaseSets):
@property
def kind_section(self):
kind_section_list = []
for e in self.elements:
one_kind_section = {'_': e, 'BASIS_SET': 'DZVP-MOLOPT-SR-GTH',
'POTENTIAL': 'GTH-PBE-q{}'.format(_E_WITH_Q[e])}
kind_section_list.append(one_kind_section)
return kind_section_list
```
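A small usage sketch for the kind-section builders above; the structure file name is an assumption, and all elements must be covered by the `_E_WITH_Q` table:
```python
# Hypothetical usage of DZVPSets; 'water.xyz' is a placeholder structure file.
from ase.io import read

atoms = read('water.xyz')
kind_sections = DZVPSets(atoms).kind_section
# e.g. [{'_': 'O', 'BASIS_SET': 'DZVP-MOLOPT-SR-GTH', 'POTENTIAL': 'GTH-PBE-q6'},
#       {'_': 'H', 'BASIS_SET': 'DZVP-MOLOPT-SR-GTH', 'POTENTIAL': 'GTH-PBE-q1'}]
print(kind_sections)
```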
#### File: ecint/preprocessor/utils.py
```python
from ase.io import read
from aiida.orm import StructureData
import json
def path2structure(structure_path, cell, pbc=True):
"""
    :param structure_path: path to a structure file readable by ase.io.read
    :param cell: (list) cell parameters, can be set as [(1, 0, 0), (0, 1, 0), (0, 0, 1)] or like [1, 1, 1, 90, 90, 90],
                 units: angstrom, angle
    :param pbc: periodic boundary conditions, can be set as [False, False, True]
    :return: ase.Atoms with cell and pbc set (the StructureData conversion is left commented out below)
"""
atoms = read(structure_path)
atoms.set_cell(cell)
if (atoms.cell == 0).all():
raise ValueError('The cell parameters can not be all zero.')
else:
atoms.set_pbc(pbc)
# structure = StructureData(ase=atoms)
return atoms
def load_json(json_path):
with open(json_path) as f:
d = json.load(f)
return d
def inp2json(cp2k_input):
# TODO: need edit, parse cp2k input file to json format
pass
``` |
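A minimal call of `path2structure` with an orthorhombic cell; the file name and numbers are illustrative placeholders:
```python
# Hypothetical call; the path, cell and pbc values are placeholders.
atoms = path2structure('slab.xyz', cell=[10.0, 10.0, 20.0, 90, 90, 90], pbc=[True, True, False])
print(atoms.cell)   # 10 x 10 x 20 angstrom orthorhombic cell
print(atoms.pbc)    # [ True  True False]
```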
{
"source": "JianxinMa/clrec_v1.0",
"score": 2
} |
#### File: clrec_v1.0/clrec_v1_local_sasrec/main.py
```python
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from dataset import load_data, evaluate, BatchSampler
from model import Model
flags = tf.app.flags
flags.DEFINE_integer('seed', -1, '')
flags.DEFINE_string('dataset', 'beauty', 'steam/beauty/video/ml-1m/ml-20m')
flags.DEFINE_integer('batch_size', 128, '')
flags.DEFINE_float('lr', 0.001, '')
flags.DEFINE_integer('maxlen', 50, '')
flags.DEFINE_integer('hidden_units', 50, '')
flags.DEFINE_integer('num_blocks', 2, '')
flags.DEFINE_integer('num_epochs', 100, '')
flags.DEFINE_integer('num_heads', 5, '')
flags.DEFINE_float('dropout_rate', 0.5, '')
flags.DEFINE_float('l2_emb', 0.0, '')
FLAGS = flags.FLAGS
def set_rng_seed(seed):
np.random.seed(seed)
tf.set_random_seed(seed)
def main(_):
if FLAGS.seed == -1:
FLAGS.seed = np.random.randint(int(2e9))
print('seed=%d' % FLAGS.seed)
set_rng_seed(FLAGS.seed)
print('[ config ] ', ' '.join(
['--%s=%s' % (k, v) for k, v in FLAGS.flag_values_dict().items()]))
dataset = load_data(FLAGS.dataset)
[user_train, _, _, usernum, itemnum] = dataset
num_clicks = 0.0
for u in user_train:
num_clicks += len(user_train[u])
print('average sequence length: %.2f' % (num_clicks / len(user_train)))
num_batch = 2 * int(num_clicks / FLAGS.batch_size) + 1
sampler = BatchSampler(user_train, usernum, itemnum,
batch_size=FLAGS.batch_size, maxlen=FLAGS.maxlen,
n_workers=7)
model = Model(usernum, itemnum, FLAGS)
print('All global variables:')
for v in tf.global_variables():
if v in tf.trainable_variables():
print('\t', v, 'trainable')
# else:
# print('\t', v)
sess_conf = tf.ConfigProto()
sess_conf.gpu_options.allow_growth = True
sess_conf.allow_soft_placement = True
# hooks = [tf.train.ProfilerHook(save_steps=10, output_dir='.')]
hooks = None
with tf.train.MonitoredTrainingSession(
config=sess_conf, hooks=hooks) as sess:
total_time = 0.0
t0 = time.time()
for epoch in range(1, FLAGS.num_epochs + 1):
for _ in tqdm(range(num_batch), total=num_batch, ncols=70,
leave=False, unit='b'):
u, seq, pos, neg = sampler.next_batch()
loss, _ = sess.run([model.loss, model.train_op],
{model.u: u, model.input_seq: seq,
model.pos: pos, model.neg: neg,
model.is_training: True})
assert not np.isnan(loss)
assert not np.isinf(loss)
if epoch % 1 == 0:
t1 = time.time() - t0
total_time += t1
print('epoch: %d, time: %f(s)' % (epoch, total_time))
t_val = evaluate(model, dataset, FLAGS, sess, testing=False)
print('val (HR@1: %.4f, HR@5: %.4f, HR@10: %.4f, '
'NDCG@5: %.4f, NDCG@10: %.4f, MRR: %.4f)' % t_val)
t_tst = evaluate(model, dataset, FLAGS, sess, testing=True)
print('tst (HR@1: %.4f, HR@5: %.4f, HR@10: %.4f, '
'NDCG@5: %.4f, NDCG@10: %.4f, MRR: %.4f)' % t_tst)
print() # tqdm may overwritten this line
t0 = time.time()
sampler.close()
print("Done")
if __name__ == '__main__':
tf.app.run()
``` |
{
"source": "jianxinwang/GenEsysV",
"score": 2
} |
#### File: GenEsysV/core/forms.py
```python
from crispy_forms.helper import FormHelper
from django import forms
from django.contrib.auth.models import User
from django.db.models import Q
from core.models import (Dataset, FilterField, FilterFieldChoice, SavedSearch,
Study, DocumentReview)
EXIST_CHOICES = [('', '----'), ("only", "only"), ("excluded", "excluded")]
class StudyForm(forms.Form):
# You have to comment out study choices before migrating
def __init__(self, user, *args, **kwargs):
super().__init__(*args, **kwargs)
user_group_ids = [group.id for group in user.groups.all()]
user_dataset = Dataset.objects.select_related('study').filter(
Q(allowed_groups__in=user_group_ids) | Q(is_public=True)).distinct()
user_studies = [ele.study.id for ele in user_dataset]
STUDY_CHOICES = [(ele.id, ele.name)
for ele in Study.objects.filter(id__in=user_studies)]
STUDY_CHOICES.insert(0, ('', '---'))
self.fields['study'] = forms.ChoiceField(
label='Study', choices=STUDY_CHOICES)
class DatasetForm(forms.Form):
def __init__(self, study_obj, user, *args, **kwargs):
super().__init__(*args, **kwargs)
user_group_ids = [group.id for group in user.groups.all()]
user_dataset = Dataset.objects.filter(study=study_obj)
user_dataset = user_dataset.filter(
Q(allowed_groups__in=user_group_ids) | Q(is_public=True)).distinct()
DATASET_CHOICES = [(ele.id, ele.description)
for ele in user_dataset]
DATASET_CHOICES.insert(0, ('', '---'))
self.fields['dataset'] = forms.ChoiceField(
label='Dataset', choices=DATASET_CHOICES)
class AnalysisTypeForm(forms.Form):
def __init__(self, dataset_obj, user, *args, **kwargs):
super().__init__(*args, **kwargs)
ANALYSIS_TYPE_CHOICES = [(analysis.id, analysis.name)
for analysis in dataset_obj.analysis_type.all()]
ANALYSIS_TYPE_CHOICES.insert(0, ('', '---'))
self.fields['analysis_type'] = forms.ChoiceField(
label='Analysis Type', choices=ANALYSIS_TYPE_CHOICES)
class FilterFormPart(forms.Form):
"""Filter Form Part is used to create snippet. Filter Form is used to validate the POST data"""
def __init__(self, fields, MEgroup=None, *args, **kwargs):
super().__init__(*args, **kwargs)
for field in fields:
if field.tooltip:
tooltip = ' <i data-toggle="popover" data-trigger="hover" data-content="%s" class="fa fa-info-circle" aria-hidden="true"></i>' % (
field.tooltip)
else:
tooltip = ''
label = '%s %s %s' % (
field.display_text, field.in_line_tooltip if field.in_line_tooltip else '', tooltip)
field_name = '%d' % (field.id)
if field.form_type.name == "CharField" and field.widget_type.name == "TextInput":
self.fields[field_name] = forms.CharField(
label=label, required=False)
if field.default_value:
self.fields[field_name].initial = field.default_value
if MEgroup:
self.fields[field_name].widget.attrs.update(
{'groupId': MEgroup})
elif field.form_type.name == "MultipleChoiceField" and field.widget_type.name == "SelectMultiple":
# CHOICES = [(ele.value, ' '.join(ele.value.split('_')))
# for ele in
# FilterFieldChoice.objects.filter(filter_field=field).order_by('pk')]
CHOICES = [(ele.value, ' '.join(ele.value.split('_')))
for ele in field.filterfieldchoice_set.all().order_by('pk')]
self.fields[field_name] = forms.MultipleChoiceField(
label=label, required=False, choices=CHOICES)
if MEgroup:
self.fields[field_name].widget.attrs.update(
{'groupId': MEgroup})
elif field.form_type.name == "ChoiceField" and field.widget_type.name == "Select":
if field.es_filter_type.name == 'filter_exists':
self.fields[field_name] = forms.ChoiceField(
label=label, required=False, choices=EXIST_CHOICES)
elif field.es_filter_type.name == 'nested_filter_exists':
self.fields[field_name] = forms.ChoiceField(
label=label, required=False, choices=EXIST_CHOICES)
else:
# CHOICES = [(ele.value, ele.value) for ele in FilterFieldChoice.objects.filter(
# filter_field=field).order_by('pk')]
CHOICES = [(ele.value, ele.value)
for ele in field.filterfieldchoice_set.all().order_by('pk')]
CHOICES.insert(0, ('', '----'))
self.fields[field_name] = forms.ChoiceField(
label=label, required=False, choices=CHOICES)
if MEgroup:
self.fields[field_name].widget.attrs.update(
{'groupId': MEgroup})
elif field.form_type.name == "CharField" and field.widget_type.name == "Textarea":
self.fields[field_name] = forms.CharField(
widget=forms.Textarea(), label=label, required=False)
if MEgroup:
self.fields[field_name].widget.attrs.update(
{'groupId': MEgroup})
elif field.form_type.name == "CharField" and field.widget_type.name == "UploadField":
self.fields[field_name] = forms.CharField(widget=forms.Textarea(
attrs={'rows': 4, 'class': 'upload-field'}), label=label, required=False)
if MEgroup:
self.fields[field_name].widget.attrs.update(
{'groupId': MEgroup})
class FilterForm(forms.Form):
"""Filter Form Part is used to create snippet. Filter Form is used to validate the POST data"""
def __init__(self, dataset, *args, **kwargs):
super().__init__(*args, **kwargs)
for field in FilterField.objects.filter(dataset=dataset).select_related(
'widget_type', 'form_type', 'es_filter_type'):
if field.tooltip:
tooltip = ' <i data-toggle="popover" data-trigger="hover" data-content="%s" class="fa fa-info-circle" aria-hidden="true"></i>' % (
field.tooltip)
else:
tooltip = ''
            label = '%s %s %s' % (field.display_text,
                                  field.in_line_tooltip if field.in_line_tooltip else '', tooltip)
field_name = '%d' % (field.id)
if field.form_type.name == "CharField" and field.widget_type.name == "TextInput":
self.fields[field_name] = forms.CharField(
label=label, required=False)
elif field.form_type.name == "MultipleChoiceField" and field.widget_type.name == "SelectMultiple":
CHOICES = [(ele.value, ' '.join(ele.value.split('_')))
for ele in FilterFieldChoice.objects.filter(filter_field=field).order_by('pk')]
self.fields[field_name] = forms.MultipleChoiceField(
label=label, required=False, choices=CHOICES)
elif field.form_type.name == "ChoiceField" and field.widget_type.name == "Select":
if field.es_filter_type.name == 'filter_exists':
self.fields[field_name] = forms.ChoiceField(
label=label, required=False, choices=EXIST_CHOICES)
elif field.es_filter_type.name == 'nested_filter_exists':
self.fields[field_name] = forms.ChoiceField(
label=label, required=False, choices=EXIST_CHOICES)
else:
CHOICES = [(ele.value, ele.value) for ele in FilterFieldChoice.objects.filter(
filter_field=field).order_by('pk')]
CHOICES.insert(0, ('', '----'))
self.fields[field_name] = forms.ChoiceField(
label=label, required=False, choices=CHOICES)
elif field.form_type.name == "CharField" and field.widget_type.name == "UploadField":
self.fields[field_name] = forms.CharField(widget=forms.Textarea(
attrs={'rows': 4, 'class': 'upload-field'}), label=label, required=False)
class AttributeFormPart(forms.Form):
def __init__(self, fields, *args, **kwargs):
super().__init__(*args, **kwargs)
for field in fields:
label = field.display_text
field_name = '%d' % (field.id)
self.fields[field_name] = forms.BooleanField(
label=label, required=False)
class AttributeForm(forms.Form):
def __init__(self, dataset, *args, **kwargs):
super().__init__(*args, **kwargs)
for field in dataset.attributefield_set.all():
label = field.display_text
field_name = '%d' % (field.id)
self.fields[field_name] = forms.BooleanField(
label=label, required=False)
class SaveSearchForm(forms.ModelForm):
def __init__(self, user, dataset, analysis_type, additional_information, filters_used, attributes_selected, *args, **kwargs):
super(SaveSearchForm, self).__init__(*args, **kwargs)
self.fields['user'].initial = User.objects.get(id=user.id)
self.fields['dataset'].initial = dataset
self.fields['analysis_type'].initial = analysis_type
self.fields['additional_information'].initial = additional_information
self.fields['filters_used'].initial = filters_used
self.fields['attributes_selected'].initial = attributes_selected
@property
def helper(self):
helper = FormHelper()
helper.form_tag = False # don't render form DOM element
helper.render_unmentioned_fields = True # render all fields
helper.label_class = 'col-md-2'
helper.field_class = 'col-md-10'
return helper
class Meta:
model = SavedSearch
fields = '__all__'
widgets = {
'user': forms.HiddenInput(attrs={'readonly': 'readonly'}),
'dataset': forms.HiddenInput(attrs={'readonly': 'readonly'}),
'analysis_type': forms.HiddenInput(attrs={'readonly': 'readonly'}),
'additional_information': forms.HiddenInput(attrs={'readonly': 'readonly'}),
'filters_used': forms.HiddenInput(attrs={'readonly': 'readonly', }),
'attributes_selected': forms.HiddenInput(attrs={'readonly': 'readonly', 'required': True}),
'description': forms.Textarea(attrs={'autofocus': 'autofocus', 'required': True}),
}
class DocumentReviewForm(forms.ModelForm):
class Meta:
model = DocumentReview
fields = ['status',]
```
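A rough sketch of how `FilterFormPart` could be instantiated from a dataset in a view; the helper name and rendering details are assumptions:
```python
# Hypothetical view-side helper; the queryset mirrors what FilterForm does above.
def build_filter_snippet(dataset, MEgroup=None):
    fields = FilterField.objects.filter(dataset=dataset).select_related(
        'widget_type', 'form_type', 'es_filter_type')
    return FilterFormPart(fields, MEgroup)   # render with {{ form }} in a template
```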
#### File: management/commands/import_test_data.py
```python
from django.core.management.base import BaseCommand, CommandError
import pprint
import os
from core.management.commands.create_gui import add_required_data_to_db
import elasticsearch
class Command(BaseCommand):
def handle(self, *args, **options):
add_required_data_to_db()
dir_path = os.path.dirname(os.path.realpath(__file__))
data = {}
with open(os.path.join(dir_path, 'data', 'test_data.csv'), 'r') as fp:
for line in fp:
if line.startswith('#'):
continue
Variant, CHROM, POS, dbSNP_ID, REF, ALT, VariantType, Sample_ID, Sample_GT, ExonicFunc_refGene = line.strip().split(',')
if Variant not in data:
tmp_dict = {
'Variant': Variant,
'CHROM': CHROM,
'POS': POS,
'REF': REF,
'ALT': ALT,
'VariantType': VariantType,
'ExonicFunc_refGene': ExonicFunc_refGene,
'sample': []
}
if dbSNP_ID:
tmp_dict['dbSNP_ID'] = dbSNP_ID
data[Variant] = tmp_dict
sample_dict = {
'Sample_ID': Sample_ID,
'Sample_GT': Sample_GT
}
data[Variant]['sample'].append(sample_dict)
data_array = []
for key, values in data.items():
data_array.append(values)
es = elasticsearch.Elasticsearch(host='172.17.57.17', port=9200)
index_name = "test_data"
type_name = "test_data"
if es.indices.exists(index_name):
es.indices.delete(index_name)
es.indices.create(index_name)
es.cluster.health(wait_for_status="yellow")
body = {
type_name: {
"properties": {
"Variant": {
"type": "keyword"
},
"CHROM": {
"type": "keyword"
},
"POS": {
"type": "integer"
},
"dbSNP_ID": {
"type": "keyword"
},
"REF": {
"type": "keyword"
},
"ALT": {
"type": "keyword"
},
"VariantType": {
"type": "keyword"
},
"ExonicFunc_refGene": {
"type": "keyword"
},
"sample": {
"type": "nested",
"properties": {
"Sample_ID": {
"type": "keyword"
},
"Sample_GT": {
"type": "keyword"
}
}
}
}
}
}
es.indices.put_mapping(index=index_name, doc_type=type_name, body=body)
for body in data_array:
es.index(index=index_name, doc_type=type_name, body=body)
```
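Because `sample` is mapped as `nested` above, per-sample filters must be wrapped in a nested query; a minimal sketch, where the host and genotype value are assumptions:
```python
# Hypothetical nested query against the test_data index; host/value are placeholders.
import elasticsearch

es = elasticsearch.Elasticsearch(host='localhost', port=9200)
body = {
    "query": {
        "nested": {
            "path": "sample",
            "query": {"term": {"sample.Sample_GT": "0/1"}}
        }
    }
}
results = es.search(index="test_data", body=body)
print(results['hits']['total'])
```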
#### File: GenEsysV/mendelian/forms.py
```python
from django import forms
class MendelianAnalysisForm(forms.Form):
ANALYSIS_CHOICES = (('', '----'),
('autosomal_dominant', 'autosomal_dominant'),
('autosomal_recessive', 'autosomal_recessive'),
('compound_heterozygous', 'compound_heterozygous'),
('denovo', 'denovo'),
)
analysis_type = forms.ChoiceField(choices=ANALYSIS_CHOICES)
class KindredForm(forms.Form):
def __init__(self, number_of_families, *args, **kwargs):
super().__init__(*args, **kwargs)
KINDRED_CHOICES = [(ele, '> ' + str(ele)) for ele in range(1, number_of_families)]
KINDRED_CHOICES.insert(0, ('', '---No Kindred Filtering---'))
self.fields['number_of_kindred'] = forms.ChoiceField(
label='Number of Kindred', required=False, choices=KINDRED_CHOICES)
class FamilyForm(forms.Form):
def __init__(self, sample_ids, *args, **kwargs):
super().__init__(*args, **kwargs)
SAMPLE_CHOICES = [(ele, ele) for ele in sample_ids]
SAMPLE_CHOICES.insert(0, ('', '---Select ID---'))
self.fields['father_id'] = forms.ChoiceField(
label='Father ID', required=True, choices=SAMPLE_CHOICES)
self.fields['mother_id'] = forms.ChoiceField(
label='Mother ID', required=True, choices=SAMPLE_CHOICES)
self.fields['child_id'] = forms.CharField(
label='Child ID', required=True)
```
#### File: GenEsysV/mendelian/utils.py
```python
import copy
import datetime
import pprint
import sys
from collections import Counter, deque
import elasticsearch
from elasticsearch import helpers
from natsort import natsorted
from core.models import AttributeField, SearchLog
from core.utils import (BaseElasticSearchQueryDSL,
BaseElasticSearchQueryExecutor,
BaseElasticsearchResponseParser,
BaseSearchElasticsearch, get_values_from_es)
thismodule = sys.modules[__name__]
def filter_using_inner_hits(source_data, inner_hits_data):
inner_hit_candidates = []
for ele in inner_hits_data['hits']['hits']:
data = ele['_source']
inner_hit_keys = sorted(data.keys())
test_string = ''.join(["%s:%s" % (key, str(data[key])) for key in inner_hit_keys if key.strip()])
inner_hit_candidates.append(test_string)
output = []
for ele in source_data:
test_string = ''.join(["%s:%s" % (key, str(ele[key])) for key in inner_hit_keys if key.strip()])
if test_string in inner_hit_candidates:
output.append(ele)
return output
def filter_source_by_family_id(sample_data, family_id):
output = []
for ele in sample_data:
if ele.get('Family_ID') == family_id:
output.append(ele)
return output
def extract_sample_inner_hits_as_array(inner_hits_sample):
output = []
for ele in inner_hits_sample:
output.append(ele.get('_source'))
return output
class MendelianElasticSearchQueryExecutor(BaseElasticSearchQueryExecutor):
# pass
def __init__(self, dataset_obj, query_body, family_dict, mendelian_analysis_type, limit_results=True, elasticsearch_terminate_after=400):
        super().__init__(dataset_obj, query_body, elasticsearch_terminate_after=elasticsearch_terminate_after)
self.family_dict = family_dict
self.mendelian_analysis_type = mendelian_analysis_type
self.family_results = {}
self.limit_results = limit_results
self.elasticsearch_terminate_after = elasticsearch_terminate_after
def add_analysis_type_filter(self, analysis_type):
query_body = copy.deepcopy(self.query_body)
if 'query' not in query_body:
query_body['query'] = {'bool': {}}
query_body['query']['bool']['filter'] = [
{
"nested": {
"inner_hits": {"size": 100},
"path": "sample",
"score_mode": "none",
"query": {
"bool": {
"filter": [
{"term": {"sample.mendelian_diseases": analysis_type}}
]
}
}
}
}
]
elif query_body['query']['bool']['filter']:
filter_array = query_body['query']['bool']['filter']
filter_array_copy = copy.deepcopy(filter_array)
sample_array = None
for ele in filter_array:
if 'nested' in ele and ele['nested']['path'] == 'sample':
filter_array_copy.remove(ele)
sample_array = ele
if sample_array:
sample_array['nested']['inner_hits'] = {}
sample_array['nested']['query']['bool']['filter'].append(
{"term": {"sample.mendelian_diseases": analysis_type}})
filter_array_copy.append(sample_array)
query_body['query']['bool']['filter'] = filter_array_copy
else:
query_body['query']['bool']['filter'].append(
{
"nested": {
"inner_hits": {"size": 100},
"path": "sample",
"score_mode": "none",
"query": {
"bool": {
"filter": [
{"term": {"sample.mendelian_diseases": analysis_type}}
]
}
}
}
}
)
return query_body
def search(self):
results = {
"took": None,
"hits": {
"total": None,
"hits": deque()
}
}
count = 0
start_time = datetime.datetime.now()
es = elasticsearch.Elasticsearch(host=self.dataset_obj.es_host, port=self.dataset_obj.es_port)
if 'CSQ_nested' in es.indices.get_mapping()[self.dataset_obj.es_index_name]['mappings']['properties']:
annotation = 'VEP'
elif 'ExonicFunc_refGene' in es.indices.get_mapping()[self.dataset_obj.es_index_name]['mappings']['properties']:
annotation = 'ANNOVAR'
query_body = self.add_analysis_type_filter(self.mendelian_analysis_type)
if annotation == 'VEP' and self.mendelian_analysis_type in ['autosomal_recessive', 'compound_heterozygous', 'x_linked_recessive']:
query_body['query']['bool']['filter'].append(
{"nested": {
"inner_hits": {},
"path": "CSQ_nested",
"query": {
"bool": {
"filter": [
{"terms": {"CSQ_nested.Consequence": ["frameshift_variant", "splice_acceptor_variant", "splice_donor_variant", "start_lost", "start_retained_variant", "stop_gained", "stop_lost"]}}
]
}
},
"score_mode": "none"
}
})
print("$$$$ query %s" % query_body)
for hit in helpers.scan(
es,
query=query_body,
scroll=u'5m',
size=1000,
preserve_order=False,
doc_type = '_doc',
index=self.dataset_obj.es_index_name):
if self.limit_results and len(results['hits']['hits']) > self.elasticsearch_terminate_after:
break
inner_hits_sample = hit['inner_hits']['sample']['hits']['hits']
sample_data = extract_sample_inner_hits_as_array(inner_hits_sample)
tmp_results = hit.copy()
tmp_results['_source']['sample'] = sample_data
tmp_results['inner_hits'].pop('sample')
results['hits']['hits'].append(tmp_results)
count += 1
        elapsed_time = int((datetime.datetime.now() - start_time).total_seconds() * 1000)
        results['took'] = elapsed_time
results['hits']['total'] = count
return results
def excecute_elasticsearch_query(self):
results = self.search()
self.elasticsearch_response = results
class MendelianElasticsearchResponseParser(BaseElasticsearchResponseParser):
maximum_table_size = 400
class MendelianSearchElasticsearch(BaseSearchElasticsearch):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.mendelian_analysis_type = kwargs.get('mendelian_analysis_type')
self.number_of_kindred = kwargs.get('number_of_kindred')
self.family_dict = None
self.limit_results = kwargs.get('limit_results', True)
def _get_family(self, dataset_es_index_name, dataset_es_type_name, dataset_es_host, dataset_es_port, Family_ID):
body_template = """
{
"_source": false,
"size": 1,
"query": {
"nested": {
"path": "sample",
"score_mode": "none",
"query": {
"bool": {
"must" : [{
"term": { "sample.Family_ID": "%s"}}],
"must_not": [
{ "term": { "sample.Father_ID": -9}},
{ "term": { "sample.Mother_ID": -9}}
]
}
},
"inner_hits": {}
}
}
}
"""
es = elasticsearch.Elasticsearch(
host=dataset_es_host, port=dataset_es_port)
body = body_template % (Family_ID)
results = es.search(index=dataset_es_index_name,
body=body, request_timeout=120)
result = results['hits']['hits'][0]['inner_hits']['sample']['hits']['hits'][0]["_source"]
father_id = result.get('Father_ID')
mother_id = result.get('Mother_ID')
child_id = result.get('Sample_ID')
child_sex = result.get('Sex')
return (father_id, mother_id, child_id, child_sex)
def get_family_dict(self):
family_ids = get_values_from_es(self.dataset_obj.es_index_name,
self.dataset_obj.es_host,
self.dataset_obj.es_port,
'Family_ID',
'sample')
family_dict = {}
for family_id in family_ids:
father_id, mother_id, child_id, child_sex = self._get_family(self.dataset_obj.es_index_name,
self.dataset_obj.es_type_name,
self.dataset_obj.es_host,
self.dataset_obj.es_port,
family_id)
family_dict[family_id] = {'father_id': father_id,
'mother_id': mother_id, 'child_id': child_id, 'child_sex': child_sex}
self.family_dict = family_dict
def apply_kindred_filtering(self, elasticsearch_response):
if not self.number_of_kindred:
return elasticsearch_response
else:
results = {
"took": None,
"hits": {
"total": None,
"hits": deque()
}
}
es_ids = [variant.get('_id') for variant in elasticsearch_response['hits']['hits']]
es_id_counter = Counter(es_ids)
for variant in elasticsearch_response['hits']['hits']:
if es_id_counter.get(variant.get('_id')) > int(self.number_of_kindred):
results['hits']['hits'].append(variant)
return results
def run_elasticsearch_query_executor(self, limit_results=True):
self.get_family_dict()
elasticsearch_query_executor = self.elasticsearch_query_executor_class(
self.dataset_obj, self.query_body, self.family_dict, self.mendelian_analysis_type, limit_results)
self.elasticsearch_response = elasticsearch_query_executor.get_elasticsearch_response()
self.elasticsearch_response = self.apply_kindred_filtering(self.elasticsearch_response)
self.elasticsearch_response_time = elasticsearch_query_executor.get_elasticsearch_response_time()
def search(self):
self.run_elasticsearch_dsl()
self.run_elasticsearch_query_executor()
self.run_elasticsearch_response_parser_class()
self.log_search()
def download(self):
self.run_elasticsearch_query_executor(limit_results=self.limit_results)
self.run_elasticsearch_response_parser_class()
```
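The kindred filter in `MendelianSearchElasticsearch.apply_kindred_filtering` keeps only variants whose document `_id` is shared by more than `number_of_kindred` families; a toy illustration of that counting rule, with made-up ids:
```python
from collections import Counter

# Toy data standing in for elasticsearch_response['hits']['hits'].
hits = [{'_id': 'chr1-12345-A-G'}, {'_id': 'chr1-12345-A-G'}, {'_id': 'chr2-999-C-T'}]
counts = Counter(h['_id'] for h in hits)
number_of_kindred = 1
kept = [h for h in hits if counts[h['_id']] > number_of_kindred]
# -> both 'chr1-12345-A-G' hits survive; the singleton is dropped
```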
#### File: GenEsysV/utils/add_mendelian_annotations_refactor.py
```python
import elasticsearch
autosomal_recessive_vep_query_body_template = """{
"_source": ["sample", "CHROM", "ID", "POS", "REF", "Variant"
],
"query": {
"bool": {
"filter": [
{"nested": {
"path": "sample",
"query": {
"bool": {
"filter": [
{"term": {"sample.Sample_ID": "%s"}},
{"terms": {"sample.GT": ["1/1", "1|1"]}},
{"term": {"sample.Phenotype": "2"}},
{"term": {"sample.Mother_Phenotype": "1"}},
{"term": {"sample.Father_Phenotype": "1"}},
{"terms": {"sample.Mother_Genotype": ["0/1", "0|1", "1|0"]}},
{"terms": {"sample.Father_Genotype": ["0/1", "0|1", "1|0"]}}
]
}
},
"score_mode": "none"
}
},
{"nested": {
"path": "CSQ_nested",
"query": {
"bool": {
"filter": [
{"terms": {"CSQ_nested.Consequence": ["frameshift_variant", "splice_acceptor_variant", "splice_donor_variant", "start_lost", "start_retained_variant", "stop_gained", "stop_lost"]}}
]
}
},
"score_mode": "none"
}
}
],
"must_not" : [
{"terms": {"CHROM": ["X", "Y"]}}
]
}
}
}"""
autosomal_recessive_annovar_query_body_template = """{
"_source": ["sample", "CHROM", "ID", "POS", "REF", "Variant"
],
"query": {
"bool": {
"filter": [
{"nested": {
"path": "sample",
"query": {
"bool": {
"filter": [
{"term": {"sample.Sample_ID": "%s"}},
{"terms": {"sample.GT": ["1/1", "1|1"]}},
{"term": {"sample.Phenotype": "2"}},
{"term": {"sample.Mother_Phenotype": "1"}},
{"term": {"sample.Father_Phenotype": "1"}},
{"terms": {"sample.Mother_Genotype": ["0/1", "0|1", "1|0"]}},
{"terms": {"sample.Father_Genotype": ["0/1", "0|1", "1|0"]}}
]
}
},
"score_mode": "none"
}
}
],
"must_not" : [
{"terms": {"CHROM": ["X", "Y"]}}
],
"should" : [
{"terms": {"ExonicFunc_ensGene": ["frameshift_deletion", "frameshift_insertion", "stopgain", "stoploss"]}},
{"terms": {"ExonicFunc_refGene": ["frameshift_deletion", "frameshift_insertion", "stopgain", "stoploss"]}},
{"term": {"Func_ensGene": "splicing"}},
{"term": {"Func_refGene": "splicing"}}
],
"minimum_should_match": 1
}
}
}"""
denovo_query_body_template = """{
"_source": ["sample", "CHROM", "ID", "POS", "REF", "Variant"],
"query": {
"bool": {
"filter": [
{"nested": {
"path": "sample",
"query": {
"bool": {
"filter": [
{"term": {"sample.Sample_ID": "%s"}},
{"terms": {"sample.GT": ["0/1", "0|1", "1|0"]}},
{"term": {"sample.Phenotype": "2"}},
{"term": {"sample.Mother_Phenotype": "1"}},
{"term": {"sample.Father_Phenotype": "1"}},
{"term": {"sample.Mother_Genotype": "0/0"}},
{"term": {"sample.Father_Genotype": "0/0"}}
]
}
},
"score_mode": "none"
}
}
],
"must_not" : [
{"terms": {"CHROM": ["X", "Y"]}}
]
}
}
}"""
autosomal_dominant_query_body_template = """{
"_source": ["sample", "CHROM", "ID", "POS", "REF", "Variant"],
"query": {
"bool": {
"filter": [
{"nested": {
"path": "sample",
"query": {
"bool": {
"filter": [
{"term": {"sample.Sample_ID": "%s"}},
{"terms": {"sample.GT": ["0/1", "0|1", "1|0"]}},
{"term": {"sample.Phenotype": "2"}}
],
"should" :[
{"term": {"sample.Mother_Phenotype": "2"}},
{"term": {"sample.Father_Phenotype": "2"}}
],
"minimum_should_match": 1
}
},
"score_mode": "none"
}
}
],
"must_not" : [
{"terms": {"CHROM": ["X", "Y"]}}
]
}
}
}"""
compound_heterozygous_vep_query_body_template = """{
"_source": ["sample", "CHROM", "ID", "POS", "REF", "Variant"
],
"query": {
"bool": {
"filter": [
{"nested": {
"path": "sample",
"query": {
"bool": {
"filter": [
{"term": {"sample.Sample_ID": "%s"}},
{"terms": {"sample.GT": ["0/1", "0|1", "1|0"]}},
{"term": {"sample.Phenotype": "2"}},
{"term": {"sample.Mother_Phenotype": "1"}},
{"term": {"sample.Father_Phenotype": "1"}}
]
}
},
"score_mode": "none"
}
},
{"nested": {
"path": "CSQ_nested",
"query": {
"bool": {
"filter": [
{"terms": {"CSQ_nested.Consequence": ["frameshift_variant", "splice_acceptor_variant", "splice_donor_variant", "start_lost", "start_retained_variant", "stop_gained", "stop_lost"]}},
{"term": {"CSQ_nested.SYMBOL": "%s"}}
]
}
},
"score_mode": "none"
}
}
],
"must_not" : [
{"terms": {"CHROM": ["X", "Y"]}}
]
}
}
}"""
compound_heterozygous_annovar_query_body_template = """{
"_source": ["sample", "CHROM", "ID", "POS", "REF", "Variant"
],
"query": {
"bool": {
"filter": [
{"nested": {
"path": "sample",
"query": {
"bool": {
"filter": [
{"term": {"sample.Sample_ID": "%s"}},
{"terms": {"sample.GT": ["0/1", "0|1", "1|0"]}},
{"term": {"sample.Phenotype": "2"}},
{"term": {"sample.Mother_Phenotype": "1"}},
{"term": {"sample.Father_Phenotype": "1"}}
]
}
},
"score_mode": "none"
}
},
{"nested": {
"path": "AAChange_refGene",
"query": {
"bool": {
"filter": [
{"term": {"AAChange_refGene.Gene": "%s"}}
]
}
},
"score_mode": "none"
}
}
],
"must_not" : [
{"terms": {"CHROM": ["X", "Y"]}}
],
"should" : [
{"terms": {"ExonicFunc_ensGene": ["frameshift_deletion", "frameshift_insertion", "stopgain", "stoploss"]}},
{"terms": {"ExonicFunc_refGene": ["frameshift_deletion", "frameshift_insertion", "stopgain", "stoploss"]}},
{"term": {"Func_ensGene": "splicing"}},
{"term": {"Func_refGene": "splicing"}}
],
"minimum_should_match": 1
}
}
}"""
x_linked_dominant_query_body_template = """{
"_source": ["sample","CHROM","ID","POS","REF","Variant"
],
"query": {
"bool": {
"filter": [
{"term": {"CHROM": "X"}},
{"nested": {
"path": "sample",
"query": {
"bool": {
"filter": [
{"term": {"sample.Sample_ID": "%s"}},
{"term": {"sample.Phenotype": "2"}}
],
"should" :[
{"term": {"sample.Mother_Phenotype": "2"}},
{"term": {"sample.Father_Phenotype": "2"}}
],
"minimum_should_match": 1
}
},
"score_mode": "none"
}
}
],
"must_not" : [
{"range": {"POS": {"gt": %d, "lt": %d}}},
{"range": {"POS": {"gt": %d, "lt": %d}}}
]
}
}
}"""
x_linked_recessive_vep_query_body_template = """{
"_source": ["sample","CHROM","ID","POS","REF","Variant"
],
"query": {
"bool": {
"filter": [
{"term": {"CHROM": "X"}},
{"nested": {
"path": "sample",
"query": {
"bool": {
"filter": [
{"term": {"sample.Sample_ID": "%s"}},
{"term": {"sample.Phenotype": "2"}}
]
}
},
"score_mode": "none"
}
},
{"nested": {
"path": "CSQ_nested",
"query": {
"bool": {
"filter": [
{"terms": {"CSQ_nested.Consequence": ["frameshift_variant", "splice_acceptor_variant", "splice_donor_variant", "start_lost", "start_retained_variant", "stop_gained", "stop_lost"]}}
]
}
},
"score_mode": "none"
}
}
],
"must_not" : [
{"range": {"POS": {"gt": %d, "lt": %d}}},
{"range": {"POS": {"gt": %d, "lt": %d}}}
]
}
}
}"""
x_linked_recessive_annovar_query_body_template = """{
"_source": ["sample","CHROM","ID","POS","REF","Variant"
],
"query": {
"bool": {
"filter": [
{"term": {"CHROM": "X"}},
{"nested": {
"path": "sample",
"query": {
"bool": {
"filter": [
{"term": {"sample.Sample_ID": "%s"}},
{"term": {"sample.Phenotype": "2"}}
]
}
},
"score_mode": "none"
}
}
],
"must_not" : [
{"range": {"POS": {"gt": %d, "lt": %d}}},
{"range": {"POS": {"gt": %d, "lt": %d}}}
],
"should" : [
{"terms": {"ExonicFunc_ensGene": ["frameshift_deletion", "frameshift_insertion", "stopgain", "stoploss"]}},
{"terms": {"ExonicFunc_refGene": ["frameshift_deletion", "frameshift_insertion", "stopgain", "stoploss"]}},
{"term": {"Func_ensGene": "splicing"}},
{"term": {"Func_refGene": "splicing"}}
],
"minimum_should_match": 1
}
}
}"""
x_linked_de_novo_query_body_template = """{
"_source": ["sample","CHROM","ID","POS","REF","Variant"
],
"query": {
"bool": {
"filter": [
{"term": {"CHROM": "X"}},
{"nested": {
"path": "sample",
"query": {
"bool": {
"filter": [
{"term": {"sample.Sample_ID": "%s"}},
{"term": {"sample.Phenotype": "2"}}
]
}
},
"score_mode": "none"
}
}
],
"must_not" : [
{"range": {"POS": {"gt": %d, "lt": %d}}},
{"range": {"POS": {"gt": %d, "lt": %d}}}
]
}
}
}"""
import elasticsearch
from elasticsearch import helpers
import pprint
import json
from natsort import natsorted
def is_autosomal_dominant(sample_information):
if sample_information.get('Mother_Phenotype') == '2' and sample_information.get('Father_Phenotype') == '2':
return False
if sample_information.get('Mother_Phenotype') == '2':
if sample_information.get('Mother_Genotype') in ['0/1', '0|1', '1|0'] and sample_information.get('Father_Genotype') in ['0/0', '0|0']:
return True
# Case Father (Phenotype == 2)
elif sample_information.get('Father_Phenotype') == '2':
if sample_information.get('Mother_Genotype') in ['0/0', '0|0'] and sample_information.get('Father_Genotype') in ['0/1', '0|1', '1|0']:
return True
return False
def is_x_linked_dominant(sample_information):
if sample_information.get('Mother_Phenotype') == '2' and sample_information.get('Father_Phenotype') == '2':
return False
if sample_information.get('Sex') == '1':
if (sample_information.get('GT') in ["0/1", "0|1", "1|0", "1/1", "1|1", "1", "./1", ".|1", "1|."] and
sample_information.get('Mother_Genotype') in ["0/1", "0|1", "1|0"] and
sample_information.get('Mother_Phenotype') == "2" and
sample_information.get('Father_Phenotype') == "1" and
sample_information.get('Father_Genotype') in ["0", "0/0", "0|0"]):
return True
elif sample_information.get('Sex') == '2':
if sample_information.get('GT') in ["0/1", "0|1", "1|0"]:
if (sample_information.get('Mother_Genotype') in ["0/0", "0|0"] and
sample_information.get('Father_Genotype') in ["0/1", "0|1", "1|0", "1", "./1", ".|1", "1|."] and
sample_information.get('Father_Phenotype') == "2" and
sample_information.get('Mother_Phenotype') == "1"):
return True
elif (sample_information.get('Mother_Genotype') in ["0/1", "0|1", "1|0"] and
sample_information.get('Father_Genotype') in ["0/0", "0|0", "0"] and
                  sample_information.get('Mother_Phenotype') == "2" and
sample_information.get('Father_Phenotype') == "1"):
return True
return False
def is_x_linked_recessive(sample_information):
if sample_information.get('Sex') == '1':
if (sample_information.get('GT') not in ["0/0", "0|0", "0"] and
sample_information.get('Mother_Genotype') in ["0/1", "0|1", "1|0"] and
sample_information.get('Mother_Phenotype') == "1"):
return True
elif sample_information.get('Sex') == '2':
if (sample_information.get('GT') in ["1|1", "1/1"] and
sample_information.get('Mother_Genotype') in ["0/1", "0|1", "1|0"] and
sample_information.get('Mother_Phenotype') == "1" and
sample_information.get('Father_Genotype') in ["0/1", "0|1", "1|0", "1", "./1", ".|1", "1|."] and
sample_information.get('Father_Phenotype') == "2"):
return True
return False
def is_x_linked_denovo(sample_information):
if sample_information.get('Sex') == '1':
if (sample_information.get('GT') in ["0/1", "0|1", "1|0", "1/1", "1|1", "1"] and
sample_information.get('Mother_Genotype') in ["0/0", "0|0", "0"] and
sample_information.get('Mother_Phenotype') == "1" and
sample_information.get('Father_Genotype') in ["0/0", "0|0", "0"] and
sample_information.get('Father_Phenotype') == "1"):
return True
elif sample_information.get('Sex') == '2':
if (sample_information.get('GT') in ["0/1", "0|1", "1|0"] and
sample_information.get('Mother_Genotype') in ["0/0", "0|0", "0"] and
sample_information.get('Mother_Phenotype') == "1" and
sample_information.get('Father_Genotype') in ["0/0", "0|0", "0"] and
sample_information.get('Father_Phenotype') == "1"):
return True
return False
def get_vep_genes_from_es_for_compound_heterozygous(es, index_name, doc_type_name):
compound_heterozygous_query_body_template = """{
"_source": ["sample", "CHROM", "ID", "POS", "REF", "Variant", "CSQ_nested"
],
"query": {
"bool": {
"filter": [
{"nested": {
"path": "sample",
"query": {
"bool": {
"filter": [
{"terms": {"sample.GT": ["0/1", "0|1", "1|0"]}},
{"term": {"sample.Phenotype": "2"}},
{"term": {"sample.Mother_Phenotype": "1"}},
{"term": {"sample.Father_Phenotype": "1"}}
]
}
},
"score_mode": "none"
}
},
{"nested": {
"path": "CSQ_nested",
"query": {
"bool": {
"filter": [
{"terms": {"CSQ_nested.Consequence": ["frameshift_variant", "splice_acceptor_variant", "splice_donor_variant", "start_lost", "start_retained_variant", "stop_gained", "stop_lost"]}}
]
}
},
"score_mode": "none"
}
}
],
"must_not" : [
{"terms": {"CHROM": ["X", "Y"]}}
]
}
},
"size": 0,
"aggs" : {
"values" : {
"nested" : {
"path" : "CSQ_nested"
},
"aggs" : {
"values" : {"terms" : {"field" : "CSQ_nested.SYMBOL", "size" : 30000}}
}
}
}
}"""
results = es.search(index=index_name, doc_type=doc_type_name,
body=compound_heterozygous_query_body_template, request_timeout=120)
return natsorted([ele['key'] for ele in results["aggregations"]["values"]["values"]["buckets"] if ele['key']])
def get_annovar_genes_from_es_for_compound_heterozygous(es, index_name, doc_type_name):
compound_heterozygous_query_body_template = """{
"_source": ["sample", "CHROM", "ID", "POS", "REF", "Variant", "CSQ_nested"
],
"query": {
"bool": {
"filter": [
{"nested": {
"path": "sample",
"query": {
"bool": {
"filter": [
{"terms": {"sample.GT": ["0/1", "0|1", "1|0"]}},
{"term": {"sample.Phenotype": "2"}},
{"term": {"sample.Mother_Phenotype": "1"}},
{"term": {"sample.Father_Phenotype": "1"}}
]
}
},
"score_mode": "none"
}
}
],
"must_not" : [
{"terms": {"CHROM": ["X", "Y"]}}
],
"should" : [
{"terms": {"ExonicFunc_ensGene": ["frameshift_deletion", "frameshift_insertion", "stopgain", "stoploss"]}},
{"terms": {"ExonicFunc_refGene": ["frameshift_deletion", "frameshift_insertion", "stopgain", "stoploss"]}},
{"term": {"Func_ensGene": "splicing"}},
{"term": {"Func_refGene": "splicing"}}
],
"minimum_should_match": 1
}
},
"size": 0,
"aggs" : {
"values" : {
"nested" : {
"path" : "AAChange_refGene"
},
"aggs" : {
"values" : {"terms" : {"field" : "AAChange_refGene.Gene", "size" : 30000}}
}
}
}
}"""
results = es.search(index=index_name, doc_type=doc_type_name,
body=compound_heterozygous_query_body_template, request_timeout=120)
return natsorted([ele['key'] for ele in results["aggregations"]["values"]["values"]["buckets"] if ele['key']])
def get_values_from_es(es, index_name, doc_type_name, field_es_name, field_path):
if not field_path:
body_non_nested_template = """
{
"size": 0,
"aggs" : {
"values" : {
"terms" : { "field" : "%s", "size" : 30000 }
}
}
}
"""
body = body_non_nested_template % (field_es_name)
results = es.search(index=index_name, doc_type=doc_type_name, body=body, request_timeout=120)
return [ele['key'] for ele in results["aggregations"]["values"]["buckets"] if ele['key']]
elif field_path:
body_nested_template = """
{
"size": 0,
"aggs" : {
"values" : {
"nested" : {
"path" : "%s"
},
"aggs" : {
"values" : {"terms" : {"field" : "%s.%s", "size" : 30000}}
}
}
}
}
"""
body = body_nested_template % (field_path,
field_path,
field_es_name)
results = es.search(index=index_name, doc_type=doc_type_name, body=body, request_timeout=120)
return [ele['key'] for ele in results["aggregations"]["values"]["values"]["buckets"] if ele['key']]
def get_family_dict(es, index_name, doc_type_name):
family_ids = get_values_from_es(es, index_name, doc_type_name, 'Family_ID', 'sample')
family_dict = {}
body_template = """
{
"_source": false,
"size": 1,
"query": {
"nested": {
"path": "sample",
"score_mode": "none",
"query": {
"bool": {
"must" : [{"term": { "sample.Family_ID": "%s"}},
{"exists": { "field": "sample.Father_ID"}},
{"exists": { "field": "sample.Mother_ID"}}
]
}
},
"inner_hits": {}
}
}
}
"""
family_dict = {}
for family_id in family_ids:
body = body_template % (family_id)
results = es.search(index=index_name, doc_type=doc_type_name, body=body, request_timeout=120)
result = results['hits']['hits'][0]['inner_hits']['sample']['hits']['hits'][0]["_source"]
father_id = result.get('Father_ID')
mother_id = result.get('Mother_ID')
child_id = result.get('Sample_ID')
child_sex = result.get('Sex')
family_dict[family_id] = {'father_id': father_id,
'mother_id': mother_id, 'child_id': child_id, 'child_sex': child_sex}
return family_dict
def pop_sample_with_id(sample_array, sample_id):
saved_index = 0
for index, sample in enumerate(sample_array):
if sample.get('Sample_ID') == sample_id:
saved_index = index
sample = sample_array.pop(saved_index)
return sample
def pop_sample_with_id_apply_compound_het_rules(sample_array, sample_id):
saved_index = 0
for index, sample in enumerate(sample_array):
if sample.get('Sample_ID') == sample_id:
saved_index = index
sample = sample_array.pop(saved_index)
if (sample.get('Mother_Genotype') in ["0/1", "0|1", "1|0"] and
sample.get('Father_Genotype') in ["0/0", "0|0"]):
return sample
elif (sample.get('Mother_Genotype') in ["0/0", "0|0"] and
sample.get('Father_Genotype') in ["0/1", "0|1", "1|0"]):
return sample
return None
def are_variants_compound_heterozygous(variants):
    """Return the subset of ``variants`` forming a compound-heterozygous pair
    (mirrored father/mother genotype pairs, i.e. one variant inherited from each
    parent); return False if no such pair is found."""
compound_heterozygous_found = False
gt_pair_whose_reverse_to_find = None
compound_heterozygous_variants = []
for variant in variants:
father_gt = variant.get('Father_Genotype')
mother_gt = variant.get('Mother_Genotype')
sum_digits = sum([int(char)
for char in father_gt + mother_gt if char.isdigit()])
if sum_digits != 1:
continue
if not gt_pair_whose_reverse_to_find:
gt_pair_whose_reverse_to_find = [father_gt, mother_gt]
compound_heterozygous_variants.append(variant)
continue
current_gt_pair = [father_gt, mother_gt]
current_gt_pair.reverse()
if gt_pair_whose_reverse_to_find == current_gt_pair:
compound_heterozygous_variants.append(variant)
compound_heterozygous_found = True
if compound_heterozygous_found:
return compound_heterozygous_variants
else:
return False
def annotate_autosomal_recessive(es, index_name, doc_type_name, family_dict, annotation):
sample_matched = []
for family_id, family in family_dict.items():
count = 0
actions = []
child_id = family.get('child_id')
# print(child_id)
if annotation == 'vep':
query_body = autosomal_recessive_vep_query_body_template % (child_id)
elif annotation == 'annovar':
query_body = autosomal_recessive_annovar_query_body_template % (child_id)
# print(query_body)
query_body = json.loads(query_body)
for hit in helpers.scan(
es,
query=query_body,
scroll=u'5m',
size=1000,
preserve_order=False,
index=index_name,
doc_type=doc_type_name):
es_id = hit['_id']
sample_array = hit["_source"]["sample"]
sample = pop_sample_with_id(sample_array, child_id)
tmp_id = es_id + child_id
mendelian_diseases = sample.get('mendelian_diseases', [])
if 'autosomal_recessive' in mendelian_diseases:
if tmp_id not in sample_matched:
sample_matched.append(tmp_id)
continue
to_update = False
if mendelian_diseases:
if 'autosomal_recessive' not in mendelian_diseases:
mendelian_diseases.append('autosomal_recessive')
to_update = True
else:
to_update = True
sample['mendelian_diseases'] = ['autosomal_recessive']
if tmp_id not in sample_matched:
sample_matched.append(tmp_id)
if to_update:
sample_array.append(sample)
action = {
"_index": index_name,
'_op_type': 'update',
"_type": doc_type_name,
"_id": es_id,
"doc": {
"sample": sample_array
}
}
count += 1
actions.append(action)
if count % 500 == 0:
helpers.bulk(es, actions, refresh=True)
actions = []
helpers.bulk(es, actions, refresh=True)
es.indices.refresh(index_name)
es.cluster.health(wait_for_no_relocating_shards=True)
print('Found {} autosomal_recessive samples'.format(len(list(set(sample_matched)))))
def annotate_denovo(es, index_name, doc_type_name, family_dict):
sample_matched = []
for family_id, family in family_dict.items():
count = 0
actions = []
child_id = family.get('child_id')
# print(child_id)
query_body = denovo_query_body_template % (child_id)
# print(query_body)
query_body = json.loads(query_body)
for hit in helpers.scan(
es,
query=query_body,
scroll=u'5m',
size=1000,
preserve_order=False,
index=index_name,
doc_type=doc_type_name):
es_id = hit['_id']
sample_array = hit["_source"]["sample"]
sample = pop_sample_with_id(sample_array, child_id)
tmp_id = es_id + child_id
mendelian_diseases = sample.get('mendelian_diseases', [])
if 'denovo' in mendelian_diseases:
if tmp_id not in sample_matched:
sample_matched.append(tmp_id)
continue
to_update = False
if mendelian_diseases:
if 'denovo' not in mendelian_diseases:
mendelian_diseases.append('denovo')
print(es_id, mendelian_diseases)
to_update = True
else:
sample['mendelian_diseases'] = ['denovo']
to_update = True
if tmp_id not in sample_matched:
sample_matched.append(tmp_id)
if to_update:
sample_array.append(sample)
action = {
"_index": index_name,
'_op_type': 'update',
"_type": doc_type_name,
"_id": es_id,
"doc": {
"sample": sample_array
}
}
count += 1
actions.append(action)
if count % 500 == 0:
helpers.bulk(es, actions, refresh=True)
actions = []
helpers.bulk(es, actions, refresh=True)
es.indices.refresh(index_name)
es.cluster.health(wait_for_no_relocating_shards=True)
print('Found {} denovo samples'.format(len(list(set(sample_matched)))))
def annotate_autosomal_dominant(es, index_name, doc_type_name, family_dict):
sample_matched = []
for family_id, family in family_dict.items():
count = 0
actions = []
child_id = family.get('child_id')
# print(child_id)
query_body = autosomal_dominant_query_body_template % (child_id)
# print(query_body)
query_body = json.loads(query_body)
for hit in helpers.scan(
es,
query=query_body,
scroll=u'5m',
size=1000,
preserve_order=False,
index=index_name,
doc_type=doc_type_name):
# pprint.pprint(hit["_source"])
es_id = hit['_id']
sample_array = hit["_source"]["sample"]
sample = pop_sample_with_id(sample_array, child_id)
mendelian_diseases = sample.get('mendelian_diseases', [])
tmp_id = es_id + child_id
if 'autosomal_dominant' in mendelian_diseases:
if tmp_id not in sample_matched:
sample_matched.append(tmp_id)
continue
if is_autosomal_dominant(sample):
to_update = False
if mendelian_diseases:
if 'autosomal_dominant' not in mendelian_diseases:
mendelian_diseases.append('autosomal_dominant')
print(es_id, mendelian_diseases)
to_update = True
else:
sample['mendelian_diseases'] = ['autosomal_dominant']
to_update = True
if tmp_id not in sample_matched:
sample_matched.append(tmp_id)
if to_update:
sample_array.append(sample)
action = {
"_index": index_name,
'_op_type': 'update',
"_type": doc_type_name,
"_id": es_id,
"doc": {
"sample": sample_array
}
}
count += 1
actions.append(action)
if count % 500 == 0:
helpers.bulk(es, actions, refresh=True)
actions = []
helpers.bulk(es, actions, refresh=True)
es.indices.refresh(index_name)
es.cluster.health(wait_for_no_relocating_shards=True)
print('Found {} autosomal dominant samples'.format(len(list(set(sample_matched)))))
range_rules = {
'hg19/GRCh37': ([60001, 2699520], [154931044, 155260560]),
'hg38/GRCh38': ([10001, 2781479], [155701383, 156030895])
}
def annotate_x_linked_dominant(es, index_name, doc_type_name, family_dict):
sample_matched = []
for family_id, family in family_dict.items():
count = 0
actions = []
child_id = family.get('child_id')
# print(child_id)
query_body = x_linked_dominant_query_body_template % (
child_id,
range_rules['hg19/GRCh37'][0][0],
range_rules['hg19/GRCh37'][0][1],
range_rules['hg19/GRCh37'][1][0],
range_rules['hg19/GRCh37'][1][1])
# print(query_body)
query_body = json.loads(query_body)
for hit in helpers.scan(
es,
query=query_body,
scroll=u'5m',
size=1000,
preserve_order=False,
index=index_name,
doc_type=doc_type_name):
# pprint.pprint(hit["_source"])
es_id = hit['_id']
# print(es_id)
sample_array = hit["_source"]["sample"]
sample = pop_sample_with_id(sample_array, child_id)
tmp_id = es_id + child_id
mendelian_diseases = sample.get('mendelian_diseases', [])
if 'x_linked_dominant' in mendelian_diseases:
if tmp_id not in sample_matched:
sample_matched.append(tmp_id)
continue
if is_x_linked_dominant(sample):
to_update = False
if mendelian_diseases:
if 'x_linked_dominant' not in mendelian_diseases:
mendelian_diseases.append('x_linked_dominant')
print(es_id, mendelian_diseases)
to_update = True
else:
sample['mendelian_diseases'] = ['x_linked_dominant']
to_update = True
if tmp_id not in sample_matched:
sample_matched.append(tmp_id)
if to_update:
sample_array.append(sample)
action = {
"_index": index_name,
'_op_type': 'update',
"_type": doc_type_name,
"_id": es_id,
"doc": {
"sample": sample_array
}
}
count += 1
actions.append(action)
if count % 500 == 0:
helpers.bulk(es, actions, refresh=True)
actions = []
helpers.bulk(es, actions, refresh=True)
es.indices.refresh(index_name)
es.cluster.health(wait_for_no_relocating_shards=True)
print('Found {} x_linked_dominant samples'.format(len(list(set(sample_matched)))))
def annotate_x_linked_recessive(es, index_name, doc_type_name, family_dict, annotation):
sample_matched = []
for family_id, family in family_dict.items():
count = 0
actions = []
child_id = family.get('child_id')
# print(child_id)
if annotation == 'vep':
query_body = x_linked_recessive_vep_query_body_template % (
child_id,
range_rules['hg19/GRCh37'][0][0],
range_rules['hg19/GRCh37'][0][1],
range_rules['hg19/GRCh37'][1][0],
range_rules['hg19/GRCh37'][1][1]
)
elif annotation == 'annovar':
query_body = x_linked_recessive_annovar_query_body_template % (
child_id,
range_rules['hg19/GRCh37'][0][0],
range_rules['hg19/GRCh37'][0][1],
range_rules['hg19/GRCh37'][1][0],
range_rules['hg19/GRCh37'][1][1]
)
# print(query_body)
query_body = json.loads(query_body)
for hit in helpers.scan(
es,
query=query_body,
scroll=u'5m',
size=1000,
preserve_order=False,
index=index_name,
doc_type=doc_type_name):
# pprint.pprint(hit["_source"])
es_id = hit['_id']
sample_array = hit["_source"]["sample"]
sample = pop_sample_with_id(sample_array, child_id)
tmp_id = es_id + child_id
mendelian_diseases = sample.get('mendelian_diseases', [])
if 'x_linked_recessive' in mendelian_diseases:
if tmp_id not in sample_matched:
sample_matched.append(tmp_id)
continue
if is_x_linked_recessive(sample):
# sample['mendelian_diseases'] = 'x_linked_recessive'
to_update = False
if mendelian_diseases:
if 'x_linked_recessive' not in mendelian_diseases:
mendelian_diseases.append('x_linked_recessive')
print(es_id, mendelian_diseases)
to_update = True
else:
sample['mendelian_diseases'] = ['x_linked_recessive']
to_update = True
if tmp_id not in sample_matched:
sample_matched.append(tmp_id)
# if to_update:
# es.update(index=index_name, doc_type=doc_type_name, id=es_id,
# body={"doc": {"sample": sample_array}})
if to_update:
sample_array.append(sample)
action = {
"_index": index_name,
'_op_type': 'update',
"_type": doc_type_name,
"_id": es_id,
"doc": {
"sample": sample_array
}
}
count += 1
actions.append(action)
if count % 500 == 0:
helpers.bulk(es, actions, refresh=True)
actions = []
helpers.bulk(es, actions, refresh=True)
es.indices.refresh(index_name)
es.cluster.health(wait_for_no_relocating_shards=True)
print('Found {} x_linked_recessive samples'.format(len(list(set(sample_matched)))))
def annotate_x_linked_denovo(es, index_name, doc_type_name, family_dict):
sample_matched = []
for family_id, family in family_dict.items():
count = 0
actions = []
child_id = family.get('child_id')
# print(child_id)
query_body = x_linked_de_novo_query_body_template % (
child_id,
range_rules['hg19/GRCh37'][0][0],
range_rules['hg19/GRCh37'][0][1],
range_rules['hg19/GRCh37'][1][0],
range_rules['hg19/GRCh37'][1][1])
# print(query_body)
query_body = json.loads(query_body)
for hit in helpers.scan(
es,
query=query_body,
scroll=u'5m',
size=1000,
preserve_order=False,
index=index_name,
doc_type=doc_type_name):
# pprint.pprint(hit["_source"])
es_id = hit['_id']
sample_array = hit["_source"]["sample"]
sample = pop_sample_with_id(sample_array, child_id)
tmp_id = es_id + child_id
mendelian_diseases = sample.get('mendelian_diseases', [])
if 'x_linked_denovo' in mendelian_diseases:
if tmp_id not in sample_matched:
sample_matched.append(tmp_id)
continue
if is_x_linked_denovo(sample):
mendelian_diseases = sample.get('mendelian_diseases')
to_update = False
if mendelian_diseases:
if 'x_linked_denovo' not in mendelian_diseases:
mendelian_diseases.append('x_linked_denovo')
print(type(mendelian_diseases), es_id, mendelian_diseases)
to_update = True
else:
sample['mendelian_diseases'] = ['x_linked_denovo']
to_update = True
tmp_id = es_id + child_id
if tmp_id not in sample_matched:
sample_matched.append(tmp_id)
if to_update:
sample_array.append(sample)
action = {
"_index": index_name,
'_op_type': 'update',
"_type": doc_type_name,
"_id": es_id,
"doc": {
"sample": sample_array
}
}
count += 1
actions.append(action)
if count % 500 == 0:
helpers.bulk(es, actions, refresh=True)
actions = []
helpers.bulk(es, actions, refresh=True)
es.indices.refresh(index_name)
es.cluster.health(wait_for_no_relocating_shards=True)
print('Found {} x_linked_denovo samples'.format(len(list(set(sample_matched)))))
def annotate_compound_heterozygous(es, index_name, doc_type_name, family_dict, annotation):
sample_matched = []
for family_id, family in family_dict.items():
child_id = family.get('child_id')
if annotation == 'vep':
genes = get_vep_genes_from_es_for_compound_heterozygous(es, index_name, doc_type_name)
elif annotation == 'annovar':
genes = get_annovar_genes_from_es_for_compound_heterozygous(es, index_name, doc_type_name)
for gene in genes:
if annotation == 'vep':
query_body = compound_heterozygous_vep_query_body_template % (child_id, gene)
elif annotation == 'annovar':
query_body = compound_heterozygous_annovar_query_body_template % (child_id, gene)
query_body = json.loads(query_body)
samples = []
for hit in helpers.scan(
es,
query=query_body,
scroll=u'5m',
size=1000,
preserve_order=False,
index=index_name,
doc_type=doc_type_name):
es_id = hit['_id']
sample_array = hit["_source"]["sample"]
sample = pop_sample_with_id_apply_compound_het_rules(sample_array, child_id)
if not sample:
continue
sample.update({'es_id': es_id})
samples.append(sample)
actions = []
count = 0
if len(samples) > 1 and are_variants_compound_heterozygous(samples):
for sample in samples:
es_id = sample.pop("es_id")
es_document = es.get(index_name, doc_type_name, es_id)
sample_array = es_document["_source"]["sample"]
sample = pop_sample_with_id(sample_array, child_id)
mendelian_diseases = sample.get('mendelian_diseases', [])
tmp_id = es_id + child_id
if 'compound_heterozygous' in mendelian_diseases:
if tmp_id not in sample_matched:
sample_matched.append(tmp_id)
continue
to_update = False
if mendelian_diseases:
if 'compound_heterozygous' not in mendelian_diseases:
mendelian_diseases.append('compound_heterozygous')
print(es_id, mendelian_diseases)
to_update = True
else:
sample['mendelian_diseases'] = ['compound_heterozygous']
to_update = True
if tmp_id not in sample_matched:
sample_matched.append(tmp_id)
if to_update:
sample_array.append(sample)
action = {
"_index": index_name,
'_op_type': 'update',
"_type": doc_type_name,
"_id": es_id,
"doc": {
"sample": sample_array
}
}
count += 1
actions.append(action)
if count % 500 == 0:
helpers.bulk(es, actions, refresh=True)
actions = []
helpers.bulk(es, actions, refresh=True)
es.indices.refresh(index_name)
es.cluster.health(wait_for_no_relocating_shards=True)
print('Found {} compound_heterozygous samples'.format(len(list(set(sample_matched)))))
def main():
import datetime
index_name = "ashkenazitrio4families"
doc_type_name = "ashkenazitrio4families_"
annotation = 'vep'
es = elasticsearch.Elasticsearch(host='172.16.58.3', port=9200)
family_dict = get_family_dict(es, index_name, doc_type_name)
pprint.pprint(family_dict)
all_start_time = datetime.datetime.now()
start_time = datetime.datetime.now()
print('Starting annotate_autosomal_recessive', start_time)
annotate_autosomal_recessive(es, index_name, doc_type_name, family_dict, annotation)
print('Finished annotate_autosomal_recessive', int((datetime.datetime.now() - start_time).total_seconds()), 'seconds')
start_time = datetime.datetime.now()
print('Starting annotate_denovo', start_time)
annotate_denovo(es, index_name, doc_type_name, family_dict)
print('Finished annotate_denovo', int((datetime.datetime.now() - start_time).total_seconds()), 'seconds')
start_time = datetime.datetime.now()
print('Starting annotate_autosomal_dominant', start_time)
annotate_autosomal_dominant(es, index_name, doc_type_name, family_dict)
print('Finished annotate_autosomal_dominant', int((datetime.datetime.now() - start_time).total_seconds()), 'seconds')
start_time = datetime.datetime.now()
print('Starting annotate_x_linked_dominant', start_time)
annotate_x_linked_dominant(es, index_name, doc_type_name, family_dict)
print('Finished annotate_x_linked_dominant', int((datetime.datetime.now() - start_time).total_seconds()), 'seconds')
start_time = datetime.datetime.now()
print('Starting annotate_x_linked_recessive', start_time)
annotate_x_linked_recessive(es, index_name, doc_type_name, family_dict, annotation)
print('Finished annotate_x_linked_recessive', int((datetime.datetime.now() - start_time).total_seconds()), 'seconds')
start_time = datetime.datetime.now()
print('Starting annotate_x_linked_denovo', start_time)
annotate_x_linked_denovo(es, index_name, doc_type_name, family_dict)
print('Finished annotate_x_linked_denovo', int((datetime.datetime.now() - start_time).total_seconds()), 'seconds')
start_time = datetime.datetime.now()
print('Starting annotate_compound_heterozygous', start_time)
annotate_compound_heterozygous(es, index_name, doc_type_name, family_dict, annotation)
print('Finished annotate_compound_heterozygous', int(
(datetime.datetime.now() - start_time).total_seconds()), 'seconds')
print('Finished annotating all in ', int((datetime.datetime.now() - all_start_time).total_seconds()), 'seconds')
if __name__ == "__main__":
main()
```
#### File: GenEsysV/utils/autosomal_dominant.py
```python
import json
from pprint import pprint
from elasticsearch import Elasticsearch, helpers
es = Elasticsearch(['http://199.109.193.178:9200/'])
doc = {
'size' : 10000,
'query': {
'match_all' : {}
}
}
# doc = {
# 'size' : 10000,
# "query": {
# "nested" : {
# "path" : "sample",
# "query" : {
# "bool" : {
# "filter" : [
# { "match" : {"sample.sample_id" : "1805"} }
# { "match" : {"sample.GT" : "1805"} }
# ]
# }
# }
# }
# }
# }
query_string = """
{
"_source": ["Variant", "sample"],
"query": {
"nested" : {
"path" : "sample",
"score_mode" : "none",
"query" : {
"bool" : {
"filter" : [
{ "term" : {"sample.Sample_ID" : "%s"} },
{ "terms" : {"sample.GT" : ["0/1", "0|1", "1|0"] }}
]
}
}
}
}
}
"""
"""
Mendelian - de novo mutations selected:
Filter by this rule: mother_genotype == '0/0' or '0|0',
father_genotype == '0/0' or '0|0',
affected child_genotype == '0/1' or '0|1' or '1|0'.
"""
mother_id = "1805"
father_id = "1847"
child_id = "4805"
def is_autosomal_dominant(sample_array, father_id, mother_id, child_id):
looking_for_ids = (father_id, mother_id, child_id)
mother_gt = father_gt = child_gt = 'N/A'
for ele in sample_array:
sample_id = ele.get('Sample_ID')
if sample_id not in looking_for_ids:
continue
if sample_id == father_id:
father_gt = ele.get('GT')
elif sample_id == mother_id:
mother_gt = ele.get('GT')
elif sample_id == child_id:
child_gt = ele.get('GT')
if child_gt not in ['0/1', '0|1', '1|0']:
return None
if not all( (father_gt, mother_gt, child_gt)):
return None
case_1 = case_2 = False
# Case 1
"""
mother_genotype == '0/1' or '0|1' or '1|0',
father_genotype == '0/0' or '0|0',
affected child_genotype == '0/1' or '0|1' or '1|0'.
"""
if mother_gt in ['0/1', '0|1', '1|0',] and father_gt == 'N/A':
case_1 = True
# Case 2
"""
mother_genotype == '0/0' or '0|0',
father_genotype == '0/1' or '0|1' or '1|0',
affected child_genotype == '0/1' or '0|1' or '1|0'.
"""
if mother_gt == 'N/A' and father_gt in ['0/1', '0|1', '1|0',]:
case_2 = True
if child_gt == 'N/A':
print('How did this happen?')
pprint(sample_array)
return None
if any((case_1, case_2)):
return (father_gt, mother_gt, child_gt)
else:
return None
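# --- Added worked example (not part of the original script) ---
# With father_id='1847', mother_id='1805', child_id='4805' and a (hypothetical) array
#   sample_array = [{'Sample_ID': '1847', 'GT': '0/1'},   # father, heterozygous
#                   {'Sample_ID': '4805', 'GT': '0/1'}]   # affected child, heterozygous
# the mother is absent, so mother_gt stays 'N/A'; Case 2 applies and the function
# returns ('0/1', 'N/A', '0/1'), i.e. the variant counts as autosomal dominant.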
query = json.loads(query_string % (child_id))
autosomal_dominant_count = 0
total = 0
for ele in helpers.scan(es,
query=query,
scroll=u'5m',
size=10000,
preserve_order=False,
index='test1',
doc_type='test1_'):
result = ele['_source']
autosomal_dominant = is_autosomal_dominant(result.get('sample'), father_id, mother_id, child_id)
if autosomal_dominant:
autosomal_dominant_count += 1
# print(autosomal_dominant_count, autosomal_dominant)
total += 1
print('Total Candidates: ', total, 'autosomal_dominant: ', autosomal_dominant_count)
``` |
{
"source": "jianxinwei/privacy",
"score": 2
} |
#### File: privacy/keras_models/dp_keras_model.py
```python
import tensorflow as tf
def make_dp_model_class(cls):
"""Given a subclass of `tf.keras.Model`, returns a DP-SGD version of it."""
class DPModelClass(cls):
"""A DP version of `cls`, which should be a subclass of `tf.keras.Model`."""
def __init__(
self,
l2_norm_clip,
noise_multiplier,
use_xla=True,
*args, # pylint: disable=keyword-arg-before-vararg, g-doc-args
**kwargs):
"""Initializes the DPModelClass.
Args:
l2_norm_clip: Clipping norm (max L2 norm of per microbatch
gradients).
noise_multiplier: Ratio of the standard deviation to the clipping
norm.
use_xla: If True, compiles train_step to XLA.
"""
super(DPModelClass, self).__init__(*args, **kwargs)
self._l2_norm_clip = l2_norm_clip
self._noise_multiplier = noise_multiplier
if use_xla:
self.train_step = tf.function(
self.train_step, experimental_compile=True)
def _process_per_example_grads(self, grads):
grads_flat = tf.nest.flatten(grads)
squared_l2_norms = [
tf.reduce_sum(input_tensor=tf.square(g)) for g in grads_flat
]
global_norm = tf.sqrt(tf.add_n(squared_l2_norms))
div = tf.maximum(global_norm / self._l2_norm_clip, 1.)
clipped_flat = [g / div for g in grads_flat]
return tf.nest.pack_sequence_as(grads, clipped_flat)
def _reduce_per_example_grads(self, stacked_grads):
summed_grads = tf.reduce_sum(input_tensor=stacked_grads, axis=0)
noise_stddev = self._l2_norm_clip * self._noise_multiplier
noise = tf.random.normal(
tf.shape(input=summed_grads), stddev=noise_stddev)
noised_grads = summed_grads + noise
return noised_grads / tf.cast(stacked_grads.shape[0], noised_grads.dtype)
def _compute_per_example_grads(self, data):
x, y = data
with tf.GradientTape() as tape:
# We need to add the extra dimension to x and y because model
# expects batched input.
y_pred = self(x[None], training=True)
loss = self.compiled_loss(
y[None], y_pred, regularization_losses=self.losses)
grads_list = tape.gradient(loss, self.trainable_variables)
clipped_grads = self._process_per_example_grads(grads_list)
return tf.squeeze(y_pred, axis=0), loss, clipped_grads
def train_step(self, data):
_, y = data
y_pred, _, per_eg_grads = tf.vectorized_map(
self._compute_per_example_grads, data)
grads = tf.nest.map_structure(self._reduce_per_example_grads,
per_eg_grads)
self.optimizer.apply_gradients(zip(grads, self.trainable_variables))
self.compiled_metrics.update_state(y, y_pred)
return {m.name: m.result() for m in self.metrics}
return DPModelClass
DPModel = make_dp_model_class(tf.keras.Model)
DPSequential = make_dp_model_class(tf.keras.Sequential)
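# --- Added usage sketch (not part of the original module) ---
# Minimal illustration of how the wrappers above might be used. The layer sizes,
# optimizer and hyper-parameters are arbitrary assumptions; training data is
# expected to be supplied by the caller.
def _example_build_dp_model():
    model = DPSequential(
        l2_norm_clip=1.0,        # max L2 norm of each per-example gradient
        noise_multiplier=1.1,    # noise stddev relative to the clipping norm
        layers=[
            tf.keras.layers.Dense(32, activation='relu', input_shape=(20,)),
            tf.keras.layers.Dense(1),
        ])
    model.compile(optimizer='sgd', loss=tf.keras.losses.MeanSquaredError())
    return model  # model.fit(x, y, batch_size=32) then trains with per-example clipping and noise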
``` |
{
"source": "JianXinyu/Gadgets",
"score": 3
} |
#### File: Gadgets/NL RoadTest Booking Script/appointment.py
```python
from selenium import webdriver
from selenium.webdriver.support.select import Select
from selenium.webdriver.common.keys import Keys
import time
from playsound import playsound
def fill(driver):
first_name = driver.find_element_by_id('firstNameControl')
first_name.send_keys('XXX')
first_name.send_keys(Keys.ENTER)
last_name = driver.find_element_by_id('lastNameControl')
last_name.send_keys('XXX')
last_name.send_keys(Keys.ENTER)
email = driver.find_element_by_id('emailControl')
email.send_keys('<EMAIL>')
email.send_keys(Keys.ENTER)
cemail = driver.find_element_by_id('confirmEmailControl')
cemail.send_keys('<EMAIL>')
cemail.send_keys(Keys.ENTER)
phone = driver.find_element_by_id('phoneControl')
phone.send_keys('XXX')
phone.send_keys(Keys.ENTER)
book_button_xpath = "//*[@id='pageScheduleDetails']/table/tbody/tr[17]/td[2]/div/a"
book_button = driver.find_element_by_xpath(book_button_xpath)
book_button.click()
def check(driver, location):
# switch to iframe
iframe = driver.find_elements_by_tag_name('iframe')[0]
driver.switch_to.frame(iframe)
# start booking
button = driver.find_element_by_id("bookOrFindBook")
button.click()
# https://seleniumwithjavapython.wordpress.com/selenium-with-python/basics-of-webdriver/handling-dropdowns/
s1 = Select(driver.find_element_by_id('firstSelectControl'))
s1.select_by_visible_text(location)
s2 = Select(driver.find_element_by_id('secondSelectControl'))
time.sleep(1)
try:
s2.select_by_visible_text('Road Test - Passenger (Class 05)')
# go to calender area
# https://www.lambdatest.com/blog/how-to-automate-calendar-using-selenium-webdriver-for-testing/
calender = driver.find_element_by_class_name('ui-datepicker-calendar')
# get month
month = driver.find_element_by_class_name('ui-datepicker-month')
# find the first available day
day = calender.find_element_by_xpath('//td[@data-handler="selectDay"]')
print('location: %s, time: %s %s' % (location, month.text, day.text))
# add your own condition to auto book
if month.text == 'June' or (month.text == 'July' and int(day.text) <= 20):
playsound('bell.wav')
# don't know why I have to scroll down to click the day
calender.location_once_scrolled_into_view
day.click()
# select the first available time
dtime = Select(driver.find_element_by_id('timeControl'))
dtime.select_by_index(1)
# Next button, not sure why other methods don't work
next_button_xpath = "//*[@id='pageScheduleTime']/table/tbody/tr[8]/td[2]/div/a"
next_button = driver.find_element_by_xpath(next_button_xpath)
next_button.click()
# fill text boxes
fill(driver)
time.sleep(10)
except Exception:
pass
driver.get(driver.current_url)
time.sleep(1)
driver.refresh()
if __name__ == "__main__":
driver = webdriver.Firefox(executable_path='./geckodriver')
driver.get("https://www.mrdappointments.gov.nl.ca/qwebbook/index.html")
while True:
# choose your city
check(driver, 'Clarenville MRD')
check(driver, 'Harbour Grace MRD')
check(driver, 'Mount Pearl MRD')
time.sleep(60)
driver.close()
``` |
{
"source": "jianxiongcai/HelloSLAM",
"score": 3
} |
#### File: HelloSLAM/line_extrator/extract.py
```python
import os;
def remove_ext(p):
print(p);
res = "";
for k in range(len(p)):
if (p[len(p)-k-1]=="."):
break;
for i in range(len(p)-k-1):
res += p[i];
return res;
data_path = "../data";
folder = "rgb";
folder_path = data_path + "/" + folder;
print("Checking folder: "+ folder_path)
os.mkdir( data_path+"/lines-raw/");
if (os.path.isdir(folder_path)):
for image in os.listdir(folder_path):
image_path = folder_path+"/"+image;
saving_path = remove_ext(data_path+"/lines-raw/"+image);
cmd = "./lsd " + " -P " + saving_path +".esp " + image_path + " " +saving_path + ".txt";
print(cmd);
os.system(cmd);
``` |
{
"source": "jianxiongcai/tensorboardX",
"score": 2
} |
#### File: tensorboardX/tests/test_summary.py
```python
from tensorboardX import summary
from .expect_reader import compare_proto
import numpy as np
import pytest
import unittest
np.random.seed(0)
# compare_proto = write_proto # massive update expect
class SummaryTest(unittest.TestCase):
def test_uint8_image(self):
'''
Tests that uint8 image (pixel values in [0, 255]) is not changed
'''
test_image = np.random.randint(0, 256, size=(3, 32, 32), dtype=np.uint8)
scale_factor = summary._calc_scale_factor(test_image)
assert scale_factor == 1, 'Values are already in [0, 255], scale factor should be 1'
def test_float32_image(self):
'''
Tests that float32 image (pixel values in [0, 1]) are scaled correctly
to [0, 255]
'''
test_image = np.random.rand(3, 32, 32).astype(np.float32)
scale_factor = summary._calc_scale_factor(test_image)
assert scale_factor == 255, 'Values are in [0, 1], scale factor should be 255'
def test_list_input(self):
with pytest.raises(Exception) as e_info:
summary.histogram('dummy', [1,3,4,5,6], 'tensorflow')
def test_empty_input(self):
print('expect error here:')
with pytest.raises(Exception) as e_info:
summary.histogram('dummy', np.ndarray(0), 'tensorflow')
def test_image_with_boxes(self):
compare_proto(summary.image_boxes('dummy',
np.random.rand(3, 32, 32).astype(np.float32),
np.array([[10, 10, 40, 40]])), self)
def test_image_with_one_channel(self):
np.random.seed(0)
compare_proto(summary.image('dummy', np.random.rand(1, 8, 8).astype(np.float32), dataformats='CHW'), self)
def test_image_with_one_channel_batched(self):
np.random.seed(0)
compare_proto(summary.image('dummy', np.random.rand(2, 1, 8, 8).astype(np.float32), dataformats='NCHW'), self)
def test_image_with_3_channel_batched(self):
np.random.seed(0)
compare_proto(summary.image('dummy', np.random.rand(2, 3, 8, 8).astype(np.float32), dataformats='NCHW'), self)
def test_image_without_channel(self):
np.random.seed(0)
compare_proto(summary.image('dummy', np.random.rand(8, 8).astype(np.float32), dataformats='HW'), self)
def test_video(self):
try:
import moviepy
except ImportError:
return
np.random.seed(0)
compare_proto(summary.video('dummy', np.random.rand(4, 3, 1, 8, 8).astype(np.float32)), self)
summary.video('dummy', np.random.rand(16, 48, 1, 28, 28).astype(np.float32))
#summary.video('dummy', np.random.rand(20, 7, 1, 8, 8).astype(np.float32))
def test_audio(self):
np.random.seed(0)
compare_proto(summary.audio('dummy', np.random.rand(42)), self)
def test_text(self):
compare_proto(summary.text('dummy', 'text 123'), self)
def test_histogram_auto(self):
np.random.seed(0)
compare_proto(summary.histogram('dummy', np.random.rand(1024), bins='auto', max_bins=5), self)
def test_histogram_fd(self):
np.random.seed(0)
compare_proto(summary.histogram('dummy', np.random.rand(1024), bins='fd', max_bins=5), self)
def test_histogram_doane(self):
np.random.seed(0)
compare_proto(summary.histogram('dummy', np.random.rand(1024), bins='doane', max_bins=5), self)
``` |
{
"source": "Jianxun-Wang/Physics-constrained-Bayesian-deep-learning",
"score": 2
} |
#### File: Physics-constrained-Bayesian-deep-learning/code/BayesNN.py
```python
import numpy as np
from scipy.spatial.distance import pdist, squareform
import copy
import math
# plotting
import pandas as pd
import matplotlib.pylab as plt
import matplotlib.ticker as ticker
# system
from time import time
import sys
import os
import gc
import pdb
import subprocess # Call the command line
from subprocess import call
# torch import
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import normalize # noqa: F401
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from torch.distributions import Gamma
from torch.optim.lr_scheduler import ReduceLROnPlateau
import matplotlib.ticker as ticker
# local import
from FCN import Net
from args import args, device
class BayesNN(nn.Module):
"""Define Bayesian netowrk
"""
def __init__(self, model, n_samples=2, noise=1e-6):
super(BayesNN, self).__init__()
if not isinstance(model, nn.Module):
raise TypeError("model {} is not a Module subclass".format(
torch.typename(model)))
self.n_samples = n_samples # number of particles (# of perturbed NN)
# w_i ~ StudentT(w_i | mu=0, lambda=shape/rate, nu=2*shape)
# for efficiency, represent StudentT params using Gamma params
# Nick shape = 0.5, rate = 10
self.w_prior_shape = 1.
self.w_prior_rate = 0.05
# noise variance 1e-6: beta ~ Gamma(beta | shape, rate)
## Nick shape = 10 rate = 10*C0*args*.t**k0 = 10*0.2*0.005^3 = 2.5e-7
self.beta_prior_shape = (2.)
self.beta_prior_rate = noise
print('noise is',noise)
################
# for the equation log-likelihood
self.var_eq = 1e-4
self.lamda = np.ones((1,))
## parameters for the stenosis hard-constraint case
self.mu = args.mu
self.sigma = args.sigma
self.scale = args.scale
self.nu = args.nu
self.rho = args.rho
self.rInlet = args.rInlet
self.xStart = args.xStart
self.xEnd = args.xEnd
self.dP = args.dP
self.L = args.L
##
################
# replicate `n_samples` instances with the same network as `model`
instances = []
for i in range(n_samples):
new_instance = copy.deepcopy(model)
#new_instance = Net(1, 20)
# initialize each model instance with its default initialization
# instead of the prior
#new_instance.reset_parameters()
def init_normal(m):
if type(m) == nn.Linear:
nn.init.kaiming_normal_(m.weight)
new_instance.apply(init_normal)
print('Reset parameters in model instance {}'.format(i))
instances.append(new_instance)
#t.sleep(100)
self.nnets = nn.ModuleList(instances)
#del instances # delete instances
# log precision (Gamma) of Gaussian noise
## change to a constant log_beta
log_beta = Gamma(self.beta_prior_shape,
self.beta_prior_rate).sample((self.n_samples,)).log().to(device)
##
## beta equals to 10^3, 1/beta = 1e-3
#log_beta = (1/noise*torch.ones(self.n_samples,)).log()
print('log_beta is',log_beta)
for i in range(n_samples):
self.nnets[i].log_beta = Parameter(log_beta[i])
print('log_beta grad is',self.nnets[i].log_beta.requires_grad)
self.nnets[i].beta_eq = 1/self.var_eq
print('Total number of parameters: {}'.format(self._num_parameters()))
def _num_parameters(self):
count = 0
for name, param in self.named_parameters():
# print(name)
count += param.numel()
return count
def __getitem__(self, idx):
return self.nnets[idx]
@property
def log_beta(self):
return torch.tensor([self.nnets[i].log_beta.item()
for i in range(self.n_samples)], device=device)
def forward(self, inputs):
output = []
for i in range(self.n_samples):
output.append(self.nnets[i].forward(inputs))
output = torch.stack(output)
return output
def _log_joint(self, index, output, target, outputb, outputin, targetin, outputout, targetout, ntrain):
"""Log joint probability or unnormalized posterior for single model
instance. Ignoring constant terms for efficiency.
Can be implemented in batch computation, but memory is the bottleneck.
Thus here we trade computation for memory, e.g. using for loop.
Args:
index (int): model index, 0, 1, ..., `n_samples`
output (Tensor): y_pred
target (Tensor): y
ntrain (int): total number of training data, mini-batch is used to
evaluate the log joint prob
Returns:
Log joint probability (zero-dim tensor)
"""
# Normal(target | output, 1 / beta * I)
#print('output.size = ',output.size(0))
n_train = output.size(0)
targetb = torch.zeros_like(outputb)
### u log-likelihood
## sparse domain data
# u
log_likelihood = ntrain / output.size(0) * (
- 0.5 * self.nnets[index].log_beta.exp()
* ((target[:,0:2] - output[:,0:2]).pow(2).sum()
+ 0.5 * target[:,0:2].numel() * self.nnets[index].log_beta))
# v
#log_likelihood = ntrain / output.size(0) * (
#- 0.5 * self.nnets[index].log_beta.exp()
#* ((target[:,1] - output[:,1]).pow(2).sum()
#+ 0.5 * target[:,1].numel() * self.nnets[index].log_beta))
## boundary data
# u
log_likelihood += ntrain / outputb.size(0) * (
- 0.5 * self.nnets[index].log_beta.exp()
* ((targetb[:,0:2] - outputb[:,0:2]).pow(2).sum()
+ 0.5 * targetb[:,0:2].numel() * self.nnets[index].log_beta))
# inlet data
# u
log_likelihood += ntrain / outputin.size(0) * (
- 0.5 * self.nnets[index].log_beta.exp()
* ((targetin[:,0:2] - outputin[:,0:2]).pow(2).sum()
+ 0.5 * targetin[:,0:2].numel() * self.nnets[index].log_beta))
## outlet data
# u
log_likelihood += ntrain/outputout.size(0) * (
- 0.5 * self.nnets[index].log_beta.exp()
* ((targetout[:,0:2] - outputout[:,0:2]).pow(2).sum()
+ 0.5 * targetout[:,0:2].numel() * self.nnets[index].log_beta))
# log prob of prior of weights, i.e. log prob of studentT
log_prob_prior_w = torch.tensor(0.).to(device)
for param in self.nnets[index].features.parameters():
log_prob_prior_w += torch.log1p(0.5 / self.w_prior_rate * param.pow(2)).sum()
log_prob_prior_w *= -(self.w_prior_shape + 0.5)
# log prob of prior of log noise-precision (NOT noise precision)
# noise prior (beta_shape - 1)*log_beta
log_prob_prior_log_beta = ((self.beta_prior_shape-1) * self.nnets[index].log_beta- self.beta_prior_rate*self.nnets[index].log_beta.exp() )
return log_likelihood + log_prob_prior_w + log_prob_prior_log_beta
def _log_likeEq(self,index,u,u_x,u_t,u_xx,ntrain):
#u = output[:,0]
u = u.view(len(u),-1)
'''
x = inputs[:,0]
t = inputs[:,1]
u = output[:,0]
x,t,u = x.view(len(x),-1),t.view(len(t),-1),u.view(len(u),-1)
u_x = torch.autograd.grad(u,x,grad_outputs=torch.ones_like(x),create_graph = True,only_inputs=True)[0]
u_t = torch.autograd.grad(u,t,grad_outputs=torch.ones_like(x),create_graph = True,only_inputs=True)[0]
u_xx = torch.autograd.grad(u_x,x,grad_outputs=torch.ones_like(x),create_graph = True,only_inputs=True)[0]
'''
mu = 0
res = u_t + u*u_x
#print('res is',res)
#print('equation output.size is',output.size(0))
'''
log_likelihood = ntrain / output.size(0) * (
- 0.5 * self.nnets[index].beta_eq#* (self.nnets[index].log_beta.exp()/ self.nnets[index].log_beta.exp())
* (res - 0).pow(2).sum()
)
'''
loss_f = nn.MSELoss()
loss = loss_f(res,torch.zeros_like(res))
#print('log_likelihood is',log_likelihood)
return loss
#return log_likelihood
def _mse(self, index, output, target, ntrain):
loss_f = nn.MSELoss()
loss = loss_f(output,target)
#print('log_likelihood is',log_likelihood)
return loss
def criterion(self,index,x,y,ntrain):
x = torch.FloatTensor(x).to(device)
y = torch.FloatTensor(y).to(device)
x.requires_grad = True
y.requires_grad = True
net_in = torch.cat((x,y),1)
output = self.nnets[index].forward(net_in)
u = output[:,0]
v = output[:,1]
P = output[:,2]
u = u.view(len(u),-1)
v = v.view(len(v),-1)
P = P.view(len(P),-1)
# axisymmetric
#R = self.scale * 1/np.sqrt(2*np.pi*self.sigma**2)*torch.exp(-(x-self.mu)**2/(2*self.sigma**2))
#h = self.rInlet - R
#u_hard = u*(h**2 - y**2)
#v_hard = (h**2 -y**2)*v
u_hard = u
v_hard = v
P_hard = (self.xStart-x)*0 + self.dP*(self.xEnd-x)/self.L + 0*y + (self.xStart - x)*(self.xEnd - x)*P
#P_hard = (-4*x**2+3*x+1)*dP +(xStart - x)*(xEnd - x)*P
u_x = torch.autograd.grad(u_hard,x,grad_outputs=torch.ones_like(x),create_graph = True,only_inputs=True)[0]
u_xx = torch.autograd.grad(u_x,x,grad_outputs=torch.ones_like(x),create_graph = True,only_inputs=True)[0]
u_y = torch.autograd.grad(u_hard,y,grad_outputs=torch.ones_like(y),create_graph = True,only_inputs=True)[0]
u_yy = torch.autograd.grad(u_y,y,grad_outputs=torch.ones_like(y),create_graph = True,only_inputs=True)[0]
P_x = torch.autograd.grad(P_hard,x,grad_outputs=torch.ones_like(x),create_graph = True,only_inputs=True)[0]
#P_xx = torch.autograd.grad(P_x,x,grad_outputs=torch.ones_like(x),create_graph = True,only_inputs=True)[0]
#print('type of nu is',nu.shape)
loss_1 = (u_hard*u_x+v_hard*u_y-self.nu*(u_xx+u_yy)+1/self.rho*P_x)
v_x = torch.autograd.grad(v_hard,x,grad_outputs=torch.ones_like(y),create_graph = True,only_inputs=True)[0]
v_xx = torch.autograd.grad(v_x,x,grad_outputs=torch.ones_like(y),create_graph = True,only_inputs=True)[0]
v_y = torch.autograd.grad(v_hard,y,grad_outputs=torch.ones_like(y),create_graph = True,only_inputs=True)[0]
v_yy = torch.autograd.grad(v_y,y,grad_outputs=torch.ones_like(y),create_graph = True,only_inputs=True)[0]
P_y = torch.autograd.grad(P_hard,y,grad_outputs=torch.ones_like(y),create_graph = True,only_inputs=True)[0]
#P_yy = torch.autograd.grad(P_y,y,grad_outputs=torch.ones_like(x),create_graph = True,allow_unused = True)[0]
loss_2 = (u_hard*v_x+v_hard*v_y - self.nu*(v_xx+v_yy)+1/self.rho*P_y)
#Main_deriv = torch.cat((u_x,u_xx,u_y,u_yy,P_x,v_x,v_xx,v_y,v_yy,P_y),1)
loss_3 = (u_x + v_y)
#loss_3 = u_x**2 + 2*u_y*v_x + v_y**2+1/rho*(P_xx + P_yy)
#loss_3 = loss_3*100
# MSE LOSS
#loss_f = nn.MSELoss()
#loss_f = nn.L1loss()
#ntrain = 50
logloss1 = ntrain / output.size(0) * (
- 0.5 * self.nnets[index].beta_eq#* (self.nnets[index].log_beta.exp()/ self.nnets[index].log_beta.exp())
* (loss_1 - torch.zeros_like(loss_1)).pow(2).sum()
)
logloss2 = ntrain / output.size(0) * (
- 0.5 * self.nnets[index].beta_eq#* (self.nnets[index].log_beta.exp()/ self.nnets[index].log_beta.exp())
* (loss_2 - torch.zeros_like(loss_2)).pow(2).sum()
)
logloss3 =ntrain / output.size(0) * (
- 0.5 * self.nnets[index].beta_eq#* (self.nnets[index].log_beta.exp()/ self.nnets[index].log_beta.exp())
* (loss_3 - torch.zeros_like(loss_3)).pow(2).sum()
)
'''
logloss1 = -self.nnets[index].beta_eq* (loss_1 - torch.zeros_like(loss_1)).pow(2).mean()
logloss2 = -self.nnets[index].beta_eq* (loss_2 - torch.zeros_like(loss_2)).pow(2).mean()
logloss3 = -self.nnets[index].beta_eq* (loss_3 - torch.zeros_like(loss_3)).pow(2).mean()
'''
#loss = loss_f(loss_1,torch.zeros_like(loss_1))+ loss_f(loss_2,torch.zeros_like(loss_2))+loss_f(loss_3,torch.zeros_like(loss_3))
#return loss
return (logloss1 + logloss2 + logloss3),loss_1,loss_2,loss_3
## implement hard constraint
def hard_constraint(self, inputs, output):
x= inputs[:,0]
y = inputs[:,1]
u = output[:,0]
v = output[:,1]
P = output[:,2]
x = x.view(len(x),-1)
y = y.view(len(y),-1)
u = u.view(len(u),-1)
v = v.view(len(v),-1)
P = P.view(len(P),-1)
# axisymmetric
#R = self.scale * 1/np.sqrt(2*np.pi*self.sigma**2)*torch.exp(-(x-self.mu)**2/(2*self.sigma**2))
#h = self.rInlet - R
#u_hard = u*(h**2 - y**2)
#v_hard = (h**2 -y**2)*v
u_hard = u
v_hard = v
P_hard = (self.xStart-x)*0 + self.dP*(self.xEnd-x)/self.L + 0*y + (self.xStart - x)*(self.xEnd - x)*P
output_const = torch.cat((u_hard,v_hard,P_hard),1)
return output_const
#def predict(self, x_test):
### modified for burgers,
def predict(self, inputs):
"""
Predictive mean and variance at x_test. (only average over w and beta)
Args:
x_test (Tensor): [N, *], test input
"""
# S x N x oC x oH x oW
y = self.forward(inputs)
print('shape of output is',y.shape)
'''
x = inputs[:,0]
y = inputs[:,1]
x = x.view(len(x),-1)
y = y.view(len(y),-1)
u = y[:,0]
v = y[:,1]
P = y[:,2]
R = torch.FloatTensor(yUp)
u_hard = u*(R**2 - y**2)
v_hard = (R**2 -y**2)*v
P_hard = (self.xStart-x)*0 + self.dP*(self.xEnd-xt)/L + 0*y + (self.xStart - x)*(self.xEnd - x)*P
u_hard = u.view(len(u_hard),-1)
v_hard = v.view(len(v_hard),-1)
P_hard = P.view(len(P_hard),-1)
'''
#y_pred_mean = y.mean(0)
# compute predictive variance per pixel
# N x oC x oH x oW
#EyyT = (y ** 2).mean(0)
#EyEyT = y_pred_mean ** 2
#beta_inv = (- self.log_beta).exp()
#y_pred_var = beta_inv.mean() + EyyT - EyEyT
return y
'''
def predict(self, x_test):
"""
Predictive mean and variance at x_test. (only average over w and beta)
Args:
x_test (Tensor): [N, *], test input
"""
# S x N x oC x oH x oW
y = self.forward(x_test)
y_pred_mean = y.mean(0)
# compute predictive variance per pixel
# N x oC x oH x oW
EyyT = (y ** 2).mean(0)
EyEyT = y_pred_mean ** 2
beta_inv = (- self.log_beta).exp()
y_pred_var = beta_inv.mean() + EyyT - EyEyT
return y_pred_mean, y_pred_var
def propagate(self, mc_loader):
"""
Mean and Variance statistics of predictive output distribution
averaging over the input distribution, i.e. uncertainty propagation.
First compute the conditional predictive mean and var given realizations
of uncertain surrogate; then compute the statistics of those conditional
statistics.
Args:
mc_loader (torch.utils.data.DataLoader): dataloader for the Monte
Carlo data (10,000 is used in this work)
S: num of samples
M: num of data
D: output dimensions
"""
# First compute conditional statistics
# S x N x oC x oH x oW
# self.cpu()
# x_test = x_test.cpu()
# print('here')
# S x oC x oH x oW
output_size = mc_loader.dataset[0][1].size()
cond_Ey = torch.zeros(self.n_samples, *output_size, device=device)
cond_Eyy = torch.zeros_like(cond_Ey)
for _, (x_mc, _) in enumerate(mc_loader):
x_mc = x_mc.to(device)
# S x B x oC x oH x oW
y = self.forward(x_mc)
cond_Ey += y.mean(1)
cond_Eyy += y.pow(2).mean(1)
cond_Ey /= len(mc_loader)
cond_Eyy /= len(mc_loader)
beta_inv = (- self.log_beta).exp()
print('Noise variances: {}'.format(beta_inv))
y_cond_pred_var = cond_Eyy - cond_Ey ** 2 \
+ beta_inv.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
# compute statistics of conditional statistics
return cond_Ey.mean(0), cond_Ey.var(0), \
y_cond_pred_var.mean(0), y_cond_pred_var.var(0)
'''
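# --- Added usage sketch (not part of the original file) ---
# Minimal illustration of how this class might be driven. The constructor arguments of
# `Net` and the input shape are assumptions for illustration only; the surrogate is
# replicated into `n_samples` particles and forward() stacks one prediction per particle.
def _example_bayes_forward():
    base_net = Net(2, 20)                       # assumed signature: Net(n_inputs, hidden_width)
    bnn = BayesNN(base_net, n_samples=5, noise=1e-4).to(device)
    xy = torch.rand(128, 2, device=device)      # dummy (x, y) collocation points
    preds = bnn.forward(xy)                     # shape: [n_samples, 128, n_outputs]
    return preds.mean(0), preds.var(0)          # predictive mean / variance over particles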
``` |
{
"source": "Jianxun-Wang/PICNNSR",
"score": 2
} |
#### File: PICNNSR/demo0/dataset.py
```python
from torch.utils.data import Dataset, DataLoader
import pdb
import numpy as np
class VaryGeoDataset(Dataset):
"""docstring for hcubeMeshDataset"""
def __init__(self,MeshList):
self.MeshList=MeshList
def __len__(self):
return len(self.MeshList)
def __getitem__(self,idx):
mesh=self.MeshList[idx]
x=mesh.x
y=mesh.y
xi=mesh.xi
eta=mesh.eta
J=mesh.J_ho
Jinv=mesh.Jinv_ho
dxdxi=mesh.dxdxi_ho
dydxi=mesh.dydxi_ho
dxdeta=mesh.dxdeta_ho
dydeta=mesh.dydeta_ho
cord=np.zeros([2,x.shape[0],x.shape[1]])
cord[0,:,:]=x; cord[1,:,:]=y
InvariantInput=np.zeros([2,J.shape[0],J.shape[1]])
InvariantInput[0,:,:]=J
InvariantInput[1,:,:]=Jinv
return [InvariantInput,cord,xi,eta,J,
Jinv,dxdxi,dydxi,
dxdeta,dydeta]
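# --- Added usage sketch (not part of the original file) ---
# VaryGeoDataset wraps a list of pre-built meshes, so it can be fed to a standard
# PyTorch DataLoader. `mesh_list` is assumed to hold objects exposing the attributes
# accessed in __getitem__ (x, y, xi, eta, J_ho, Jinv_ho, dxdxi_ho, ...).
def _example_loader(mesh_list):
    dataset = VaryGeoDataset(mesh_list)
    loader = DataLoader(dataset, batch_size=1, shuffle=False)
    for (invariant_input, cord, xi, eta, J,
         Jinv, dxdxi, dydxi, dxdeta, dydeta) in loader:
        print(invariant_input.shape, cord.shape)   # both are [batch, 2, ny, nx]
    return loader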
```
#### File: PICNNSR/demo0/model.py
```python
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
import pdb
torch.manual_seed(123)
class USCNNSep(nn.Module):
def __init__(self,h,nx,ny,nVarIn=1,nVarOut=1,initWay=None,k=5,s=1,p=2):
super(USCNNSep, self).__init__()
"""
Extract basic information
"""
self.initWay=initWay
self.nVarIn=nVarIn
self.nVarOut=nVarOut
self.k=k
self.s=1
self.p=2
self.deltaX=h
self.nx=nx
self.ny=ny
"""
Define net
"""
self.source=torch.tensor(torch.ones(1,47))*0.5
self.source =torch.nn.Parameter(self.source)
self.source.requires_grad = True
W1=16
W2=32
self.relu=nn.ReLU()
self.US=nn.Upsample(size=[self.ny-2,self.nx-2],mode='bicubic')
self.conv1=nn.Conv2d(self.nVarIn,W1,kernel_size=k, stride=s, padding=p)
self.conv2=nn.Conv2d(W1,W2,kernel_size=k, stride=s, padding=p)
self.conv3=nn.Conv2d(W2,W1,kernel_size=k, stride=s, padding=p)
self.conv4=nn.Conv2d(W1,self.nVarOut,kernel_size=k, stride=s, padding=p)
self.pixel_shuffle1 = nn.PixelShuffle(1)
self.conv11=nn.Conv2d(self.nVarIn,W1,kernel_size=k, stride=s, padding=p)
self.conv22=nn.Conv2d(W1,W2,kernel_size=k, stride=s, padding=p)
self.conv33=nn.Conv2d(W2,W1,kernel_size=k, stride=s, padding=p)
self.conv44=nn.Conv2d(W1,self.nVarOut,kernel_size=k, stride=s, padding=p)
self.pixel_shuffle11 = nn.PixelShuffle(1)
self.conv111=nn.Conv2d(self.nVarIn,W1,kernel_size=k, stride=s, padding=p)
self.conv222=nn.Conv2d(W1,W2,kernel_size=k, stride=s, padding=p)
self.conv333=nn.Conv2d(W2,W1,kernel_size=k, stride=s, padding=p)
self.conv444=nn.Conv2d(W1,self.nVarOut,kernel_size=k, stride=s, padding=p)
self.pixel_shuffle111 = nn.PixelShuffle(1)
if self.initWay is not None:
self._initialize_weights()
#Specify filter
dxiFilter=torch.Tensor([[[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[1., -8., 0., 8., -1.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]]]]).to("cuda")/12./self.deltaX
self.convdxi=nn.Conv2d(1, 1, (5,5),stride=1, padding=0, bias=None)
self.convdxi.weight=nn.Parameter(dxiFilter, requires_grad=False)
detaFilter=torch.Tensor([[[[0., 0., 1., 0., 0.],
[0., 0., -8., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 8., 0., 0.],
[0., 0., -1., 0., 0.]]]]).to("cuda")/12./self.deltaX
self.convdeta=nn.Conv2d(1,1,(5,5),stride=1,padding=0,bias=None)
self.convdeta.weight=nn.Parameter(detaFilter,requires_grad=False)
lapFilter=torch.Tensor([[[[0., 0., -1., 0., 0.],
[0., 0., 16., 0., 0.],
[-1., 16., -60., 16., -1.],
[0., 0., 16., 0., 0.],
[0., 0., -1., 0., 0.]]]]).to("cuda")/12./self.deltaX/self.deltaX
self.convlap = nn.Conv2d(1, 1, (5,5),stride=1, padding=0, bias=None)
self.convlap.weight=nn.Parameter(lapFilter, requires_grad=False)
def forward(self, x):
x=self.US(x)
x1=self.relu(self.conv1(x))
x1=self.relu(self.conv2(x1))
x1=self.relu(self.conv3(x1))
x1=self.pixel_shuffle1(self.conv4(x1))
x2=self.relu(self.conv11(x))
x2=self.relu(self.conv22(x2))
x2=self.relu(self.conv33(x2))
x2=self.pixel_shuffle11(self.conv44(x2))
x3=self.relu(self.conv111(x))
x3=self.relu(self.conv222(x3))
x3=self.relu(self.conv333(x3))
x3=self.pixel_shuffle111(self.conv444(x3))
return torch.cat([x1,x2,x3],axis=1)
def _initialize_weights(self):
if self.initWay=='kaiming':
init.kaiming_normal_(self.conv1.weight, mode='fan_out', nonlinearity='relu')
init.kaiming_normal_(self.conv2.weight, mode='fan_out', nonlinearity='relu')
init.kaiming_normal_(self.conv3.weight, mode='fan_out', nonlinearity='relu')
init.kaiming_normal_(self.conv4.weight)
init.kaiming_normal_(self.conv11.weight, mode='fan_out', nonlinearity='relu')
init.kaiming_normal_(self.conv22.weight, mode='fan_out', nonlinearity='relu')
init.kaiming_normal_(self.conv33.weight, mode='fan_out', nonlinearity='relu')
init.kaiming_normal_(self.conv44.weight)
init.kaiming_normal_(self.conv111.weight, mode='fan_out', nonlinearity='relu')
init.kaiming_normal_(self.conv222.weight, mode='fan_out', nonlinearity='relu')
init.kaiming_normal_(self.conv333.weight, mode='fan_out', nonlinearity='relu')
init.kaiming_normal_(self.conv444.weight)
elif self.initWay=='ortho':
init.orthogonal_(self.conv1.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv2.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv3.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv4.weight)
init.orthogonal_(self.conv11.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv22.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv33.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv44.weight)
init.orthogonal_(self.conv111.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv222.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv333.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv444.weight)
else:
print('Only Kaiming or Orthogonal initializer can be used!')
exit()
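# --- Added usage sketch (not part of the original file) ---
# Illustrative instantiation of USCNNSep. The derivative filters are created on "cuda",
# so a GPU is assumed; grid size, step h and channel counts below are arbitrary example
# values. The forward pass upsamples to (ny-2, nx-2) and concatenates three branches.
def _example_uscnnsep_forward():
    ny, nx = 19, 84                              # assumed output grid size
    net = USCNNSep(0.01, nx, ny, nVarIn=2, nVarOut=1, initWay='kaiming').to("cuda")
    x = torch.rand(4, 2, ny, nx, device="cuda")  # [batch, nVarIn, H, W] input field
    y = net(x)                                   # [batch, 3 * nVarOut, ny - 2, nx - 2]
    return y.shape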
``` |
{
"source": "Jianyang-Hu/numpypractice",
"score": 3
} |
#### File: Jianyang-Hu/numpypractice/class_08_0405.py
```python
class Foo:
def __init__(self,name): #注意别写成了__int__
self.name = name
def show(self):
print("show")
#对象
obj = Foo("hu")
#反射,类,只能找类里的成员
# r = hasattr(Foo,'show')
# print(r)
#反射:对象,既可以找对象,也找类的成员
r = hasattr(obj,'name')
print(r)
r = hasattr(obj,'show')
print(r)
```
#### File: Jianyang-Hu/numpypractice/email_0320.py
```python
def email(p,text,subject):
import smtplib
from email.mime.text import MIMEText
from email.utils import formataddr
msg = MIMEText(text,'plain','utf-8')
msg['From'] = formataddr(['胡杨','<EMAIL>'])
msg['To'] = formataddr('胡杨','<EMAIL>')
msg['Subject'] = subject
server = smtplib.SMTP('smtp.163.com',25)
server.login('<EMAIL>','xiaohubishen1991')
server.sendmail('<EMAIL>',[p, ],msg.as_string())
server.quit()
email('<EMAIL>','i love python')
```
#### File: Jianyang-Hu/numpypractice/images_0430.py
```python
from scipy import misc
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage
#载入Lena图像,并使用灰度颜色表将其在子图中显示出来
image = misc.ascent().astype(np.float32)
plt.subplot(221)
plt.title("Original Image")
img = plt.imshow(image, cmap=plt.cm.gray)
plt.axis("off")
#中值滤波器扫描信号中的每一个信号,并替换为相邻数据点的中值
plt.subplot(222)
plt.title("Median Filter")
filtered = ndimage.median_filter(image, size=(42,42))
plt.imshow(filtered, cmap=plt.cm.gray)
plt.axis("off")
plt.subplot(223)
plt.title("Rotated")
rotated = ndimage.rotate(image, 90)
plt.imshow(rotated, cmap=plt.cm.gray)
plt.axis("off")
#Prewitt滤波器是基于图像强度的梯度计算
plt.subplot(224)
plt.title("Prewitt Filter")
filtered = ndimage.prewitt(image)
plt.imshow(filtered, cmap=plt.cm.gray)
plt.axis("off")
plt.show()
"""
python从网络读取图片并直接进行处理的方法:
python从网络读取图片并直接进行处理的方法。分享给大家供大家参考。具体实现方法如下:
下面的代码可以实现从网络读取一张图片,不需要保存为本地文件,直接通过Image模块对图片进行处理,
这里使用到了cStringIO库,主要是把从网络读取到的图片数据模拟成本地文件。
import urllib2
import Image
import cStringIO
def ImageScale(url,size):
file = cStringIO.StringIO(urllib2.urlopen(url).read())
img = Image.open(file)
img.show()
python 读取并显示图片的两种方法:
http://www.cnblogs.com/yinxiangnan-charles/p/5928689.html
"""
```
#### File: Jianyang-Hu/numpypractice/map_filter_0321.py
```python
it = [1, 3, 5, 7, 9]
def add(num):
return num + 2
rs = map(add,it)
print(list(rs)) #需要用list进行转换
rs2 = map(lambda x:x * x,[1,2,3,4,5])
print(list(rs2))
#filter filter()函数包括两个参数,分别是function和list。
# 该函数根据function参数返回的结果是否为真来过滤list参数中的项,最后返回一个新列表
li = [2,5,8,4,6,0]
print(list(filter(lambda x:x>3,li)))
#如果filter参数值为None,就使用identity()函数,list参数中所有为假的元素都将被删除。
b = filter(None,li)
print(list(b))
``` |
{
"source": "jianyangli/sbpy",
"score": 2
} |
#### File: gas/tests/test_prodrate_remote.py
```python
import os
import numpy as np
import astropy.units as u
from astropy.time import Time
from astropy.tests.helper import remote_data
from astropy.table import Table
from astroquery.lamda import Lamda
from astroquery.jplspec import JPLSpec
import pytest
from .. import (Haser, photo_timescale, LTE, NonLTE, einstein_coeff,
intensity_conversion, beta_factor, total_number, from_Haser)
from ....data import Ephem, Phys
class MockPyradex:
"""
Class to be the mock return value of NonLTE.from_pyradex
"""
def __init__(self):
"""
Define a testing dictionary
"""
self.value = {0: 1.134e14}
# monkeypatched NonLTE.from_pyradex
@pytest.fixture
def mock_nonlte(monkeypatch):
"""
from_pyradex.value mocked to return dictionary.
"""
def mock_cdensity(*args, **kwargs):
"""
Define a testing Quantity
"""
integrated_flux = args[1]
if integrated_flux == 0.26 * u.K * u.km / u.s:
return u.Quantity([2.17469686e+14], 1/u.cm**2)
elif integrated_flux == 0.27 * u.K * u.km / u.s:
return u.Quantity([2.25833905e+14], 1/u.cm**2)
elif integrated_flux == 0.28 * u.K * u.km / u.s:
return u.Quantity([2.34198124e+14], 1/u.cm**2)
elif integrated_flux == 1.234 * u.K * u.km / u.s:
return u.Quantity([1.134e14], 1/u.cm**2)
monkeypatch.setattr(NonLTE, "from_pyradex", mock_cdensity)
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
return os.path.join(data_dir, filename)
@remote_data
def test_remote_prodrate_simple_hcn():
hcn = Table.read(data_path('HCN.csv'), format="ascii.csv")
temp_estimate = 47. * u.K
target = '103P'
vgas = 0.8 * u.km / u.s
aper = 30 * u.m # The aperture for telescope used (Drahus et al. 2012)
b = 1.13 # Value taken from (Drahus et al. 2012)
mol_tag = 27001
transition_freq = (265.886434 * u.GHz).to('MHz')
q_found = []
mol_data = Phys.from_jplspec(temp_estimate, transition_freq, mol_tag)
intl = intensity_conversion(mol_data)
mol_data.apply([intl.value] * intl.unit, name='intl')
au = einstein_coeff(mol_data)
mol_data.apply([au.value] * au.unit, name='eincoeff')
for i in range(0, 28):
time = Time(hcn['Time'][i], format='iso')
integrated_flux = hcn['T_B'][i] * u.K * u.km / u.s
ephemobj = Ephem.from_horizons(
target, epochs=time, id_type='designation',
closest_apparition=True)
lte = LTE()
q = lte.from_Drahus(integrated_flux, mol_data,
ephemobj, vgas, aper, b=b)
q = np.log10(q.value)
q_found.append(q)
q_pred = list(hcn['log(Q)'])
np.testing.assert_almost_equal(q_pred, q_found, decimal=1.3)
err = abs((np.array(q_pred) - np.array(q_found)) / np.array(q_pred) * 100)
assert np.all(err < 0.2345)
@remote_data
def test_remote_prodrate_simple_ch3oh():
ch3oh = Table.read(data_path('CH3OH.csv'), format="ascii.csv")
temp_estimate = 47. * u.K
target = '103P'
vgas = 0.8 * u.km / u.s
aper = 30 * u.m # The aperture for telescope used (Drahus et al. 2012)
b = 1.13 # Value taken from (Drahus et al. 2012)
mol_tag = 32003
transition_freq = (157.178987 * u.GHz).to('MHz')
q_found = []
mol_data = Phys.from_jplspec(temp_estimate, transition_freq, mol_tag)
intl = intensity_conversion(mol_data)
mol_data.apply([intl.value] * intl.unit, name='intl')
au = einstein_coeff(mol_data)
mol_data.apply([au.value] * au.unit, name='eincoeff')
for i in range(0, 20):
time = Time(ch3oh['Time'][i], format='iso')
integrated_flux = ch3oh['T_B'][i] * u.K * u.km / u.s
ephemobj = Ephem.from_horizons(target, epochs=time,
id_type='designation',
closest_apparition=True)
lte = LTE()
q = lte.from_Drahus(integrated_flux, mol_data,
ephemobj, vgas, aper, b=b)
q = np.log10(q.value)
q_found.append(q)
q_pred = list(ch3oh['log(Q)'])
err = abs((np.array(q_pred) - np.array(q_found)) / np.array(q_pred) * 100)
assert np.all(err < 0.35)
@remote_data
def test_einstein():
transition_freq_list = [(1611.7935180 * u.GHz).to('MHz'),
(177.26111120 * u.GHz).to('MHz')]
mol_tag_list = [28001, 27001]
temp_estimate = 300. * u.K
result = []
catalog_result = []
cat = JPLSpec.get_species_table()
for i in range(0, 2):
transition_freq = transition_freq_list[i]
mol_tag = mol_tag_list[i]
mol_data = Phys.from_jplspec(temp_estimate, transition_freq, mol_tag)
intl = intensity_conversion(mol_data)
mol_data.apply([intl.value] * intl.unit, name='intl')
au = einstein_coeff(mol_data)
result.append(au.value)
mol = cat[cat['TAG'] == mol_tag]
mol_name = mol['NAME'].data[0]
lam_search = Lamda.query(mol=mol_name.lower())
lam_result = lam_search[1]
tran = transition_freq.to('GHz').value
lam_found = lam_result[lam_result['Frequency'] == tran]
au_cat = lam_found['EinsteinA']
au_cat = au_cat.data[0]
catalog_result.append(au_cat)
err = (abs((np.array(catalog_result) - np.array(result)) /
np.array(catalog_result) * 100))
assert np.all(err < 23.5)
@remote_data
def test_Haser_prodrate():
co = Table.read(data_path('CO.csv'), format="ascii.csv")
lte = LTE()
Q_estimate = 2.8*10**(28) / u.s
transition_freq = (230.53799 * u.GHz).to('MHz')
aper = 10 * u.m
mol_tag = 28001
temp_estimate = 25. * u.K
vgas = 0.5 * u.km / u.s
target = 'C/2016 R2'
b = 0.74
mol_data = Phys.from_jplspec(temp_estimate, transition_freq, mol_tag)
intl = intensity_conversion(mol_data)
mol_data.apply([intl.value] * intl.unit, name='intl')
au = einstein_coeff(mol_data)
mol_data.apply([au.value] * au.unit, name='eincoeff')
mol_data.apply([1.] * u.AU * u.AU * u.s, name='beta')
mol_data.apply([1.] / (u.m * u.m), name='cdensity')
mol_data.apply([1.], name='total_number')
q_found = []
parent = photo_timescale('CO') * vgas
coma = Haser(Q_estimate, vgas, parent)
for i in range(0, 5):
time = Time(co['Time'][i], format='iso')
integrated_flux = co['T_B'][i] * u.K * u.km / u.s
ephemobj = Ephem.from_horizons(target, epochs=time)
beta = beta_factor(mol_data, ephemobj)
mol_data['beta'] = beta
cdensity = lte.cdensity_Bockelee(integrated_flux, mol_data)
mol_data['cdensity'] = cdensity
tnum = total_number(mol_data, aper, b)
mol_data['total_number'] = tnum
Q = from_Haser(coma, mol_data, aper=aper)
q_found.append(np.log10(Q.value)[0])
q_pred = list(co['log(Q)'])
err = abs((np.array(q_pred) - np.array(q_found)) / np.array(q_pred) * 100)
assert np.all(err < 2.5)
'''
Last test run: 08/01/2019 11:15:00 , sbpy version: v0.2dev259, python 3.6.8
Author: <NAME>
Tester: <NAME>
Tested: locally, needs pyradex to be installed
Status: Passed
See https://github.com/keflavich/pyradex for installation instructions
'''
@remote_data
def test_Haser_pyradex():
try:
import pyradex
except ImportError:
return None
co = Table.read(data_path('CO.csv'), format="ascii.csv")
nonlte = NonLTE()
lte = LTE()
Q_estimate = 2.8*10**(28) / u.s
transition_freq = (230.53799 * u.GHz).to('MHz')
aper = 10 * u.m
mol_tag = 28001
temp_estimate = 25. * u.K
vgas = 0.5 * u.km / u.s
target = 'C/2016 R2'
b = 0.74
mol_data = Phys.from_jplspec(temp_estimate, transition_freq, mol_tag)
intl = intensity_conversion(mol_data)
mol_data.apply([intl.value] * intl.unit, name='intl')
au = einstein_coeff(mol_data)
mol_data.apply([au.value] * au.unit, name='eincoeff')
mol_data.apply([1.] * u.AU**2 * u.s, name='beta')
mol_data.apply([1.] / u.m**2, name='cdensity')
mol_data.apply([1.], name='total_number')
q_found = []
parent = photo_timescale('CO') * vgas
coma = Haser(Q_estimate, vgas, parent)
for i in range(0, 5):
time = Time(co['Time'][i], format='iso')
integrated_flux = co['T_B'][i] * u.K * u.km / u.s
ephemobj = Ephem.from_horizons(target, epochs=time)
beta = beta_factor(mol_data, ephemobj)
mol_data['beta'] = beta
cdensity_bockelee = lte.cdensity_Bockelee(integrated_flux, mol_data)
mol_data['cdensity'] = cdensity_bockelee
cdensity = nonlte.from_pyradex(integrated_flux, mol_data)
mol_data['cdensity'] = cdensity
tnum = total_number(mol_data, aper, b)
mol_data['total_number'] = tnum
Q = from_Haser(coma, mol_data, aper=aper)
q_found.append(np.log10(Q.value)[0])
q_pred = list(co['log(Q)'])
err = abs((np.array(q_pred) - np.array(q_found)) / np.array(q_pred) * 100)
assert np.all(err < 0.35)
@remote_data
def test_intensity_conversion():
# test untested case for intensity conversion function
temp_estimate = 47. * u.K
vgas = 0.8 * u.km / u.s
mol_tag = 27001
transition_freq = (265.886434 * u.GHz).to('MHz')
mol_data = Phys.from_jplspec(temp_estimate, transition_freq, mol_tag)
mol_data['eup_J'] = 3.52359898e-20 * u.J
mol_data['elo_J'] = 1.76181853e-20 * u.J
intl = intensity_conversion(mol_data)
assert np.isclose(intl.value, 6.186509000388917e-11)
@remote_data
def test_einsteincoeff_case():
# test untested case for einstein coefficient
temp_estimate = 47. * u.K
vgas = 0.8 * u.km / u.s
mol_tag = 27001
transition_freq = (265.886434 * u.GHz).to('MHz')
mol_data = Phys.from_jplspec(temp_estimate, transition_freq, mol_tag)
mol_data['t_freq'] = 2658864.34 * u.MHz
intl = intensity_conversion(mol_data)
mol_data.apply([intl.value] * intl.unit, name='intl')
au = einstein_coeff(mol_data)
assert np.isclose(round(au.value, 4), 0.0086)
@remote_data
def test_betafactor_case():
# test untested case for beta beta_factor
r = 1.06077567 * u.AU
delta = 0.14633757 * u.AU
quanteph = [r, delta]
nameseph = ['r', 'delta']
ephemobj = Ephem.from_dict(dict(zip(nameseph, quanteph)))
mol_name = 'CN'
namephys = ['mol_tag']
quantphys = [mol_name]
mol_data = Phys.from_dict(dict(zip(namephys, quantphys)))
beta = beta_factor(mol_data, ephemobj)
assert np.isclose(beta.value[0], 354452.18195014383)
'''
Last test run: 08/02/2019 09:32:00 , sbpy version: v0.2dev259, python 3.6.8
Author: <NAME>
Tester: <NAME>
Tested: locally, needs pyradex to be installed
Status: Passed
See https://github.com/keflavich/pyradex for installation instructions
'''
@remote_data
def test_pyradex_case():
# test untested case for Pyradex
try:
import pyradex
except ImportError:
return None
transition_freq = (177.196 * u.GHz).to(u.MHz)
mol_tag = 29002
cdensity_guess = (1.89*10.**(14) / (u.cm * u.cm))
temp_estimate = 20. * u.K
temp_back = 2.8 * u.K
mol_data = Phys.from_jplspec(temp_estimate, transition_freq, mol_tag)
mol_data.apply([cdensity_guess.value] *
cdensity_guess.unit, name='cdensity')
mol_data.apply([temp_back.value] * temp_back.unit, name='temp_back')
mol_data.apply(['HCO+@xpol'], name='lamda_name')
nonLTE = NonLTE()
cdensity = nonLTE.from_pyradex(1.234 * u.K * u.km / u.s, mol_data,
iter=100, collider_density={'H2': 900})
assert np.isclose(cdensity.value[0], 1.134e14)
@remote_data
def test_Haser_prodrate_pyradex(mock_nonlte):
co = Table.read(data_path('CO.csv'), format="ascii.csv")
nonlte = NonLTE()
lte = LTE()
Q_estimate = 2.8*10**(28) / u.s
transition_freq = (230.53799 * u.GHz).to('MHz')
aper = 10 * u.m
mol_tag = 28001
temp_estimate = 25. * u.K
vgas = 0.5 * u.km / u.s
target = 'C/2016 R2'
b = 0.74
mol_data = Phys.from_jplspec(temp_estimate, transition_freq, mol_tag)
intl = intensity_conversion(mol_data)
mol_data.apply([intl.value] * intl.unit, name='intl')
au = einstein_coeff(mol_data)
mol_data.apply([au.value] * au.unit, name='eincoeff')
mol_data.apply([1.] * u.AU * u.AU * u.s, name='beta')
mol_data.apply([1.] / (u.m * u.m), name='cdensity')
mol_data.apply([1.], name='total_number')
q_found = []
parent = photo_timescale('CO') * vgas
coma = Haser(Q_estimate, vgas, parent)
for i in range(0, 5):
time = Time(co['Time'][i], format='iso')
integrated_flux = co['T_B'][i] * u.K * u.km / u.s
ephemobj = Ephem.from_horizons(target, epochs=time)
beta = beta_factor(mol_data, ephemobj)
mol_data['beta'] = beta
cdensity_bockelee = lte.cdensity_Bockelee(integrated_flux, mol_data)
mol_data['cdensity'] = cdensity_bockelee
cdensity = nonlte.from_pyradex(integrated_flux, mol_data)
mol_data['cdensity'] = cdensity
tnum = total_number(mol_data, aper, b)
mol_data['total_number'] = tnum
Q = from_Haser(coma, mol_data, aper=aper)
q_found.append(np.log10(Q.value)[0])
q_pred = list(co['log(Q)'])
err = abs((np.array(q_pred) - np.array(q_found)) / np.array(q_pred) * 100)
assert np.all(err < 0.35)
@remote_data
def test_pyradex_cdensity(mock_nonlte):
transition_freq = (177.196 * u.GHz).to(u.MHz)
mol_tag = 29002
cdensity_guess = (1.89*10.**(14) / (u.cm * u.cm))
temp_estimate = 20. * u.K
temp_back = 2.8 * u.K
mol_data = Phys.from_jplspec(temp_estimate, transition_freq, mol_tag)
mol_data.apply([cdensity_guess.value] *
cdensity_guess.unit, name='cdensity')
mol_data.apply([temp_back.value] * temp_back.unit, name='temp_back')
mol_data.apply(['HCO+@xpol'], name='lamda_name')
nonLTE = NonLTE()
cdensity = nonLTE.from_pyradex(1.234 * u.K * u.km / u.s, mol_data,
iter=100, collider_density={'H2': 900})
assert np.isclose(cdensity.value[0], 1.134e14)
assert cdensity.unit == u.cm**-2
```
#### File: sbpy/photometry/bandpass.py
```python
__all__ = [
'bandpass'
]
import os
from astropy.utils.data import get_pkg_data_filename
def bandpass(name):
"""Retrieve bandpass transmission spectrum from sbpy.
Parameters
----------
name : string
Name of the bandpass, case insensitive. See notes for
available filters.
Returns
-------
bp : `~synphot.SpectralElement`
Notes
-----
Available filters:
+-------------+---------------------------+
| Name | Source |
+=============+===========================+
| 2MASS J | Cohen et al. 2003 |
+-------------+---------------------------+
| 2MASS H | Cohen et al. 2003 |
+-------------+---------------------------+
| 2MASS Ks | Cohen et al. 2003 |
+-------------+---------------------------+
| <NAME> | STScI CDBS, v4 |
+-------------+---------------------------+
| <NAME> | STScI CDBS, v4 |
+-------------+---------------------------+
| <NAME> | STScI CDBS, v4 |
+-------------+---------------------------+
| <NAME> | STScI CDBS, v4 |
+-------------+---------------------------+
| <NAME> | STScI CDBS, v4 |
+-------------+---------------------------+
| PS1 g | Tonry et al. 2012 |
+-------------+---------------------------+
| PS1 r | Tonry et al. 2012 |
+-------------+---------------------------+
| PS1 i | Tonry et al. 2012 |
+-------------+---------------------------+
| PS1 w | Tonry et al. 2012 |
+-------------+---------------------------+
| PS1 y | Tonry et al. 2012 |
+-------------+---------------------------+
| PS1 z | Tonry et al. 2012 |
+-------------+---------------------------+
| SDSS u | SDSS, dated 2001 |
+-------------+---------------------------+
| SDSS g | SDSS, dated 2001 |
+-------------+---------------------------+
| SDSS r | SDSS, dated 2001 |
+-------------+---------------------------+
| SDSS i | SDSS, dated 2001 |
+-------------+---------------------------+
| SDSS z | SDSS, dated 2001 |
+-------------+---------------------------+
| WFC3 F438W | HST/WFC3 UVIS, v4 |
+-------------+---------------------------+
| WFC3 F606W | HST/WFC3 UVIS, v4 |
+-------------+---------------------------+
| WISE W1 | Jarrett et al. 2011 |
+-------------+---------------------------+
| WISE W2 | Jarrett et al. 2011 |
+-------------+---------------------------+
| WISE W3 | Jarrett et al. 2011 |
+-------------+---------------------------+
| WISE W4 | Jarrett et al. 2011 |
+-------------+---------------------------+
References
----------
.. [CDBS] Space Telescope Science Institute. HST Calibration Reference
Data System. https://hst-crds.stsci.edu/ .
.. [COH03] <NAME>. et al. 2003. Spectral Irradiance Calibration
in the Infrared. XIV. The Absolute Calibration of 2MASS. AJ
126, 1090.
.. [JAR11] <NAME>. et al. 2011. The Spitzer-WISE Survey of
the Ecliptic Poles. ApJ 735, 112.
.. [SDSS] Sloan Digital Sky Survey. Camera.
www.sdss.org/instruments/camera .
.. [TON12] <NAME>. et al. 2012. The Pan-STARRS1 Photometric
System. ApJ 750, 99.
"""
try:
import synphot
except ImportError:
raise ImportError('synphot is required.')
name2file = {
'2mass j': '2mass-j-rsr.txt',
'2mass h': '2mass-h-rsr.txt',
'2mass ks': '2mass-ks-rsr.txt',
'cousins r': 'cousins_r_004_syn.fits',
'cousins i': 'cousins_i_004_syn.fits',
'johnson u': 'johnson_u_004_syn.fits',
'johnson b': 'johnson_b_004_syn.fits',
'johnson v': 'johnson_v_004_syn.fits',
'ps1 g': 'ps1-gp1.txt',
'ps1 r': 'ps1-rp1.txt',
'ps1 i': 'ps1-ip1.txt',
'ps1 w': 'ps1-wp1.txt',
'ps1 y': 'ps1-yp1.txt',
'ps1 z': 'ps1-zp1.txt',
'sdss u': 'sdss-u.fits',
'sdss g': 'sdss-g.fits',
'sdss r': 'sdss-r.fits',
'sdss i': 'sdss-i.fits',
'sdss z': 'sdss-z.fits',
'wfc3 f438w': 'wfc3_uvis_f438w_004_syn.fits',
'wfc3 f606w': 'wfc3_uvis_f606w_004_syn.fits',
'wise w1': 'WISE-RSR-W1.EE.txt',
'wise w2': 'WISE-RSR-W2.EE.txt',
'wise w3': 'WISE-RSR-W3.EE.txt',
'wise w4': 'WISE-RSR-W4.EE.txt',
}
fn = get_pkg_data_filename(os.path.join(
'..', 'photometry', 'data', name2file[name.lower()]))
bp = synphot.SpectralElement.from_file(fn)
return bp
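# --- Added usage note (not part of the original module) ---
# Example (assuming synphot is installed):
# >>> import astropy.units as u
# >>> bp = bandpass('PS1 r')
# >>> bp(0.62 * u.um)   # throughput of the PS1 r filter evaluated at 0.62 micron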
``` |
{
"source": "jianyex/stocks",
"score": 3
} |
#### File: stocks/stocks/daily_update.py
```python
import time
import datetime
import random
from stocks.stock_data_fetcher import InvestingComSp500TodayFetcher, YahooFinanceStockDataFetcher
from stocks.sqlite_connector import SqliteConnector
from stocks.constants import db_file_path, SYMBOLS
def fetch_daily_sp500_quotes_from_investing_com():
"""
Download and parse the stock quotes data for today from investing.com
:return: historical_data, as json string
"""
investing_com_data_fetcher = InvestingComSp500TodayFetcher()
historical_data = investing_com_data_fetcher.get_historical_data()
return historical_data
def save_quotes_from_investing_com_to_db(raw_json_data, conn):
"""
save today's SP500 raw data to the database
:param raw_json_data:
:param conn:
:return:
"""
raw_json_data_str = str(raw_json_data)
date = raw_json_data["3M"]['pid-277-time']
records = [{"raw_json_data": raw_json_data_str, "date": date}]
conn.insert_records(table="investing_com_sp500_today", records=records)
def fetch_data_from_yahoo_finance(start="2017-01-01"):
"""
fetch data from yahoo finance from the start date to the end date, and save it to the database
:param start:
:return:
"""
conn = SqliteConnector(db_file_path)
yahoo_finance = YahooFinanceStockDataFetcher()
end = str(datetime.datetime.today().strftime('%Y-%m-%d'))
for symbol in SYMBOLS:
current_data = conn.pull_data_as_df(
"select * from raw_stock_data where symbol='{}' and date > '2019-01-01';".format(symbol))
if len(current_data) > 140:
print("Data for stock {} already exists...".format(symbol))
else:
print("Start fetching data for stock {}...".format(symbol))
df = yahoo_finance.get_historical_data(symbol, start, end)
print(df.head())
print(df.tail())
conn.dump_df_to_sql(df, "raw_stock_data")
time.sleep(200 + random.randint(-50, 50))
print("End fetching data for stock {}...".format(symbol))
def main():
conn = SqliteConnector(db_file_path)
historical_data = fetch_daily_sp500_quotes_from_investing_com()
save_quotes_from_investing_com_to_db(historical_data, conn)
if __name__ == "__main__":
main()
```
#### File: stocks/stocks/feature_builder.py
```python
import pandas as pd
import os
import talib
from stocks.sqlite_connector import SqliteConnector
class FeatureBuilder:
def __init__(self, data):
self._data = data
def add_features(self):
pass
def add_label(self, label_def_func):
pass
@property
def data(self):
return self._get_data()
def _get_data(self):
return self._data
if __name__ == "__main__":
current_dir = os.getcwd()
db_file_path = os.path.join(current_dir, "../data/stocks.db")
conn = SqliteConnector(db_file_path)
query = "SELECT * FROM raw_stock_data WHERE symbol in ('AAPL', 'GILD') ORDER BY symbol, date;"
data = conn.pull_data_as_df(query)
feature_builder = FeatureBuilder(data=data)
print(feature_builder.data)
```
#### File: stocks/tests/test_daily_update.py
```python
import pytest
import json
from stocks.constants import db_file_path
from stocks.sqlite_connector import SqliteConnector
from stocks.daily_update import save_quotes_from_investing_com_to_db
@pytest.fixture
def raw_json_data():
with open(".\\investing_com_raw_data.json", "rb") as f:
raw_json_data = json.load(f)
return raw_json_data
@pytest.fixture
def conn():
conn = SqliteConnector(db_file_path)
return conn
def test_save_quotes_from_investing_com_to_db(raw_json_data, conn):
save_quotes_from_investing_com_to_db(raw_json_data, conn)
``` |
{
"source": "JianyiCheng/DSS",
"score": 3
} |
#### File: DSS/src/dot_find_cycles.py
```python
import sys
from os import path, access, R_OK
import argparse
import networkx as nx
from networkx.drawing.nx_pydot import read_dot
def main():
parser = argparse.ArgumentParser(description="Finds cycles in dot file graphs, such as those from Puppet. "
"By <NAME> <http://blog.jasonantman.com>")
parser.add_argument('dotfile', metavar='DOTFILE', nargs='?', type=argparse.FileType('r'), default=sys.stdin,
help="the dotfile to process. Uses standard input if argument is '-' or not present")
parser.add_argument("--only-shortest", action='store_true',
help="only show the shortest cycles. Example: if both A->C and A->B->C exist, only show the former. "
"This vastly reduces the amount of output when analysing dependency issues.")
parser.add_argument("--print-labels", action='store_true',
help="print the node labels instead of their ids.")
args = parser.parse_args()
# read in the specified file, create a networkx DiGraph
G = nx.DiGraph(read_dot(args.dotfile))
C = nx.simple_cycles(G)
if args.only_shortest:
C = remove_super_cycles(C)
if args.print_labels:
C = extract_node_labels(C, G)
for i in C:
# append the first node again so that the cycle is complete
i.append(i[0])
print(" -> ".join(i))
def remove_super_cycles(cycle_list):
# sorting by length makes the search easier, because shorter cycles cannot be supercycles of longer ones
cycle_list = sorted(cycle_list, key=len)
forward_index = 0
while forward_index < len(cycle_list):
backward_index = len(cycle_list) - 1
while backward_index > forward_index:
# when comparing two cycles, remove all elements that are not in the shortest one
filtered_list = [x for x in cycle_list[backward_index] if x in cycle_list[forward_index]]
# double the cycle length, to account for cycles shifted over the end of the list
full_cycle = filtered_list + filtered_list
# find the matching start position
while full_cycle and full_cycle[0] != cycle_list[forward_index][0]:
del full_cycle[0]
# matching start position found, now compare the rest
if cycle_list[forward_index] == full_cycle[:len(cycle_list[forward_index])]:
# cycle matches, remove supercycle from end result
del cycle_list[backward_index]
backward_index = backward_index - 1
forward_index = forward_index + 1
return cycle_list
def extract_node_labels(C, G):
C_labels = []
for cycle in C:
cycle_labels = []
for node_id in cycle:
cycle_labels.append(G.nodes[node_id]['label'].replace('"',''))
C_labels.append(cycle_labels)
return C_labels
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass # eat CTRL+C so it won't show an exception
```
#### File: DSS/src/frontEndAnalysis.py
```python
from __future__ import print_function
import os, fnmatch, datetime, sys, re
import helper as helper
top = sys.argv[1]
mode = sys.argv[2] if len(sys.argv) > 2 else "release"
hardware = sys.argv[3] if len(sys.argv) > 3 else "full"
def funcSearch(parentI): # search for head files and called function
global fI
index = fI
funcName = fT.name[index]
cppTemp = helper.fileOpen(srcDir+"/"+funcName+".cpp")
buff = ""
for line in cppTemp:
buff=buff+line
cppTemp.close()
buff = helper.removeComment(buff)
buff = buff.replace(" ", "")
buff = buff.replace("\t", "")
buff = buff.replace("\n", "")
if "#pragmaSSII=" in buff:
fT.sch[index] = 1
iiStr = helper.strFindNumber(buff, "#pragmaSSII=")
if iiStr == "":
helper.error("Error: II value is not specified properly in function "+funcName)
assert 0
else:
fT.ii[index] = int(iiStr)
elif "#pragmaSS" in buff: # by default II = 1
fT.sch[index] = 1
fT.ii[index] = 1
elif fT.ii[index] == 1:
helper.error("Error: DS function ("+funcName+") is not allowed in a SS function("+fT.name[parentI]+")!")
astTemp = helper.fileOpen(srcDir+"/"+funcName+"Ast.rpt")
funcCheck = 0
for line in astTemp:
if ".h" in line and line[line.find("<")+1:line.find(".h")+2] not in hL:
hL.append(line[line.find("<")+1:line.find(".h")+2])
if ".cpp" in line and line[line.find("<")+1:line.find(".cpp")+4] not in cppL:
cppL.append(line[line.find("<")+1:line.find(".cpp")+4])
if " Function " in line and line[1+line.find("'", line.find(" Function ")):line.find("'", line.find("'", line.find(" Function "))+1)] not in fT.name: # found a new called function
fI = fI + 1
fT.name.append(line[1+line.find("'", line.find(" Function ")):line.find("'", line.find("'", line.find(" Function "))+1)])
fT.sch.append(fT.sch[index]) # following the parent function by default
fT.ii.append(-1)
funcSearch(index)
if "`-FunctionDecl" in line:
funcCheck = funcCheck + 1
astTemp.close()
if funcCheck == 0:
helper.error("Error: Cannot find declaration of function "+funcName+" in "+srcDir+"/"+funcName+".cpp")
assert 0
elif funcCheck > 1:
helper.error("Error: More than one function declaration found in one cpp file: "+funcName+".cpp\nPlease declare each function in its own cpp file.")
assert 0
elif funcCheck != 1:
helper.error("Error: Unkown bug found during searching function declaration.\n")
assert 0
def dynamicCodeGen(index):
dynaSh.write("# function: "+fT.name[index]+"\ncp "+dss+"/examples/"+buildDir+"/"+fT.name[index]+".cpp ./\nmake name="+fT.name[index]+" graph\nrm -r "+dss+"/examples/"+buildDir+"/ds_"+fT.name[index]+"\nmv _build/"+fT.name[index]+" "+dss+"/examples/"+buildDir+"/ds_"+fT.name[index]+"\nif [ ! -f "+dss+"/examples/"+buildDir+"/ds_"+fT.name[index]+"/"+fT.name[index]+"_graph.dot ]; then\n\tmv "+dss+"/examples/"+buildDir+"/ds_"+fT.name[index]+"/*_graph.dot "+dss+"/examples/"+buildDir+"/ds_"+fT.name[index]+"/"+fT.name[index]+"_graph.dot\nfi\ncp "+dss+"/examples/"+top+"/_build/ds/"+fT.name[index]+"_bbgraph.dot "+dss+"/examples/"+buildDir+"/ds_"+fT.name[index]+"/"+fT.name[index]+"_bbgraph.dot")
astTemp=[]
ftemp=helper.fileOpen(srcDir+"/"+fT.name[index]+"Ast.rpt")
for line in ftemp:
astTemp.append(line)
ftemp.close()
astTemp.append(" ")
astTemp.append(" ")
astTemp.append(" ") # in case it jump to the index out of the range
cppTemp=[]
ftemp=helper.fileOpen(srcDir+"/"+fT.name[index]+".cpp")
for line in ftemp:
cppTemp.append(line)
ftemp.close()
# find called function and remove memory interface
funcProtpy = []
i = 0
buffList = []
while i < len(astTemp):
if " Function " in astTemp[i]:
funcName = astTemp[i][1+astTemp[i].find("'", astTemp[i].find(" Function ")):astTemp[i].find("'", astTemp[i].find("'", astTemp[i].find(" Function "))+1)]
j = i+2
varCount = 0
while "`-DeclRefExpr" in astTemp[j]:
j = j+2
varCount = varCount+1
j = i+2
varI = []
while "`-DeclRefExpr" in astTemp[j]:
if " *'" in astTemp[j]:
varI.append(0)
colNum = helper.astGetCol(astTemp, j)
lineNum = helper.astGetLine(astTemp, j)
varName = astTemp[j][1+astTemp[j].find("'", astTemp[j].find("Var ")):astTemp[j].find("'", astTemp[j].find("'", astTemp[j].find("Var "))+1)]
if colNum == "":
helper.error("Error: cannot find column in AST dump at line: "+str(j) + " in "+buildDir+"/"+fT.name[index]+"Ast.rpt")
assert 0
elif lineNum == "":
helper.error("Error: cannot find line in AST dump at line: "+str(j) + " in "+buildDir+"/"+fT.name[index]+"Ast.rpt")
assert 0
else:
colNum = int(colNum)
lineNum = int(lineNum)
spaceVar = ''.join([" " for iI in range(0, len(varName))])
cppTemp[lineNum-1] = cppTemp[lineNum-1][0:colNum-1]+ spaceVar + cppTemp[lineNum-1][colNum-1+len(varName):]
if len(varI) < varCount :
while cppTemp[lineNum-1].find(",",colNum) == -1:
lineNum = lineNum + 1
colNum = 0
cppTemp[lineNum-1] = cppTemp[lineNum-1][:cppTemp[lineNum-1].find(",",colNum)]+" "+cppTemp[lineNum-1][cppTemp[lineNum-1].find(",",colNum)+1:]
elif sum(varI) != 0:
while cppTemp[lineNum-1].rfind(",",0,colNum) == -1:
lineNum = lineNum -1
colNum = len(cppTemp[lineNum-1])-1
cppTemp[lineNum-1] = cppTemp[lineNum-1][:cppTemp[lineNum-1].rfind(",",0,colNum)]+" "+cppTemp[lineNum-1][cppTemp[lineNum-1].rfind(",",0,colNum)+1:]
else:
varI.append(1)
j = j+2
j = 0
while "FunctionDecl " not in astTemp[j] or funcName+" '" not in astTemp[j]:
j = j + 1
strTemp = ""
strTemp = astTemp[j][astTemp[j].find(funcName+" '")+len(funcName)+2:len(astTemp[j])-2].replace(" ", " "+funcName, 1)
iI = 0
k = ""
j = j+1
buff = ""
while strTemp[iI] != ')':
if strTemp[iI] == '(':
k = k+buff+"("
buff = ""
elif strTemp[iI] == ',':
if "ParmVarDecl " not in astTemp[j]:
helper.error("Error: function prototype does not match with the called function: "+funcName)
assert 0
if "*" not in buff:
if k[len(k)-1] == '(':
k = k+buff+" "+astTemp[j][astTemp[j].rfind(" ",0,astTemp[j].find("'")-1)+1:astTemp[j].find("'")-1]
else:
k = k+", "+buff+" "+astTemp[j][astTemp[j].rfind(" ",0,astTemp[j].find("'")-1)+1:astTemp[j].find("'")-1]
buff = ""
j = j+1
else:
buff = buff + strTemp[iI]
iI = iI + 1
if "*" not in buff:
if k[len(k)-1] == '(':
k = k+buff+" "+astTemp[j][astTemp[j].rfind(" ",0,astTemp[j].find("'")-1)+1:astTemp[j].find("'")-1]+");\n"
else:
k = k+", "+buff+" "+astTemp[j][astTemp[j].rfind(" ",0,astTemp[j].find("'")-1)+1:astTemp[j].find("'")-1]+");\n"
else:
k = k+");\n"
if k not in buffList:
buffList.append(k)
funcProtpy.append(k)
i = i+1
ftemp=open(buildDir+"/"+fT.name[index]+".cpp", "w")
for line in funcProtpy:
ftemp.write(line)
for line in cppTemp:
if "#include \"" not in line:
ftemp.write(line)
ftemp.close()
def staticCodeGen(index):
iiCheck = 0
swapFile = -1
for cpp in cppL:
if fT.name[index]+".cpp" in cpp:
swapFile = index
ftemp = helper.fileOpen(cpp)
preCode = ""
for line in ftemp:
preCode = preCode + line
ftemp.close()
preCode = helper.removeComment(preCode)
ftemp = open(srcDir+"/"+fT.name[index]+"_.cpp","w")
ftemp.write(preCode)
ftemp.close()
ftemp = helper.fileOpen(srcDir+"/"+fT.name[index]+"_.cpp")
preCode = []
for line in ftemp:
if "#include \"" in line:
preCode.append(line.replace("#include \"","#include \""+os.path.relpath(cpp[:cpp.rfind("/")],os.getcwd()+"/"+buildDir)+"/"))
elif "#" in line and "pragma" in line and "SS" in line and "II" in line and "=" in line and str(fT.ii[index]) in line:
preCode.append("#pragma HLS PIPELINE II="+str(fT.ii[index])+"\n")
iiCheck = 1
elif fT.name[index]+"(" in line: # assume "{" is in the same line as the function prototype
preCode.append(line)
line.replace("\t", " ")
# idx_1 = line.find("(")
# #if "void" not in line[0:idx_1]:
# # preCode.append("#pragma HLS interface ap_hs port=return\n") # use ap_done as valid, ap_ce as nready
# idx_2 = line.find(",")
# args = []
# while idx_2 != -1:
# i = 0
# for k in line[idx_1+1:idx_2].split(" "):
# if k != "":
# i = i + 1
# if i == 2:
# args.append(k)
# assert(i == 2)
# idx_1 = idx_2
# idx_2 = line.find(",")
# if line[idx_1+1:line.find(")")] != "void":
# i = 0
# for k in line[idx_1+1:line.find(")")].split(" "):
# if k != "":
# i = i + 1
# if i == 2:
# args.append(k)
#
# for arg in args:
# preCode.append("#pragma HLS interface ap_hs port="+arg+"\n")
else:
preCode.append(line)
ftemp.close()
if iiCheck == 0:
helper.error("Error: incorrect pragma presentation in "+line)
ftemp = open(buildDir+"/"+fT.name[index]+".cpp", "w")
for line in preCode:
ftemp.write(line)
ftemp.close()
statTcl.write("open_project -reset ss_"+fT.name[index]+"\nset_top "+fT.name[index]+"\nadd_files {")
i = 0
while i < len(cppL):
if i == swapFile:
statTcl.write(os.path.relpath(fT.name[index]+".cpp "))
else:
statTcl.write(os.path.relpath(cppL[i],os.getcwd()+"/"+buildDir)+" ")
i = i+1
i = 0
while i < len(hL):
statTcl.write(os.path.relpath(hL[i],os.getcwd()+"/"+buildDir)+" ")
i = i+1
statTcl.write("}\nopen_solution -reset \"solution1\"\nset_part {xc7z020clg484-1}\ncreate_clock -period 10 -name default\n")
if hardware == "full":
statTcl.write("config_bind -effort high\n")
statTcl.write("config_interface -clock_enable\ncsynth_design\n")
if hardware == "full":
statTcl.write("export_design -flow syn -rtl vhdl -format ip_catalog\n\n")
fT = helper.funcTree()
fT.name.append(top)
fT.sch.append(0) # DS implementation
fT.ii.append(-1)
hL = []
cppL = []
fI = 0
buildDir=top+"/_build/dss"
srcDir=top+"/_build/dss/src"
vhdlDir=top+"/vhdl/dss"
buff = []
ftemp=helper.fileOpen("../env.tcl")
for line in ftemp:
if "#" in line:
buff.append(line[0:line.find("#")])
else:
buff.append(line)
ftemp.close()
for line in buff:
if "DSS=" in line:
dss = helper.findPath(line, buff).replace("\n","")
if "CLANG=" in line:
clang = helper.findPath(line, buff).replace("\n","")
if "VHLS=" in line:
vhls = helper.findPath(line, buff).replace("\n","")
if "OPT=" in line:
opt = helper.findPath(line, buff).replace("\n","")
if "DHLS=" in line:
dhls = helper.findPath(line, buff).replace("\n","")
print("Top function '"+top+"'.("+mode+") Start preprocessing...")
print("--------------- DSS Synthesis -----------------")
funcSearch(-1)
# final check
if len(fT.name) != len(fT.sch) or len(fT.name) != len(fT.ii):
print("Error found: function tree construction failed")
assert 0
ftemp=open(buildDir+"/config.tcl", "w")
ftemp.write(top+","+str(len(fT.name))+"\n")
i = 0
while (i<len(fT.name)):
ftemp.write(fT.name[i]+","+str(fT.sch[i])+","+str(fT.ii[i])+"\n")
print(str(i)+". Function '"+fT.name[i]+"' is scheduled in ", end="")
print("SS with II = "+str(fT.ii[i]) if fT.sch[i] else "DS")
i = i+1
ftemp.close()
ftemp=open(buildDir+"/dir.tcl", "w")
i = 0
while (i<len(hL)):
ftemp.write(hL[i]+"\n")
i = i+1
i = 0
while (i<len(cppL)):
ftemp.write(cppL[i]+"\n")
i = i+1
ftemp.close()
dynaSh=open(buildDir+"/dynamic.sh", "w")
dynaSh.write("#!/bin/sh\nDHLS_EX="+dhls+"/elastic-circuits/examples/\n\ncd $DHLS_EX\n".replace("//", "/"))
statTcl=open(buildDir+"/static.tcl", "w")
i = 0
while (i<len(fT.sch)):
if fT.sch[i] == 0:
dynamicCodeGen(i)
else:
staticCodeGen(i)
i = i+1
dynaSh.close()
statTcl.close()
print("Front-end analysis finished successfully.")
# statSh.write("cp -r "+buildDir+"/ss_"+fT.name[index]+"/solution1/impl/ip "+vhdlDir+"ss_"+fT.name[index]+"\n")
``` |
{
"source": "JianyiCheng/MachSuite",
"score": 3
} |
#### File: JianyiCheng/MachSuite/ResultCheck.py
```python
from optparse import OptionParser
import sys, os, time, datetime, glob
from tabulate import tabulate
from typing import List
benchmarks = ['aes/aes', 'backprop/backprop', 'bfs/bulk', 'bfs/queue', 'fft/strided', \
'fft/transpose', 'gemm/blocked', 'gemm/ncubed', 'kmp/kmp', 'nw/nw', \
'md/knn', 'md/grid', 'sort/merge', 'sort/radix', 'spmv/ellpack', \
'spmv/crs', 'stencil/stencil2d', 'stencil/stencil3d', 'viterbi/viterbi']
def check():
total = len(benchmarks)
for benchmark in benchmarks:
print(benchmark, end="\t\t")
bench_name = benchmark[0:benchmark.find("/")]
proj_name = benchmark + '/' + bench_name + '_syn'
cosim_log_name = proj_name + '/solution/sim/report/*.rpt'
cosim_log = glob.glob(cosim_log_name)
if os.path.isdir(proj_name) and len(cosim_log) == 1:
with open(cosim_log[0]) as check_log:
if 'Pass' in check_log.read():
print('success')
continue
print("fail")
def main():
optparser = OptionParser()
optparser.add_option("-c", "--check", action="store_true", dest="check_status", default=True,
help="Check Vitis project status")
(options, args) = optparser.parse_args()
if options.check_status:
check()
if __name__ == '__main__':
main()
``` |
{
"source": "JianYiHuang-int/demo",
"score": 2
} |
#### File: JianYiHuang-int/demo/api.py
```python
import requests
import json
"""
【事件数值】 【事件描述】
100 私聊消息
200 群聊消息
300 暂无
400 群成员增加
410 群成员减少
500 收到好友请求
600 二维码收款
700 收到转账
800 软件开始启动
900 新的账号登录完成
910 账号下线
"""
""""
* @param int type => 事件类型(事件列表可参考 - 事件列表demo)
* @param string from_wxid => 1级来源id(比如发消息的人的id)
* @param string from_name => 1级来源昵称(比如发消息的人昵称)
* @param string final_from_wxid => 2级来源id(群消息事件下,1级来源为群id,2级来源为发消息的成员id,私聊事件下都一样)
* @param string final_nickname => 2级来源昵称
* @param string robot_wxid => 当前登录的账号(机器人)标识id
* @param string msg => 消息内容
* @param string parameters => 附加参数(暂未用到,请忽略)
* @param int time => 请求时间(时间戳10位版本)
【功能名】 【demo中函数名】
发送文本消息 send_text_msg()
发送群消息并艾特某人 send_group_at_msg()
发送图片消息 send_iamge_msg()
发送视频消息 send_video_msg()
发送文件消息 send_file_msg()
发送动态表情 send_emoji_msg()
发送分享链接 send_link_msg()
发送音乐消息 send_music_msg()
取指定登录账号的昵称 get_robot_name()
取指定登录账号的头像 get_robot_headimgurl()
取登录账号列表 get_logged_account_list()
取好友列表 get_friend_list()
取群聊列表 get_group_list()
取群成员资料 get_group_member()
取群成员列表 get_group_member_list()
接收好友转账 accept_transfer()
同意群聊邀请 agree_group_invite()
同意好友请求 agree_friend_verify()
修改好友备注 modify_friend_note()
删除好友 delete_friend()
踢出群成员 remove_group_member()
修改群名称 modify_group_name()
修改群公告 modify_group_notice()
建立新群 building_group()
退出群聊 quit_group()
邀请加入群聊 invite_in_group()
"""
# Endpoint used to actively call the send API
API_URL = "http://127.0.0.1:8073/send"
# API_URL = "http://172.16.58.3:8073/send"
# TODO data['msg'] = requests.utils.quote(msg)  # message content; if sent messages show up garbled, encode them this way
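# --- Illustrative sketch (not part of the original demo): dispatching callback events ---
# The callback fields documented above arrive as one JSON object per event. A minimal way to
# route them to the send_* helpers defined below is a small dispatcher such as this one; the
# function name, the reply texts, and the choice of handled event types are assumptions for
# illustration only.
def handle_event_example(event):
    """Dispatch one parsed callback event (a dict with the fields listed above)."""
    event_type = int(event.get("type", 0))
    robot = event.get("robot_wxid", "")
    if event_type == 100:   # private chat message: echo it back to the sender
        return send_text_msg(robot, event.get("from_wxid", ""), "Received: " + str(event.get("msg", "")))
    if event_type == 200:   # group chat message: reply in the group and @ the sender
        return send_group_at_msg(robot, event.get("from_wxid", ""),
                                 event.get("final_from_wxid", ""),
                                 event.get("final_nickname", ""), "Hello")
    if event_type == 500:   # friend request: accept it by passing through the raw message
        return agree_friend_verify(robot, event.get("msg", ""))
    return None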
def send_text_msg(robwxid, to_wxid, msg):
"""
发送文字消息(好友或者群)
* @access public
* @param string robwxid 登录账号id,用哪个账号去发送这条消息
* @param string to_wxid 对方的id,可以是群或者好友id
* @param string msg 消息内容
* @return string json_string
"""
data = dict()
data['type'] = 100 # Api数值(可以参考 - api列表demo)
data['msg'] = requests.utils.quote(msg) # 发送内容
data['to_wxid'] = to_wxid # 对方id
data['robot_wxid'] = robwxid # 账户id,用哪个账号去发送这条消息
result = json.dumps(data)
try:
return requests.post(API_URL, data={"data": result})
except requests.exceptions.ConnectionError as e:
raise ConnectionError(f"请检查当前的回复地址是否正确! {API_URL}")
def send_group_at_msg(robwxid, to_wxid, at_wxid, at_name, msg):
"""
* 发送群消息并艾特某人
*
* @access public
* @param string robwxid 账户id,用哪个账号去发送这条消息
* @param string to_wxid 群id
* @param string at_wxid 艾特的id,群成员的id
* @param string at_name 艾特的昵称,群成员的昵称
* @param string msg 消息内容
* @return string json_string
"""
data = dict()
data['type'] = 102 # Api数值(可以参考 - api列表demo)
data['msg'] = requests.utils.quote(msg) # 发送的文件的绝对路径
data['to_wxid'] = to_wxid # 群id
data['at_wxid'] = at_wxid # 艾特的id,群成员的id
data['at_name'] = at_name # 艾特的昵称,群成员的昵称
data['robot_wxid'] = robwxid # 账户id,用哪个账号去发送这条消息
result = json.dumps(data)
try:
return requests.post(API_URL, data={"data": result})
except requests.exceptions.ConnectionError as e:
raise ConnectionError(f"请检查当前的回复地址是否正确! {API_URL}")
def send_image_msg(robwxid, to_wxid, path):
"""
* 发送图片消息
*
* @access public
* @param string robwxid 登录账号id,用哪个账号去发送这条消息
* @param string to_wxid 对方的id,可以是群或者好友id
* @param string path 图片的绝对路径
* @return string json_string
"""
data = dict()
data['type'] = 103 # Api数值(可以参考 - api列表demo)
data['msg'] = path # 发送的图片的绝对路径
data['to_wxid'] = to_wxid # 对方id
data['robot_wxid'] = robwxid # 账户id,用哪个账号去发送这条消息
result = json.dumps(data)
try:
return requests.post(API_URL, data={"data": result})
except requests.exceptions.ConnectionError as e:
raise ConnectionError(f"请检查当前的回复地址是否正确! {API_URL}")
def send_video_msg(robwxid, to_wxid, path):
"""
* 发送视频消息
*
* @access public
* @param string robwxid 账户id,用哪个账号去发送这条消息
* @param string to_wxid 对方的id,可以是群或者好友id
* @param string path 视频的绝对路径
* @return string json_string
"""
data = dict()
data['type'] = 104 # Api数值(可以参考 - api列表demo)
data['msg'] = path # 发送的视频的绝对路径
data['to_wxid'] = to_wxid # 对方id
data['robot_wxid'] = robwxid # 账户id,用哪个账号去发送这条消息
result = json.dumps(data)
try:
return requests.post(API_URL, data={"data": result})
except requests.exceptions.ConnectionError as e:
raise ConnectionError(f"请检查当前的回复地址是否正确! {API_URL}")
def send_file_msg(robwxid, to_wxid, path):
"""
* 发送文件消息
*
* @access public
* @param string robwxid 账户id,用哪个账号去发送这条消息
* @param string to_wxid 对方的id,可以是群或者好友id
* @param string path 文件的绝对路径
* @return string json_string
"""
data = dict()
data['type'] = 105 # Api数值(可以参考 - api列表demo)
data['msg'] = path # 发送的文件的绝对路径
data['to_wxid'] = to_wxid # 对方id(默认发送至来源的id,也可以发给其他人)
data['robot_wxid'] = robwxid # 账户id,用哪个账号去发送这条消息
result = json.dumps(data)
try:
return requests.post(API_URL, data={"data": result})
except requests.exceptions.ConnectionError as e:
raise ConnectionError(f"请检查当前的回复地址是否正确! {API_URL}")
def send_emoji_msg(robwxid, to_wxid, path):
"""
* 发送动态表情
*
* @access public
* @param string robwxid 账户id,用哪个账号去发送这条消息
* @param string to_wxid 对方的id,可以是群或者好友id
* @param string path 动态表情文件(通常是gif)的绝对路径
* @return string json_string
"""
data = dict()
data['type'] = 106 # Api数值(可以参考 - api列表demo)
data['msg'] = path # 发送的动态表情的绝对路径
data['to_wxid'] = to_wxid # 对方id(默认发送至来源的id,也可以发给其他人)
data['robot_wxid'] = robwxid # 账户id,用哪个账号去发送这条消息
result = json.dumps(data)
try:
return requests.post(API_URL, data={"data": result})
except requests.exceptions.ConnectionError as e:
raise ConnectionError(f"请检查当前的回复地址是否正确! {API_URL}")
def send_link_msg(robwxid, to_wxid, title, text, target_url, pic_url):
"""
* 发送分享链接
*
* @access public
* @param string robwxid 账户id,用哪个账号去发送这条消息
* @param string to_wxid 对方的id,可以是群或者好友id
* @param string title 链接标题
* @param string text 链接内容
* @param string target_url 跳转链接
* @param string pic_url 图片链接
* @return string json_string
"""
link = dict()
link['title'] = title
link['text'] = text
link['url'] = target_url
link['pic'] = pic_url
# 封装返回数据结构
data = dict()
data['type'] = 107 # Api数值(可以参考 - api列表demo)
data['msg'] = link # 发送的分享链接结构体
data['to_wxid'] = to_wxid # 对方id(默认发送至来源的id,也可以发给其他人)
data['robot_wxid'] = robwxid # 账户id,用哪个账号去发送这条消息
result = json.dumps(data)
try:
return requests.post(API_URL, data={"data": result})
except requests.exceptions.ConnectionError as e:
raise ConnectionError(f"请检查当前的回复地址是否正确! {API_URL}")
def send_music_msg(robwxid, to_wxid, name):
"""
* 发送音乐分享
*
* @access public
* @param string robwxid 账户id,用哪个账号去发送这条消息
* @param string to_wxid 对方的id,可以是群或者好友id
* @param string name 歌曲名字
* @return string json_string
"""
data = dict()
data['type'] = 108 # Api数值(可以参考 - api列表demo)
data['msg'] = name # 歌曲名字
data['to_wxid'] = to_wxid # 对方id(默认发送至来源的id,也可以发给其他人)
data['robot_wxid'] = robwxid # 账户id,用哪个账号去发送这条消息
result = json.dumps(data)
try:
return requests.post(API_URL, data={"data": result})
except requests.exceptions.ConnectionError as e:
raise ConnectionError(f"请检查当前的回复地址是否正确! {API_URL}")
def get_robot_name(robwxid):
"""
* 取指定登录账号的昵称
*
* @access public
* @param string robwxid 账户id
* @return string 账号昵称
"""
data = dict()
data['type'] = 201 # Api数值(可以参考 - api列表demo)
data['robot_wxid'] = robwxid # 账户id
result = json.dumps(data)
try:
return requests.post(API_URL, data={"data": result})
except requests.exceptions.ConnectionError as e:
raise ConnectionError(f"请检查当前的回复地址是否正确! {API_URL}")
def get_robot_headimgurl(robwxid):
"""
* 取指定登录账号的头像
*
* @access public
* @param string robwxid 账户id
* @return string 头像http地址
"""
data = dict()
data['type'] = 202 # Api数值(可以参考 - api列表demo)
data['robot_wxid'] = robwxid # 账户id
result = json.dumps(data)
try:
return requests.post(API_URL, data={"data": result})
except requests.exceptions.ConnectionError as e:
raise ConnectionError(f"请检查当前的回复地址是否正确! {API_URL}")
def get_logged_account_list():
"""
* 取登录账号列表
*
* @access public
* @param string robwxid 账户id
* @return string 当前框架已登录的账号信息列表
"""
data = dict()
data['type'] = 203 # Api数值(可以参考 - api列表demo)
result = json.dumps(data)
try:
return requests.post(API_URL, data={"data": result})
except requests.exceptions.ConnectionError as e:
raise ConnectionError(f"请检查当前的回复地址是否正确! {API_URL}")
def get_friend_list(robwxid='', is_refresh=0):
"""
* 取好友列表
*
* @access public
* @param string robwxid 账户id
* @param string is_refresh 是否刷新
* @return string 当前框架已登录的账号信息列表
"""
data = dict()
data['type'] = 204 # Api数值(可以参考 - api列表demo)
data['robot_wxid'] = robwxid # 账户id(可选,如果填空字符串,即取所有登录账号的好友列表,反正取指定账号的列表)
data['is_refresh'] = is_refresh # 是否刷新列表,0 从缓存获取 / 1 刷新并获取
result = json.dumps(data)
try:
ret_data = requests.post(API_URL, data=result).text # 获取数据
ret_json = json.loads(ret_data) # 格式化提取数据
ret_list = requests.utils.unquote(ret_json["data"]) # 对获取的数据进行解码
return json.loads(ret_list) # 对解码后的数据返回list
except requests.exceptions.ConnectionError as e:
raise ConnectionError(f"请检查当前的回复地址是否正确! {API_URL}")
def get_group_list(robwxid='', is_refresh=0):
"""
* 取群聊列表
*
* @access public
* @param string robwxid 账户id
* @param string is_refresh 是否刷新
* @return string 当前框架已登录的账号信息列表
"""
data = dict()
data['type'] = 205 # Api数值(可以参考 - api列表demo)
data['robot_wxid'] = robwxid # 账户id(可选,如果填空字符串,即取所有登录账号的好友列表,反正取指定账号的列表)
data['is_refresh'] = is_refresh # 是否刷新列表,0 从缓存获取 / 1 刷新并获取
result = json.dumps(data)
try:
return requests.post(API_URL, data={"data": result})
except requests.exceptions.ConnectionError as e:
raise ConnectionError(f"请检查当前的回复地址是否正确! {API_URL}")
def get_group_member_list(robwxid, group_wxid, is_refresh=0):
"""
* 取群成员列表
*
* @access public
* @param string robwxid 账户id
* @param string group_wxid 群id
* @param string is_refresh 是否刷新
* @return string 当前框架已登录的账号信息列表
"""
data = dict()
data['type'] = 206 # Api数值(可以参考 - api列表demo)
data['robot_wxid'] = robwxid # 账户id
data['group_wxid'] = group_wxid # 群id
data['is_refresh'] = is_refresh # 是否刷新列表,0 从缓存获取 / 1 刷新并获取
result = json.dumps(data)
try:
ret_data = requests.post(API_URL, data=result).text # 获取数据
ret_json = json.loads(ret_data) # 格式化提取数据
ret_list = requests.utils.unquote(ret_json["data"]) # 对获取的数据进行解码
return json.loads(ret_list) # 对解码后的数据返回list
except requests.exceptions.ConnectionError as e:
raise ConnectionError(f"请检查当前的回复地址是否正确! {API_URL}")
def get_group_member(robwxid, group_wxid, member_wxid):
"""
* 取群成员资料
*
* @access public
* @param string robwxid 账户id
* @param string group_wxid 群id
* @param string member_wxid 群成员id
* @return string json_string
"""
data = dict()
data['type'] = 207 # Api数值(可以参考 - api列表demo)
data['robot_wxid'] = robwxid # 账户id,取哪个账号的资料
data['group_wxid'] = group_wxid # 群id
data['member_wxid'] = member_wxid # 群成员id
result = json.dumps(data)
try:
return requests.post(API_URL, data={"data": result})
except requests.exceptions.ConnectionError as e:
raise ConnectionError(f"请检查当前的回复地址是否正确! {API_URL}")
def accept_transfer(robwxid, friend_wxid, json_string):
"""
* 接收好友转账
*
* @access public
* @param string robwxid 账户id
* @param string friend_wxid 朋友id
* @param string json_string 转账事件原消息
* @return string json_string
"""
data = dict()
data['type'] = 301 # Api数值(可以参考 - api列表demo)
data['robot_wxid'] = robwxid # 账户id
data['friend_wxid'] = friend_wxid # 朋友id
data['msg'] = json_string # 转账事件原消息
result = json.dumps(data)
try:
return requests.post(API_URL, data={"data": result})
except requests.exceptions.ConnectionError as e:
raise ConnectionError(f"请检查当前的回复地址是否正确! {API_URL}")
def agree_group_invite(robwxid, json_string):
"""
* 同意群聊邀请
*
* @access public
* @param string robwxid 账户id
* @param string json_string 同步消息事件中群聊邀请原消息
* @return string json_string
"""
data = dict()
data['type'] = 302 # Api数值(可以参考 - api列表demo)
data['robot_wxid'] = robwxid # 账户id
data['msg'] = json_string # 同步消息事件中群聊邀请原消息
result = json.dumps(data)
try:
return requests.post(API_URL, data={"data": result})
except requests.exceptions.ConnectionError as e:
raise ConnectionError(f"请检查当前的回复地址是否正确! {API_URL}")
def agree_friend_verify(robwxid, json_string):
"""
* 同意好友请求
*
* @access public
* @param string robwxid 账户id
* @param string json_string 好友请求事件中原消息
* @return string json_string
"""
data = dict()
data['type'] = 303 # Api数值(可以参考 - api列表demo)
data['robot_wxid'] = robwxid # 账户id
data['msg'] = json_string # 好友请求事件中原消息
result = json.dumps(data)
try:
return requests.post(API_URL, data={"data": result})
except requests.exceptions.ConnectionError as e:
raise ConnectionError(f"请检查当前的回复地址是否正确! {API_URL}")
def modify_friend_note(robwxid, friend_wxid, note):
"""
* 修改好友备注
*
* @access public
* @param string robwxid 账户id
* @param string friend_wxid 好友id
* @param string note 新备注(空字符串则是删除备注)
* @return string json_string
"""
data = dict()
data['type'] = 304 # Api数值(可以参考 - api列表demo)
data['robot_wxid'] = robwxid # 账户id
data['friend_wxid'] = friend_wxid # 朋友id
data['note'] = note # 新备注(空字符串则是删除备注)
result = json.dumps(data)
try:
return requests.post(API_URL, data={"data": result})
except requests.exceptions.ConnectionError as e:
raise ConnectionError(f"请检查当前的回复地址是否正确! {API_URL}")
def delete_friend(robwxid, friend_wxid):
"""
* 删除好友
*
* @access public
* @param string robwxid 账户id
* @param string friend_wxid 好友id
* @return string json_string
"""
data = dict()
data['type'] = 305 # Api数值(可以参考 - api列表demo)
data['robot_wxid'] = robwxid # 账户id
data['friend_wxid'] = friend_wxid # 朋友id
result = json.dumps(data)
try:
return requests.post(API_URL, data={"data": result})
except requests.exceptions.ConnectionError as e:
raise ConnectionError(f"请检查当前的回复地址是否正确! {API_URL}")
def remove_group_member(robwxid, group_wxid, member_wxid):
"""
* 踢出群成员
*
* @access public
* @param string robwxid 账户id
* @param string group_wxid 群id
* @param string member_wxid 群成员id
* @return string json_string
"""
data = dict()
data['type'] = 306 # Api数值(可以参考 - api列表demo)
data['robot_wxid'] = robwxid # 账户id
data['group_wxid'] = group_wxid # 群id
data['member_wxid'] = member_wxid # 群成员id
result = json.dumps(data)
try:
return requests.post(API_URL, data={"data": result})
except requests.exceptions.ConnectionError as e:
raise ConnectionError(f"请检查当前的回复地址是否正确! {API_URL}")
def modify_group_name(robwxid, group_wxid, group_name):
"""
* 修改群名称
*
* @access public
* @param string robwxid 账户id
* @param string group_wxid 群id
* @param string group_name 新群名
* @return string json_string
"""
data = dict()
data['type'] = 307 # Api数值(可以参考 - api列表demo)
data['robot_wxid'] = robwxid # 账户id
data['group_wxid'] = group_wxid # 群id
data['group_name'] = group_name # 新群名
result = json.dumps(data)
try:
return requests.post(API_URL, data={"data": result})
except requests.exceptions.ConnectionError as e:
raise ConnectionError(f"请检查当前的回复地址是否正确! {API_URL}")
def modify_group_notice(robwxid, group_wxid, notice):
"""
* 修改群公告
*
* @access public
* @param string robwxid 账户id
* @param string group_wxid 群id
* @param string notice 新公告
* @return string json_string
"""
data = dict()
data['type'] = 308 # Api数值(可以参考 - api列表demo)
data['robot_wxid'] = robwxid # 账户id
data['group_wxid'] = group_wxid # 群id
data['notice'] = notice # 新公告
result = json.dumps(data)
try:
return requests.post(API_URL, data={"data": result})
except requests.exceptions.ConnectionError as e:
raise ConnectionError(f"请检查当前的回复地址是否正确! {API_URL}")
def building_group(robwxid, friends):
"""
* 建立新群
*
* @access public
* @param string robwxid 账户id
* @param array friends 三个人及以上的好友id数组,['wxid_1xxx', 'wxid_2xxx', 'wxid_3xxx', 'wxid_4xxx']
* @return string json_string
"""
data = dict()
data['type'] = 309 # Api数值(可以参考 - api列表demo)
data['robot_wxid'] = robwxid # 账户id
data['friends'] = friends # 好友id数组
result = json.dumps(data)
try:
return requests.post(API_URL, data={"data": result})
except requests.exceptions.ConnectionError as e:
raise ConnectionError(f"请检查当前的回复地址是否正确! {API_URL}")
def quit_group(robwxid, group_wxid):
"""
* 退出群聊
*
* @access public
* @param string robwxid 账户id
* @param string group_wxid 群id
* @return string json_string
"""
data = dict()
data['type'] = 310 # Api数值(可以参考 - api列表demo)
data['robot_wxid'] = robwxid # 账户id
data['group_wxid'] = group_wxid # 群id
result = json.dumps(data)
try:
return requests.post(API_URL, data={"data": result})
except requests.exceptions.ConnectionError as e:
raise ConnectionError(f"请检查当前的回复地址是否正确! {API_URL}")
def invite_in_group(robwxid, group_wxid, friend_wxid):
"""
* 邀请加入群聊
*
* @access public
* @param string robwxid 账户id
* @param string group_wxid 群id
* @param string friend_wxid 好友id
* @return string json_string
"""
data = dict()
data['type'] = 311 # Api数值(可以参考 - api列表demo)
data['robot_wxid'] = robwxid # 账户id
data['group_wxid'] = group_wxid # 群id
data['friend_wxid'] = friend_wxid # 好友id
result = json.dumps(data)
try:
return requests.post(API_URL, data={"data": result})
except requests.exceptions.ConnectionError as e:
raise ConnectionError(f"请检查当前的回复地址是否正确! {API_URL}")
def sendSGHttp(url, params, method='get', timeout=3):
"""
* 执行一个 HTTP 请求,仅仅是post组件,其他语言请自行替换即可
*
* @param string url 执行请求的url地址
* @param mixed params 表单参数
* @param int timeout 超时时间
* @param string method 请求方法 post / get
* @return array 结果数组
"""
if url is None:
return None
ret = None
err = None
if method.lower() == "get":
try:
ret = requests.get(url, data=params, timeout=timeout).text
except Exception as e:
err = e
else:
try:
ret = requests.post(url, data=params, timeout=timeout).text
except Exception as e:
err = e
data = {"result": ret, "error_msg": err}
return data
``` |
{
"source": "Jian-Yin-Shine/Partial-Computation-Offloading-for-MEC",
"score": 3
} |
#### File: Jian-Yin-Shine/Partial-Computation-Offloading-for-MEC/mec.py
```python
import numpy as np
import argparse
class Env():
def __init__(self, W, F, K, Dn, Cn, f, dist, pn, pi):
# W       bandwidth, 10 MHz
# F       total computing capacity of the edge server
# K       number of users
# Dn, Cn  task size and required CPU cycles: (300~500 KB) and (900~1100) megacycles; 1 MHz = 1000 kHz = 1000*1000 Hz
# f       local computing capacity of a user, 1 GHz | [0.5, 1.5] GHz (1000*1000*1000)
# dist    distance of each user
# pn, pi  upload power and idle power | mW (milliwatts)
# state   system state
self.W, self.F, self.K = W, F, K
self.pn, self.pi = pn, pi
self.Dn, self.Cn, self.f, self.dist = Dn, Cn, f, dist
self.state = 0
self.reward = 0
# self.pre_state = 5
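# Delay model implemented in step() below (summary added for readability, not in the original):
# for a user n that offloads a fraction a_n of its task,
#   upload delay       t_up  = a_n * Dn / r_n, where r_n is the Shannon rate over the user's W/K share of bandwidth,
#   edge compute delay t_off = a_n * Cn / f_edge_n, with f_edge_n the share of F proportional to a_n * Cn,
#   local delay        t_loc = (1 - a_n) * Cn / f,
# and the user's latency is max(t_up + t_off, t_loc); a user with a_n = 0 simply takes Cn / f.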
def step(self, action):
# Sanitize the action to avoid algorithmic bugs: clip values below 0 to 0, above 1 to 1, and NaN to 1
action[action < 0] = 0
action[action > 1] = 1
action[np.isnan(action)] = 1
# State and reward to be returned
self.state = 0
self.reward = 0
# Number of users that offload computation
rk = np.sum(action > 0)
# Total amount offloaded to the edge server; edge computing resources are later allocated in proportion to this
sum_c = 0
for i in range(self.K):
if action[i] > 0:
sum_c += self.Cn[i] * action[i]
mw = pow(10, -174 / 10) * 0.001 # noise power conversion: -174 dBm to watts
for i in range(self.K):
if action[i] > 0:
tmp_rn = self.W * 1000 / rk * 1000 # W / K, part of the rate formula
rn = tmp_rn * np.log2(1 + self.pn * 0.001 * pow(self.dist[i], -3) / (tmp_rn * mw)) # transmission rate
# Step 1 of partial offloading: upload delay
to1 = action[i] * self.Dn[i] * 1024 / rn
# Step 2 of partial offloading: edge computation delay
to2 = action[i] * self.Cn[i] / (self.F * 1000 * action[i] * self.Cn[i] / sum_c)
# Local computation part of partial offloading (fraction 1 - action)
tl = (1 - action[i]) * self.Cn[i] / (self.f * 1000)
# Latency is max(local computation delay, delay of the offloaded part)
self.state += max(to1 + to2, tl)
elif action[i] == 0:
# Delay of fully local execution
self.state += (self.Cn[i]) / (self.f * 1000)
# self.reward = (self.pre_state - self.state) / self.pre_state
# The reward is the negative of the state
self.reward = -self.state
return self.state, self.reward, False, {}
def reset(self):
# random_action = np.random.uniform(0, 1, self.K)
# state, _, _, _ = self.step(random_action)
# state, _, _, _ = self.step(np.array([0.5] * self.K))
state, _, _, _ = self.step(np.zeros(self.K))
return state
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--num-ue', type=int, default=5) # number of users
parser.add_argument('--F', type=int, default=5) # edge server computing capacity
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print(args)
num_ue = args.num_ue
F = args.F
env = Env(W=10, F=5, K=num_ue,
Dn=np.random.uniform(300, 500, num_ue), Cn=np.random.uniform(900, 1100, num_ue),
f=1, dist=np.random.uniform(0, 200, num_ue), pn=500, pi=100)
state, reward, _, _ = env.step(np.ones(num_ue))
print(state)
state, reward, _, _ = env.step(np.array([0.5, 0.5, 0.5, 0.5, 0.5]))
print(state)
state, reward, _, _ = env.step(np.array([1/3, 1/3, 1/3, 2/3, 2/3]))
print(state)
``` |
{
"source": "jianyiyang5/tutorials",
"score": 2
} |
#### File: beginner_source/text_sentiment_ngrams_tutorial/train.py
```python
import time
import torch
from torch.utils.data import DataLoader
from torch.utils.data.dataset import random_split
from torchtext.data.utils import ngrams_iterator
from torchtext.data.utils import get_tokenizer
from data import get_datasets, generate_batch
from model import TextSentiment
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
BATCH_SIZE = 16
ag_news_label = {1 : "World",
2 : "Sports",
3 : "Business",
4 : "Sci/Tec"}
def train_func(sub_train_, optimizer, model, criterion, scheduler):
# Train the model
train_loss = 0
train_acc = 0
data = DataLoader(sub_train_, batch_size=BATCH_SIZE, shuffle=True,
collate_fn=generate_batch)
for i, (text, offsets, cls) in enumerate(data):
optimizer.zero_grad()
text, offsets, cls = text.to(device), offsets.to(device), cls.to(device)
output = model(text, offsets)
loss = criterion(output, cls)
train_loss += loss.item()
loss.backward()
optimizer.step()
train_acc += (output.argmax(1) == cls).sum().item()
# Adjust the learning rate
scheduler.step()
return train_loss / len(sub_train_), train_acc / len(sub_train_)
def test(data_, model, criterion):
loss = 0
acc = 0
data = DataLoader(data_, batch_size=BATCH_SIZE, collate_fn=generate_batch)
for text, offsets, cls in data:
text, offsets, cls = text.to(device), offsets.to(device), cls.to(device)
with torch.no_grad():
output = model(text, offsets)
batch_loss = criterion(output, cls)
loss += batch_loss.item()
acc += (output.argmax(1) == cls).sum().item()
return loss / len(data_), acc / len(data_)
def train():
train_dataset, test_dataset = get_datasets()
VOCAB_SIZE = len(train_dataset.get_vocab())
EMBED_DIM = 32
N_EPOCHS = 5
NUN_CLASS = len(train_dataset.get_labels())
model = TextSentiment(VOCAB_SIZE, EMBED_DIM, NUN_CLASS).to(device)
min_valid_loss = float('inf')
criterion = torch.nn.CrossEntropyLoss().to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=4.0)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.9)
train_len = int(len(train_dataset) * 0.95)
sub_train_, sub_valid_ = \
random_split(train_dataset, [train_len, len(train_dataset) - train_len])
for epoch in range(N_EPOCHS):
start_time = time.time()
train_loss, train_acc = train_func(sub_train_, optimizer, model, criterion, scheduler)
valid_loss, valid_acc = test(sub_valid_, model, criterion)
secs = int(time.time() - start_time)
mins = secs / 60
secs = secs % 60
print('Epoch: %d' % (epoch + 1), " | time in %d minutes, %d seconds" % (mins, secs))
print(f'\tLoss: {train_loss:.4f}(train)\t|\tAcc: {train_acc * 100:.1f}%(train)')
print(f'\tLoss: {valid_loss:.4f}(valid)\t|\tAcc: {valid_acc * 100:.1f}%(valid)')
print('Checking the results of test dataset...')
test_loss, test_acc = test(test_dataset, model, criterion)
print(f'\tLoss: {test_loss:.4f}(test)\t|\tAcc: {test_acc * 100:.1f}%(test)')
return model, train_dataset.get_vocab()
def predict(text, model, vocab, ngrams):
tokenizer = get_tokenizer("basic_english")
with torch.no_grad():
text = torch.tensor([vocab[token]
for token in ngrams_iterator(tokenizer(text), ngrams)])
output = model(text, torch.tensor([0]))
return output.argmax(1).item() + 1
if __name__ == '__main__':
model, vocab = train()
ex_text_str = "<NAME>. – Four days ago, <NAME> was \
enduring the season’s worst weather conditions on Sunday at The \
Open on his way to a closing 75 at Royal Portrush, which \
considering the wind and the rain was a respectable showing. \
Thursday’s first round at the WGC-FedEx St. Jude Invitational \
was another story. With temperatures in the mid-80s and hardly any \
wind, the Spaniard was 13 strokes better in a flawless round. \
Thanks to his best putting performance on the PGA Tour, Rahm \
finished with an 8-under 62 for a three-stroke lead, which \
was even more impressive considering he’d never played the \
front nine at TPC Southwind."
model = model.to("cpu")
print("This is a %s news" % ag_news_label[predict(ex_text_str, model, vocab, 2)])
```
#### File: intermediate_source/char_rnn_classification/predict.py
```python
from train import *
def predict(model, all_categories, input_line, n_predictions=3):
print('\n> %s' % input_line)
with torch.no_grad():
output = evaluate(model, lineToTensor(input_line))
# Get top N categories
topv, topi = output.topk(n_predictions, 1, True)
predictions = []
for i in range(n_predictions):
value = topv[0][i].item()
category_index = topi[0][i].item()
print('(%.2f) %s' % (value, all_categories[category_index]))
predictions.append([value, all_categories[category_index]])
if __name__ == '__main__':
model = load_model('output/rnn.pt')
model.eval()
category_lines, all_categories = load_data()
names = ['Dovesky', 'Jackson', 'Satoshi', 'Chan', 'Jonas', 'Joris']
for name in names:
predict(model, all_categories, name)
```
#### File: char_rnn_classification/test/test_model.py
```python
import unittest
from model import RNN
from data import *
class TestModel(unittest.TestCase):
def test_rnn(self):
n_hidden = 128
n_categories = 3
rnn = RNN(n_letters, n_hidden, n_categories)
input = letterToTensor('A')
hidden = torch.zeros(1, n_hidden)
output1, next_hidden1 = rnn(input, hidden)
# print(output1, next_hidden1)
input = lineToTensor('Albert')
hidden = torch.zeros(1, n_hidden)
output2, next_hidden2 = rnn(input[0], hidden)
print(output2, output2.size())
print(next_hidden2, next_hidden2.size())
self.assertTrue(torch.equal(output1, output2))
self.assertTrue(torch.equal(next_hidden1, next_hidden2))
if __name__ == '__main__':
unittest.main()
```
#### File: intermediate_source/char_rnn_generation/data.py
```python
from __future__ import unicode_literals, print_function, division
from io import open
import glob
import os
import unicodedata
import string
import random
import torch
import itertools
all_letters = "▁" + string.ascii_letters + " .,;'-"
n_letters = len(all_letters) + 1 # Plus EOS marker
PAD_token = 0
def findFiles(path): return glob.glob(path)
# Turn a Unicode string to plain ASCII, thanks to https://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
and c in all_letters
)
# Read a file and split into lines
def readLines(filename):
with open(filename, encoding='utf-8') as f:
lines = f.read().strip().split('\n')
return [unicodeToAscii(line) for line in lines]
# Build the category_lines dictionary, a list of lines per category
def load_data(path_regex='../data/names/*.txt'):
category_lines = {}
all_categories = []
for filename in findFiles(path_regex):
category = os.path.splitext(os.path.basename(filename))[0]
all_categories.append(category)
lines = readLines(filename)
category_lines[category] = lines
return category_lines, all_categories
# Random item from a list
def randomChoice(l):
return l[random.randint(0, len(l) - 1)]
# Get a random category and random line from that category
def randomTrainingPair(category_lines, all_categories):
category = randomChoice(all_categories)
line = randomChoice(category_lines[category])
return category, line
# One-hot vector for category
def categoryTensor(category, all_categories):
n_categories = len(all_categories)
li = all_categories.index(category)
tensor = torch.zeros(1, n_categories)
tensor[0][li] = 1
return tensor
# One-hot matrix of first to last letters (not including EOS) for input
def inputTensor(line):
tensor = torch.zeros(len(line), 1, n_letters)
for li in range(len(line)):
letter = line[li]
tensor[li][0][all_letters.find(letter)] = 1
return tensor
# LongTensor of second letter to end (EOS) for target
def targetTensor(line):
letter_indexes = [all_letters.find(line[li]) for li in range(1, len(line))]
letter_indexes.append(n_letters - 1) # EOS
return torch.LongTensor(letter_indexes)
# Make category, input, and target tensors from a random category, line pair
def randomTrainingExample(category_lines, all_categories):
category, line = randomTrainingPair(category_lines, all_categories)
category_tensor = categoryTensor(category, all_categories)
input_line_tensor = inputTensor(line)
target_line_tensor = targetTensor(line)
return category_tensor, input_line_tensor, target_line_tensor
def categoryIdxTensor(categories, all_categories):
li = [all_categories.index(category) for category in categories]
return torch.LongTensor(li)
def letterToIndex(letter):
return all_letters.find(letter)
def lineToIndex(line):
return [letterToIndex(l) for l in line]
def zeroPadding(l, fillvalue=PAD_token):
return list(itertools.zip_longest(*l, fillvalue=fillvalue))
def binaryMatrix(l, value=PAD_token):
m = []
for i, seq in enumerate(l):
m.append([])
for token in seq:
if token == value:
m[i].append(0)
else:
m[i].append(1)
return m
def inputVar(lines):
indexes_batch = [lineToIndex(line) for line in lines]
lengths = torch.tensor([len(indexes) for indexes in indexes_batch])
padList = zeroPadding(indexes_batch)
padVar = torch.LongTensor(padList)
return padVar, lengths
# Returns padded target sequence tensor, padding mask, and max target length
def outputVar(lines):
indexes_batch = [lineToIndex(sentence[1:]) for sentence in lines]
for idx in indexes_batch:
idx.append(n_letters - 1) # EOS
max_target_len = max([len(indexes) for indexes in indexes_batch])
padList = zeroPadding(indexes_batch)
mask = binaryMatrix(padList)
mask = torch.BoolTensor(mask)
padVar = torch.LongTensor(padList)
return padVar, mask, max_target_len
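# batch2TrainData sorts a batch of [name, category] pairs by name length (longest first, as
# pack_padded_sequence expects), then returns: the padded input tensor of letter indices
# (max_len x batch), the sequence lengths, the category indices, the target tensor (each name
# shifted by one letter with EOS appended), its boolean padding mask, and the max target length.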
def batch2TrainData(pair_batch, all_categories):
pair_batch.sort(key=lambda x: len(x[0]), reverse=True)
input_batch, category_batch = [], []
for pair in pair_batch:
input_batch.append(pair[0])
category_batch.append(all_categories.index(pair[1]))
inp, lengths = inputVar(input_batch)
target, mask, max_target_len = outputVar(input_batch)
categories = torch.LongTensor(category_batch)
return inp, lengths, categories, target, mask, max_target_len
def create_batches(category_lines, batch_size):
training_examples = []
for category, lines in category_lines.items():
random.shuffle(lines)
training_examples.extend([line, category] for line in lines[:batch_size])
random.shuffle(training_examples)
return batch(training_examples, batch_size)
def batch(iterable, n=1):
l = len(iterable)
for ndx in range(0, l, n):
yield iterable[ndx:min(ndx + n, l)]
```
#### File: intermediate_source/char_rnn_generation/model.py
```python
import torch
import torch.nn as nn
class RNN(nn.Module):
def __init__(self, input_size, n_categories, hidden_size, output_size):
super(RNN, self).__init__()
self.hidden_size = hidden_size
self.i2h = nn.Linear(n_categories + input_size + hidden_size, hidden_size)
self.i2o = nn.Linear(n_categories + input_size + hidden_size, output_size)
self.o2o = nn.Linear(hidden_size + output_size, output_size)
self.dropout = nn.Dropout(0.1)
self.softmax = nn.LogSoftmax(dim=1)
def forward(self, category, input, hidden):
input_combined = torch.cat((category, input, hidden), 1)
hidden = self.i2h(input_combined)
output = self.i2o(input_combined)
output_combined = torch.cat((hidden, output), 1)
output = self.o2o(output_combined)
output = self.dropout(output)
output = self.softmax(output)
return output, hidden
def initHidden(self):
return torch.zeros(1, self.hidden_size)
class EncoderRNN(nn.Module):
def __init__(self, cat_hidden_size, hidden_size, category_embedding, embedding, output_size, n_layers=1, dropout=0):
super(EncoderRNN, self).__init__()
self.n_layers = n_layers
# self.n_categories = n_categories
# self.hidden_size = hidden_size
self.category_embedding = category_embedding
self.embedding = embedding
self.output_size = output_size
# Initialize GRU; the input_size and hidden_size params are both set to 'hidden_size'
# because our input size is a word embedding with number of features == hidden_size
self.gru = nn.GRU(cat_hidden_size+hidden_size, hidden_size, n_layers,
dropout=(0 if n_layers == 1 else dropout), bidirectional=False)
self.softmax = nn.Softmax(dim=2)
self.linear = nn.Linear(hidden_size, output_size)
def forward(self, input_seq, cat_seq, input_lengths, hidden=None):
# Convert word indexes to embeddings
embedded = self.embedding(input_seq)
category_embedded = self.category_embedding(cat_seq)
# repeat with the max seq length
category_embedded = category_embedded.repeat(max(input_lengths).item(), 1, 1)
combined = torch.cat((embedded, category_embedded), 2)
# Pack padded batch of sequences for RNN module
packed = nn.utils.rnn.pack_padded_sequence(combined, input_lengths)
# Forward pass through GRU
outputs, hidden = self.gru(packed, hidden)
# Unpack padding
outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs)
# Sum bidirectional GRU outputs
# outputs = outputs[:, :, :self.hidden_size] + outputs[:, : ,self.hidden_size:]
# Return output and final hidden state
outputs = self.linear(outputs)
outputs = self.softmax(outputs)
return outputs, hidden
```
#### File: intermediate_source/char_rnn_generation/predict.py
```python
from train import *
# Sample from a category and starting letter
def sample(rnn, category, all_categories, start_letter='A'):
with torch.no_grad(): # no need to track history in sampling
category_tensor = categoryTensor(category, all_categories)
input = inputTensor(start_letter)
hidden = rnn.initHidden()
output_name = start_letter
for i in range(max_length):
output, hidden = rnn(category_tensor, input[0], hidden)
topv, topi = output.topk(1)
topi = topi[0][0]
if topi == n_letters - 1:
break
else:
letter = all_letters[topi]
output_name += letter
input = inputTensor(letter)
return output_name
# Get multiple samples from one category and multiple starting letters
def samples(rnn, category, all_categories, start_letters='ABC'):
for start_letter in start_letters:
print(sample(rnn, category, all_categories, start_letter))
if __name__ == '__main__':
category_lines, all_categories = load_data()
model = load_model('output/rnn.pt')
model.eval()
samples(model, 'Russian', all_categories, 'RUS')
samples(model, 'German', all_categories, 'GER')
samples(model, 'Spanish', all_categories, 'SPA')
samples(model, 'Chinese', all_categories, 'CHINESE')
```
#### File: char_rnn_generation/test/test_model.py
```python
import unittest
from model import EncoderRNN
from data import *
from train_batch import *
class TestModel(unittest.TestCase):
def test_rnn(self):
random.seed(2)
batch_size = 3
category_lines, all_categories = load_data('../../data/names/*.txt')
batches = create_batches(category_lines, batch_size)
batch = next(batches)
print('batch:', batch)
inp, lengths, categories, target, mask, max_target_len = batch2TrainData(batch, all_categories)
max_len = max(lengths).item()
n_hidden = 16
n_hidden_cat = 8
n_categories = len(all_categories)
rnn = EncoderRNN(n_hidden_cat, n_hidden, torch.nn.Embedding(n_categories, n_hidden_cat),
torch.nn.Embedding(n_letters, n_hidden), n_letters)
rnn.zero_grad()
outputs, hidden = rnn(inp, categories, lengths)
# print(outputs)
print('outputs size:', outputs.size())
self.assertEqual(torch.Size([max_len, batch_size, n_letters]), outputs.size())
outputs = outputs.transpose(0, 1)
print('outputs size after transpose:', outputs.size())
print('lengths:', lengths)
# output = outputs[torch.arange(outputs.size(0)), lengths-1]
# output = outputs[torch.arange(outputs.size(0)), 0]
# print(output)
# print('output size:', output.size())
#
# mask_loss, nTotal = maskNLLLoss(output, target[0], mask[0])
# print(mask_loss, nTotal)
# # print(next(rnn.parameters()).grad.data)
# mask_loss.backward()
# print(next(rnn.parameters()).grad.data)
mask_loss, nTotal = maskNLLLoss(outputs.transpose(0, 1), target, mask)
print(mask_loss, nTotal)
mask_loss.backward()
print(next(rnn.parameters()).grad.data)
if __name__ == '__main__':
unittest.main()
```
#### File: intermediate_source/char_rnn_generation/utils.py
```python
import time
import math
import matplotlib.pyplot as plt
# To keep track of how long training takes I am adding a
# ``timeSince(timestamp)`` function which returns a human readable string:
def timeSince(since):
now = time.time()
s = now - since
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def plot_losses(all_losses):
plt.figure()
plt.plot(all_losses)
plt.show()
```
#### File: seq2seq_translation/test/test_data.py
```python
import unittest
import random
from data import *
class TestData(unittest.TestCase):
def test_prepareData(self):
input_lang, output_lang, pairs = prepareData('eng', 'fra', True, '../../data')
print(random.choice(pairs))
def test_batch2TrainData(self):
input_lang, output_lang, pairs = prepareData('eng', 'fra', True, '../../data')
# Example for validation
small_batch_size = 5
batches = batch2TrainData(input_lang, output_lang, [random.choice(pairs) for _ in range(small_batch_size)])
input_variable, lengths, target_variable, mask, max_target_len = batches
print("input_variable:", input_variable)
print("lengths:", lengths)
print("target_variable:", target_variable)
print("mask:", mask)
print("max_target_len:", max_target_len)
def test_create_batches(self):
input_lang, output_lang, pairs = prepareData('eng', 'fra', True, '../../data')
print(next(create_batches(pairs, 3)))
def test_input_tensor_with_mask(self):
input_lang, output_lang, pairs = prepareData('eng', 'fra', True, '../../data')
src_sentences = [src for src, _ in pairs[0:3]]
print(src_sentences)
print(tensor_with_mask(src_sentences, input_lang))
# row batch, col sequence
expected = [[ 4, 8, 11],
[ 5, 9, 12],
[ 6, 10, 7],
[ 7, 7, 2],
[ 2, 2, 0]]
expected_mask = [[ True, True, True],
[ True, True, True],
[ True, True, True],
[ True, True, True],
[ True, True, False]]
input_tensor, mask = tensor_with_mask(src_sentences, input_lang)
self.assertTrue(torch.equal(torch.LongTensor(expected), input_tensor))
self.assertTrue(torch.equal(torch.BoolTensor(expected_mask), mask))
tgt_sentences = [tgt for _, tgt in pairs[0:3]]
tgt_tensor, tgt_mask, _ = outputVar(tgt_sentences, output_lang)
print(tgt_sentences)
print(tgt_tensor, tgt_mask)
print(tgt_tensor.size())
```
#### File: intermediate_source/seq2seq_translation/train_batch.py
```python
import random
import os
import torch
from torch import nn, optim
from data import SOS_token, batch2TrainData, create_batches, prepareData, Lang
from model_batch import EncoderRNNBatch, LuongAttnDecoderRNN
# For pytorch 1.1
# def maskNLLLoss(inp, target, mask, device):
# nTotal = (mask == True).sum()
# crossEntropy = -torch.log(torch.gather(inp, 1, target.view(-1, 1)).squeeze(1))
# loss = crossEntropy.masked_select(mask == True).mean()
# loss = loss.to(device)
# return loss, nTotal.item()
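# maskNLLLoss computes the masked negative log-likelihood for one decoder time step:
# `inp` is the decoder's probability output of shape (batch, vocab_size), `target` holds the
# gold token indices of shape (batch,), and `mask` is a bool tensor of shape (batch,) that is
# False at padded positions. It returns the mean loss over the unmasked positions together
# with the number of non-padded tokens, which the caller uses to weight the reported loss.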
def maskNLLLoss(inp, target, mask, device):
nTotal = mask.sum()
crossEntropy = -torch.log(torch.gather(inp, 1, target.view(-1, 1)).squeeze(1))
loss = crossEntropy.masked_select(mask).mean()
loss = loss.to(device)
return loss, nTotal.item()
def train(input_variable, lengths, target_variable, mask, max_target_len, encoder, decoder,
encoder_optimizer, decoder_optimizer, batch_size, clip, device, teacher_forcing_ratio=1.0):
encoder = encoder.to(device)
decoder = decoder.to(device)
encoder.train()
decoder.train()
# Zero gradients
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
# Set device options
input_variable = input_variable.to(device)
lengths = lengths.to(device)
target_variable = target_variable.to(device)
mask = mask.to(device)
# Initialize variables
loss = 0
print_losses = []
n_totals = 0
# Forward pass through encoder
encoder_outputs, encoder_hidden = encoder(input_variable, lengths)
# Create initial decoder input (start with SOS tokens for each sentence)
decoder_input = torch.LongTensor([[SOS_token for _ in range(batch_size)]])
decoder_input = decoder_input.to(device)
# Set initial decoder hidden state to the encoder's final hidden state
decoder_hidden = encoder_hidden[:decoder.n_layers]
# Determine if we are using teacher forcing this iteration
use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False
# Forward batch of sequences through decoder one time step at a time
if use_teacher_forcing:
for t in range(max_target_len):
decoder_output, decoder_hidden = decoder(decoder_input, decoder_hidden, encoder_outputs)
# Teacher forcing: next input is current target
decoder_input = target_variable[t].view(1, -1)
# Calculate and accumulate loss
mask_loss, nTotal = maskNLLLoss(decoder_output, target_variable[t], mask[t], device)
loss += mask_loss
print_losses.append(mask_loss.item() * nTotal)
n_totals += nTotal
else:
for t in range(max_target_len):
decoder_output, decoder_hidden = decoder(decoder_input, decoder_hidden, encoder_outputs)
# No teacher forcing: next input is decoder's own current output
_, topi = decoder_output.topk(1)
decoder_input = torch.LongTensor([[topi[i][0] for i in range(batch_size)]])
decoder_input = decoder_input.to(device)
# Calculate and accumulate loss
mask_loss, nTotal = maskNLLLoss(decoder_output, target_variable[t], mask[t], device)
loss += mask_loss
print_losses.append(mask_loss.item() * nTotal)
n_totals += nTotal
# Perform backpropatation
loss.backward()
# Clip gradients: gradients are modified in place
_ = nn.utils.clip_grad_norm_(encoder.parameters(), clip)
_ = nn.utils.clip_grad_norm_(decoder.parameters(), clip)
# Adjust model weights
encoder_optimizer.step()
decoder_optimizer.step()
return sum(print_losses), n_totals
# iterations are actually epochs
def trainIters(model_name, src_voc, tgt_voc, pairs, encoder, decoder, encoder_optimizer, decoder_optimizer,
encoder_n_layers, decoder_n_layers, hidden_size, save_dir, n_iteration, batch_size, clip, corpus_name,
loadFilename, checkpoint, device):
# Initializations
print('Initializing ...')
start_iteration = 1
if loadFilename:
start_iteration = checkpoint['iteration'] + 1
# Training loop
print("Training...")
for iteration in range(start_iteration, n_iteration + 1):
batches = create_batches(pairs, batch_size)
total_loss = 0
n_totals = 0
for batch in batches:
input_variable, lengths, target_variable, mask, max_target_len = batch2TrainData(src_voc, tgt_voc, batch)
# Run a training iteration with batch
cur_batch_size = input_variable.size()[1]
loss, n_total = train(input_variable, lengths, target_variable, mask, max_target_len, encoder,
decoder, encoder_optimizer, decoder_optimizer, cur_batch_size, clip, device)
total_loss += loss
n_totals += n_total
# Print progress
print_loss_avg = total_loss / n_totals
print("Epoch: {}; Percent complete: {:.1f}%; Average loss: {:.4f}".format(iteration, iteration / n_iteration * 100, print_loss_avg))
# Save checkpoint
directory = os.path.join(save_dir, model_name, corpus_name, '{}-{}_{}'.format(encoder_n_layers, decoder_n_layers, hidden_size))
if not os.path.exists(directory):
os.makedirs(directory)
torch.save({
'iteration': iteration,
'en': encoder.state_dict(),
'de': decoder.state_dict(),
'en_opt': encoder_optimizer.state_dict(),
'de_opt': decoder_optimizer.state_dict(),
'loss': print_loss_avg,
'src_voc_dict': src_voc.__dict__,
'tgt_voc_dict': tgt_voc.__dict__,
'src_embedding': encoder.embedding.state_dict(),
'tgt_embedding': decoder.embedding.state_dict(),
'attn_model': decoder.attn_model
}, os.path.join(directory, '{}_{}.tar'.format(iteration, 'checkpoint')))
def load_model(save_dir, model_name, corpus_name, encoder_n_layers, decoder_n_layers, hidden_size, dropout):
directory = os.path.join(save_dir, model_name, corpus_name,
'{}-{}_{}'.format(encoder_n_layers, decoder_n_layers, hidden_size))
checkpoint = torch.load(f'{directory}/50_checkpoint.tar')
# iteration = checkpoint['iteration']
attn_model = checkpoint['attn_model']
src_voc_dict = Lang('')
src_voc_dict.__dict__ = checkpoint['src_voc_dict']
tgt_voc_dict = Lang('')
tgt_voc_dict.__dict__ = checkpoint['tgt_voc_dict']
src_embedding = nn.Embedding(src_voc_dict.n_words, hidden_size)
src_embedding.load_state_dict(checkpoint['src_embedding'])
encoder = EncoderRNNBatch(hidden_size, src_embedding, n_layers=encoder_n_layers, dropout=dropout)
encoder.load_state_dict(checkpoint['en'])
tgt_embedding = nn.Embedding(tgt_voc_dict.n_words, hidden_size)
tgt_embedding.load_state_dict(checkpoint['tgt_embedding'])
decoder = LuongAttnDecoderRNN(attn_model, tgt_embedding, hidden_size, tgt_voc_dict.n_words, n_layers=decoder_n_layers, dropout=dropout)
decoder.load_state_dict(checkpoint['de'])
return encoder, decoder, src_voc_dict, tgt_voc_dict
if __name__ == '__main__':
batch_size = 64
clip = 50
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
learning_rate = 0.0001
decoder_learning_ratio = 5.0
hidden_size = 256
encoder_n_layers = 2
decoder_n_layers = 2
dropout = 0.1
attn_model = 'dot'
checkpoint = None
loadFilename = None
epochs = 50
input_lang, output_lang, pairs = prepareData('eng', 'fra', True, '../data')
encoder = EncoderRNNBatch(hidden_size, nn.Embedding(input_lang.n_words, hidden_size), encoder_n_layers, dropout)
decoder = LuongAttnDecoderRNN(attn_model, nn.Embedding(output_lang.n_words, hidden_size), hidden_size,
output_lang.n_words, decoder_n_layers, dropout)
encoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate)
decoder_optimizer = optim.Adam(decoder.parameters(), lr=learning_rate * decoder_learning_ratio)
trainIters('model_batch', input_lang, output_lang, pairs, encoder, decoder, encoder_optimizer, decoder_optimizer,
encoder_n_layers, decoder_n_layers, hidden_size, 'output', epochs, batch_size, clip, 'en-fr',
loadFilename, checkpoint, device)
``` |
{
"source": "jianyuan/python-terraform-plugin",
"score": 2
} |
#### File: python-terraform-plugin/terraform/fields.py
```python
import typing
import marshmallow
class BaseField(marshmallow.fields.Field):
terraform_type: typing.Optional[str] = None
def __init__(
self,
*,
default: typing.Any = None,
allow_none: bool = True,
description: typing.Optional[str] = None,
required: bool = False,
optional: bool = False,
computed: bool = False,
force_new: bool = False,
sensitive: bool = False,
removed: typing.Optional[str] = None,
deprecated: typing.Optional[str] = None,
**kwargs,
):
# TODO: validate metadata
metadata = {
"description": description,
"required": required,
"optional": optional,
"computed": computed,
"force_new": force_new,
"sensitive": sensitive,
"removed": removed,
"deprecated": deprecated,
}
super().__init__(default=default, allow_none=allow_none, **kwargs, **metadata)
def get_terraform_type(self) -> typing.Any:
if self.terraform_type is not None:
return self.terraform_type
raise NotImplementedError
def serialize(self, *args, **kwargs):
if (
self.metadata["removed"] is not None
or self.metadata["deprecated"] is not None
):
return None
return super().serialize(*args, **kwargs)
class BaseNestedField(BaseField):
def __init__(
self, min_items: int = 0, max_items: int = 0, **kwargs,
):
# TODO: validate metadata
metadata = {
"min_items": min_items,
"max_items": max_items,
}
super().__init__(**kwargs, **metadata)
def get_inner(self):
raise NotImplementedError
def get_terraform_type(self) -> typing.Any:
return [self.terraform_type, self.get_inner().get_terraform_type()]
class Bool(marshmallow.fields.Boolean, BaseField):
terraform_type = "bool"
class Int(marshmallow.fields.Integer, BaseField):
terraform_type = "number"
class Float(marshmallow.fields.Float, BaseField):
terraform_type = "number"
class String(marshmallow.fields.String, BaseField):
terraform_type = "string"
class List(marshmallow.fields.List, BaseNestedField):
terraform_type = "list"
def get_inner(self):
return self.inner
class Set(List, BaseNestedField):
terraform_type = "set"
class Map(marshmallow.fields.Mapping, BaseNestedField):
terraform_type = "map"
def __init__(self, values=None, **kwargs):
if values is None:
values = String()
super().__init__(String(), values, **kwargs)
def get_inner(self):
from terraform import schemas
if isinstance(self.value_field, Nested) and isinstance(
self.value_field.nested, schemas.Resource
):
return String()
return self.value_field
class Nested(marshmallow.fields.Nested, BaseField):
def get_terraform_type(self) -> typing.Any:
return self.nested.get_terraform_type()
```
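A minimal usage sketch of the field wrappers above (not part of the original file); it assumes the full `terraform` package, its generated protobuf modules, and marshmallow are importable, and the variable names are purely illustrative:
```python
# Illustrative only: how the wrapper fields describe Terraform's type system.
from terraform import fields

tags = fields.Map(fields.String(), optional=True)   # map of string -> string
ports = fields.List(fields.Int(), required=True)    # list of numbers

assert fields.String().get_terraform_type() == "string"
assert ports.get_terraform_type() == ["list", "number"]
assert tags.get_terraform_type() == ["map", "string"]
```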
#### File: python-terraform-plugin/terraform/schemas.py
```python
import abc
import dataclasses
import enum
import json
import operator
import typing
import marshmallow
from terraform import fields, settings
from terraform.protos import tfplugin5_1_pb2
class NestingMode(enum.Enum):
INVALID = enum.auto()
SINGLE = enum.auto()
GROUP = enum.auto()
LIST = enum.auto()
SET = enum.auto()
MAP = enum.auto()
def to_proto(self) -> tfplugin5_1_pb2.Schema.NestedBlock.NestingMode:
return NESTING_MODE_PROTO_ENUMS[self]
NESTING_MODE_PROTO_ENUMS = {
NestingMode.INVALID: tfplugin5_1_pb2.Schema.NestedBlock.NestingMode.INVALID,
NestingMode.SINGLE: tfplugin5_1_pb2.Schema.NestedBlock.NestingMode.SINGLE,
NestingMode.GROUP: tfplugin5_1_pb2.Schema.NestedBlock.NestingMode.GROUP,
NestingMode.LIST: tfplugin5_1_pb2.Schema.NestedBlock.NestingMode.LIST,
NestingMode.SET: tfplugin5_1_pb2.Schema.NestedBlock.NestingMode.SET,
NestingMode.MAP: tfplugin5_1_pb2.Schema.NestedBlock.NestingMode.MAP,
}
@dataclasses.dataclass
class Attribute:
type: typing.Any
description: typing.Optional[str] = None
required: bool = False
optional: bool = False
computed: bool = False
sensitive: bool = False
def to_proto(self, *, name: str) -> tfplugin5_1_pb2.Schema.Attribute:
return tfplugin5_1_pb2.Schema.Attribute(
name=name,
type=encode_type(self.type),
description=self.description,
required=self.required,
optional=self.optional,
computed=self.computed,
sensitive=self.sensitive,
)
@dataclasses.dataclass
class Block:
attributes: typing.Dict[str, Attribute] = dataclasses.field(default_factory=dict)
block_types: typing.Dict[str, "NestedBlock"] = dataclasses.field(
default_factory=dict
)
def to_proto(self) -> tfplugin5_1_pb2.Schema.Block:
block = tfplugin5_1_pb2.Schema.Block()
for name, attribute in sorted(
self.attributes.items(), key=operator.itemgetter(0)
):
block.attributes.append(attribute.to_proto(name=name))
for name, block_type in sorted(
self.block_types.items(), key=operator.itemgetter(0)
):
block.block_types.append(block_type.to_proto(name=name))
return block
@dataclasses.dataclass
class NestedBlock:
nesting: NestingMode
block: Block = dataclasses.field(default_factory=Block)
min_items: int = 0
max_items: int = 0
def to_proto(self, *, name: str) -> tfplugin5_1_pb2.Schema.NestedBlock:
return tfplugin5_1_pb2.Schema.NestedBlock(
type_name=name,
block=self.block.to_proto(),
nesting=self.nesting.to_proto(),
min_items=self.min_items,
max_items=self.max_items,
)
def encode_type(obj: typing.Any) -> bytes:
return json.dumps(obj, separators=(",", ":")).encode("ascii")
class SchemaMeta(marshmallow.schema.SchemaMeta, abc.ABCMeta):
...
class Schema(marshmallow.Schema, metaclass=SchemaMeta):
schema_version: typing.Optional[int] = None
def get_terraform_type(self) -> typing.Any:
return [
"object",
{
field.name: field.get_terraform_type()
for field in self.declared_fields.values()
},
]
@marshmallow.pre_dump
def none_missing(self, data, **kwargs):
return {key: value for key, value in data.items() if value is not None}
def to_proto(self) -> tfplugin5_1_pb2.Schema:
return tfplugin5_1_pb2.Schema(
version=self.schema_version, block=self.to_block().to_proto(),
)
def to_block(self) -> Block:
attributes = {}
block_types = {}
for field in self.declared_fields.values():
field = typing.cast(fields.BaseField, field)
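# Container fields that wrap nested schemas become Terraform nested block
# types; everything else, including computed-only fields, is emitted as a
# plain attribute below.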
if (
(
(
isinstance(field, fields.Map)
and not isinstance(
field.value_field, (Resource, fields.BaseField)
)
)
or (
isinstance(field, fields.List)
and isinstance(field.inner, fields.Nested)
and isinstance(field.inner.nested, Schema)
)
)
# Computed-only fields are always handled as attributes
and not (field.metadata["computed"] and not field.metadata["optional"])
):
if isinstance(field, fields.Set):
nesting = NestingMode.SET
elif isinstance(field, fields.List):
nesting = NestingMode.LIST
elif isinstance(field, fields.Map):
nesting = NestingMode.MAP
else:
raise NotImplementedError
min_items = field.metadata["min_items"]
max_items = field.metadata["max_items"]
if field.required and field.metadata["min_items"] == 0:
min_items = 1
if field.metadata["optional"] and field.metadata["min_items"] > 0:
min_items = 0
if field.metadata["computed"] and not field.metadata["optional"]:
min_items = 0
max_items = 0
block_types[field.name] = NestedBlock(
nesting=nesting,
block=field.inner.nested.to_block(),
min_items=min_items,
max_items=max_items,
)
else:
required = field.required
optional = field.metadata["optional"]
# NOTE: See Schema.coreConfigSchemaAttribute for explanation
if field.required and callable(field.default):
try:
value = field.default()
except Exception:
required = False
optional = True
else:
if value is not None:
required = False
optional = True
attributes[field.name] = Attribute(
type=field.get_terraform_type(),
description=field.metadata["description"],
required=required,
optional=optional,
computed=field.metadata["computed"],
sensitive=field.metadata["sensitive"],
)
return Block(attributes=attributes, block_types=block_types)
@dataclasses.dataclass
class ResourceData(typing.MutableMapping):
data: typing.Dict[str, typing.Any] = dataclasses.field(default_factory=dict)
def __getitem__(self, key: str) -> typing.Any:
return self.data[key]
def __setitem__(self, key: str, value: typing.Any) -> None:
self.data[key] = value
def __delitem__(self, key: str) -> None:
del self.data[key]
def __len__(self) -> int:
return len(self.data)
def __iter__(self):
return iter(self.data)
def set_id(self, value: str) -> None:
self[settings.ID_KEY] = value
class Resource(Schema):
name: str
provider: "Provider"
id = fields.String(optional=True, computed=True)
def upgrade_state(
self, *, state: typing.Dict[str, typing.Any], version: int
) -> typing.Dict[str, typing.Any]:
return self.dump(state)
async def create(self, data: ResourceData):
...
async def read(self, data: ResourceData):
...
async def update(self, data: ResourceData):
...
async def delete(self, data: ResourceData):
...
async def exists(self, data: ResourceData):
...
class Resources(typing.Mapping[str, Resource]):
def __init__(
self,
resources: typing.Optional[typing.Sequence[Resource]] = None,
*,
provider: "Provider"
):
self.resources: typing.Dict[str, Resource] = {}
self.provider = provider
if resources is not None:
for resource in resources:
self.add(resource)
def __getitem__(self, name: str) -> Resource:
return self.resources[name]
def __iter__(self):
return iter(self.resources)
def __len__(self) -> int:
return len(self.resources)
def add(self, resource: Resource):
resource.provider = self.provider
self.resources[resource.name] = resource
class Provider(Schema):
name: str
terraform_version: typing.Optional[str] = None
def __init__(
self,
resources: typing.Optional[typing.Sequence[Resource]] = None,
data_sources: typing.Optional[typing.Sequence[Resource]] = None,
):
super().__init__()
self.resources = Resources(resources, provider=self)
self.data_sources = Resources(data_sources, provider=self)
self.config: typing.Dict[str, typing.Any] = {}
def add_resource(self, resource: Resource):
self.resources.add(resource)
def add_data_source(self, data_source: Resource):
self.data_sources.add(data_source)
def configure(self, config: typing.Dict[str, typing.Any]):
self.config = config
```
#### File: python-terraform-plugin/tests/test_diagnostics.py
```python
import typing
import pytest
from terraform import diagnostics, fields, schemas
@pytest.mark.parametrize(
"schema,data,expected_diagnostics",
[
(
schemas.Schema.from_dict(
{
"int": fields.Int(required=True),
"float": fields.Float(required=True),
"bool": fields.Bool(required=True),
"string": fields.String(required=True),
},
)(),
{},
diagnostics.Diagnostics(
diagnostics=[
diagnostics.Diagnostic(
severity=diagnostics.Severity.ERROR,
summary="Missing data for required field.",
attribute_paths=[
diagnostics.AttributePathStepAttribute("int"),
],
),
diagnostics.Diagnostic(
severity=diagnostics.Severity.ERROR,
summary="Missing data for required field.",
attribute_paths=[
diagnostics.AttributePathStepAttribute("float"),
],
),
diagnostics.Diagnostic(
severity=diagnostics.Severity.ERROR,
summary="Missing data for required field.",
attribute_paths=[
diagnostics.AttributePathStepAttribute("bool"),
],
),
diagnostics.Diagnostic(
severity=diagnostics.Severity.ERROR,
summary="Missing data for required field.",
attribute_paths=[
diagnostics.AttributePathStepAttribute("string"),
],
),
]
),
),
(
schemas.Schema.from_dict(
{
"list": fields.List(fields.Int(), required=True),
"set": fields.Set(fields.String(), optional=True),
"map": fields.Map(fields.Bool(), optional=True),
"map_default_type": fields.Map(optional=True),
}
)(),
{
"list": ["not an integer"],
"set": [42],
"map": {"map_key": 42, 42: "Not a string"},
"map_default_type": {"map_key": 42},
},
diagnostics.Diagnostics(
diagnostics=[
diagnostics.Diagnostic(
severity=diagnostics.Severity.ERROR,
summary="Not a valid integer.",
attribute_paths=[
diagnostics.AttributePathStepAttribute("list"),
diagnostics.AttributePathStepElement(0),
],
),
diagnostics.Diagnostic(
severity=diagnostics.Severity.ERROR,
summary="Not a valid string.",
attribute_paths=[
diagnostics.AttributePathStepAttribute("set"),
diagnostics.AttributePathStepElement(0),
],
),
diagnostics.Diagnostic(
severity=diagnostics.Severity.ERROR,
summary="Not a valid boolean.",
attribute_paths=[
diagnostics.AttributePathStepAttribute("map"),
diagnostics.AttributePathStepElement("map_key"),
],
),
diagnostics.Diagnostic(
severity=diagnostics.Severity.ERROR,
summary="Key: Not a valid string.",
attribute_paths=[
diagnostics.AttributePathStepAttribute("map"),
diagnostics.AttributePathStepElement(42),
],
),
diagnostics.Diagnostic(
severity=diagnostics.Severity.ERROR,
summary="Not a valid boolean.",
attribute_paths=[
diagnostics.AttributePathStepAttribute("map"),
diagnostics.AttributePathStepElement(42),
],
),
diagnostics.Diagnostic(
severity=diagnostics.Severity.ERROR,
summary="Not a valid string.",
attribute_paths=[
diagnostics.AttributePathStepAttribute("map_default_type"),
diagnostics.AttributePathStepElement("map_key"),
],
),
]
),
),
],
)
def test_diagnostics_from_schema_errors(
schema: schemas.Schema,
data: typing.Any,
expected_diagnostics: diagnostics.Diagnostics,
):
errors = schema.validate(data)
actual_diagnostics = diagnostics.Diagnostics.from_schema_errors(errors=errors)
assert actual_diagnostics == expected_diagnostics
``` |
{
"source": "jianyuan/sentry",
"score": 2
} |
#### File: auth/providers/saml2.py
```python
from __future__ import absolute_import, print_function
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import (
HttpResponse, HttpResponseRedirect, HttpResponseServerError,
HttpResponseNotAllowed, Http404,
)
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from six.moves.urllib.parse import urlparse
from sentry import options
from sentry.auth import Provider, AuthView
from sentry.auth.exceptions import IdentityNotValid
from sentry.models import (AuthProvider, Organization, OrganizationStatus, User, UserEmail)
from sentry.utils.http import absolute_uri
from sentry.utils.auth import login, get_login_redirect
try:
from onelogin.saml2.auth import OneLogin_Saml2_Auth, OneLogin_Saml2_Settings
HAS_SAML2 = True
except ImportError:
HAS_SAML2 = False
def OneLogin_Saml2_Auth(*args, **kwargs):
raise NotImplementedError('Missing SAML libraries')
def OneLogin_Saml2_Settings(*args, **kwargs):
raise NotImplementedError('Missing SAML libraries')
def get_provider(organization_slug):
try:
organization = Organization.objects.get(slug=organization_slug)
except Organization.DoesNotExist:
return None
if organization.status != OrganizationStatus.VISIBLE:
return None
try:
auth_provider = AuthProvider.objects.get(organization=organization)
return auth_provider.get_provider()
except AuthProvider.DoesNotExist:
return None
class SAML2LoginView(AuthView):
def dispatch(self, request, helper):
provider = helper.provider
saml_config = provider.build_saml_config(helper.organization.slug)
auth = provider.build_auth(request, saml_config)
return self.redirect(auth.login())
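# The ACS (Assertion Consumer Service) view receives the SAML response POSTed
# back by the IdP, validates it, and signs in the matching Sentry user.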
class SAML2ACSView(AuthView):
@method_decorator(csrf_exempt)
def dispatch(self, request, organization_slug):
if request.method != 'POST':
return HttpResponseNotAllowed(['POST'])
provider = get_provider(organization_slug)
if provider is None:
raise Http404
organization = Organization.objects.get(slug=organization_slug)
saml_config = provider.build_saml_config(organization_slug)
auth = provider.build_auth(request, saml_config)
auth.process_response()
errors = auth.get_errors()
if errors:
error_reason = auth.get_last_error_reason()
raise IdentityNotValid(error_reason)
attributes = auth.get_attributes()
nameid = auth.get_nameid()
email = self.retrieve_email(attributes, nameid, provider.config)
# Filter users based on the emails provided in the commits
user_emails = list(
UserEmail.objects.filter(email__iexact=email, is_verified=True).order_by('id')
)
if user_emails:
users = list(
User.objects.filter(
id__in=set((ue.user_id for ue in user_emails)),
is_active=True,
sentry_orgmember_set__organization_id=organization.id
)[0:2]
)
if users:
if len(users) == 1:
user = users[0]
user.backend = settings.AUTHENTICATION_BACKENDS[0]
if login(
request,
user,
after_2fa=request.build_absolute_uri(),
organization_id=organization.id
):
request.session['saml'] = {
'nameid': nameid,
'nameid_format': auth.get_nameid_format(),
'session_index': auth.get_session_index()
}
return HttpResponseRedirect(get_login_redirect(request))
else:
return HttpResponseServerError(
"Found several accounts related with %s on this organization" % email
)
else:
return HttpResponseServerError(
"The user %s is not related with this organization" % email
)
else:
return HttpResponseServerError(
"An user with a verified mail: %s does not exist" % email
)
def retrieve_email(self, attributes, nameid, config):
possible_mail = None
if nameid and '@' in nameid:
possible_mail = nameid
if attributes and 'attribute_mapping' in config and 'attribute_mapping_email' in config[
'attribute_mapping'
]:
email_mapping = config['attribute_mapping']['attribute_mapping_email']
if email_mapping and email_mapping in attributes:
return attributes[email_mapping][0]
elif possible_mail:
return possible_mail
else:
raise Exception(
"Email was not provided by the IdP and is required in order to execute the SAML process"
)
elif possible_mail:
return possible_mail
else:
raise Exception("Email mapping is required in order to execute the SAML process")
def retrieve_firstname(self, attributes, config):
firstname = None
if attributes and 'attribute_mapping' in config and 'attribute_mapping_firstname' in config[
'attribute_mapping'
]:
firstname_mapping = config['attribute_mapping']['attribute_mapping_firstname']
if firstname_mapping and firstname_mapping in attributes:
firstname = attributes[firstname_mapping][0]
return firstname
class SAML2MetadataView(AuthView):
def dispatch(self, request, organization_slug):
provider = get_provider(organization_slug)
if provider is None:
raise Http404
saml_config = provider.build_saml_config(organization_slug)
saml_settings = OneLogin_Saml2_Settings(settings=saml_config, sp_validation_only=True)
metadata = saml_settings.get_sp_metadata()
errors = saml_settings.validate_metadata(metadata)
if len(errors) == 0:
resp = HttpResponse(content=metadata, content_type='text/xml')
else:
resp = HttpResponseServerError(content=', '.join(errors))
return resp
class SAML2Provider(Provider):
def get_auth_pipeline(self):
return [SAML2LoginView()]
def build_config(self, state):
data = {}
if 'idp' in state.keys():
data['idp'] = state['idp']
if 'contact' in state.keys():
data['contact'] = state['contact']
if data:
data['attribute_mapping'] = {
'attribute_mapping_email': 'email',
'attribute_mapping_firstname': ''
}
return data
def build_identity(self, state):
# return None # TODO If I return None, then a loop after execute the config
# happens from organizations/<org>/auth/ to /auth/login/ /<org>/
identity = {}
if state and 'contact' in state:
identity['id'] = state['contact']
identity['email'] = state['contact']
return identity
def build_saml_config(self, org_slug):
metadata_url = absolute_uri(
reverse('sentry-auth-organization-saml-metadata', args=[org_slug])
)
acs_url = absolute_uri(reverse('sentry-auth-organization-saml-acs', args=[org_slug]))
saml_config = {}
saml_config['strict'] = True
saml_config['idp'] = self.extract_parsed_data_from_idp_data(self.config)
saml_config['sp'] = {
"entityId": metadata_url,
"assertionConsumerService": {
"url": acs_url,
"binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST"
}
}
return saml_config
def prepare_saml_request(self, request):
url = urlparse(options.get('system.url-prefix'))
return {
'https': 'on' if url.scheme == 'https' else 'off',
'http_host': url.hostname,
'script_name': request.META['PATH_INFO'],
'server_port': url.port,
'get_data': request.GET.copy(),
'post_data': request.POST.copy()
}
def build_auth(self, request, config):
req = self.prepare_saml_request(request)
return OneLogin_Saml2_Auth(req, config)
@staticmethod
def extract_idp_data_from_form(form):
idp_data = {
'idp_entityid': form.cleaned_data['idp_entityid'],
'idp_sso_url': form.cleaned_data['idp_sso_url'],
'idp_x509cert': form.cleaned_data['idp_x509cert']
}
if form.cleaned_data['idp_slo_url']:
idp_data['idp_slo_url'] = form.cleaned_data['idp_slo_url']
return idp_data
@staticmethod
def extract_attribute_mapping_from_form(form):
mapping_data = {
'attribute_mapping_email': form.cleaned_data['attribute_mapping_email'],
'attribute_mapping_firstname': form.cleaned_data['attribute_mapping_firstname']
}
return mapping_data
@staticmethod
def extract_idp_data_from_parsed_data(data):
idp_data = {}
if 'entityId' in data['idp']:
idp_data['idp_entityid'] = data['idp']['entityId']
if 'singleSignOnService' in data['idp'] and 'url' in data['idp']['singleSignOnService']:
idp_data['idp_sso_url'] = data['idp']['singleSignOnService']['url']
if 'singleLogoutService' in data['idp'] and 'url' in data['idp']['singleLogoutService']:
idp_data['idp_slo_url'] = data['idp']['singleLogoutService']['url']
if 'x509cert' in data['idp']:
idp_data['idp_x509cert'] = data['idp']['x509cert']
return idp_data
@staticmethod
def extract_parsed_data_from_idp_data(data):
parsed_data = {}
if 'idp' in data:
if 'idp_entityid' in data['idp']:
parsed_data['entityId'] = data['idp']['idp_entityid']
if 'idp_sso_url' in data['idp']:
parsed_data['singleSignOnService'] = {}
parsed_data['singleSignOnService']['url'] = data['idp']['idp_sso_url']
if 'idp_slo_url' in data['idp']:
parsed_data['singleLogoutService'] = {}
parsed_data['singleLogoutService']['url'] = data['idp']['idp_slo_url']
if 'idp_x509cert' in data['idp']:
parsed_data['x509cert'] = data['idp']['idp_x509cert']
return parsed_data
def refresh_identity(self, auth_identity):
# Nothing to refresh
return
```
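For reference, a sketch of the provider config consumed by `build_saml_config` above, reconstructed from `build_config` and the `extract_*` helpers; every value below is a placeholder:
```python
# Illustrative only; all values are placeholders.
example_saml_config = {
    'idp': {
        'idp_entityid': 'https://idp.example.com/metadata',
        'idp_sso_url': 'https://idp.example.com/sso',
        'idp_slo_url': 'https://idp.example.com/slo',   # optional
        'idp_x509cert': 'MIIC...base64...',
    },
    'contact': 'admin@example.com',
    'attribute_mapping': {
        'attribute_mapping_email': 'email',
        'attribute_mapping_firstname': '',
    },
}
```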
#### File: sentry/filters/preprocess_hashes.py
```python
from __future__ import absolute_import
from django.core.cache import cache, get_cache, InvalidCacheBackendError
try:
hash_cache = get_cache('preprocess_hash')
except InvalidCacheBackendError:
hash_cache = cache
def get_raw_cache_key(project_id, event_id):
return 'e:raw:{1}:{0}'.format(project_id, event_id)
```
#### File: tests/acceptance/test_create_project.py
```python
from __future__ import absolute_import
from sentry.testutils import AcceptanceTestCase
from sentry.models import Project
class CreateProjectTest(AcceptanceTestCase):
def setUp(self):
super(CreateProjectTest, self).setUp()
self.user = self.create_user('<EMAIL>')
self.org = self.create_organization(
name='<NAME>',
owner=None,
)
self.project = self.create_project(
organization=self.org,
team=self.team,
name='Bengal',
)
self.login_as(self.user)
self.path = '/organizations/{}/projects/new/'.format(self.org.slug)
def test_simple(self):
self.team = self.create_team(organization=self.org, name='Mariachi Band')
self.create_member(
user=self.user,
organization=self.org,
role='owner',
teams=[self.team],
)
self.browser.get(self.path)
self.browser.wait_until_not('.loading')
self.browser.click('.platformicon-java')
self.browser.snapshot(name='create project')
self.browser.click('.submit-new-team')
self.browser.wait_until_not('.loading')
assert Project.objects.get(team__organization=self.org, name='Java')
self.browser.snapshot(name='docs redirect')
def test_no_teams(self):
self.create_member(
user=self.user,
organization=self.org,
role='owner',
teams=[],
)
self.browser.get(self.path)
self.browser.wait_until_not('.loading')
self.browser.snapshot(name='create project no teams')
def test_many_teams(self):
self.team = self.create_team(organization=self.org, name='Mariachi Band')
self.team2 = self.create_team(organization=self.org, name='team two')
self.create_member(
user=self.user,
organization=self.org,
role='owner',
teams=[self.team, self.team2],
)
self.browser.get(self.path)
self.browser.wait_until_not('.loading')
self.browser.snapshot(name='create project many teams')
```
#### File: tests/acceptance/test_organization_onboarding.py
```python
from __future__ import absolute_import
from sentry.testutils import AcceptanceTestCase
class CreateOrganizationTest(AcceptanceTestCase):
def setUp(self):
super(CreateOrganizationTest, self).setUp()
self.user = self.create_user('<EMAIL>')
self.org = self.create_organization(
name='<NAME>',
owner=self.user,
)
self.team = self.create_team(organization=self.org, name='<NAME>')
self.login_as(self.user)
def test_simple(self):
self.browser.get('/onboarding/%s/' % self.org.slug)
self.browser.wait_until_not('.loading')
self.browser.wait_until('.step-container')
self.browser.snapshot(name='organization onboarding')
```
#### File: integrations/cloudflare/test_webhook.py
```python
from __future__ import absolute_import
from hashlib import sha256
import hmac
import json
import six
from sentry import options
from sentry.models import ApiToken, ProjectKey
from sentry.testutils import TestCase
UNSET = object()
class BaseWebhookTest(TestCase):
def setUp(self):
super(BaseWebhookTest, self).setUp()
self.user = self.create_user(is_superuser=False)
self.org = self.create_organization(owner=None)
self.team = self.create_team(organization=self.org)
self.create_member(organization=self.org, user=self.user, role='owner', teams=[self.team])
self.project = self.create_project(name='a', team=self.team)
self.token = ApiToken.objects.create(
user=self.user,
token='<KEY>',
)
self.key = ProjectKey.objects.get_or_create(project=self.project)[0]
def post_webhook(self, data, signature=UNSET, variant=UNSET, key=None):
if key is None:
key = options.get('cloudflare.secret-key')
if not isinstance(data, six.string_types):
body = json.dumps(data)
else:
body = data
if signature is UNSET:
signature = hmac.new(
key=key.encode('utf-8'),
msg=body.encode('utf-8'),
digestmod=sha256,
).hexdigest()
if variant is UNSET:
variant = '1'
headers = {
'HTTP_X_SIGNATURE_HMAC_SHA256_HEX': signature,
'HTTP_X_SIGNATURE_KEY_VARIANT': variant,
}
return self.client.post(
'/extensions/cloudflare/webhook/',
body,
content_type='application/json',
**headers
)
class CloudflareWebhookTest(BaseWebhookTest):
def test_missing_signature(self):
resp = self.post_webhook(
{'event': 'test'},
signature=None,
)
assert resp.status_code == 400
def test_invalid_signature(self):
resp = self.post_webhook(
{'event': 'test'},
signature='a' * 40,
)
assert resp.status_code == 400
def test_invalid_json(self):
resp = self.post_webhook('a')
assert resp.status_code == 400
def test_missing_variant(self):
resp = self.post_webhook(
{'event': 'test'},
variant=None,
)
assert resp.status_code == 400
def test_invalid_variant(self):
resp = self.post_webhook(
{'event': 'test'},
variant='fizzbuz',
)
assert resp.status_code == 400
def test_invalid_signature_with_test_variant(self):
resp = self.post_webhook(
{'event': 'test'},
variant='test',
)
assert resp.status_code == 400
def test_invalid_app_id_test_variant(self):
resp = self.post_webhook(
{'event': 'test', 'app': {'id': 'buzz'}},
variant='test',
key='test-key',
)
assert resp.status_code == 400
def test_valid_test_variant(self):
resp = self.post_webhook(
{'event': 'test', 'app': {'id': 'local'}, 'install': {}},
variant='test',
key='test-key',
)
assert resp.status_code == 200
class PreviewWebhookTest(BaseWebhookTest):
def test_empty(self):
webhook_data = json.loads(self.load_fixture('cloudflare/preview-webhook.json'))
resp = self.post_webhook(webhook_data)
assert resp.status_code == 200, resp.content
assert resp.data == {
'install': webhook_data['install'],
'proceed': True,
}
def test_prefills_data(self):
webhook_data = json.loads(self.load_fixture(
'cloudflare/preview-webhook-authenticated.json'))
webhook_data['install']['options']['organization'] = six.text_type(self.org.id)
resp = self.post_webhook(data=webhook_data)
assert resp.status_code == 200, resp.content
assert resp.data['proceed']
assert resp.data['install']['schema']['properties']['organization']['enum'] == [
six.text_type(self.org.id)]
assert resp.data['install']['schema']['properties']['organization']['enumNames'] == {
six.text_type(self.org.id): self.org.slug,
}
assert resp.data['install']['options']['organization'] == six.text_type(self.org.id)
assert resp.data['install']['schema']['properties']['project']['enum'] == [
six.text_type(self.project.id)]
assert resp.data['install']['schema']['properties']['project']['enumNames'] == {
six.text_type(self.project.id): self.project.slug,
}
assert resp.data['install']['options']['project'] == six.text_type(self.project.id)
assert resp.data['install']['schema']['properties']['dsn']['enum'] == [
self.key.get_dsn(public=True)]
assert resp.data['install']['options']['dsn'] == six.text_type(
self.key.get_dsn(public=True))
def test_multiple_projects(self):
project2 = self.create_project(name='b', team=self.team)
webhook_data = json.loads(self.load_fixture(
'cloudflare/preview-webhook-authenticated.json'))
webhook_data['install']['options']['organization'] = six.text_type(self.org.id)
resp = self.post_webhook(webhook_data)
assert resp.status_code == 200, resp.content
assert resp.data['proceed']
assert resp.data['install']['schema']['properties']['organization']['enum'] == [
six.text_type(self.org.id)]
assert resp.data['install']['options']['organization'] == six.text_type(self.org.id)
assert resp.data['install']['schema']['properties']['project']['enum'] == [
six.text_type(self.project.id), six.text_type(project2.id)]
assert resp.data['install']['options']['project'] == six.text_type(self.project.id)
assert resp.data['install']['schema']['properties']['dsn']['enum'] == [
self.key.get_dsn(public=True)]
assert resp.data['install']['options']['dsn'] == six.text_type(
self.key.get_dsn(public=True))
def test_no_projects(self):
self.project.delete()
webhook_data = json.loads(self.load_fixture(
'cloudflare/preview-webhook-authenticated.json'))
webhook_data['install']['options']['organization'] = six.text_type(self.org.id)
resp = self.post_webhook(webhook_data)
assert resp.status_code == 200, resp.content
assert resp.data['proceed']
assert resp.data['install']['schema']['properties']['organization']['enum'] == [
six.text_type(self.org.id)]
assert resp.data['install']['options']['organization'] == six.text_type(self.org.id)
assert resp.data['install']['schema']['properties']['project']['enum'] == []
assert 'dsn' not in resp.data['install']['schema']['properties']
class OptionChangeAccountWebhookTest(BaseWebhookTest):
def test_without_authentication(self):
webhook_data = json.loads(self.load_fixture(
'cloudflare/option-change-account-webhook.json'))
del webhook_data['authentications']
resp = self.post_webhook(webhook_data)
assert resp.status_code == 401, resp.content
def test_prefills_data(self):
webhook_data = json.loads(self.load_fixture(
'cloudflare/option-change-account-webhook.json'))
resp = self.post_webhook(webhook_data)
assert resp.status_code == 200, resp.content
assert resp.data['proceed']
assert resp.data['install']['schema']['properties']['organization']['enum'] == [
six.text_type(self.org.id)]
assert resp.data['install']['options']['organization'] == six.text_type(self.org.id)
assert resp.data['install']['schema']['properties']['project']['enum'] == [
six.text_type(self.project.id)]
assert resp.data['install']['options']['project'] == six.text_type(self.project.id)
assert resp.data['install']['schema']['properties']['dsn']['enum'] == [
self.key.get_dsn(public=True)]
assert resp.data['install']['options']['dsn'] == six.text_type(
self.key.get_dsn(public=True))
def test_with_invalid_organization_selected(self):
webhook_data = json.loads(self.load_fixture(
'cloudflare/option-change-account-webhook.json'))
webhook_data['install']['options']['organization'] = -1
resp = self.post_webhook(webhook_data)
assert resp.status_code == 200, resp.content
assert resp.data['proceed']
assert resp.data['install']['schema']['properties']['organization']['enum'] == [
six.text_type(self.org.id)]
assert resp.data['install']['options']['organization'] == six.text_type(self.org.id)
assert resp.data['install']['schema']['properties']['project']['enum'] == [
six.text_type(self.project.id)]
assert resp.data['install']['options']['project'] == six.text_type(self.project.id)
assert resp.data['install']['schema']['properties']['dsn']['enum'] == [
self.key.get_dsn(public=True)]
assert resp.data['install']['options']['dsn'] == six.text_type(
self.key.get_dsn(public=True))
def test_with_existing_project_selected_and_no_keys(self):
project2 = self.create_project(name='b', team=self.team)
# kill the automatically generated keys
ProjectKey.objects.filter(project=project2).delete()
webhook_data = json.loads(self.load_fixture(
'cloudflare/option-change-account-webhook.json'))
webhook_data['install']['options']['organization'] = six.text_type(self.org.id)
webhook_data['install']['options']['project'] = six.text_type(project2.id)
resp = self.post_webhook(webhook_data)
assert resp.status_code == 200, resp.content
assert resp.data['proceed']
assert resp.data['install']['schema']['properties']['organization']['enum'] == [
six.text_type(self.org.id)]
assert resp.data['install']['options']['organization'] == six.text_type(self.org.id)
assert resp.data['install']['schema']['properties']['project']['enum'] == [
six.text_type(self.project.id), six.text_type(project2.id)]
assert resp.data['install']['options']['project'] == six.text_type(project2.id)
assert resp.data['install']['schema']['properties']['dsn']['enum'] == []
assert 'dsn' not in resp.data['install']['options']
``` |
{
"source": "jian-yu/autotx",
"score": 3
} |
#### File: autotx/bank/base.py
```python
from abc import ABCMeta, abstractmethod
class Bank(metaclass=ABCMeta):
# Query account assets
# @abstractmethod
# def GetBalance(self, account):
# pass
# Send coins
@abstractmethod
def SendCoins(self, srcAccount, dstAccount, fees, gas, gasAdjust):
pass
```
#### File: autotx/common/baseReq_test.py
```python
import unittest
from autotx.common.baseReq import GenBaseReqJson
from autotx.auth.account import Account
from autotx import HSN_CHAIN_ID
class TestBaseReq(unittest.TestCase):
def setUp(self):
self.account = Account('hsn1', '<PASSWORD>', 'local', 0, 14, 'hsn1p8hqjcsxat30zgllpdkvgtutctrhun70uv9ts0', 'hsnpub1addwnpepqvfe59jmpyjqxjkez68gh3f60utmljpzhfm29af9z98n758zpqns7m4aj02')
def test_baseReq(self):
baseReqJson, err = GenBaseReqJson(self.account.getAddress(), HSN_CHAIN_ID, str(self.account.getAccNum()), str(self.account.getSequence()), [{'denom': 'hsn', 'amount': '10'}], False, 'send money', '20000', '1.0')
print(baseReqJson)
self.assertIsNone(err)
if __name__ == '__main__':
unittest.main()
```
#### File: autotx/distribution/distribution.py
```python
import json
import time
import urllib3
from autotx import HSN_CHAIN_ID, UNSIGN_JSON_DIR
from autotx.bank.bank import QueryAccountInfo
from autotx.common.baseReq import GenBaseReqJson
from autotx.distribution.base import Distribute
from autotx.module.module import Module
from autotx.utils.contants import HTTP_METHOD_GET, HTTP_METHOD_POST, DELEGATOR_REWARD_URL
from autotx.utils.file import WriteToFile
from autotx.utils.timestamp import now_timestamp
from autotx.distribution.req import GenWithdrawDelegatorOneRewardTxJson
from decimal import Decimal
http = urllib3.PoolManager()
class Distributor(Module, Distribute):
def WithdrawDelegatorOneReward(self, delegator, validator, fees, gas, gasAdjust):
self.IncrHandingCount()
self.IncrCalledCount()
now = now_timestamp()
try:
rewards, err = self.QueryDelegatorRewardWithValidator(delegator, validator)
if err is not None:
return None, DistributorError('WithdrawDelegatorOneReward: ' + err.msg)
memo = '%s withdraw reward from %s' % (delegator.getAddress(), validator.operatorAddr)
if rewards:
if len(rewards) == 0:
return None, DistributorError('WithdrawDelegatorOneReward: ' + 'reward is empty')
for reward in rewards:
if reward['denom'] == 'hsn':
if Decimal(reward['amount']) == Decimal('0'):
return None, DistributorError('WithdrawDelegatorOneReward: ' + 'reward is 0')
delegator = QueryAccountInfo(delegator)
# Fetch the latest on-chain account info
if delegator is None:
return None, DistributorError('WithdrawDelegatorOneReward: ' + 'delegator is invalid!')
baseReqJson, err = GenBaseReqJson(delegator, HSN_CHAIN_ID, fees, False, memo, gas, gasAdjust)
if err is not None:
return None, DistributorError('WithdrawDelegatorOneReward: ' + err.msg)
withdrawTxJson, err = GenWithdrawDelegatorOneRewardTxJson(baseReqJson)
if err is not None:
return None, DistributorError('WithdrawDelegatorOneReward: ' + err.msg)
withdrawnTxJson, err = postWithdrawDelegatorOneReward(withdrawTxJson, delegator.getAddress(), validator.operatorAddr)
if err is not None:
return None, DistributorError('WithdrawDelegatorOneReward: ' + err.msg)
# Write the unsigned tx JSON to a file
unSignJsonFileName = '[withdrawreward]--' + delegator.getAddress() + '|' + str(int(round(time.time() * 1000))) + '.json'
unSignJsonPath, err = WriteToFile(UNSIGN_JSON_DIR, unSignJsonFileName, withdrawnTxJson)
if err is not None:
return None, DistributorError(err.msg)
return unSignJsonPath, None
return None, DistributorError('WithdrawDelegatorOneReward: ' + 'reward is invalid!')
finally:
self.SetCalculateCost(now_timestamp() - now)
self.DecrHandingCount()
def WithdrawDelegatorAllReward(self, delegator, validator):
pass
def ReplaceRewardAddress(self, delegator, newAddress):
pass
def WithdrawValidatorReward(self, delegator):
pass
def QueryDelegatorRewardWithValidator(self, delegator, validator):
self.IncrHandingCount()
self.IncrCalledCount()
try:
resp = http.request(HTTP_METHOD_GET, DELEGATOR_REWARD_URL % (delegator.getAddress(), validator.operatorAddr))
if resp.status == 200:
data = json.loads(resp.data.decode('utf-8'))
return data['result'], None
elif resp.status == 400:
return None, DistributorError('Invalid delegator address')
elif resp.status == 500:
data = json.loads(resp.data.decode('utf-8'))
return None, DistributorError(data['error'])
except Exception as e:
return None, e
finally:
self.DecrHandingCount()
def postWithdrawDelegatorOneReward(body, delegatorAddr, validatorOpeAddr):
try:
resp = http.request(HTTP_METHOD_POST, DELEGATOR_REWARD_URL % (delegatorAddr, validatorOpeAddr), body=body)
if resp and resp.status == 200:
return resp.data.decode('utf-8'), None
elif resp and resp.status == 400:
return None, DistributorError('postWithdrawDelegatorOneReward: Invalid request')
elif resp and resp.status == 500:
return None, DistributorError('postWithdrawDelegatorOneReward: Server internal error')
else:
return None, DistributorError('postWithdrawDelegatorOneReward: Unknown error')
except Exception as e:
return None, e
class DistributorError(Exception):
def __init__(self, msg):
self.msg = msg
def Error(self):
return self.msg
def __str__(self):
return self.msg
```
#### File: autotx/distribution/distribution_test.py
```python
import unittest
from autotx.auth.account import Account
from autotx.auth.validator import Validator
from autotx.distribution.distribution import Distributor
from autotx.module.mid import GenerateMID
from autotx.module.moduletype import TYPE_DISTRIBUTION
from autotx.module.sn import SNGenerator
class TestDistribution(unittest.TestCase):
def setUp(self):
self.sn = SNGenerator(1, 0)
mid = GenerateMID(TYPE_DISTRIBUTION, self.sn.Get())
self.distributor = Distributor(mid, 0)
self.delegator = Account('hsn1', '12345678', 'local', '0', '64', '<KEY>', '<KEY>')
self.validator = Validator(
'<KEY>', '<KEY>', False, 2, '3189846465', '3189846465.000000000000000000',
{'moniker': 'node1', 'identity': '', 'website': '', 'details': ''}, '0', '1970-01-01T00:00:00Z',
{'commission_rates': {'rate': '0.100000000000000000', 'max_rate': '0.200000000000000000', 'max_change_rate': '0.010000000000000000'}, 'update_time': '2019-08-24T08:51:45.550141024Z'},
'1'
)
def test_withdrawReward(self):
withdrawRewardTxFilePath, err = self.distributor.WithdrawDelegatorOneReward(self.delegator, self.validator, [{'denom': 'hsn', 'amount': '1'}], '100000', '1.0')
print(withdrawRewardTxFilePath)
self.assertIsNone(err)
```
#### File: autotx/log/logger.py
```python
class Logger:
def __init__(self, time):
self.__time = time
def Warn(self, msg):
self.__type = '<Warn> '
self.__msg = msg
return self.__time + '<Warn> ' + msg
def Info(self, msg):
self.__type = '<Info> '
self.__msg = msg
return self.__time + '<Info> ' + msg
def Error(self, msg):
self.__type = '<Error> '
self.__msg = msg
return self.__time + '<Error> ' + msg
def __str__(self):
return self.__time + self.__type + self.__msg
```
#### File: autotx/module/base.py
```python
from abc import ABCMeta, abstractmethod
class BasicModule(metaclass=ABCMeta):
# Module instance ID
@abstractmethod
def ID(self):
pass
# Number of times the module has been called
@abstractmethod
def CalledCount(self):
pass
# Number of accepted calls to the module
@abstractmethod
def AcceptedCount(self):
pass
# Number of successfully completed calls
@abstractmethod
def CompletedCount(self):
pass
# Number of calls currently being handled
@abstractmethod
def HandingCount(self):
pass
# Total call counts
@abstractmethod
def Counts(self):
pass
# Compute cost of the module instance (derived from how long the instance takes to handle one task)
@abstractmethod
def CalculateCost(self):
pass
```
#### File: autotx/module/moduletype.py
```python
from autotx.error.errors import IllegalError
TYPE_AUTH = 'auth'
TYPE_BANK = 'bank'
TYPE_DISTRIBUTION = 'distribution'
TYPE_STAKING = 'STAKING'
TYPE_SIGN = 'sign'
TYPE_BROADCAST = 'broadcast'
LegalTypeLetter = {
TYPE_AUTH: 'AUTH',
TYPE_BANK: 'BANK',
TYPE_DISTRIBUTION: 'DISTRIBUTION',
TYPE_STAKING: 'STAKING',
TYPE_SIGN: 'SIGN',
TYPE_BROADCAST: 'BROADCAST'
}
LegalLetterType = {
'AUTH': TYPE_AUTH,
'BANK': TYPE_BANK,
'DISTRIBUTION': TYPE_DISTRIBUTION,
'STAKING': TYPE_STAKING,
'SIGN': TYPE_SIGN,
'BROADCAST': TYPE_BROADCAST
}
def CheckType(moduleType, module):
if module is None or moduleType == '':
return False
if moduleType == TYPE_AUTH:
return True
elif moduleType == TYPE_BANK:
return True
elif moduleType == TYPE_DISTRIBUTION:
return True
elif moduleType == TYPE_STAKING:
return True
elif moduleType == TYPE_SIGN:
return True
elif moduleType == TYPE_BROADCAST:
return True
return False
def LegalType(moduleType):
if LegalTypeLetter.get(moduleType) is not None:  # .get avoids a KeyError for unknown types
return True
return False
# Get the module type encoded in a mid; returns (type, error)
def GetType(mid):
from autotx.module.mid import SplitMid
separateMID, err = SplitMid(mid)
if err is not None:
return None, err
mType = LegalLetterType.get(list(separateMID)[0])  # None if the letter code is unknown
if mType is None:
return None, IllegalError('invalid mid: {mid}'.format(mid=mid))
return mType, None
```
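A small usage sketch of the type helpers above (not part of the package); it assumes the module is importable as `autotx.module.moduletype`:
```python
# Illustrative only.
from autotx.module.moduletype import TYPE_BANK, CheckType, LegalType, LegalTypeLetter

assert LegalTypeLetter[TYPE_BANK] == 'BANK'
assert LegalType(TYPE_BANK)
assert not CheckType('', object())   # empty type strings are rejected
```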
#### File: autotx/scheduler/args.py
```python
class PoolArgs:
def __init__(self, bankerBufCap, bankerMaxBufNumber, signerBufCap, signerBufMaxNumber, broadcasterBufCap, broadcasterMaxNumber, stakingBufCap, stakingMaxNumber, distributionBufCap, distributionMaxNumber, errorBufCap, errorMaxNumber):
self.BankerBufCap = bankerBufCap
self.BankerMaxBufNumber = bankerMaxBufNumber
self.SignerBufCap = signerBufCap
self.SignerBufMaxNumber = signerBufMaxNumber
self.BroadcasterBufCap = broadcasterBufCap
self.BroadcasterMaxNumber = broadcasterMaxNumber
self.StakingBufCap = stakingBufCap
self.StakingMaxNumber = stakingMaxNumber
self.DistributionBufCap = distributionBufCap
self.DistributionMaxNumber = distributionMaxNumber
self.ErrorBufCap = errorBufCap
self.ErrorMaxNumber = errorMaxNumber
def Check(self):
if self.BankerBufCap == 0:
return PoolArgsError('zero banker buffer capacity')
if self.BankerMaxBufNumber == 0:
return PoolArgsError('zero banker max buffer number')
if self.SignerBufCap == 0:
return PoolArgsError('zero signer buffer capacity')
if self.SignerBufMaxNumber == 0:
return PoolArgsError('zero signer max buffer number')
if self.BroadcasterBufCap == 0:
return PoolArgsError('zero broadcaster buffer capacity')
if self.BroadcasterMaxNumber == 0:
return PoolArgsError('zero broadcaster max buffer number')
if self.StakingBufCap == 0:
return PoolArgsError('zero staking buffer capacity')
if self.StakingMaxNumber == 0:
return PoolArgsError('zero staking max buffer number')
if self.DistributionBufCap == 0:
return PoolArgsError('zero distribution buffer capacity')
if self.DistributionMaxNumber == 0:
return PoolArgsError('zero distribution max buffer number')
if self.ErrorBufCap == 0:
return PoolArgsError('zero error buffer capacity')
if self.ErrorMaxNumber == 0:
return PoolArgsError('zero error max buffer number')
return None
class PoolArgsError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class ModuleArgs:
def __init__(self, bankers, signers, broadcasters, stakings, distributors):
self.Bankers = bankers
self.Signers = signers
self.Broadcasters = broadcasters
self.Stakings = stakings
self.Distributors = distributors
def Check(self):
if len(self.Bankers) == 0:
return ModuleArgsError('empty banker list')
if len(self.Signers) == 0:
return ModuleArgsError('empty signer list')
if len(self.Broadcasters) == 0:
return ModuleArgsError('empty broadcaster list')
if len(self.Stakings) == 0:
return ModuleArgsError('empty staking list')
if len(self.Distributors) == 0:
return ModuleArgsError('empty distributor list')
return None
class ModuleArgsError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class SendCoinArgs:
def __init__(self, srcAccount, dstAccount, coins, fees, gas, gasAdjust):
self.srcAccount = srcAccount
self.dstAccount = dstAccount
self.coins = coins
self.fees = fees
self.gas = gas
self.gasAdjust = gasAdjust
def Check(self):
if self.srcAccount is None or self.srcAccount.getAddress() == '':
return SendCoinArgsError('srcAccount is invalid')
if self.dstAccount is None or self.dstAccount.getAddress() == '':
return SendCoinArgsError('dstAccount is invalid')
if self.coins is None or len(self.coins) == 0:
return SendCoinArgsError('empty coins')
if self.fees is None or len(self.fees) == 0:
return SendCoinArgsError('empty fees')
if self.gas is None:
return SendCoinArgsError('empty gas')
if self.gasAdjust is None:
return SendCoinArgsError('empty gasAdjust')
return None
class SendCoinArgsError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class SendSignArgs:
def __init__(self, srcAccount, sendedJsonFilePath, node):
self.srcAccount = srcAccount
self.sendedJsonFilePath = sendedJsonFilePath
self.node = node
def Check(self):
if self.srcAccount is None or self.srcAccount.getAddress() == '':
return SendSignArgsError('srcAccount is invalid')
if self.sendedJsonFilePath is None:
return SendSignArgsError('empty sendedJsonFilePath')
if self.node is None:
return SendSignArgsError('empty node')
return None
class SendSignArgsError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class SendBroadcastArgs:
def __init__(self, srcAccount, body, mode='sync'):
self.srcAccount = srcAccount
self.body = body
self.mode = mode
def Check(self):
if self.body is None:
return SendBroadcastArgsError('empty broadcast body')
if self.srcAccount is None:
return SendBroadcastArgsError('unknown tx src account')
return None
class SendBroadcastArgsError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class DelegateArgs():
def __init__(self, delegator, validator, coin, fees, gas, gasAdjust):
self.delegator = delegator
self.validator = validator
self.coin = coin
self.fees = fees
self.gas = gas
self.gasAdjust = gasAdjust
def Check(self):
if self.delegator is None or self.delegator.getAddress() == '':
return DelegateArgsError('delegator is invalid')
if self.validator is None:
return DelegateArgsError('validator is invalid')
if self.coin is None:
return DelegateArgsError('empty coins')
if self.fees is None or len(self.fees) == 0:
return DelegateArgsError('empty fees')
if self.gas is None:
return DelegateArgsError('empty gas')
if self.gasAdjust is None:
return DelegateArgsError('empty gasAdjust')
return None
class StakingArgs():
def __init__(self, _type, data):
self._type = _type
self.data = data
def getType(self):
return self._type
def getData(self):
return self.data
class WithdrawDelegatorOneRewardArgs():
def __init__(self, delegator, validator, fees, gas, gasAdjust):
self.delegator = delegator
self.validator = validator
self.fees = fees
self.gas = gas
self.gasAdjust = gasAdjust
def Check(self):
if self.delegator is None or self.delegator.getAddress() == '':
return DelegateArgsError('delegator is invalid')
if self.validator is None:
return DelegateArgsError('validator is invalid')
if self.fees is None or len(self.fees) == 0:
return DelegateArgsError('empty fees')
if self.gas is None:
return DelegateArgsError('empty gas')
if self.gasAdjust is None:
return DelegateArgsError('empty gasAdjust')
return None
class DistributionArgs():
def __init__(self, _type, data):
self._type = _type
self.data = data
def getType(self):
return self._type
def getData(self):
return self.data
class DelegateArgsError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
```
#### File: autotx/staking/req.py
```python
import json
def GenDelegateTxJson(baseReq, delegator, validator, coin):
if baseReq is None:
return None, ArgsError('args are insufficient')
baseReqJsonObj = json.loads(baseReq)
if baseReqJsonObj is None:
return None, ParseError('json parse error')
data = {'base_req': baseReqJsonObj, 'delegator_address': delegator.getAddress(), 'validator_address': validator.operatorAddr, 'amount': coin}
jsonData = json.dumps(data)
if jsonData is None:
return None, ParseError('json parse error')
return jsonData, None
class ArgsError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class ParseError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
```
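For reference, a hedged sketch of the payload assembled by `GenDelegateTxJson` above; the addresses and the coin shape are illustrative guesses based on the fee dicts used elsewhere in this repo, and `base_req_dict` stands in for the parsed output of `GenBaseReqJson` (not shown here):
```python
# Illustrative only; all values are placeholders.
base_req_dict = {}  # placeholder for whatever GenBaseReqJson produced
example_delegate_tx = {
    "base_req": base_req_dict,
    "delegator_address": "hsn1...",          # delegator.getAddress()
    "validator_address": "hsnvaloper1...",   # validator.operatorAddr
    "amount": {"denom": "hsn", "amount": "100"},
}
```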
#### File: autotx/utils/buffer_test.py
```python
import unittest
from autotx.utils.buffer import Buffer
class TestBuffer(unittest.TestCase):
def test_create(self):
size = 10
buffer = Buffer(size)
self.assertEqual(size, buffer.Cap())
def test_bufPut(self):
size = 10
buffer = Buffer(size)
data = []
# Case 1: a fresh buffer
for i in range(0, size):
data.append(i)
count = 0
for item in data:
ok, err = buffer.Put(item)
self.assertIsNone(err)
self.assertTrue(ok)
count += 1
self.assertEqual(buffer.Len(), count)
# Case 2: the buffer is full
ok, err = buffer.Put(data)
self.assertIsNone(err)
self.assertFalse(ok)
buffer.Close()
# Case 3: the buffer has been closed
ok, err = buffer.Put(data)
self.assertIsNotNone(err)
def test_bufGet(self):
size = 10
# Case 1: a fresh buffer
buffer = Buffer(size)
for i in range(0, size):
buffer.Put(i)
count = size
while count >= 1:
data, err = buffer.Get()
self.assertEqual(data, size - count)
self.assertIsNone(err)
count -= 1
# Case 2: the buffer is empty
data, err = buffer.Get()
self.assertIsNone(data)
self.assertIsNone(err)
buffer.Close()
# Case 3: the buffer has been closed
data, err = buffer.Get()
self.assertIsNone(data)
self.assertIsNotNone(err)
def test_bufPutAndGet(self):
size = 10
buffer = Buffer(size)
data = []
# Case 1: a fresh buffer
for i in range(0, size):
data.append(i)
count = 0
# Case 2: put the inputs into the buffer
for item in data:
ok, err = buffer.Put(item)
self.assertIsNone(err)
self.assertTrue(ok)
count += 1
self.assertEqual(buffer.Len(), count)
# Case 3: read the data back out of the buffer
while count >= 4:
val, err = buffer.Get()
self.assertEqual(val, data[size - count])
self.assertIsNone(err)
count -= 1
# Case 4: the buffer has been closed
buffer.Close()
ok, err = buffer.Put(data)
self.assertIsNotNone(err)
data, err = buffer.Get()
self.assertIsNone(data)
self.assertIsNotNone(err)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jianyuh/FBGEMM",
"score": 2
} |
#### File: fbgemm_gpu/test/split_table_batched_embeddings_test.py
```python
import copy
import unittest
from typing import Any, Callable, List, Optional, Tuple
import hypothesis.strategies as st
import numpy as np
import split_table_batched_embeddings_ops
import torch
from hypothesis import Verbosity, assume, given, settings
from split_table_batched_embeddings_ops import OptimType
MAX_EXAMPLES = 40
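# Note: despite the name, div_round_up rounds `a` up to the nearest multiple of
# `b` (it multiplies the ceiling quotient back by b).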
def div_round_up(a: int, b: int) -> int:
return int((a + b - 1) // b) * b
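# Flattens a dense (B, L) index tensor into CSR-style (indices, offsets),
# where offsets[i] marks where sample i starts in the flattened index vector.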
def get_offsets_from_dense(indices: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
(B, L) = indices.size()
return (
indices.contiguous().view(-1),
torch.tensor(
np.cumsum(np.asarray([0] + [L for _ in range(B)])[:-1]).astype(np.int64)
),
)
def to_device(t: torch.Tensor, use_cpu: bool):
return t.cpu() if use_cpu else t.cuda()
def b_indices(
b: Callable, x: torch.Tensor, per_sample_weights=None, use_cpu=False
) -> Any:
(indices, offsets) = get_offsets_from_dense(x)
return b(
to_device(indices, use_cpu),
to_device(offsets, use_cpu),
per_sample_weights=per_sample_weights,
)
def get_table_batched_offsets_from_dense(
merged_indices: torch.Tensor, use_cpu=False
) -> Tuple[torch.Tensor, torch.Tensor]:
(T, B, L) = merged_indices.size()
lengths = np.ones((T, B)) * L
flat_lengths = lengths.flatten()
return (
to_device(merged_indices.contiguous().view(-1), use_cpu),
to_device(
torch.tensor(([0] + np.cumsum(flat_lengths).tolist())).long(),
use_cpu,
),
)
def table_batched_embeddings_indices_and_offsets(
indices_per_table: List[torch.Tensor],
offsets_per_table: List[torch.Tensor],
pinned_total_indices_per_table_buffer: Optional[List[torch.Tensor]] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
torch.ops.load_library("//hpc/ops:sparse_ops")
if pinned_total_indices_per_table_buffer is None:
pinned_total_indices_per_table_buffer = torch.tensor(
[indices.numel() for indices in indices_per_table]
)
pinned_total_indices_per_table_buffer = (
pinned_total_indices_per_table_buffer.pin_memory()
)
else:
pinned_total_indices_per_table_buffer[:] = torch.tensor(
[indices.numel() for indices in indices_per_table]
)
return (
torch.cat(indices_per_table, dim=0),
torch.cumsum(
torch.ops.fb.construct_offsets(
torch.stack(offsets_per_table),
pinned_total_indices_per_table_buffer.cuda(non_blocking=True),
),
dim=0,
),
)
def generate_requests(
iters: int,
B: int,
T: int,
L: int,
E: int,
# inter-batch indices reuse rate
reuse: float = 0.0,
# alpha <= 1.0: use uniform distribution
    # alpha > 1.0: use zipf distribution
alpha: float = 1.0,
fp16: bool = False,
weighted: bool = False,
) -> List[Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]]:
if alpha <= 1.0:
all_indices = torch.randint(
low=0,
high=E,
size=(iters, T, B * L),
device=torch.cuda.current_device(),
dtype=torch.int32,
)
else:
all_indices = (
torch.as_tensor(np.random.zipf(a=alpha, size=(iters, T, B * L)))
.to(torch.cuda.current_device())
.int()
% E
)
for it in range(iters - 1):
for t in range(T):
reused_indices = torch.randperm(B * L, device=torch.cuda.current_device())[
: int(B * L * reuse)
]
all_indices[it + 1, t, reused_indices] = all_indices[it, t, reused_indices]
rs = [
get_table_batched_offsets_from_dense(all_indices[it].view(T, B, L)) + (
torch.randn(
T * B * L,
device=torch.cuda.current_device(),
dtype=torch.float16 if fp16 else torch.float32,
)
if weighted
else None,
)
for it in range(iters)
]
# pyre-fixme[7]
return rs
@unittest.skipIf(not torch.cuda.is_available(), "Skip when CUDA is not available")
class SplitTableBatchedEmbeddingsTest(unittest.TestCase):
@given(
T=st.integers(min_value=1, max_value=128),
B=st.integers(min_value=1, max_value=128),
L=st.integers(min_value=2, max_value=128),
pinned=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_construct_offsets(self, T, B, L, pinned):
Ls_per_table = [
np.random.randint(low=0, high=int(L), size=(B,)).tolist() for _ in range(T)
]
indices_per_table = [
torch.randint(low=0, high=int(1e4), size=(sum(Ls_per_table[t]),))
.cuda()
for t in range(T)
]
offsets_per_table = [
torch.cumsum(
torch.tensor([0] + Ls_per_table[t][:-1]).cuda(), dim=0
)
for t in range(T)
]
pinned_total_indices_per_table_buffer = (
torch.tensor([0 for _ in range(T)]).pin_memory()
)
(fused_indices, fused_offsets) = table_batched_embeddings_indices_and_offsets(
indices_per_table,
offsets_per_table,
pinned_total_indices_per_table_buffer if pinned else None,
)
fused_offsets = fused_offsets.cpu()
fused_indices = fused_indices.cpu()
offsets_per_table = [t.cpu() for t in offsets_per_table]
indices_per_table = [t.cpu() for t in indices_per_table]
# Verification
for t in range(T):
for b in range(B):
idx_start = fused_offsets[t * B + b]
idx_end = fused_offsets[t * B + b + 1]
L_bt = idx_end - idx_start
if b != B - 1:
assert L_bt == offsets_per_table[t][b + 1] - offsets_per_table[t][b]
else:
assert (
L_bt == indices_per_table[t].numel() - offsets_per_table[t][b]
)
torch.testing.assert_allclose(
fused_indices[idx_start : idx_start + L_bt],
indices_per_table[t][
offsets_per_table[t][b] : offsets_per_table[t][b] + L_bt
],
)
@given(
T=st.integers(min_value=1, max_value=10),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
fp16=st.booleans(),
weighted=st.booleans(),
mixed=st.booleans(),
use_cache=st.booleans(),
cache_algorithm=st.sampled_from(
split_table_batched_embeddings_ops.CacheAlgorithm
),
pooling_mode=st.sampled_from(
split_table_batched_embeddings_ops.PoolingMode
),
use_cpu=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_forward(
self,
T,
D,
B,
log_E,
L,
fp16,
weighted,
mixed,
use_cache,
cache_algorithm,
pooling_mode,
use_cpu,
):
# NOTE: cache is not applicable to CPU version.
assume(not use_cpu or not use_cache)
# NOTE: limit (T * B * L * D) to avoid timeout for CPU version!
assume(not use_cpu or T * B * L * D <= 2048)
assume(
pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM
or not weighted
)
mode = (
"sum"
if pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM
else "mean"
)
E = int(10 ** log_E)
if use_cpu:
D = (D + 15) // 16 * 4
else:
D = D * 4
if not mixed:
Ds = [D] * T
Es = [int(1e4)] * T
else:
Ds = [
div_round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)
for _ in range(T)
]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)
]
compute_device = split_table_batched_embeddings_ops.ComputeDevice.CUDA
if use_cpu:
managed = [
split_table_batched_embeddings_ops.EmbeddingLocation.HOST
] * T
compute_device = split_table_batched_embeddings_ops.ComputeDevice.CPU
elif use_cache:
managed = [
split_table_batched_embeddings_ops.EmbeddingLocation.MANAGED_CACHING
] * T
if mixed:
average_D = sum(Ds) // T
for t, d in enumerate(Ds):
managed[t] = (
split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE
if d < average_D
else managed[t]
)
else:
managed = [
np.random.choice(
[
split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE,
split_table_batched_embeddings_ops.EmbeddingLocation.MANAGED,
]
)
for _ in range(T)
]
bs = [
to_device(torch.nn.EmbeddingBag(E, D, mode=mode, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
if fp16 and not use_cpu:
# NOTE: CPU version of torch.nn.EmbeddingBag doesn't support fp16.
bs = [b.half() for b in bs]
xs = [to_device(torch.randint(low=0, high=e, size=(B, L)), use_cpu) for e in Es]
xws = [to_device(torch.randn(size=(B, L)), use_cpu) for _ in range(T)]
xws_acc_type = copy.deepcopy(xws)
if fp16 and not use_cpu:
# NOTE: CPU version of torch.nn.EmbeddingBag doesn't support fp16.
xws = [xw.half() for xw in xws]
fs = (
[b_indices(b, x, use_cpu=use_cpu) for (b, x) in zip(bs, xs)]
if not weighted
else [
b_indices(b, x, per_sample_weights=xw.view(-1), use_cpu=use_cpu)
for (b, x, xw) in zip(bs, xs, xws)
]
)
f = torch.cat([f.view(B, -1) for f in fs], dim=1)
cc = split_table_batched_embeddings_ops.SplitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
E,
D,
split_table_batched_embeddings_ops.EmbeddingLocation(M),
compute_device,
)
for (E, D, M) in zip(Es, Ds, managed)
],
fp16=fp16,
optimizer=OptimType.EXACT_SGD,
learning_rate=0.05,
cache_algorithm=cache_algorithm,
pooling_mode=pooling_mode,
)
# NOTE: test TorchScript-compatible!
cc = torch.jit.script(cc)
for t in range(T):
cc.split_embedding_weights()[t].data.copy_(bs[t].weight)
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
xw = torch.cat([xw.view(1, B, L) for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu)
fc2 = (
cc(indices, offsets)
if not weighted
else cc(indices, offsets, to_device(xw.contiguous().view(-1), use_cpu))
)
torch.testing.assert_allclose(
fc2.float(),
f.float(),
atol=8.0e-3 if fp16 else 1.0e-5,
rtol=8.0e-3 if fp16 else 1.0e-5,
)
@given(
T=st.integers(min_value=1, max_value=3),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=32),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=10),
fp16=st.booleans(),
weighted=st.booleans(),
mixed=st.booleans(),
long_segments=st.booleans(),
pooling_mode=st.sampled_from(
split_table_batched_embeddings_ops.PoolingMode
),
use_cpu=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=10, deadline=None)
def test_backward_dense(
self,
T,
D,
B,
log_E,
L,
fp16,
weighted,
mixed,
long_segments,
pooling_mode,
use_cpu,
):
# NOTE: torch.autograd.gradcheck() is too time-consuming for CPU version
# so we have to limit (T * B * L * D)!
assume(not use_cpu or T * B * L * D <= 2048)
assume(
pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM
or not weighted
)
mode = (
"sum"
if pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM
else "mean"
)
E = int(10 ** log_E)
if use_cpu:
D = (D + 15) // 16 * 4
else:
D = D * 4
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
div_round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)
for _ in range(T)
]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2 * E)) for _ in range(T)
]
bs = [
to_device(torch.nn.EmbeddingBag(E, D, mode=mode, sparse=False), use_cpu)
for (E, D) in zip(Es, Ds)
]
if fp16 and not use_cpu:
# NOTE: CPU version of torch.nn.EmbeddingBag doesn't support fp16.
bs = [b.half() for b in bs]
xs = [
to_device(
torch.from_numpy(
np.random.choice(range(e), size=(B, L), replace=True).astype(
np.int64
)
),
use_cpu,
)
for e in Es
]
if long_segments and L > 0 and not fp16:
for x in xs:
x[:, 0] = 0
xws = [to_device(torch.randn(size=(B, L)), use_cpu) for _ in range(T)]
xws_acc_type = copy.deepcopy(xws)
if fp16 and not use_cpu:
# NOTE: CPU version of torch.nn.EmbeddingBag doesn't support fp16.
xws = [xw.half() for xw in xws]
fs = (
[b_indices(b, x, use_cpu=use_cpu) for (b, x) in zip(bs, xs)]
if not weighted
else [
b_indices(b, x, per_sample_weights=xw.view(-1), use_cpu=use_cpu)
for (b, x, xw) in zip(bs, xs, xws)
]
)
gos = [torch.randn_like(f) for f in fs]
[f.backward(go) for (f, go) in zip(fs, gos)]
grad_weights = torch.cat([b.weight.grad.view(-1) for b in bs])
if fp16:
grad_weights = grad_weights.half()
cc = split_table_batched_embeddings_ops.DenseTableBatchedEmbeddingBagsCodegen(
[(E, D) for (E, D) in zip(Es, Ds)],
pooling_mode=pooling_mode,
use_cpu=use_cpu,
)
if fp16:
cc = cc.half()
# NOTE: test TorchScript-compatible!
cc = torch.jit.script(cc)
for t in range(T):
cc.split_embedding_weights()[t].data.copy_(bs[t].weight)
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
xw = torch.cat([xw.view(1, B, L) for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu)
fc2 = (
cc(indices, offsets)
if not weighted
else cc(indices, offsets, to_device(xw.contiguous().view(-1), use_cpu))
)
f = torch.cat([f.view(B, -1) for f in fs], dim=1)
torch.testing.assert_allclose(
fc2.float(),
f.float(),
atol=5.0e-3 if fp16 else 1.0e-5,
rtol=5.0e-3 if fp16 else 1.0e-5,
)
goc = torch.cat([go.view(B, -1) for go in gos], dim=1).contiguous()
fc2.backward(goc)
torch.testing.assert_allclose(
cc.weights.grad,
grad_weights,
atol=5.0e-3 if fp16 else 1.0e-4,
rtol=5.0e-3 if fp16 else 1.0e-4,
)
cc = split_table_batched_embeddings_ops.DenseTableBatchedEmbeddingBagsCodegen(
[(E, D) for (E, D) in zip(Es, Ds)],
# NOTE: only SUM pooling can work with per_sample_weights!
pooling_mode=split_table_batched_embeddings_ops.PoolingMode.SUM,
use_cpu=use_cpu,
).double()
per_sample_weights = to_device(xw.contiguous().view(-1), use_cpu).double()
per_sample_weights.requires_grad = True
indices.requires_grad = False
offsets.requires_grad = False
for param in cc.parameters():
param.requires_grad = False
torch.autograd.gradcheck(cc, (indices, offsets, per_sample_weights))
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
fp16=st.booleans(),
weighted=st.booleans(),
exact=st.booleans(),
mixed=st.booleans(),
use_cache=st.booleans(),
cache_algorithm=st.sampled_from(
split_table_batched_embeddings_ops.CacheAlgorithm
),
long_segments=st.booleans(),
pooling_mode=st.sampled_from(
split_table_batched_embeddings_ops.PoolingMode
),
use_cpu=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_backward_sgd( # noqa C901
self,
T,
D,
B,
log_E,
L,
fp16,
weighted,
exact,
mixed,
use_cache,
cache_algorithm,
long_segments,
pooling_mode,
use_cpu,
):
# NOTE: cache is not applicable to CPU version.
assume(not use_cpu or not use_cache)
# NOTE: limit (T * B * L * D) to avoid timeout for CPU version!
assume(not use_cpu or T * B * L * D <= 2048)
assume(
pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM
or not weighted
)
mode = (
"sum"
if pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM
else "mean"
)
# only non-exact supports caching
assume(not exact or not use_cache)
E = int(10 ** log_E)
if use_cpu:
D = (D + 15) // 16 * 4
else:
D = D * 4
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
div_round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)
for _ in range(T)
]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)
]
compute_device = split_table_batched_embeddings_ops.ComputeDevice.CUDA
if use_cpu:
managed = [
split_table_batched_embeddings_ops.EmbeddingLocation.HOST
] * T
compute_device = split_table_batched_embeddings_ops.ComputeDevice.CPU
elif use_cache:
managed = [
split_table_batched_embeddings_ops.EmbeddingLocation.MANAGED_CACHING
] * T
if mixed:
average_D = sum(Ds) // T
for t, d in enumerate(Ds):
managed[t] = (
split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE
if d < average_D
else managed[t]
)
else:
managed = [
np.random.choice(
[
split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE,
split_table_batched_embeddings_ops.EmbeddingLocation.MANAGED,
]
)
for _ in range(T)
]
bs = [
to_device(torch.nn.EmbeddingBag(E, D, mode=mode, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
if fp16 and not use_cpu:
# NOTE: CPU version of torch.nn.EmbeddingBag doesn't support fp16.
bs = [b.half() for b in bs]
feature_table_map = list(range(T))
table_to_replicate = T // 2
bs.insert(table_to_replicate, bs[table_to_replicate])
feature_table_map.insert(table_to_replicate, table_to_replicate)
xs = [
to_device(torch.from_numpy(
np.random.choice(range(Es[t]), size=(B, L), replace=True).astype(
np.int64
)
), use_cpu)
for t in feature_table_map
]
if long_segments and L > 0:
for x in xs:
x[:, 0] = 0
xws = [to_device(torch.randn(size=(B, L)), use_cpu) for _ in range(len(xs))]
xws_acc_type = copy.deepcopy(xws)
if fp16 and not use_cpu:
# NOTE: CPU version of torch.nn.EmbeddingBag doesn't support fp16.
xws = [xw.half() for xw in xws]
fs = (
[b_indices(b, x, use_cpu=use_cpu) for (b, x) in zip(bs, xs)]
if not weighted
else [
b_indices(b, x, per_sample_weights=xw.view(-1), use_cpu=use_cpu)
for (b, x, xw) in zip(bs, xs, xws)
]
)
gos = [torch.randn_like(f) for f in fs]
[f.backward(go) for (f, go) in zip(fs, gos)]
# do SGD update
lr = 0.05
del bs[table_to_replicate]
new_weights = [(b.weight - b.weight.grad * lr) for b in bs]
cc = split_table_batched_embeddings_ops.SplitTableBatchedEmbeddingBagsCodegen(
[(E, D, M, compute_device) for (E, D, M) in zip(Es, Ds, managed)],
optimizer=OptimType.EXACT_SGD,
feature_table_map=feature_table_map,
learning_rate=0.05,
fp16=fp16,
cache_algorithm=cache_algorithm,
pooling_mode=pooling_mode,
)
for t in range(T):
cc.split_embedding_weights()[t].data.copy_(bs[t].weight)
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
xw = torch.cat([xw.view(1, B, L) for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu)
fc2 = (
cc(indices, offsets)
if not weighted
else cc(indices, offsets, to_device(xw.contiguous().view(-1), use_cpu))
)
goc = torch.cat([go.view(B, -1) for go in gos], dim=1).contiguous()
fc2.backward(goc)
if use_cache:
cc.flush()
for t in range(T):
torch.testing.assert_allclose(
cc.split_embedding_weights()[t],
new_weights[t].half() if fp16 and use_cpu else new_weights[t],
atol=(1.0e-2 if long_segments else 5.0e-3) if fp16 else 1.0e-5,
rtol=(1.0e-2 if long_segments else 5.0e-3) if fp16 else 1.0e-5,
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
D_gradcheck=st.integers(min_value=1, max_value=2),
fp16=st.booleans(),
stochastic_rounding=st.booleans(),
weighted=st.booleans(),
row_wise=st.booleans(),
exact=st.booleans(),
mixed=st.booleans(),
use_cache=st.booleans(),
cache_algorithm=st.sampled_from(
split_table_batched_embeddings_ops.CacheAlgorithm
),
pooling_mode=st.sampled_from(
split_table_batched_embeddings_ops.PoolingMode
),
use_cpu=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_backward_adagrad( # noqa C901
self,
T,
D,
B,
log_E,
L,
D_gradcheck,
fp16,
stochastic_rounding,
weighted,
row_wise,
exact,
mixed,
use_cache,
cache_algorithm,
pooling_mode,
use_cpu,
):
# NOTE: cache is not applicable to CPU version.
assume(not use_cpu or not use_cache)
# NOTE: torch.autograd.gradcheck() is too time-consuming for CPU version
# so we have to limit (T * B * L * D)!
assume(not use_cpu or T * B * L * D <= 1024)
assume(
pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM
or not weighted
)
mode = (
"sum"
if pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM
else "mean"
)
# stochastic rounding only implemented for rowwise
assume(not stochastic_rounding or row_wise)
# exact only implemented for rowwise non-weighted
assume(not exact or (row_wise and not weighted))
# need unique indices for non-exact tests
assume(exact or int(10 ** log_E) > int(2.1 * B * L))
# only row-wise supports caching
assume(row_wise or not use_cache)
E = int(10 ** log_E)
if use_cpu:
D = (D + 15) // 16 * 4
else:
D = D * 4
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
div_round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)
for _ in range(T)
]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)
]
compute_device = split_table_batched_embeddings_ops.ComputeDevice.CUDA
if use_cpu:
managed = [
split_table_batched_embeddings_ops.EmbeddingLocation.HOST
] * T
compute_device = split_table_batched_embeddings_ops.ComputeDevice.CPU
elif use_cache:
managed = [
split_table_batched_embeddings_ops.EmbeddingLocation.MANAGED_CACHING
] * T
if mixed:
average_D = sum(Ds) // T
for t, d in enumerate(Ds):
managed[t] = (
split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE
if d < average_D
else managed[t]
)
else:
managed = [
np.random.choice(
[
split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE,
split_table_batched_embeddings_ops.EmbeddingLocation.MANAGED,
]
)
for _ in range(T)
]
bs = [
to_device(torch.nn.EmbeddingBag(E, D, mode=mode, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
if fp16 and not use_cpu:
# NOTE: CPU version of torch.nn.EmbeddingBag doesn't support fp16.
bs = [b.half() for b in bs]
feature_table_map = list(range(T))
if exact:
# autograd with shared embedding only works for exact
table_to_replicate = T // 2
bs.insert(table_to_replicate, bs[table_to_replicate])
feature_table_map.insert(table_to_replicate, table_to_replicate)
xs = [
to_device(torch.from_numpy(
np.random.choice(range(Es[t]), size=(B, L), replace=exact).astype(
np.int64
)
), use_cpu)
for t in feature_table_map
]
xws = [to_device(torch.randn(size=(B, L)), use_cpu) for _ in range(len(xs))]
xws_acc_type = copy.deepcopy(xws)
if fp16 and not use_cpu:
# NOTE: CPU version of torch.nn.EmbeddingBag doesn't support fp16.
xws = [xw.half() for xw in xws]
fs = (
[b_indices(b, x, use_cpu=use_cpu) for (b, x) in zip(bs, xs)]
if not weighted
else [
b_indices(b, x, per_sample_weights=xw.view(-1), use_cpu=use_cpu)
for (b, x, xw) in zip(bs, xs, xws)
]
)
gos = [torch.randn_like(f) for f in fs]
[f.backward(go) for (f, go) in zip(fs, gos)]
        # Adagrad hyper-parameters
lr = 0.5
eps = 0.2
cc = split_table_batched_embeddings_ops.SplitTableBatchedEmbeddingBagsCodegen(
[(E, D, M, compute_device) for (E, D, M) in zip(Es, Ds, managed)],
feature_table_map=feature_table_map,
optimizer=OptimType.EXACT_ROWWISE_ADAGRAD
if row_wise
else OptimType.EXACT_ADAGRAD,
learning_rate=lr,
eps=eps,
fp16=fp16,
stochastic_rounding=stochastic_rounding,
pooling_mode=pooling_mode,
)
if exact:
del bs[table_to_replicate]
for t in range(T):
cc.split_embedding_weights()[t].data.copy_(bs[t].weight)
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
xw = torch.cat([xw.view(1, B, L) for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu)
fc2 = (
cc(indices, offsets)
if not weighted
else cc(indices, offsets, to_device(xw.contiguous().view(-1), use_cpu))
)
fc2.backward(torch.cat([go.view(B, -1) for go in gos], dim=1))
cc.flush()
split_optimizer_states = [s for (s,) in cc.split_optimizer_states()]
for t in range(T):
ref_optimizer_state = bs[t].weight.grad.float().to_dense().pow(2)
torch.testing.assert_allclose(
split_optimizer_states[t].float(),
ref_optimizer_state.mean(dim=1) if row_wise else ref_optimizer_state,
atol=5.0e-3 if fp16 else 1.0e-4,
rtol=5.0e-3 if fp16 else 1.0e-4,
)
for t in range(T):
# optimizer_state = squares (no row-wise) or sum squares (row-wise)
torch.testing.assert_allclose(
cc.split_embedding_weights()[t].float(),
torch.addcdiv(
bs[t].weight.float(),
value=-lr,
tensor1=bs[t].weight.grad.float().to_dense(),
tensor2=split_optimizer_states[t]
.float()
.sqrt_()
.add_(eps)
.view(Es[t], 1 if row_wise else Ds[t]),
),
atol=5.0e-3 if fp16 else 1.0e-4,
rtol=5.0e-3 if fp16 else 1.0e-4,
)
if use_cpu:
D_gradcheck = (D_gradcheck + 15) // 16 * 4
else:
D_gradcheck = D_gradcheck * 4
cc = split_table_batched_embeddings_ops.SplitTableBatchedEmbeddingBagsCodegen(
[(E, D_gradcheck, M, compute_device) for (E, M) in zip(Es, managed)],
feature_table_map=feature_table_map,
optimizer=OptimType.EXACT_ROWWISE_ADAGRAD
if row_wise
else OptimType.EXACT_ADAGRAD,
learning_rate=0.0,
eps=eps,
fp16=fp16,
stochastic_rounding=stochastic_rounding,
# NOTE: only SUM pooling can work with per_sample_weights!
pooling_mode=split_table_batched_embeddings_ops.PoolingMode.SUM,
)
if use_cpu:
# NOTE: GPU version of SplitTableBatchedEmbeddingBagsCodegen doesn't support double.
cc = cc.double()
per_sample_weights = to_device(xw.contiguous().view(-1), use_cpu)
if use_cpu:
per_sample_weights = per_sample_weights.double()
per_sample_weights.requires_grad = True
indices.requires_grad = False
offsets.requires_grad = False
for param in cc.parameters():
param.requires_grad = False
torch.autograd.gradcheck(cc, (indices, offsets, per_sample_weights))
per_sample_weights = to_device(xw.contiguous().view(-1), use_cpu)
if use_cpu:
per_sample_weights = per_sample_weights.double()
per_sample_weights.requires_grad = True
indices.requires_grad = False
offsets.requires_grad = False
for param in cc.parameters():
param.requires_grad = False
y = cc(indices, offsets, per_sample_weights)
y.sum().backward()
indice_weight_grad_all = per_sample_weights.grad.clone().cpu()
T_ = len(xws)
feature_requires_grad = to_device(
torch.tensor(np.random.choice([0, 1], replace=True, size=(T_,))).int(), use_cpu
)
per_sample_weights = per_sample_weights.detach().clone()
per_sample_weights.requires_grad = True
y = cc(
indices,
offsets,
per_sample_weights,
feature_requires_grad=feature_requires_grad,
)
y.sum().backward()
indice_weight_grad_mask = per_sample_weights.grad.clone().cpu()
for t in range(T_):
if feature_requires_grad[t]:
torch.testing.assert_allclose(
indice_weight_grad_mask.view(T_, B, L)[t],
indice_weight_grad_all.view(T_, B, L)[t],
)
else:
torch.testing.assert_allclose(
indice_weight_grad_mask.view(T_, B, L)[t],
torch.zeros_like(indice_weight_grad_mask.view(T_, B, L)[t]),
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=64),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=1, max_value=20),
mixed=st.booleans(),
cache_algorithm=st.sampled_from(
split_table_batched_embeddings_ops.CacheAlgorithm
),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_cache_pipeline(
self, T, D, B, log_E, L, mixed, cache_algorithm
):
iters = 3
E = int(10 ** log_E)
D = D * 4
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
div_round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)
for _ in range(T)
]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)
]
managed = [
split_table_batched_embeddings_ops.EmbeddingLocation.MANAGED_CACHING
] * T
if mixed:
average_D = sum(Ds) // T
for t, d in enumerate(Ds):
managed[t] = (
split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE
if d < average_D
else managed[t]
)
cc_ref = (
split_table_batched_embeddings_ops.SplitTableBatchedEmbeddingBagsCodegen(
[
(
E,
D,
split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE,
split_table_batched_embeddings_ops.ComputeDevice.CUDA,
)
for (E, D) in zip(Es, Ds)
],
)
)
cc = split_table_batched_embeddings_ops.SplitTableBatchedEmbeddingBagsCodegen(
            [(E, D, M, split_table_batched_embeddings_ops.ComputeDevice.CUDA) for (E, D, M) in zip(Es, Ds, managed)],
cache_algorithm=cache_algorithm,
)
for t in range(T):
assert (
cc.split_embedding_weights()[t].size()
== cc_ref.split_embedding_weights()[t].size()
)
cc.split_embedding_weights()[t].data.copy_(
cc_ref.split_embedding_weights()[t]
)
requests = generate_requests(iters, B, T, L, min(Es), reuse=0.1)
grad_output = torch.randn(B, sum(Ds)).cuda()
for indices, offsets, _ in requests:
output = cc(indices, offsets)
output_ref = cc_ref(indices, offsets)
torch.testing.assert_allclose(output, output_ref)
output.backward(grad_output)
output_ref.backward(grad_output)
cc.flush()
for t in range(T):
torch.testing.assert_allclose(
cc.split_embedding_weights()[t], cc_ref.split_embedding_weights()[t]
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
stochastic_rounding=st.booleans(),
weighted=st.booleans(),
mixed=st.booleans(),
optimizer=st.sampled_from(
[
OptimType.ADAM,
OptimType.EXACT_ADAGRAD,
OptimType.EXACT_ROWWISE_ADAGRAD,
OptimType.LAMB,
OptimType.LARS_SGD,
OptimType.PARTIAL_ROWWISE_ADAM,
OptimType.PARTIAL_ROWWISE_LAMB,
]
),
long_segments=st.booleans(),
pooling_mode=st.sampled_from(
split_table_batched_embeddings_ops.PoolingMode
),
use_cpu=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_backward_optimizers( # noqa C901
self,
T,
D,
B,
log_E,
L,
stochastic_rounding,
weighted,
mixed,
optimizer,
long_segments,
pooling_mode,
use_cpu,
):
# NOTE: limit (T * B * L * D) to avoid timeout for CPU version!
assume(not use_cpu or T * B * L * D <= 2048)
assume(
pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM
or not weighted
)
mode = (
"sum"
if pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM
else "mean"
)
E = int(10 ** log_E)
if use_cpu:
D = (D + 15) // 16 * 4
else:
D = D * 4
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
div_round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)
for _ in range(T)
]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)
]
compute_device = split_table_batched_embeddings_ops.ComputeDevice.CUDA
if use_cpu:
managed = [
split_table_batched_embeddings_ops.EmbeddingLocation.HOST
] * T
compute_device = split_table_batched_embeddings_ops.ComputeDevice.CPU
else:
managed = [
np.random.choice(
[
split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE,
split_table_batched_embeddings_ops.EmbeddingLocation.MANAGED,
]
)
for _ in range(T)
]
bs = [
to_device(torch.nn.EmbeddingBag(E, D, mode=mode, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
xs = [
to_device(torch.from_numpy(
np.random.choice(range(e), size=(B, L), replace=True).astype(np.int64)
), use_cpu)
for e in Es
]
if long_segments and L > 0:
for x in xs:
x[:, 0] = 0
xws = [to_device(torch.randn(size=(B, L)), use_cpu) for _ in range(T)]
xws_acc_type = copy.deepcopy(xws)
fs = (
[b_indices(b, x, use_cpu=use_cpu) for (b, x) in zip(bs, xs)]
if not weighted
else [
b_indices(b, x, per_sample_weights=xw.view(-1), use_cpu=use_cpu)
for (b, x, xw) in zip(bs, xs, xws)
]
)
gos = [torch.randn_like(f) for f in fs]
[f.backward(go) for (f, go) in zip(fs, gos)]
        # optimizer hyper-parameters
optimizer_kwargs = {"learning_rate": 0.5}
(lr, eps, beta1, beta2, weight_decay, momentum, eta) = (
0.5,
1e-4,
0.9,
0.99,
0.01,
0.9,
0.01,
)
if optimizer in (OptimType.EXACT_ROWWISE_ADAGRAD, OptimType.EXACT_ADAGRAD):
optimizer_kwargs["eps"] = eps
if optimizer in (OptimType.PARTIAL_ROWWISE_ADAM, OptimType.ADAM):
optimizer_kwargs["eps"] = eps
optimizer_kwargs["beta1"] = beta1
optimizer_kwargs["beta2"] = beta2
optimizer_kwargs["weight_decay"] = weight_decay
if optimizer in (OptimType.PARTIAL_ROWWISE_LAMB, OptimType.LAMB):
optimizer_kwargs["eps"] = eps
optimizer_kwargs["beta1"] = beta1
optimizer_kwargs["beta2"] = beta2
optimizer_kwargs["weight_decay"] = weight_decay
if optimizer == OptimType.LARS_SGD:
optimizer_kwargs["weight_decay"] = weight_decay
optimizer_kwargs["momentum"] = momentum
optimizer_kwargs["eta"] = eta
cc = split_table_batched_embeddings_ops.SplitTableBatchedEmbeddingBagsCodegen(
[(E, D, M, compute_device) for (E, D, M) in zip(Es, Ds, managed)],
optimizer=optimizer,
stochastic_rounding=stochastic_rounding,
pooling_mode=pooling_mode,
**optimizer_kwargs,
)
for t in range(T):
cc.split_embedding_weights()[t].data.copy_(bs[t].weight)
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
xw = torch.cat([xw.view(1, B, L) for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu)
fc2 = (
cc(indices, offsets)
if not weighted
else cc(indices, offsets, to_device(xw.contiguous().view(-1), use_cpu))
)
fc2.backward(torch.cat([go.view(B, -1) for go in gos], dim=1))
cc.flush()
split_optimizer_states = cc.split_optimizer_states()
assert len(split_optimizer_states) == T
split_weights = cc.split_embedding_weights()
if optimizer in (OptimType.EXACT_ROWWISE_ADAGRAD, OptimType.EXACT_ADAGRAD):
rowwise = optimizer == OptimType.EXACT_ROWWISE_ADAGRAD
for t in range(T):
(m1,) = split_optimizer_states[t]
m1_ref = (
bs[t].weight.grad.to_dense().pow(2)
if not rowwise
else bs[t].weight.grad.to_dense().pow(2).mean(dim=1)
)
torch.testing.assert_allclose(
m1.float(), m1_ref.float(), atol=1.0e-4, rtol=1.0e-4
)
weights_new = split_weights[t]
weights_ref = bs[t].weight - lr * bs[t].weight.grad.to_dense() / (
torch.sqrt(
m1_ref if not rowwise else m1_ref.view(m1_ref.numel(), 1)
)
+ eps
)
# TODO: why is tolerance off here?
torch.testing.assert_allclose(
weights_new.float(), weights_ref.float(), atol=1.0e-2, rtol=1.0e-2
)
if optimizer in (OptimType.PARTIAL_ROWWISE_ADAM, OptimType.ADAM):
rowwise = optimizer == OptimType.PARTIAL_ROWWISE_ADAM
for t in range(T):
(m1, m2) = split_optimizer_states[t]
m2_ref = (
bs[t].weight.grad.to_dense().pow(2)
if not rowwise
else bs[t].weight.grad.to_dense().pow(2).mean(dim=1)
) * (1.0 - beta2)
torch.testing.assert_allclose(m2, m2_ref, atol=1.0e-4, rtol=1.0e-4)
m1_ref = bs[t].weight.grad.to_dense() * (1.0 - beta1)
torch.testing.assert_allclose(m1, m1_ref, atol=1.0e-4, rtol=1.0e-4)
iter_ = cc.iter.item()
v_hat_t = m2_ref / (1 - beta2 ** iter_)
v_hat_t = v_hat_t if not rowwise else v_hat_t.view(v_hat_t.numel(), 1)
m_hat_t = m1_ref / (1 - beta1 ** iter_)
weights_new = split_weights[t]
weights_ref = (
torch.addcdiv(
bs[t].weight,
value=-lr,
tensor1=m_hat_t,
tensor2=v_hat_t.sqrt_().add_(eps),
)
- lr * weight_decay * bs[t].weight
)
torch.testing.assert_allclose(
weights_new.index_select(dim=0, index=x[t].view(-1)),
weights_ref.index_select(dim=0, index=x[t].view(-1)),
atol=1.0e-3,
rtol=1.0e-3,
)
if optimizer in (OptimType.PARTIAL_ROWWISE_LAMB, OptimType.LAMB):
rowwise = optimizer == OptimType.PARTIAL_ROWWISE_LAMB
for t in range(T):
(m1, m2) = split_optimizer_states[t]
m2_ref = (
bs[t].weight.grad.to_dense().pow(2)
if not rowwise
else bs[t].weight.grad.to_dense().pow(2).mean(dim=1)
) * (1.0 - beta2)
torch.testing.assert_allclose(m2, m2_ref, atol=1.0e-4, rtol=1.0e-4)
m1_ref = bs[t].weight.grad.to_dense() * (1.0 - beta1)
torch.testing.assert_allclose(m1, m1_ref, atol=1.0e-4, rtol=1.0e-4)
iter_ = cc.iter.item()
v_hat_t = m2_ref / (1 - beta2 ** iter_)
v_hat_t = v_hat_t if not rowwise else v_hat_t.view(v_hat_t.numel(), 1)
m_hat_t = m1_ref / (1 - beta1 ** iter_)
rtw = (m_hat_t / (torch.sqrt(v_hat_t) + eps)) + weight_decay * bs[
t
].weight
true_ratio = torch.linalg.norm(bs[t].weight, dim=1, ord=2).view(
m1.shape[0], 1
) / torch.linalg.norm(rtw, dim=1, ord=2).view(m1.shape[0], 1)
weights_new = split_weights[t]
weights_ref = bs[t].weight - lr * true_ratio * rtw
torch.testing.assert_allclose(
weights_new.index_select(dim=0, index=x[t].view(-1)),
weights_ref.index_select(dim=0, index=x[t].view(-1)),
atol=1.0e-3,
rtol=1.0e-3,
)
if optimizer == OptimType.LARS_SGD:
for t in range(T):
(m1,) = split_optimizer_states[t]
weight_norm = torch.linalg.norm(bs[t].weight, dim=1, ord=2).view(
m1.shape[0], 1
)
grad_norm = torch.linalg.norm(
bs[t].weight.grad.to_dense(), dim=1, ord=2
).view(m1.shape[0], 1)
adjusted_lr = (
lr * eta * weight_norm / (grad_norm + weight_decay * weight_norm)
)
m1_ref = adjusted_lr * (
bs[t].weight.grad.to_dense() + weight_decay * bs[t].weight
)
torch.testing.assert_allclose(
m1.index_select(dim=0, index=x[t].view(-1)),
m1_ref.index_select(dim=0, index=x[t].view(-1)),
atol=1.0e-4,
rtol=1.0e-4,
)
weights_new = split_weights[t]
weights_ref = bs[t].weight - m1_ref
torch.testing.assert_allclose(
weights_new.index_select(dim=0, index=x[t].view(-1)),
weights_ref.index_select(dim=0, index=x[t].view(-1)),
atol=1.0e-4,
rtol=1.0e-4,
)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jianyuh/param",
"score": 2
} |
#### File: comms/pt/dlrm_data.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import numpy as np
from numpy import random as ra
import torch
from torch.utils.data import Dataset # , RandomSampler
class RandomDataset(Dataset):
""" Uniform distribution """
def __init__(
self,
m_den,
ln_emb,
data_size,
num_batches,
mini_batch_size,
num_indices_per_lookup,
num_indices_per_lookup_fixed,
num_targets=1,
round_targets=False,
data_generation="random",
trace_file="",
enable_padding=False,
reset_seed_on_access=False,
rand_seed=0
):
# compute batch size
nbatches = int(np.ceil((data_size * 1.0) / mini_batch_size))
if num_batches != 0:
nbatches = num_batches
data_size = nbatches * mini_batch_size
# print("Total number of batches %d" % nbatches)
# save args (recompute data_size if needed)
self.m_den = m_den
self.ln_emb = ln_emb
self.data_size = data_size
self.num_batches = nbatches
self.mini_batch_size = mini_batch_size
self.num_indices_per_lookup = num_indices_per_lookup
self.num_indices_per_lookup_fixed = num_indices_per_lookup_fixed
self.num_targets = num_targets
self.round_targets = round_targets
self.data_generation = data_generation
self.trace_file = trace_file
self.enable_padding = enable_padding
self.reset_seed_on_access = reset_seed_on_access
self.rand_seed = rand_seed
def reset_numpy_seed(self, numpy_rand_seed):
np.random.seed(numpy_rand_seed)
# torch.manual_seed(numpy_rand_seed)
def __getitem__(self, index):
if isinstance(index, slice):
return [
self[idx] for idx in range(
index.start or 0, index.stop or len(self), index.step or 1
)
]
# WARNING: reset seed on access to first element
# (e.g. if same random samples needed across epochs)
if self.reset_seed_on_access and index == 0:
self.reset_numpy_seed(self.rand_seed)
# number of data points in a batch
n = min(self.mini_batch_size, self.data_size - (index * self.mini_batch_size))
# generate a batch of dense and sparse features
if self.data_generation == "random":
(X, lS_o, lS_i) = generate_uniform_input_batch(
self.m_den,
self.ln_emb,
n,
self.num_indices_per_lookup,
self.num_indices_per_lookup_fixed
)
# generate a batch of target (probability of a click)
T = generate_random_output_batch(n, self.num_targets, self.round_targets)
return (X, lS_o, lS_i, T)
def __len__(self):
        # WARNING: note that we produce batches of outputs in __getitem__
# therefore we should use num_batches rather than data_size below
return self.num_batches
def collate_wrapper_random(list_of_tuples):
# where each tuple is (X, lS_o, lS_i, T)
(X, lS_o, lS_i, T) = list_of_tuples[0]
return (X,
torch.stack(lS_o),
lS_i,
T)
def make_random_data_and_loader(args, ln_emb, m_den):
train_data = RandomDataset(
m_den,
ln_emb,
args.data_size,
args.num_batches,
args.mini_batch_size,
args.num_indices_per_lookup,
args.num_indices_per_lookup_fixed,
1, # num_targets
args.round_targets,
args.data_generation,
args.data_trace_file,
args.data_trace_enable_padding,
reset_seed_on_access=True,
rand_seed=args.numpy_rand_seed
) # WARNING: generates a batch of lookups at once
train_loader = torch.utils.data.DataLoader(
train_data,
batch_size=1,
shuffle=False,
num_workers=args.num_workers,
collate_fn=collate_wrapper_random,
pin_memory=False,
drop_last=False, # True
)
return train_data, train_loader
def generate_random_output_batch(n, num_targets, round_targets=False):
# target (probability of a click)
if round_targets:
P = np.round(ra.rand(n, num_targets).astype(np.float32)).astype(np.float32)
else:
P = ra.rand(n, num_targets).astype(np.float32)
return torch.tensor(P)
# uniform distribution (input data)
def generate_uniform_input_batch(
m_den,
ln_emb,
n,
num_indices_per_lookup,
num_indices_per_lookup_fixed,
):
# dense feature
#Xt = torch.tensor(ra.rand(n, m_den).astype(np.float32))
Xt = torch.tensor(ra.rand(1, m_den).astype(np.float32))
# sparse feature (sparse indices)
lS_emb_offsets = []
lS_emb_indices = []
# for each embedding generate a list of n lookups,
# where each lookup is composed of multiple sparse indices
for size in ln_emb:
lS_batch_offsets = []
lS_batch_indices = []
offset = 0
for _ in range(n):
# num of sparse indices to be used per embedding (between
if num_indices_per_lookup_fixed:
sparse_group_size = np.int64(num_indices_per_lookup)
else:
# random between [1,num_indices_per_lookup])
r = ra.random(1)
sparse_group_size = np.int64(
np.round(max([1.0], r * min(size, num_indices_per_lookup)))
)
# sparse indices to be used per embedding
r = ra.random(sparse_group_size)
sparse_group = np.unique(np.round(r * (size - 1)).astype(np.int64))
# reset sparse_group_size in case some index duplicates were removed
sparse_group_size = np.int64(sparse_group.size)
# store lengths and indices
lS_batch_offsets += [offset]
lS_batch_indices += sparse_group.tolist()
# update offset for next iteration
offset += sparse_group_size
lS_emb_offsets.append(torch.tensor(lS_batch_offsets))
lS_emb_indices.append(torch.tensor(lS_batch_indices))
return (Xt, lS_emb_offsets, lS_emb_indices)
class SyntheticDataset(Dataset):
def __init__(
self,
mini_batch_size,
nbatches=1,
synthetic_data_folder="./synthetic_data/syn_data_bs65536/",
):
self.synthetic_data_folder = synthetic_data_folder
self.num_batches = nbatches
self.mini_batch_size = mini_batch_size
self.X = torch.load(f"{self.synthetic_data_folder}/X_0.pt")
self.lS_o = torch.load(f"{self.synthetic_data_folder}/lS_o_0.pt")
self.lS_i = torch.load(f"{self.synthetic_data_folder}/lS_i_0.pt")
self.T = torch.load(f"{self.synthetic_data_folder}/T_0.pt")
# print('data loader initiated ...')
def __getitem__(self, index):
sInd = index * self.mini_batch_size
eInd = sInd + self.mini_batch_size
if sInd >= len(self.X):
sys.exit(f' mini_batch_size({self.mini_batch_size}) * '
f'num_batches({self.num_batches}) has to be less'
f' than size of data({len(self.X)})'
)
X = self.X[sInd:eInd]
lS_o = [i[:][sInd:eInd] - i[:][sInd] for i in self.lS_o]
if eInd < len(self.lS_o[0]):
lS_i = [val[self.lS_o[ind][sInd]:self.lS_o[ind][eInd]] for ind, val in enumerate(self.lS_i)]
elif sInd < len(self.lS_o[0]):
lS_i = [val[self.lS_o[ind][sInd]:] for ind, val in enumerate(self.lS_i)]
T = self.T[sInd:eInd]
return (X, lS_o, lS_i, T)
def __len__(self):
return self.num_batches
def synthetic_data_loader(args, ln_emb, m_den):
train_data = SyntheticDataset(
args.mini_batch_size,
nbatches=args.num_batches,
synthetic_data_folder=args.synthetic_data_folder,
)
train_loader = torch.utils.data.DataLoader(
train_data,
batch_size=1,
shuffle=False,
num_workers=args.num_workers,
collate_fn=collate_wrapper_random,
pin_memory=False,
drop_last=False,
)
return train_data, train_loader
def data_loader(args, ln_emb, m_den):
data_gens = {"random": make_random_data_and_loader,
"synthetic": synthetic_data_loader,
}
train_data, train_ld = data_gens[args.data_generation](args, ln_emb, m_den)
return train_data, train_ld
``` |
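To get a feel for the shapes `generate_uniform_input_batch` returns: the dense features come back as a `[1, m_den]` tensor, and each embedding table gets one offsets tensor (one entry per sample) plus one flat index tensor. A small usage sketch — it assumes the file above is importable as `dlrm_data`, and the table sizes are invented for the example:
```python
import numpy as np
from dlrm_data import generate_uniform_input_batch  # the module defined above

ln_emb = np.array([100, 50])          # two embedding tables (hypothetical sizes)
X, lS_o, lS_i = generate_uniform_input_batch(
    m_den=16,                         # dense feature width
    ln_emb=ln_emb,
    n=4,                              # samples in the mini-batch
    num_indices_per_lookup=3,
    num_indices_per_lookup_fixed=False,
)
print(X.shape)                        # torch.Size([1, 16])
print([o.shape for o in lS_o])        # one offsets tensor of length 4 per table
print([i.dtype for i in lS_i])        # flat int64 index tensors, one per table
```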
{
"source": "JianyuTANG/GB-To-UTF8",
"score": 4
} |
#### File: JianyuTANG/GB-To-UTF8/to_utf8.py
```python
import sys
import os
origin_name = sys.argv[1]
if len(sys.argv) == 3:
to_name = sys.argv[2]
elif len(sys.argv) == 2:
to_name = origin_name
else:
print("ERROR: wrong arg number")
os._exit(0)
def write_to_file(data, filename):
    try:
        with open(filename, 'wb') as f:
            f.write(data.encode('utf-8'))
        return
    except OSError:
        print('ERROR: fail to create and write file: ' + filename)
with open(origin_name, 'rb') as f:
data = f.read()
try:
data = data.decode('utf-8')
print('the original file is utf-8')
print('no need to change')
os._exit(0)
except:
pass
try:
data = data.decode('gb2312')
f.close()
write_to_file(data, to_name)
print('the original encoding is gb2312')
print('already changed to UTF-8 to: ' + to_name)
os._exit(0)
except:
pass
try:
data = data.decode('gbk')
f.close()
write_to_file(data, to_name)
print('the original encoding is gbk')
print('already changed to UTF-8 to: ' + to_name)
os._exit(0)
except:
pass
try:
data = data.decode('gb18030')
f.close()
write_to_file(data, to_name)
print('the original encoding is gb18030')
print('already changed to UTF-8 to: ' + to_name)
os._exit(0)
except:
pass
print('sorry, transfer failed')
``` |
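The script's decode-and-fall-back chain can also be expressed as a small reusable helper instead of repeated try/except blocks; a sketch of the same idea, with the encoding order mirroring the script above:
```python
def decode_with_fallback(raw, encodings=('utf-8', 'gb2312', 'gbk', 'gb18030')):
    """Try each candidate encoding in turn and return (text, encoding_used)."""
    for enc in encodings:
        try:
            return raw.decode(enc), enc
        except UnicodeDecodeError:
            continue
    raise ValueError('none of the candidate encodings matched')


# usage (the path is a placeholder)
with open('some_file.txt', 'rb') as f:
    text, enc = decode_with_fallback(f.read())
print('decoded as ' + enc)
```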
{
"source": "JianyyuWang/spconv",
"score": 2
} |
#### File: spconv/pytorch/core.py
```python
from typing import List, Optional, Union
import numpy as np
import torch
from spconv.core import ConvAlgo
from spconv.pytorch.constants import PYTORCH_VERSION
from spconv.pytorch.ops import ThrustSortAllocator
from spconv.tools import CUDAKernelTimer
if PYTORCH_VERSION >= [1, 8, 0]:
try:
import torch.fx
if PYTORCH_VERSION >= [1, 10, 0]:
from torch.fx import ProxyableClassMeta
else:
from torch.fx.symbolic_trace import ProxyableClassMeta
SpConvTensorMeta = ProxyableClassMeta
except:
class SpConvTensorMeta(type):
pass
else:
class SpConvTensorMeta(type):
pass
class IndiceData(object):
def __init__(self, out_indices, indices, indice_pairs, indice_pair_num,
spatial_shape, out_spatial_shape, is_subm: bool, algo: ConvAlgo):
self.out_indices = out_indices
self.indices = indices
self.indice_pairs = indice_pairs
self.indice_pair_num = indice_pair_num
self.spatial_shape = spatial_shape
self.out_spatial_shape = out_spatial_shape
self.is_subm = is_subm
self.algo = algo
class ImplicitGemmIndiceData(object):
def __init__(self, out_indices: torch.Tensor, indices: torch.Tensor,
pair_fwd: torch.Tensor, pair_bwd: torch.Tensor,
pair_mask_fwd_splits: List[torch.Tensor],
pair_mask_bwd_splits: List[torch.Tensor],
mask_argsort_fwd_splits: List[torch.Tensor],
mask_argsort_bwd_splits: List[torch.Tensor],
masks: List[np.ndarray], spatial_shape,
out_spatial_shape, is_subm: bool, algo: ConvAlgo):
self.out_indices = out_indices
self.indices = indices
self.pair_fwd = pair_fwd
self.pair_bwd = pair_bwd
self.pair_mask_fwd_splits = pair_mask_fwd_splits
self.pair_mask_bwd_splits = pair_mask_bwd_splits
self.mask_argsort_fwd_splits = mask_argsort_fwd_splits
self.mask_argsort_bwd_splits = mask_argsort_bwd_splits
self.masks = masks
self.spatial_shape = spatial_shape
self.out_spatial_shape = out_spatial_shape
self.is_subm = is_subm
self.algo = algo
def scatter_nd(indices, updates, shape):
"""pytorch edition of tensorflow scatter_nd.
this function don't contain except handle code. so use this carefully
when indice repeats, don't support repeat add which is supported
in tensorflow.
"""
ret = torch.zeros(*shape, dtype=updates.dtype, device=updates.device)
ndim = indices.shape[-1]
output_shape = list(indices.shape[:-1]) + shape[indices.shape[-1]:]
flatted_indices = indices.view(-1, ndim)
slices = [flatted_indices[:, i] for i in range(ndim)]
slices += [Ellipsis]
ret[slices] = updates.view(*output_shape)
return ret
# ProxyableClassMeta is used for TensorRT conversion in future.
class SparseConvTensor(metaclass=SpConvTensorMeta):
def __init__(self,
features: torch.Tensor,
indices: torch.Tensor,
spatial_shape: List[int],
batch_size: int,
grid: Optional[torch.Tensor] = None,
voxel_num: Optional[torch.Tensor] = None,
indice_dict: Optional[dict] = None,
benchmark: bool = False,
permanent_thrust_allocator: bool = False,
enable_timer: bool = False):
"""
Args:
features: [num_points, num_features] feature tensor
indices: [num_points, ndim + 1] indice tensor. batch index saved in indices[:, 0]
spatial_shape: spatial shape of your sparse data
batch_size: batch size of your sparse data
grid: pre-allocated grid tensor. should be used when the volume of spatial shape
is very large.
benchmark: whether to enable benchmark. if enabled, all sparse operators will be record to
SparseConvTensor.
"""
ndim = indices.shape[1] - 1
assert features.ndim == 2
assert indices.ndim == 2
assert len(spatial_shape) == ndim, "spatial shape must equal to ndim"
assert indices.dtype == torch.int32, "only support int32"
assert batch_size > 0
self._features = features
self.indices = indices
self.spatial_shape = spatial_shape
self.batch_size = batch_size
if indice_dict is None:
indice_dict = {}
self.indice_dict = indice_dict
if grid is None:
grid = torch.Tensor() # empty tensor
self.grid = grid
self.voxel_num = voxel_num # for tensorrt
self.benchmark = benchmark
self.benchmark_record = {}
self.thrust_allocator: Optional[ThrustSortAllocator] = None
if permanent_thrust_allocator:
self.thrust_allocator = ThrustSortAllocator(features.device)
self._timer = CUDAKernelTimer(enable_timer)
def replace_feature(self, feature):
"""we need to replace x.features = F.relu(x.features) with x = x.replace_feature(F.relu(x.features))
due to limit of torch.fx
"""
new_spt = SparseConvTensor(feature, self.indices, self.spatial_shape,
self.batch_size, self.grid, self.voxel_num,
self.indice_dict)
new_spt.benchmark = self.benchmark
new_spt.benchmark_record = self.benchmark_record
new_spt.thrust_allocator = self.thrust_allocator
new_spt._timer = self._timer
return new_spt
@property
def features(self):
return self._features
@features.setter
def features(self, val):
msg = (
"you can't set feature directly, use 'x = x.replace_feature(your_new_feature)'"
" to generate new SparseConvTensor instead.")
raise ValueError(msg)
@classmethod
def from_dense(cls, x: torch.Tensor):
"""create sparse tensor fron channel last dense tensor by to_sparse
x must be NHWC tensor, channel last
"""
x_sp = x.to_sparse(x.ndim - 1)
spatial_shape = list(x_sp.shape[1:-1])
batch_size = x_sp.shape[0]
indices_th = x_sp.indices().permute(1, 0).contiguous().int()
features_th = x_sp.values()
return cls(features_th, indices_th, spatial_shape, batch_size)
@property
def spatial_size(self):
return np.prod(self.spatial_shape)
def find_indice_pair(
self, key) -> Optional[Union[IndiceData, ImplicitGemmIndiceData]]:
if key is None:
return None
if key in self.indice_dict:
return self.indice_dict[key]
return None
def dense(self, channels_first: bool = True):
output_shape = [self.batch_size] + list(
self.spatial_shape) + [self.features.shape[1]]
res = scatter_nd(
self.indices.to(self.features.device).long(), self.features,
output_shape)
if not channels_first:
return res
ndim = len(self.spatial_shape)
trans_params = list(range(0, ndim + 1))
trans_params.insert(1, ndim + 1)
return res.permute(*trans_params).contiguous()
# remove this due to limit of torch.fx
# @property
# def sparity(self):
# return self.indices.shape[0] / np.prod(
# self.spatial_shape) / self.batch_size
def shadow_copy(self) -> "SparseConvTensor":
"""create a new spconv tensor with all member unchanged"""
tensor = SparseConvTensor(self.features, self.indices,
self.spatial_shape, self.batch_size,
self.grid, self.voxel_num, self.indice_dict,
self.benchmark)
tensor.benchmark_record = self.benchmark_record
tensor.thrust_allocator = self.thrust_allocator
tensor._timer = self._timer
return tensor
``` |
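To make the `SparseConvTensor` API above concrete, here is a minimal round trip through `from_dense` and `dense()`. It assumes spconv 2.x is installed so the class above is importable; the tensor shape is arbitrary:
```python
import torch
from spconv.pytorch import SparseConvTensor

# NHWC dense input: batch 2, a 4x4 spatial grid, 3 channels, mostly zeros
dense = torch.zeros(2, 4, 4, 3)
dense[0, 1, 2] = torch.tensor([1.0, 2.0, 3.0])
dense[1, 0, 3] = torch.tensor([4.0, 5.0, 6.0])

sp = SparseConvTensor.from_dense(dense)
print(sp.features.shape)    # [2, 3] -> one feature row per non-zero location
print(sp.indices.shape)     # [2, 3] -> (batch, h, w) int32 coordinates
print(sp.spatial_shape)     # [4, 4]

# dense() scatters the features back via scatter_nd; channels_first=False keeps NHWC
restored = sp.dense(channels_first=False)
print(torch.allclose(restored, dense))   # True
```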
{
"source": "jianzhangbjz/operator-courier",
"score": 3
} |
#### File: operator-courier/operatorcourier/api.py
```python
import os
import logging
from tempfile import TemporaryDirectory
import yaml
import json
from operatorcourier.build import BuildCmd
from operatorcourier.validate import ValidateCmd
from operatorcourier.push import PushCmd
from operatorcourier.format import format_bundle
from operatorcourier.nest import nest_bundles
logger = logging.getLogger(__name__)
def build_and_verify(source_dir=None, yamls=None, ui_validate_io=False,
validation_output=None):
"""Build and verify constructs an operator bundle from
a set of files and then verifies it for usefulness and accuracy.
It returns the bundle as a string.
:param source_dir: Path to local directory of yaml files to be read.
:param yamls: List of yaml strings to create bundle with
"""
if source_dir is not None and yamls is not None:
logger.error("Both source_dir and yamls cannot be defined.")
raise TypeError(
"Both source_dir and yamls cannot be specified on function call.")
yaml_files = []
if source_dir is not None:
for filename in os.listdir(source_dir):
if filename.endswith(".yaml") or filename.endswith(".yml"):
with open(source_dir + "/" + filename) as f:
yaml_files.append(f.read())
elif yamls is not None:
yaml_files = yamls
bundle = BuildCmd().build_bundle(yaml_files)
valid, validation_results_dict = ValidateCmd(ui_validate_io).validate(bundle)
if not valid:
bundle = None
logger.error("Bundle failed validation.")
raise ValueError("Resulting bundle is invalid, input yaml is improperly defined.")
else:
bundle = format_bundle(bundle)
if validation_output is not None:
with open(validation_output, 'w') as f:
f.write(json.dumps(validation_results_dict) + "\n")
return bundle
def build_verify_and_push(namespace, repository, revision, token,
source_dir=None, yamls=None,
validation_output=None):
"""Build verify and push constructs the operator bundle,
verifies it, and pushes it to an external app registry.
Currently the only supported app registry is the one
located at Quay.io (https://quay.io/cnr/api/v1/packages/)
:param namespace: Quay namespace where the repository we are
pushing the bundle is located.
:param repository: Application repository name the application is bundled for.
:param revision: Release version of the bundle.
:param source_dir: Path to local directory of yaml files to be read
:param yamls: List of yaml strings to create bundle with
"""
bundle = build_and_verify(source_dir, yamls, validation_output=validation_output)
with TemporaryDirectory() as temp_dir:
with open('%s/bundle.yaml' % temp_dir, 'w') as outfile:
yaml.dump(bundle, outfile, default_flow_style=False)
outfile.flush()
PushCmd().push(temp_dir, namespace, repository, revision, token)
def nest(source_dir, registry_dir):
"""Nest takes a flat bundle directory and version nests it
to eventually be consumed as part of an operator-registry image build.
:param source_dir: Path to local directory of yaml files to be read
:param output_dir: Path of your directory to be populated.
If directory does not exist, it will be created.
"""
yaml_files = []
if source_dir is not None:
for filename in os.listdir(source_dir):
if filename.endswith(".yaml") or filename.endswith(".yml"):
with open(source_dir + "/" + filename) as f:
yaml_files.append(f.read())
with TemporaryDirectory() as temp_dir:
nest_bundles(yaml_files, registry_dir, temp_dir)
```
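The tests in the next file cover `build_and_verify` with both `source_dir` and a `yamls` list; the `validation_output` argument is easy to overlook, so here is a short sketch of using it to capture the validation report (the output path is a placeholder):
```python
from operatorcourier import api

bundle = api.build_and_verify(
    source_dir="tests/test_files/bundles/api/bundle1",  # flat dir of CRD/CSV/package yaml
    ui_validate_io=False,
    validation_output="/tmp/validation.json",           # placeholder: where the JSON report lands
)
# 'bundle' now holds the formatted bundle; the validation results dict was
# written to /tmp/validation.json as a single JSON line.
```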
#### File: operator-courier/tests/test_api.py
```python
import pytest
import yaml
from operatorcourier import api
from operatorcourier.format import unformat_bundle
@pytest.mark.parametrize('directory,expected', [
("tests/test_files/bundles/api/bundle1",
"tests/test_files/bundles/api/bundle1/results/bundle.yaml"),
])
def test_make_bundle(directory, expected):
bundle = api.build_and_verify(source_dir=directory)
with open(expected, "r") as expected_file:
expected_bundle = yaml.safe_load(expected_file)
assert unformat_bundle(bundle) == unformat_bundle(expected_bundle)
@pytest.mark.parametrize('yaml_files,expected', [
(
[
"tests/test_files/bundles/api/bundle1/crd.yml",
"tests/test_files/bundles/api/bundle1/csv.yaml",
"tests/test_files/bundles/api/bundle1/packages.yaml"
],
"tests/test_files/bundles/api/bundle1/results/bundle.yaml"
),
])
def test_make_bundle_with_yaml_list(yaml_files, expected):
yamls = []
for file in yaml_files:
with open(file, "r") as yaml_file:
yamls.append(yaml_file.read())
bundle = api.build_and_verify(yamls=yamls)
with open(expected, "r") as expected_file:
expected_bundle = yaml.safe_load(expected_file)
assert unformat_bundle(bundle) == unformat_bundle(expected_bundle)
``` |
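`nest` is the one entry point in `api.py` not exercised by the tests above; a typical call just points it at a flat bundle directory and a registry directory to populate (the registry path below is a placeholder):
```python
from operatorcourier import api

# Version-nest a flat bundle so it can feed an operator-registry image build.
api.nest(
    source_dir="tests/test_files/bundles/api/bundle1",
    registry_dir="/tmp/my-operator-registry",   # created/populated by the call
)
```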
{
"source": "jianzhangcs/DenoiSeg",
"score": 2
} |
#### File: reproducibility/cluster_execution/validation_evaluation.py
```python
from denoiseg.models import DenoiSeg, DenoiSegConfig
from skimage import io
import csv
import numpy as np
import pickle
import os
from os.path import join, exists
from os import makedirs as mkdir
from denoiseg.utils.seg_utils import *
from denoiseg.utils.compute_precision_threshold import measure_precision, measure_seg
import argparse
import json
def main():
os.environ['HDF5_USE_FILE_LOCKING'] = 'FALSE'
parser = argparse.ArgumentParser(description="Noise2Seg headless score-on-validation-data-script.")
parser.add_argument('--temp_conf')
args = parser.parse_args()
with open(args.temp_conf) as f:
conf = json.load(f)
# load data
trainval_data = np.load(conf['train_data_path'])
val_images = trainval_data['X_val'].astype(np.float32)
val_masks = trainval_data['Y_val']
print("Shape of val_images: ", val_images.shape, ", Shape of val_masks: ", val_masks.shape)
print("Validation Data \n..................")
X_val, Y_val_masks = val_images, val_masks
# one-hot-encoding
X_val = X_val[...,np.newaxis]
Y_val = convert_to_oneHot(Y_val_masks)
print("Shape of validation images: ", X_val.shape, ", Shape of validation masks: ", Y_val.shape)
# load model
n2s_model = DenoiSeg(None, conf['model_name'], conf['basedir'])
# compute AP results
ap_threshold, validation_ap_score = n2s_model.optimize_thresholds(val_images, Y_val_masks, measure=measure_precision())
print("Average precision over all validation images at IOU = 0.5 with threshold = {}: ".format(ap_threshold), validation_ap_score)
# use ap-threshold to compute SEG-scores
predicted_ap_seg_images, ap_seg_result = n2s_model.predict_label_masks(val_images, Y_val_masks, ap_threshold,
measure=measure_seg())
print("SEG score over all validation images at IOU = 0.5 with ap-threshold = {}: ".format(ap_threshold), ap_seg_result)
# compute SEG results
seg_threshold, validation_seg_score = n2s_model.optimize_thresholds(val_images, Y_val_masks, measure=measure_seg())
print("SEG over all validation images at IOU = 0.5 with threshold = {}: ".format(seg_threshold), validation_seg_score)
with open(join(conf['basedir'], "validation_scores.csv"), mode='w') as f:
writer = csv.writer(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow(['AP', validation_ap_score])
writer.writerow(['SEG', validation_seg_score])
writer.writerow(['SEG optimized for AP', ap_seg_result])
if __name__=="__main__":
main()
``` |
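The script above reads only three keys from the JSON file passed via `--temp_conf`. A minimal way to generate such a file — the key names come from the code, while the paths themselves are placeholders:
```python
import json

conf = {
    "train_data_path": "data/DSB2018_n20/train/train_data.npz",  # npz containing X_val / Y_val
    "model_name": "denoiseg_dsb_n20",
    "basedir": "models",
}
with open("temp_conf.json", "w") as f:
    json.dump(conf, f, indent=2)

# then: python validation_evaluation.py --temp_conf temp_conf.json
```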
{
"source": "jianzhichun/getprovinces",
"score": 2
} |
#### File: jianzhichun/getprovinces/getprovinces.py
```python
import bisect
import pickle
import threading
import sys
import time
import numpy as np
from numpy import deg2rad
__init_flag__ = False
def init():
'''''
'''
j = ['|', '/', '-', '\\']
i = 0
while not __init_flag__:
sys.stdout.write(j[i%4]+" init\r")
i = i + 1
sys.stdout.flush()
time.sleep(0.1)
threading.Thread(target=init).start()
__database__ = {
'locations':pickle.load(file('locations.obj', 'rb')),
'matrix':pickle.load(file('matrix.obj', 'rb')),
# 'id_province':pickle.load(file('id_province.obj', 'rb')),
'id_province':[u'\u6d77\u5357\u7701',\
u'\u5e7f\u4e1c\u7701',\
u'\u5e7f\u897f\u58ee\u65cf\u81ea\u6cbb\u533a',\
u'\u4e91\u5357\u7701',\
u'\u53f0\u6e7e\u7701',\
u'\u6fb3\u95e8\u7279\u522b\u884c\u653f\u533a',\
u'\u9999\u6e2f\u7279\u522b\u884c\u653f\u533a',\
u'\u798f\u5efa\u7701',\
u'\u6c5f\u897f\u7701',\
u'\u8d35\u5dde\u7701',\
u'\u6e56\u5357\u7701',\
u'\u56db\u5ddd\u7701',\
u'\u897f\u85cf\u81ea\u6cbb\u533a',\
u'\u6d59\u6c5f\u7701',\
u'\u91cd\u5e86\u5e02',\
u'\u6e56\u5317\u7701',\
u'\u5b89\u5fbd\u7701',\
u'\u4e0a\u6d77\u5e02',\
u'\u6c5f\u82cf\u7701',\
u'\u6cb3\u5357\u7701',\
u'\u9752\u6d77\u7701',\
u'\u9655\u897f\u7701',\
u'\u7518\u8083\u7701',\
u'\u65b0\u7586\u7ef4\u543e\u5c14\u81ea\u6cbb\u533a',\
u'\u5c71\u4e1c\u7701',\
u'\u5c71\u897f\u7701',\
u'\u5b81\u590f\u56de\u65cf\u81ea\u6cbb\u533a',\
u'\u6cb3\u5317\u7701',\
u'\u5185\u8499\u53e4\u81ea\u6cbb\u533a',\
u'\u8fbd\u5b81\u7701',\
u'\u5929\u6d25\u5e02',\
u'\u5317\u4eac\u5e02',\
u'\u5409\u6797\u7701',\
u'\u9ed1\u9f99\u6c5f\u7701']
}
__database__['lats'] = sorted(__database__['locations'].keys())
__init_flag__ = True
def get_centre(lat, lng):
'''''
'''
#deal lat
left_lat_index = bisect.bisect_left(__database__['lats'], lat)
left_lat = __database__['lats'][left_lat_index]
right_lat = __database__['lats'][left_lat_index + 1]
mlat = right_lat if (right_lat - lat) < (lat - left_lat) else left_lat
#deal lng
lng_coordinate = dict(__database__['locations'][mlat])
lngs = sorted(lng_coordinate.keys())
left_lng_index = bisect.bisect_left(lngs, lng)
left_lng = lngs[left_lng_index]
right_lng = lngs[left_lng_index + 1]
mlng = right_lng if (right_lng - lng) < (lng - left_lng) else left_lng
centre = lng_coordinate[mlng]
# print str((lat, lng)) + ' coordinate is ' + str(centre)
return centre
def sector_mask(shape, centre, radius, angle_range):
"""
Return a boolean mask for a circular sector. The start/stop angles in
`angle_range` should be given in clockwise order.
"""
x,y = np.ogrid[:shape[0],:shape[1]]
cx,cy = centre
tmin, tmax = deg2rad(angle_range)
# ensure stop angle > start angle
if tmax < tmin:
tmax += 2*np.pi
# convert cartesian --> polar coordinates
r2 = (x-cx)*(x-cx) + (y-cy)*(y-cy)
theta = np.arctan2(x-cx,y-cy) - tmin
# wrap angles between 0 and 2*pi
theta %= (2*np.pi)
# circular mask
circmask = r2 <= radius*radius
# angular mask
anglemask = theta <= (tmax-tmin)
return circmask*anglemask
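# Added usage sketch (not part of the original script): sector_mask only needs
# numpy, so it can be tried in isolation, e.g. a 90-degree wedge of radius 20
# centred at (50, 50) inside a 100x100 grid:
#   demo = np.zeros((100, 100))
#   demo[sector_mask(demo.shape, (50, 50), 20, (0, 90))] = 1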
def get_provinces():
'''''
'''
# print 'success'
# from matplotlib import pyplot as pp
    lat = float(input(u'input lat(>3.91 & < 53.5): '))
    lng = float(input(u'input lng(>73.6 & < 135): '))
    radius = float(input(u'input radius: '))
    try:
        print('centre is ' + str((lat, lng)) + ' radius is ' + str(radius) + ' km')
        matrix = pickle.load(open('matrix.obj', 'rb'))
        mask = sector_mask(matrix.shape, get_centre(lat, lng), radius/5, (0, 360))
        matrix[~mask] = -1
        idset = set()
        for row in matrix:
            idset = idset|set(row)
        # return [__database__['id_province'][item] for item in idset if item != -1]
        # print(idset)
        for item in idset:
            if item != -1:
                print(__database__['id_province'][item])
    except Exception:
        print('please input correct format')
if __name__ == "__main__":
while 1:
get_provinces()
``` |
{
"source": "jianzhnie/AutoML-Tools",
"score": 3
} |
#### File: examples/hyperopt/hyperopt_rf.py
```python
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import StandardScaler
from hyperopt import tpe, hp, fmin, STATUS_OK,Trials
from hyperopt.pyll.base import scope
import warnings
warnings.filterwarnings("ignore")
data = pd.read_csv("../data/mobile_price_data/train.csv")
# load data
X = data.drop("price_range", axis=1).values
y = data.price_range.values
# standardize the feature variables
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
# define the hyperparameter search space
space = {
"n_estimators": hp.choice("n_estimators", [100, 200, 300, 400, 500, 600]),
"max_depth": hp.quniform("max_depth", 1, 15, 1),
"criterion": hp.choice("criterion", ["gini", "entropy"]),
}
# define the objective function
def hyperparameter_tuning(params):
clf = RandomForestClassifier(**params, n_jobs=-1)
acc = cross_val_score(clf, X_scaled, y, scoring="accuracy").mean()
return {"loss": -acc, "status": STATUS_OK}
# initialize the Trials object
trials = Trials()
best = fmin(
fn=hyperparameter_tuning,
space=space,
algo=tpe.suggest,
max_evals=100,
trials=trials
)
if __name__ == "__main__":
print("Best: {}".format(best))
print(trials.results)
print(trials.losses())
print(trials.statuses())
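    # Added note (not part of the original example): for hp.choice parameters,
    # fmin() returns the index of the chosen option rather than the value itself;
    # hyperopt.space_eval maps the result back onto the search space.
    from hyperopt import space_eval
    print("Best (resolved values): {}".format(space_eval(space, best)))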
```
#### File: examples/optuna/optuna_db.py
```python
import optuna
## New Study
study_name = 'example-study' # Unique identifier of the study.
study = optuna.create_study(study_name=study_name, storage='sqlite:///results/example.db')
def objective(trial):
x = trial.suggest_uniform('x', -10, 10)
return (x - 2) ** 2
study.optimize(objective, n_trials=3)
## Resume Study
study = optuna.create_study(study_name='example-study', storage='sqlite:///results/example.db', load_if_exists=True)
study.optimize(objective, n_trials=3)
## Experimental History
df = study.trials_dataframe(attrs=('number', 'value', 'params', 'state'))
print(df)
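# Added note (not part of the original snippet): an already-created study can
# also be re-opened without the create-or-load fallback, assuming the same
# storage URL is still reachable:
# study = optuna.load_study(study_name='example-study', storage='sqlite:///results/example.db')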
```
#### File: examples/optuna/optuna_example.py
```python
import optuna
def objective(trial):
x = trial.suggest_uniform('x', -10, 10)
y = trial.suggest_uniform('y', -10, 10)
return (x + y) ** 2
study = optuna.create_study(direction='maximize')
study.optimize(objective, n_trials=100)
print(study.best_params)
print(study.best_value)
```
#### File: examples/ray/ray_xgboost.py
```python
import sklearn.datasets
import sklearn.metrics
from sklearn.model_selection import train_test_split
import xgboost as xgb
from ray import tune
def train_breast_cancer(config):
# Load dataset
data, labels = sklearn.datasets.load_breast_cancer(return_X_y=True)
# Split into train and test set
train_x, test_x, train_y, test_y = train_test_split(
data, labels, test_size=0.25)
# Build input matrices for XGBoost
train_set = xgb.DMatrix(train_x, label=train_y)
test_set = xgb.DMatrix(test_x, label=test_y)
# Train the classifier
results = {}
xgb.train(
config,
train_set,
evals=[(test_set, "eval")],
evals_result=results,
verbose_eval=False)
# Return prediction accuracy
accuracy = 1. - results["eval"]["error"][-1]
tune.report(mean_accuracy=accuracy, done=True)
if __name__ == "__main__":
config = {
"objective": "binary:logistic",
"eval_metric": ["logloss", "error"],
"max_depth": tune.randint(1, 9),
"min_child_weight": tune.choice([1, 2, 3]),
"subsample": tune.uniform(0.5, 1.0),
"eta": tune.loguniform(1e-4, 1e-1)
}
analysis = tune.run(
train_breast_cancer,
resources_per_trial={"cpu": 1},
config=config,
num_samples=100)
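    # Added sketch (not part of the original example): tune.run returns an
    # ExperimentAnalysis object, so the best configuration can be read back by
    # naming the metric reported via tune.report above (API as in recent Ray
    # Tune releases; adjust if your version differs).
    best_config = analysis.get_best_config(metric="mean_accuracy", mode="max")
    print("Best hyperparameters found:", best_config)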
``` |
{
"source": "jianzhnie/AutoTabular",
"score": 3
} |
#### File: autofe/optuna_tuner/xgboost_optuna.py
```python
import numpy as np
import optuna
import pandas as pd
from autofe.optuna_tuner.registry import (BINARY_CLASSIFICATION,
MULTICLASS_CLASSIFICATION,
REGRESSION,
default_optimizer_direction,
default_task_metric, get_metric_fn,
support_ml_task)
from autofe.utils.logger import get_root_logger
from sklearn.model_selection import train_test_split
from xgboost.sklearn import XGBClassifier, XGBRegressor
logger = get_root_logger(log_file=None)
class XGBoostOptuna(object):
def __init__(
self,
task: str = BINARY_CLASSIFICATION,
metric: str = 'accuracy',
random_state=42,
):
self.task = task
self.seed = random_state
if metric is None:
self.metric = default_task_metric[task]
else:
self.metric = metric
assert self.task in support_ml_task, 'Only Support ML Tasks: %s' % support_ml_task
if self.task == REGRESSION:
self.estimator = XGBRegressor
else:
self.estimator = XGBClassifier
def fit(self,
X_train,
y_train,
X_val=None,
y_val=None,
split_ratio=0.2,
max_evals: int = 100,
timeout=3600):
if X_val is not None:
X_train, X_val = self._validate_fit_data(
train_data=X_train, tuning_data=X_val)
else:
logger.info(
'Tuning data is None, the original train_data will be split: train vs val = %2s vs %2s'
% (1 - split_ratio, split_ratio))
X_train, X_val, y_train, y_val = train_test_split(
X_train, y_train, test_size=split_ratio)
objective = self.get_objective(X_train, y_train, X_val, y_val)
        logger.info('===== Beginning XGBoost HPO training ======')
        logger.info('Max HPO trials: %s' % max_evals)
        logger.info('Time Out: %s s ' % timeout)
optimizer_direction = self.get_optimizer_direction(
self.task, self.metric)
self.n_warmup_steps = 20
try:
study = optuna.create_study(
direction=optimizer_direction,
sampler=optuna.samplers.TPESampler(seed=self.seed),
pruner=optuna.pruners.MedianPruner(
n_warmup_steps=self.n_warmup_steps),
)
study.optimize(objective, n_trials=max_evals, timeout=timeout)
trial = study.best_trial
best_param = trial.params
            logger.info('====== Finished XGBoost HPO training ======')
            logger.info('Get the best model params ...')
            logger.info('params: %s', best_param)
logger.info('Retraining on the whole dataset.')
self.model = self.estimator(**best_param).fit(X_train, y_train)
except optuna.exceptions.TrialPruned as e:
raise e
except Exception as e:
            print('Exception in XGBoostObjective', str(e))
return None
return best_param
def predict(self, X_test):
return self.model.predict(X_test)
def predict_proba(self, X_test):
return self.model.predict_proba(X_test)
def get_score_fn(self, task, metric):
if metric is None:
metric = default_task_metric[task]
score_fn = get_metric_fn[metric]
return score_fn
def get_optimizer_direction(self, task, metric):
        if metric is None:
metric = default_task_metric[task]
direction = default_optimizer_direction[metric]
return direction
def xgboost_objective(self, ml_task):
objective = 'reg:squarederror'
if ml_task == BINARY_CLASSIFICATION:
objective = 'binary:logistic'
elif ml_task == MULTICLASS_CLASSIFICATION:
objective = 'multi:softprob'
else: # ml_task == REGRESSION
objective = 'reg:squarederror'
return objective
def get_objective(self,
X_train,
y_train,
X_val=None,
y_val=None,
**kwargs):
def objective(trial):
obj = self.xgboost_objective(self.task)
param = {
'verbosity':
0,
'objective':
obj,
'use_label_encoder':
False,
'booster':
trial.suggest_categorical('booster',
['gbtree', 'gblinear', 'dart']),
'learning_rate':
trial.suggest_float('learning_rate', 0.01, 0.3),
'max_depth':
trial.suggest_int('max_depth', 2, 32, step=1),
'n_estimators':
trial.suggest_int('n_estimators', 100, 1000, step=100),
'subsample':
trial.suggest_float('subsample', 0.2, 1.0),
'colsample_bytree':
trial.suggest_float('colsample_bytree', 0.2, 1.0),
'lambda':
trial.suggest_float('lambda', 1e-8, 1.0, log=True),
'alpha':
trial.suggest_float('alpha', 1e-8, 1.0, log=True),
}
            if param['booster'] == 'gbtree' or param['booster'] == 'dart':
                # 'max_depth' was already sampled above; suggesting it again with a
                # different range would make Optuna raise an inconsistent-distribution error
param['eta'] = trial.suggest_float('eta', 1e-8, 1.0, log=True)
param['min_child_weight'] = trial.suggest_int(
'min_child_weight', 2, 10)
param['gamma'] = trial.suggest_float(
'gamma', 1e-8, 1.0, log=True)
param['grow_policy'] = trial.suggest_categorical(
'grow_policy', ['depthwise', 'lossguide'])
if param['booster'] == 'dart':
param['sample_type'] = trial.suggest_categorical(
'sample_type', ['uniform', 'weighted'])
param['normalize_type'] = trial.suggest_categorical(
'normalize_type', ['tree', 'forest'])
param['rate_drop'] = trial.suggest_float(
'rate_drop', 1e-8, 1.0, log=True)
param['skip_drop'] = trial.suggest_float(
'skip_drop', 1e-8, 1.0, log=True)
model = self.estimator(**param).fit(X_train, y_train)
preds = model.predict(X_val)
score_fn = self.get_score_fn(self.task, self.metric)
score = score_fn(y_val, preds)
return score
return objective
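    # Note (added, not in the original source): the MedianPruner configured in
    # fit() only takes effect if the objective reports intermediate values,
    # e.g. per boosting round via trial.report(score, step) followed by
    # `if trial.should_prune(): raise optuna.TrialPruned()`. The objective above
    # trains each candidate in one shot, so no trial is ever pruned.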
def _validate_fit_data(self, train_data, tuning_data=None):
if not isinstance(train_data, pd.DataFrame):
raise AssertionError(
f'train_data is required to be a pandas DataFrame, but was instead: {type(train_data)}'
)
        if len(set(train_data.columns)) < len(train_data.columns):
raise ValueError(
"Column names are not unique, please change duplicated column names (in pandas: train_data.rename(columns={'current_name':'new_name'})"
)
if tuning_data is not None:
if not isinstance(tuning_data, pd.DataFrame):
raise AssertionError(
f'tuning_data is required to be a pandas DataFrame, but was instead: {type(tuning_data)}'
)
train_features = train_data.columns
tuning_features = tuning_data.columns
train_features = np.array(train_features)
tuning_features = np.array(tuning_features)
if np.any(train_features != tuning_features):
raise ValueError(
'Column names must match between training and tuning data')
return train_data, tuning_data
if __name__ == '__main__':
import sklearn.datasets
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
X, y = sklearn.datasets.load_iris(return_X_y=True, as_frame=True)
label_encoder = LabelEncoder()
y = label_encoder.fit_transform(y)
rf = XGBoostOptuna(task='multiclass_classification')
X_train, X_val, y_train, y_val = train_test_split(X, y)
rf.fit(X_train, y_train, X_val=None, y_val=None, max_evals=10)
preds = rf.predict(X_val)
acc = accuracy_score(y_val, preds)
print(acc)
```
#### File: autofe/tabular_embedding/tab-transformer.py
```python
import torch
import torch.nn.functional as F
from einops import rearrange
from torch import einsum, nn
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# classes
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) + x
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
# attention
class GEGLU(nn.Module):
def forward(self, x):
x, gates = x.chunk(2, dim=-1)
return x * F.gelu(gates)
class FeedForward(nn.Module):
def __init__(self, dim, mult=4, dropout=0.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, dim * mult * 2), GEGLU(), nn.Dropout(dropout),
nn.Linear(dim * mult, dim))
def forward(self, x, **kwargs):
return self.net(x)
class Attention(nn.Module):
def __init__(self, dim, heads=8, dim_head=16, dropout=0.):
super().__init__()
inner_dim = dim_head * heads
self.heads = heads
self.scale = dim_head**-0.5
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias=False)
self.to_out = nn.Linear(inner_dim, dim)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
h = self.heads
q, k, v = self.to_qkv(x).chunk(3, dim=-1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h),
(q, k, v))
sim = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
attn = sim.softmax(dim=-1)
attn = self.dropout(attn)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)', h=h)
return self.to_out(out)
# transformer
class Transformer(nn.Module):
def __init__(self, num_tokens, dim, depth, heads, dim_head, attn_dropout,
ff_dropout):
super().__init__()
self.embeds = nn.Embedding(num_tokens, dim)
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(
nn.ModuleList([
Residual(
PreNorm(
dim,
Attention(
dim,
heads=heads,
dim_head=dim_head,
dropout=attn_dropout))),
Residual(
PreNorm(dim, FeedForward(dim, dropout=ff_dropout))),
]))
def forward(self, x):
x = self.embeds(x)
for attn, ff in self.layers:
x = attn(x)
x = ff(x)
return x
# mlp
class MLP(nn.Module):
def __init__(self, dims, act=None):
super().__init__()
dims_pairs = list(zip(dims[:-1], dims[1:]))
layers = []
for ind, (dim_in, dim_out) in enumerate(dims_pairs):
is_last = ind >= (len(dims_pairs) - 1)
linear = nn.Linear(dim_in, dim_out)
layers.append(linear)
if is_last:
continue
act = default(act, nn.ReLU())
layers.append(act)
self.mlp = nn.Sequential(*layers)
def forward(self, x):
return self.mlp(x)
# main class
class TabTransformer(nn.Module):
def __init__(self,
*,
categories,
num_continuous,
dim,
depth,
heads,
dim_head=16,
dim_out=1,
mlp_hidden_mults=(4, 2),
mlp_act=None,
num_special_tokens=2,
continuous_mean_std=None,
attn_dropout=0.,
ff_dropout=0.):
super().__init__()
assert all(map(lambda n: n > 0,
categories)), 'number of each category must be positive'
# categories related calculations
self.num_categories = len(categories)
self.num_unique_categories = sum(categories)
# create category embeddings table
self.num_special_tokens = num_special_tokens
total_tokens = self.num_unique_categories + num_special_tokens
# for automatically offsetting unique category ids to the correct position in the categories embedding table
categories_offset = F.pad(
torch.tensor(list(categories)), (1, 0), value=num_special_tokens)
categories_offset = categories_offset.cumsum(dim=-1)[:-1]
self.register_buffer('categories_offset', categories_offset)
# continuous
        if exists(continuous_mean_std):
            assert continuous_mean_std.shape == (
                num_continuous, 2
            ), f'continuous_mean_std must have a shape of ({num_continuous}, 2) where the last dimension contains the mean and std respectively'
        # register the buffer even when it is None, so forward() can safely test exists(self.continuous_mean_std)
        self.register_buffer('continuous_mean_std', continuous_mean_std)
self.norm = nn.LayerNorm(num_continuous)
self.num_continuous = num_continuous
# transformer
self.transformer = Transformer(
num_tokens=total_tokens,
dim=dim,
depth=depth,
heads=heads,
dim_head=dim_head,
attn_dropout=attn_dropout,
ff_dropout=ff_dropout)
# mlp to logits
input_size = (dim * self.num_categories) + num_continuous
out_dim = input_size // 8
hidden_dimensions = list(map(lambda t: out_dim * t, mlp_hidden_mults))
all_dimensions = [input_size, *hidden_dimensions, dim_out]
self.mlp = MLP(all_dimensions, act=mlp_act)
def forward(self, x_categ, x_cont):
assert x_categ.shape[
-1] == self.num_categories, f'you must pass in {self.num_categories} values for your categories input'
x_categ += self.categories_offset
x = self.transformer(x_categ)
flat_categ = x.flatten(1)
assert x_cont.shape[
1] == self.num_continuous, f'you must pass in {self.num_continuous} values for your continuous input'
if exists(self.continuous_mean_std):
mean, std = self.continuous_mean_std.unbind(dim=-1)
x_cont = (x_cont - mean) / std
normed_cont = self.norm(x_cont)
x = torch.cat((flat_categ, normed_cont), dim=-1)
return self.mlp(x)
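# Added usage sketch (not part of the original file): the sizes below are
# illustrative values only, chosen to satisfy the shape checks in forward().
if __name__ == '__main__':
    model = TabTransformer(
        categories=(10, 5, 6),  # three categorical fields and their cardinalities
        num_continuous=4,       # four continuous features
        dim=32,
        depth=2,
        heads=4,
        dim_out=1,
        continuous_mean_std=torch.tensor([[0., 1.]] * 4))  # per-feature mean and std
    x_categ = torch.randint(0, 5, (8, 3))  # ids must stay below each field's cardinality
    x_cont = torch.randn(8, 4)
    logits = model(x_categ, x_cont)
    print(logits.shape)  # torch.Size([8, 1])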
```
#### File: autofe/tabular_embedding/tabular_embedding_transformer.py
```python
from pathlib import Path
import pandas as pd
import wget
from autofe.tabular_embedding.widedeep_embedding import WideDeepEmbeddingModel, WideDeepEmbeddingModelConfig
from pytorch_tabular.config import DataConfig, OptimizerConfig, TrainerConfig
from pytorch_tabular.feature_extractor import DeepFeatureExtractor
from pytorch_tabular.models.category_embedding.config import CategoryEmbeddingModelConfig
from pytorch_tabular.models.tab_transformer.config import TabTransformerConfig
from pytorch_tabular.tabular_model import TabularModel
from sklearn.model_selection import train_test_split
class TabularEmbeddingTransformer():
def __init__(self,
cat_col_names=None,
num_col_names=None,
date_col_names=None,
target_name=None,
num_classes=None,
model_name='category'):
self.cat_col_names = cat_col_names
self.num_col_names = num_col_names
self.date_col_names = date_col_names
self.target_name = target_name
self.model_name = model_name
data_config = DataConfig(
target=target_name,
continuous_cols=num_col_names,
categorical_cols=cat_col_names,
date_columns=date_col_names,
continuous_feature_transform='quantile_normal',
normalize_continuous_features=True,
)
category_model_config = CategoryEmbeddingModelConfig(
task='classification',
learning_rate=1e-3,
metrics=['f1', 'accuracy'],
metrics_params=[{
'num_classes': num_classes
}, {}])
tabtransformer_model_config = TabTransformerConfig(
task='classification',
metrics=['f1', 'accuracy'],
share_embedding=True,
share_embedding_strategy='add',
shared_embedding_fraction=0.25,
metrics_params=[{
'num_classes': num_classes,
'average': 'macro'
}, {}],
)
widedeep_model_config = WideDeepEmbeddingModelConfig(
task='classification',
learning_rate=1e-3,
metrics=['f1', 'accuracy'],
metrics_params=[{
'num_classes': num_classes
}, {}])
trainer_config = TrainerConfig(
gpus=0,
auto_select_gpus=False,
auto_lr_find=True,
max_epochs=1,
batch_size=1024)
optimizer_config = OptimizerConfig()
if self.model_name == 'category':
self.tabular_model = TabularModel(
data_config=data_config,
model_config=category_model_config,
optimizer_config=optimizer_config,
trainer_config=trainer_config)
elif self.model_name == 'tabtransformer':
self.tabular_model = TabularModel(
data_config=data_config,
model_config=tabtransformer_model_config,
optimizer_config=optimizer_config,
trainer_config=trainer_config)
elif self.model_name == 'widedeep':
self.tabular_model = TabularModel(
data_config=data_config,
model_config=widedeep_model_config,
optimizer_config=optimizer_config,
trainer_config=trainer_config,
model_callable=WideDeepEmbeddingModel)
else:
raise NotImplementedError
def fit(self, train_data, validation=None):
"""Just for compatibility.
Does not do anything
"""
self.tabular_model.fit(train=train_data, validation=validation)
self.transformerMoldel = DeepFeatureExtractor(
self.tabular_model, drop_original=False)
return self
def transform(self, train_data):
encoded_data = self.transformerMoldel.transform(train_data)
return encoded_data
def fit_transform(self, train_data):
"""Encode given columns of X based on the learned features.
Args:
X (pd.DataFrame): DataFrame of features, shape (n_samples, n_features). Must contain columns to encode.
y ([type], optional): Only for compatibility. Not used. Defaults to None.
Returns:
pd.DataFrame: The encoded dataframe
"""
self.fit(train_data)
return self.transform(train_data)
if __name__ == '__main__':
BASE_DIR = Path.home().joinpath('data')
datafile = BASE_DIR.joinpath('covtype.data.gz')
datafile.parent.mkdir(parents=True, exist_ok=True)
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.data.gz'
if not datafile.exists():
wget.download(url, datafile.as_posix())
target_name = ['Covertype']
cat_col_names = [
'Wilderness_Area1', 'Wilderness_Area2', 'Wilderness_Area3',
'Wilderness_Area4', 'Soil_Type1', 'Soil_Type2', 'Soil_Type3',
'Soil_Type4', 'Soil_Type5', 'Soil_Type6', 'Soil_Type7', 'Soil_Type8',
'Soil_Type9', 'Soil_Type10', 'Soil_Type11', 'Soil_Type12',
'Soil_Type13', 'Soil_Type14', 'Soil_Type15', 'Soil_Type16',
'Soil_Type17', 'Soil_Type18', 'Soil_Type19', 'Soil_Type20',
'Soil_Type21', 'Soil_Type22', 'Soil_Type23', 'Soil_Type24',
'Soil_Type25', 'Soil_Type26', 'Soil_Type27', 'Soil_Type28',
'Soil_Type29', 'Soil_Type30', 'Soil_Type31', 'Soil_Type32',
'Soil_Type33', 'Soil_Type34', 'Soil_Type35', 'Soil_Type36',
'Soil_Type37', 'Soil_Type38', 'Soil_Type39', 'Soil_Type40'
]
num_col_names = [
'Elevation', 'Aspect', 'Slope', 'Horizontal_Distance_To_Hydrology',
'Vertical_Distance_To_Hydrology', 'Horizontal_Distance_To_Roadways',
'Hillshade_9am', 'Hillshade_Noon', 'Hillshade_3pm',
'Horizontal_Distance_To_Fire_Points'
]
date_col_names = []
feature_columns = (
num_col_names + cat_col_names + date_col_names + target_name)
df = pd.read_csv(datafile, header=None, names=feature_columns)
train, test = train_test_split(df, random_state=42)
train, val = train_test_split(train, random_state=42)
num_classes = len(set(train[target_name].values.ravel()))
# data_config = DataConfig(
# target=target_name,
# continuous_cols=num_col_names,
# categorical_cols=cat_col_names,
# continuous_feature_transform=None, #"quantile_normal",
# normalize_continuous_features=False,
# )
# model_config = CategoryEmbeddingModelConfig(
# task='classification',
# metrics=['f1', 'accuracy'],
# metrics_params=[{
# 'num_classes': num_classes
# }, {}])
# model_config = NodeConfig(
# task='classification',
# depth=4,
# num_trees=1024,
# input_dropout=0.0,
# metrics=['f1', 'accuracy'],
# metrics_params=[{
# 'num_classes': num_classes,
# 'average': 'macro'
# }, {}],
# )
# model_config = TabTransformerConfig(
# task='classification',
# metrics=['f1', 'accuracy'],
# share_embedding=True,
# share_embedding_strategy='add',
# shared_embedding_fraction=0.25,
# metrics_params=[{
# 'num_classes': num_classes,
# 'average': 'macro'
# }, {}],
# )
# trainer_config = TrainerConfig(
# gpus=-1,
# auto_select_gpus=True,
# fast_dev_run=True,
# max_epochs=5,
# batch_size=512)
# experiment_config = ExperimentConfig(
# project_name='PyTorch Tabular Example',
# run_name='node_forest_cov',
# exp_watch='gradients',
# log_target='wandb',
# log_logits=True)
# optimizer_config = OptimizerConfig()
# tabular_model = TabularModel(
# data_config=data_config,
# model_config=model_config,
# optimizer_config=optimizer_config,
# trainer_config=trainer_config,
# experiment_config=experiment_config,
# )
# sampler = get_balanced_sampler(train[target_name].values.ravel())
# tabular_model.fit(train=train, validation=val, train_sampler=sampler)
transformer = TabularEmbeddingTransformer(
cat_col_names=cat_col_names,
num_col_names=num_col_names,
date_col_names=[],
target_name=target_name,
num_classes=num_classes)
train_transform = transformer.fit_transform(df)
print(train_transform)
```
#### File: algorithms/anomaly/meta_devnet.py
```python
import argparse
import os
import random
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.model_selection import StratifiedShuffleSplit
from torch import optim
class DevNet(nn.Module):
"""docstring for ClassName."""
def __init__(self,
feature_dim,
num_class,
ae_hidden_neurons=[512, 256, 128],
cl_hidden_neurons=[64, 32, 10],
drop_rate=0.2,
batch_norm=True,
hidden_activation='relu',
output_activation='sigmoid'):
super(DevNet, self).__init__()
self.feature_dim = feature_dim
self.num_class = num_class
self.layers_neurons_encoder = [self.feature_dim, *ae_hidden_neurons]
self.layers_neurons_decoder = self.layers_neurons_encoder[::-1]
self.cl_hidden_neurons = [ae_hidden_neurons[-1], *cl_hidden_neurons, 1]
self.drop_rate = drop_rate
self.batch_norm = batch_norm
self.hidden_activation = nn.ReLU()
self.output_activation = nn.Sigmoid()
self.encoder = nn.Sequential()
self.decoder = nn.Sequential()
self.classifier = nn.Sequential()
# create encoder model
for idx, layer in enumerate(self.layers_neurons_encoder[:-1]):
self.encoder.add_module(
'linear' + str(idx),
nn.Linear(self.layers_neurons_encoder[idx],
self.layers_neurons_encoder[idx + 1]))
self.encoder.add_module(
'batch_norm' + str(idx),
nn.BatchNorm1d(self.layers_neurons_encoder[idx + 1]))
self.encoder.add_module('dropout' + str(idx),
nn.Dropout(self.drop_rate))
self.encoder.add_module(hidden_activation + str(idx),
self.hidden_activation)
# create decoder model
for idx, layer in enumerate(self.layers_neurons_decoder[:-1]):
self.decoder.add_module(
'linear' + str(idx),
nn.Linear(self.layers_neurons_decoder[idx],
self.layers_neurons_decoder[idx + 1]))
self.decoder.add_module(
'batch_norm' + str(idx),
nn.BatchNorm1d(self.layers_neurons_decoder[idx + 1]))
self.decoder.add_module('dropout' + str(idx),
nn.Dropout(self.drop_rate))
if idx == len(self.layers_neurons_decoder) - 2:
self.decoder.add_module(output_activation + str(idx),
self.output_activation)
else:
self.decoder.add_module(hidden_activation + str(idx),
self.hidden_activation)
# create classifier
for idx, layer in enumerate(self.cl_hidden_neurons[:-2]):
self.classifier.add_module(
'linear' + str(idx),
nn.Linear(self.cl_hidden_neurons[idx],
self.cl_hidden_neurons[idx + 1]))
self.classifier.add_module(
'batch_norm' + str(idx),
nn.BatchNorm1d(self.cl_hidden_neurons[idx + 1]))
self.classifier.add_module('dropout' + str(idx),
nn.Dropout(self.drop_rate))
self.classifier.add_module(hidden_activation + str(idx),
self.hidden_activation)
idx += 1
self.classifier.add_module(
'linear' + str(idx),
nn.Linear(self.cl_hidden_neurons[idx],
self.cl_hidden_neurons[idx + 1]))
def forward(self, x):
feature_vector = self.encoder(x)
ae_output = self.decoder(feature_vector)
cls_output = self.classifier(feature_vector)
return ae_output, cls_output
class Task(object):
def __init__(self, data_name, data_path, label_name, saved=False):
super(Task, self).__init__()
self.data_name = data_name
self.data_path = data_path
self.label_name = label_name
self.saved = saved
def data_split(self):
"""split dataset to train set and test set."""
data_all = pd.read_csv(self.data_path)
data_all_x = data_all.drop(self.label_name, axis=1)
data_all_y = data_all[self.label_name]
sss = StratifiedShuffleSplit(
n_splits=1, test_size=0.2, train_size=0.8, random_state=2021)
for train_index, test_index in sss.split(data_all_x, data_all_y):
x_train, x_test = data_all_x.iloc[train_index], data_all_x.iloc[
test_index]
y_train, y_test = data_all_y.iloc[train_index], data_all_y.iloc[
test_index]
data_train = pd.concat([x_train, y_train], axis=1)
data_test = pd.concat([x_test, y_test], axis=1)
if self.saved:
to_path = os.path.join(
os.path.dirname(self.data_path), self.data_name)
if not os.path.exists(to_path):
os.makedirs(to_path)
data_train.to_csv(os.path.join(to_path, 'train.csv'))
data_test.to_csv(os.path.join(to_path, 'test.csv'))
return data_train, data_test
def generator(self,
data_train,
n_task,
n_support,
n_query,
unbalanced_rate=0.5):
"""generate meta_train task contain support set and query set."""
n_samples = n_support + n_query
normal_set = data_train[data_train[self.label_name] == 0].reset_index(
drop=True)
anomaly_set = data_train[data_train[self.label_name] == 1].reset_index(
drop=True)
num_normal = int(n_samples * (1 - unbalanced_rate))
num_anomaly = n_samples - num_normal
b_x_spt = []
b_y_spt = []
b_x_qry = []
b_y_qry = []
for i in range(n_task):
sample_normal_index = random.sample(
range(normal_set.shape[0]), num_normal)
sample_anomaly_index = random.sample(
range(anomaly_set.shape[0]), num_anomaly)
sampled_normal_set = normal_set.iloc[sample_normal_index]
sampled_anomaly_set = anomaly_set.iloc[sample_anomaly_index]
sampled_set = pd.concat([sampled_anomaly_set, sampled_normal_set],
axis=0)
sampled_set_x = sampled_set.drop(self.label_name, axis=1)
sampled_set_y = sampled_set[self.label_name]
test_rate = n_query / n_samples
train_rate = 1 - test_rate
sss = StratifiedShuffleSplit(
n_splits=1,
test_size=test_rate,
train_size=train_rate,
random_state=2021)
for train_index, test_index in sss.split(sampled_set_x,
sampled_set_y):
x_train, x_test = sampled_set_x.iloc[
train_index], sampled_set_x.iloc[test_index]
y_train, y_test = sampled_set_y.iloc[
train_index], sampled_set_y.iloc[test_index]
x_train = np.array(x_train)
x_test = np.array(x_test)
y_train = np.array(y_train).reshape(-1, 1)
y_test = np.array(y_test).reshape(-1, 1)
b_x_spt.append(x_train)
b_y_spt.append(y_train)
b_x_qry.append(x_test)
b_y_qry.append(y_test)
return np.array(b_x_spt), np.array(b_y_spt), np.array(
b_x_qry), np.array(b_y_qry)
class MetaLoss(nn.Module):
"""z-score-based deviation loss."""
def __init__(self, confidence_margin=5, alpha=0.5):
super(MetaLoss, self).__init__()
self.confidence_margin = confidence_margin
self.alpha = alpha
def forward(self, x, x_rec, y_pred, y_true):
ref = torch.randn(1, 5000).cuda()
dev = (y_pred - torch.mean(ref, dim=1)) / torch.std(ref, dim=1)
inlier_loss = torch.abs(dev)
outlier_loss = torch.abs(
torch.max(self.confidence_margin - dev,
torch.zeros(1).cuda()))
cls_loss = (1 - y_true) * inlier_loss + y_true * outlier_loss
cls_loss = torch.sum(cls_loss, dim=0)
mseloss = nn.MSELoss()
ae_loss = mseloss(x, x_rec)
loss = self.alpha * ae_loss + (1 - self.alpha) * cls_loss
return loss
class Meta(nn.Module):
"""Meta Learner."""
def __init__(self, args):
super(Meta, self).__init__()
"""
:param args:
"""
self.featrue_dim = args.feature_dim
self.num_class = args.num_class
self.alpha = args.alpha
self.alpha = torch.Tensor(np.array(self.alpha)).cuda()
self.update_lr = args.update_lr
self.meta_lr = args.meta_lr
self.task_num = args.task_num
self.update_step = args.update_step
self.update_step_test = args.update_step_test
self.net = DevNet(self.featrue_dim, self.num_class)
self.meta_optim = optim.Adam(self.net.parameters(), lr=self.meta_lr)
def assign_network_value(self, vars):
idx = 0
for name, param in self.net.named_parameters():
print(name, param)
print(vars[idx])
param.copy_(vars[idx])
idx += 1
assert idx == len(vars)
def forward(self, x_spt, y_spt, x_qry, y_qry):
"""
:param x_spt: [b, setsz, feature_dim]
:param y_spt: [b, setsz]
:param x_qry: [b, querysz, feature_dim]
:param y_qry: [b, querysz]
:return:
"""
task_num = x_spt.size(0)
querysz = x_qry.size(1)
losses_q = [0 for _ in range(self.update_step + 1)
] # losses_q[i] is the loss on step i
corrects = [0 for _ in range(self.update_step + 1)]
device = torch.device('cuda')
# self.update_lr = Variable(torch.Tensor(np.array(self.update_lr)), requires_grad=False)
for i in range(self.task_num):
# 1. run the i-th task and compute loss for k=0
x_rec, y_pred = self.net(x_spt[i])
model_loss = MetaLoss().to(device)
loss = model_loss(x_spt[i], x_rec, y_pred, y_spt[i])
self.net.parameters = nn.ParameterList(self.net.parameters())
# copy_net_parameters = copy.deepcopy(self.net.parameters)
# for name, param in self.net.named_parameters():
# print(name, param)
grad = torch.autograd.grad(loss, self.net.parameters())
fast_weights = list(
map(lambda p: p[1] - self.update_lr * p[0],
zip(grad, self.net.parameters())))
print(len(fast_weights))
# this is the loss and accuracy before first update
with torch.no_grad():
# [setsz, nway]
x_rec_q, y_pred_q = self.net(x_qry[i])
# model_loss = MetaLoss().to(device)
loss_q = model_loss(x_qry[i], x_rec_q, y_pred_q, y_qry[i])
print(f'loss_q: {loss_q}')
losses_q[0] += loss_q
pred_q = F.softmax(y_pred_q, dim=1).argmax(dim=1).unsqueeze(1)
correct = torch.eq(pred_q, y_qry[i]).sum().item()
corrects[0] = corrects[0] + correct
# this is the loss and accuracy after the first update
with torch.no_grad():
# [setsz, nway]
self.assign_network_value(fast_weights)
x_rec_q, y_pred_q = self.net(x_qry[i])
# model_loss = MetaLoss().to(device)
loss_q = model_loss(x_qry[i], x_rec_q, y_pred_q, y_qry[i])
print(f'loss_q: {loss_q}')
losses_q[1] += loss_q
# [setsz]
pred_q = F.softmax(y_pred_q, dim=1).argmax(dim=1)
correct = torch.eq(pred_q, y_qry[i]).sum().item()
corrects[1] = corrects[1] + correct
for k in range(1, self.update_step):
# 1. run the i-th task and compute loss for k=1~K-1
x_rec, y_pred = self.net(x_spt[i])
# model_loss = MetaLoss().to(device)
loss = model_loss(x_spt[i], x_rec, y_pred, y_spt[i])
print(f'loss: {loss}')
# 2. compute grad on theta_pi
grad = torch.autograd.grad(loss, self.net.parameters())
# 3. theta_pi = theta_pi - train_lr * grad
fast_weights = list(
map(lambda p: p[1] - self.update_lr * p[0],
zip(grad, self.net.parameters())))
print(len(fast_weights))
self.assign_network_value(fast_weights)
x_rec_q, y_pred_q = self.net(x_qry[i])
# loss_q will be overwritten and just keep the loss_q on last update step.
loss_q = model_loss(x_qry[i], x_rec_q, y_pred_q, y_qry[i])
losses_q[k + 1] += loss_q
with torch.no_grad():
pred_q = F.softmax(y_pred_q, dim=1).argmax(dim=1)
correct = torch.eq(
pred_q, y_qry[i]).sum().item() # convert to numpy
corrects[k + 1] = corrects[k + 1] + correct
# end of all tasks
# sum over all losses on query set across all tasks
loss_q = losses_q[-1] / task_num
# optimize theta parameters
self.meta_optim.zero_grad()
loss_q.backward()
# print('meta update')
# for p in self.net.parameters()[:5]:
# print(torch.norm(p).item())
self.meta_optim.step()
accs = np.array(corrects) / (querysz * task_num)
return accs
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument('--epoch', type=int, help='epoch number', default=2)
argparser.add_argument('--num_class', type=int, help='n class', default=2)
argparser.add_argument(
'--feature_dim', type=int, help='number of features', default=10)
argparser.add_argument(
'--task_num',
type=int,
help='meta batch size, namely task num',
default=32)
argparser.add_argument(
'--alpha',
type=float,
help='factor of two different loss',
default=0.5)
argparser.add_argument(
'--meta_lr',
type=float,
help='meta-level outer learning rate',
default=1e-3)
argparser.add_argument(
'--update_lr',
type=float,
help='task-level inner update learning rate',
default=0.4)
argparser.add_argument(
'--update_step',
type=int,
help='task-level inner update steps',
default=5)
argparser.add_argument(
'--update_step_test',
type=int,
help='update steps for finetunning',
default=10)
args = argparser.parse_args()
data_name = 'KDD2014'
data_path = '/home/wenqi.ao/workdir/anomaly_detection/deviation_network \
/dataset/KDD2014_donors_10feat_nomissing_normalised.csv'
label_name = 'class'
task_generator = Task(data_name, data_path, label_name)
train_set, test_set = task_generator.data_split()
device = torch.device('cuda')
maml = Meta(args).to(device)
for epoch in range(args.epoch):
x_spt, y_spt, x_qry, y_qry = task_generator.generator(
train_set, 10, 20, 10)
x_spt, y_spt, x_qry, y_qry = (
torch.from_numpy(x_spt).float().to(device),
torch.from_numpy(y_spt).float().to(device),
torch.from_numpy(x_qry).float().to(device),
torch.from_numpy(y_qry).float().to(device))
accs = maml(x_spt, y_spt, x_qry, y_qry)
print(f'Acc: {accs}')
# net = DevNet(feature_dim = 10, num_class = 2)
# print(len(nn.ParameterList(net.parameters())))
# for name, param in net.named_parameters():
# print(name, param)
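    # Added sketch (not in the original script): a quick CPU shape check of DevNet
    # on random data; the sizes below are illustrative only.
    # net = DevNet(feature_dim=10, num_class=2).eval()
    # dummy = torch.randn(16, 10)
    # x_rec, y_score = net(dummy)
    # print(x_rec.shape, y_score.shape)  # torch.Size([16, 10]) torch.Size([16, 1])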
```
#### File: algorithms/anomaly/rod.py
```python
from __future__ import division, print_function
import multiprocessing
from itertools import combinations as com
from multiprocessing import Pool
import numba
import numpy as np
from sklearn.preprocessing import MinMaxScaler, RobustScaler
from sklearn.utils import check_array
from .base import BaseDetector
@numba.njit
def mad(costs, median=None):
"""Apply the robust median absolute deviation (MAD) to measure the
inconsistency/variability of the rotation costs.
Parameters
----------
costs : list of rotation costs
median: float (default=None), MAD median
    Returns
    -------
    z : array, the modified z-scores
    median : float, the median of the costs (computed here when not supplied)
    """
costs_ = np.reshape(costs, (-1, 1))
median = np.nanmedian(costs_) if median is None else median
diff = np.abs(costs_ - median)
return np.ravel(0.6745 * diff / np.median(diff)), median
def angle(v1, v2):
"""find the angle between two 3D vectors.
Parameters
----------
v1 : list, first vector
v2 : list, second vector
Returns
-------
angle : float, the angle
"""
return np.arccos(
np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))
def geometric_median(x, eps=1e-5):
"""Find the multivariate geometric L1-median by applying Vardi and Zhang
algorithm.
Parameters
----------
x : array-like, the data points
eps: float (default=1e-5), a threshold to indicate when to stop
Returns
-------
gm : array, Geometric L1-median
"""
points = np.unique(x, axis=0)
gm_ = np.mean(points, 0) # initialize geometric median
while True:
D = euclidean(points, gm_, c=True)
non_zeros = (D != 0)[:, 0]
Dinv = 1 / D[non_zeros]
Dinvs = np.sum(Dinv)
W = Dinv / Dinvs
T = np.sum(W * points[non_zeros], 0)
num_zeros = len(points) - np.sum(non_zeros)
if num_zeros == 0:
gm1 = T
elif num_zeros == len(points):
return gm_
else:
R = (T - gm_) * Dinvs
r = np.linalg.norm(R)
r_inv = 0 if r == 0 else num_zeros / r
gm1 = max(0, 1 - r_inv) * T + min(1, r_inv) * gm_
if euclidean(gm_, gm1) < eps:
return gm1
gm_ = gm1
def scale_angles(gammas, scaler1=None, scaler2=None):
"""Scale all angles in which angles <= 90.
degree will be scaled within [0 - 54.7] and
angles > 90 will be scaled within [90 - 126]
Parameters
----------
gammas : list, angles
scaler1: obj (default=None), MinMaxScaler of Angles group 1
scaler2: obj (default=None), MinMaxScaler of Angles group 2
Returns
-------
scaled angles, scaler1, scaler2
"""
first, second = [], []
first_ind, second_ind = [], []
q1 = np.pi / 2.
for i, g in enumerate(gammas):
if g <= q1:
first.append(g)
first_ind.append(i)
else:
second.append(g)
second_ind.append(i)
if scaler1 is None: # this indicates the `fit()`
min_f, max_f = 0.001, 0.955
scaler1 = MinMaxScaler(feature_range=(min_f, max_f))
# min_f and max_f are required to be fit by scaler for consistency between train and test sets
scaler1.fit(np.array(first + [min_f, max_f]).reshape(-1, 1))
first = scaler1.transform(np.array(first).reshape(
-1, 1)).reshape(-1) if first else []
else:
first = scaler1.transform(np.array(first).reshape(
-1, 1)).reshape(-1) if first else []
if scaler2 is None: # this indicates the `fit()`
min_s, max_s = q1 + 0.001, 2.186
scaler2 = MinMaxScaler(feature_range=(min_s, max_s))
# min_s and max_s are required to be fit by scaler for consistency between train and test sets
scaler2.fit(np.array(second + [min_s, max_s]).reshape(-1, 1))
second = scaler2.transform(np.array(second).reshape(
-1, 1)).reshape(-1) if second else []
else:
second = scaler2.transform(np.array(second).reshape(
-1, 1)).reshape(-1) if second else []
# restore original order
return np.concatenate(
[first, second])[np.argsort(first_ind + second_ind)], scaler1, scaler2
def euclidean(v1, v2, c=False):
"""Find the euclidean distance between two vectors or between a vector and
a collection of vectors.
Parameters
----------
v1 : list, first 3D vector or collection of vectors
v2 : list, second 3D vector
c : bool (default=False), if True, it means the v1 is a list of vectors.
Returns
-------
list of list of euclidean distances if c==True.
Otherwise float: the euclidean distance
"""
if c:
res = []
for _v in v1:
res.append([
np.sqrt((_v[0] - v2[0])**2 + (_v[1] - v2[1])**2 +
(_v[2] - v2[2])**2)
])
return np.array(res, copy=False)
return np.sqrt((v1[0] - v2[0])**2 + (v1[1] - v2[1])**2 +
(v1[2] - v2[2])**2)
def rod_3D(x, gm=None, median=None, scaler1=None, scaler2=None):
"""Find ROD scores for 3D Data. note that gm, scaler1 and scaler2 will be
returned "as they are" and without being changed if the model has been fit
already.
Parameters
----------
x : array-like, 3D data points.
gm: list (default=None), the geometric median
median: float (default=None), MAD median
scaler1: obj (default=None), MinMaxScaler of Angles group 1
scaler2: obj (default=None), MinMaxScaler of Angles group 2
Returns
-------
decision_scores, gm, scaler1, scaler2
"""
# find the geometric median if it is not already fit
gm = geometric_median(x) if gm is None else gm
# find its norm and center data around it
norm_ = np.linalg.norm(gm)
_x = x - gm
# calculate the scaled angles between the geometric median and each data point vector
v_norm = np.linalg.norm(_x, axis=1)
gammas, scaler1, scaler2 = scale_angles(
np.arccos(np.clip(np.dot(_x, gm) / (v_norm * norm_), -1, 1)),
scaler1=scaler1,
scaler2=scaler2)
# apply the ROD main equation to find the rotation costs
costs = np.power(v_norm, 3) * np.cos(gammas) * np.square(np.sin(gammas))
# apply MAD to calculate the decision scores
decision_scores, median = mad(costs, median=median)
return decision_scores, list(gm), median, scaler1, scaler2
@numba.njit
def sigmoid(x):
"""Implementation of Sigmoid function.
Parameters
----------
x : array-like, decision scores
Returns
-------
array-like, x after applying sigmoid
"""
return 1 / (1 + np.exp(-x))
def process_sub(subspace, gm, median, scaler1, scaler2):
"""Apply ROD on a 3D subSpace then process it with sigmoid to compare
apples to apples.
Parameters
----------
subspace : array-like, 3D subspace of the data
gm: list, the geometric median
median: float, MAD median
scaler1: obj, MinMaxScaler of Angles group 1
scaler2: obj, MinMaxScaler of Angles group 2
Returns
-------
ROD decision scores with sigmoid applied, gm, scaler1, scaler2
"""
mad_subspace, gm, median, scaler1, scaler2 = rod_3D(
subspace, gm=gm, median=median, scaler1=scaler1, scaler2=scaler2)
return sigmoid(np.nan_to_num(
np.array(mad_subspace))), gm, median, scaler1, scaler2
def rod_nD(X,
parallel,
gm=None,
median=None,
data_scaler=None,
angles_scalers1=None,
angles_scalers2=None):
"""Find ROD overall scores when Data is higher than 3D:
# scale dataset using Robust Scaler
# decompose the full space into a combinations of 3D subspaces,
# Apply ROD on each combination,
# squish scores per subspace, so we compare apples to apples,
# calculate average of ROD scores of all subspaces per observation.
Note that if gm, data_scaler, angles_scalers1, angles_scalers2 are None,
that means it is a `fit()` process and they will be calculated and returned
to the class to be saved for future prediction. Otherwise, if they are not None,
then it is a prediction process.
Parameters
----------
X : array-like, data points
parallel: bool, True runs the algorithm in parallel
gm: list (default=None), the geometric median
median: list (default=None), MAD medians
data_scaler: obj (default=None), RobustScaler of data
angles_scalers1: list (default=None), MinMaxScalers of Angles group 1
angles_scalers2: list (default=None), MinMaxScalers of Angles group 2
Returns
-------
ROD decision scores, gm, median, data_scaler, angles_scalers1, angles_scalers2
"""
if data_scaler is None: # for fitting
data_scaler = RobustScaler()
X = data_scaler.fit_transform(X)
else: # for prediction
X = data_scaler.transform(X)
dim = X.shape[1]
all_subspaces = [X[:, _com] for _com in com(range(dim), 3)]
all_gms = [None] * len(all_subspaces) if gm is None else gm
all_meds = [None] * len(all_subspaces) if median is None else median
all_angles_scalers1 = [None] * len(
all_subspaces) if angles_scalers1 is None else angles_scalers1
all_angles_scalers2 = [None] * len(
all_subspaces) if angles_scalers2 is None else angles_scalers2
if parallel:
p = Pool(multiprocessing.cpu_count())
args = [[
a, b, c, d, e
] for a, b, c, d, e in zip(all_subspaces, all_gms, all_meds,
all_angles_scalers1, all_angles_scalers2)]
results = p.starmap(process_sub, args)
subspaces_scores, gm, median, angles_scalers1, angles_scalers2 = [], [], [], [], []
for res in results:
subspaces_scores.append(list(res[0]))
gm.append(res[1])
median.append(res[2])
angles_scalers1.append(res[3])
angles_scalers2.append(res[4])
scores = np.average(np.array(subspaces_scores).T, axis=1).reshape(-1)
p.close()
p.join()
return scores, gm, median, data_scaler, angles_scalers1, angles_scalers2
subspaces_scores, gm, median, angles_scalers1, angles_scalers2 = [], [], [], [], []
for subspace, _gm, med, ang_s1, ang_s2 in zip(all_subspaces, all_gms,
all_meds,
all_angles_scalers1,
all_angles_scalers2):
scores_, gm_, med_, ang_s1_, ang_s2_ = process_sub(
subspace=subspace,
gm=_gm,
median=med,
scaler1=ang_s1,
scaler2=ang_s2)
subspaces_scores.append(scores_)
gm.append(gm_)
median.append(med_)
angles_scalers1.append(ang_s1_)
angles_scalers2.append(ang_s2_)
scores = np.average(np.array(subspaces_scores).T, axis=1).reshape(-1)
return scores, gm, median, data_scaler, angles_scalers1, angles_scalers2
class ROD(BaseDetector):
"""Rotation-based Outlier Detection (ROD), is a robust and parameter-free
algorithm that requires no statistical distribution assumptions and works
intuitively in three-dimensional space, where the 3D-vectors, representing
the data points, are rotated about the geometric median two times
counterclockwise using Rodrigues rotation formula. The results of the
rotation are parallelepipeds where their volumes are mathematically
analyzed as cost functions and used to calculate the Median Absolute
Deviations to obtain the outlying score. For high dimensions > 3, the
overall score is calculated by taking the average of the overall
3D-subspaces scores, that were resulted from decomposing the original data
space. See :cite:`almardeny2020novel` for details.
Parameters
----------
contamination : float in (0., 0.5), optional (default=0.1)
The amount of contamination of the data set, i.e.
the proportion of outliers in the data set. Used when fitting to
define the threshold on the decision function.
parallel_execution: bool, optional (default=False).
If set to True, the algorithm will run in parallel,
for a better execution time. It is recommended to set
this parameter to True ONLY for high dimensional data > 10,
and if a proper hardware is available.
Attributes
----------
decision_scores_ : numpy array of shape (n_samples,)
The outlier scores of the training data.
The higher, the more abnormal. Outliers tend to have higher
scores. This value is available once the detector is
fitted.
threshold_ : float
The threshold is based on ``contamination``. It is the
``n_samples * contamination`` most abnormal samples in
``decision_scores_``. The threshold is calculated for generating
binary outlier labels.
labels_ : int, either 0 or 1
The binary labels of the training data. 0 stands for inliers
and 1 for outliers/anomalies. It is generated by applying
``threshold_`` on ``decision_scores_``.
"""
def __init__(self, contamination=0.1, parallel_execution=False):
super(ROD, self).__init__(contamination=contamination)
if not isinstance(parallel_execution, bool):
raise TypeError('parallel_execution should be bool. '
'Got {}'.format(type(parallel_execution)))
self.parallel = parallel_execution
self.gm = None # geometric median(s)
self.median = None # MAD median(s)
self.data_scaler = None # data scaler (in case of d>3)
self.angles_scaler1 = None # scaler(s) of Angles Group 1
self.angles_scaler2 = None # scaler(s) of Angles Group 2
def fit(self, X, y=None):
"""Fit detector. y is ignored in unsupervised methods.
Parameters
----------
X : numpy array of shape (n_samples, n_features)
The input samples.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Fitted estimator.
"""
X = check_array(X)
self._set_n_classes(y)
# reset learning parameters after each fit
self.gm = None
self.median = None
self.data_scaler = None
self.angles_scaler1 = None
self.angles_scaler2 = None
self.decision_scores_ = self.decision_function(X)
self._process_decision_scores()
return self
def decision_function(self, X):
"""Predict raw anomaly score of X using the fitted detector.
The anomaly score of an input sample is computed based on different
detector algorithms. For consistency, outliers are assigned with
larger anomaly scores.
Parameters
----------
X : numpy array of shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only
if they are supported by the base estimator.
Returns
-------
anomaly_scores : numpy array of shape (n_samples,)
The anomaly score of the input samples.
"""
X = check_array(X)
if X.shape[1] < 3:
X = np.hstack((X, np.zeros(shape=(X.shape[0], 3 - X.shape[1]))))
if X.shape[1] == 3:
scores, self.gm, self.median, self.angles_scaler1, self.angles_scaler2 = rod_3D(
x=X,
gm=self.gm,
median=self.median,
scaler1=self.angles_scaler1,
scaler2=self.angles_scaler2)
return scores
scores, self.gm, self.median, self.data_scaler, \
self.angles_scaler1, self.angles_scaler2 = rod_nD(X=X,
parallel=self.parallel,
gm=self.gm,
median=self.median,
data_scaler=self.data_scaler,
angles_scalers1=self.angles_scaler1,
angles_scalers2=self.angles_scaler2)
return scores
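# Added usage sketch (not part of the original module; ROD lives inside a
# package, so a caller elsewhere might use it roughly like this):
#   import numpy as np
#   from algorithms.anomaly.rod import ROD   # adjust to the actual package layout
#   X = np.random.randn(200, 5)
#   detector = ROD(contamination=0.1, parallel_execution=False)
#   detector.fit(X)
#   print(detector.decision_scores_[:5])  # higher means more abnormal
#   print(detector.labels_[:10])          # 1 = outlier, 0 = inlier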
```
#### File: algorithms/ctr/layer.py
```python
import numpy as np
import torch
import torch.nn.functional as F
class FeaturesLinear(torch.nn.Module):
def __init__(self, field_dims, output_dim=1):
super().__init__()
self.fc = torch.nn.Embedding(sum(field_dims), output_dim)
self.bias = torch.nn.Parameter(torch.zeros((output_dim, )))
        self.offsets = np.array((0, *np.cumsum(field_dims)[:-1]),
                                dtype=np.int64)
def forward(self, x):
"""
:param x: Long tensor of size ``(batch_size, num_fields)``
"""
x = x + x.new_tensor(self.offsets).unsqueeze(0)
return torch.sum(self.fc(x), dim=1) + self.bias
class FeaturesEmbedding(torch.nn.Module):
def __init__(self, field_dims, embed_dim):
super().__init__()
self.embedding = torch.nn.Embedding(sum(field_dims), embed_dim)
        self.offsets = np.array((0, *np.cumsum(field_dims)[:-1]),
                                dtype=np.int64)
torch.nn.init.xavier_uniform_(self.embedding.weight.data)
def forward(self, x):
"""
:param x: Long tensor of size ``(batch_size, num_fields)``
"""
x = x + x.new_tensor(self.offsets).unsqueeze(0)
return self.embedding(x)
class FieldAwareFactorizationMachine(torch.nn.Module):
def __init__(self, field_dims, embed_dim):
super().__init__()
self.num_fields = len(field_dims)
self.embeddings = torch.nn.ModuleList([
torch.nn.Embedding(sum(field_dims), embed_dim)
for _ in range(self.num_fields)
])
        self.offsets = np.array((0, *np.cumsum(field_dims)[:-1]),
                                dtype=np.int64)
for embedding in self.embeddings:
torch.nn.init.xavier_uniform_(embedding.weight.data)
def forward(self, x):
"""
:param x: Long tensor of size ``(batch_size, num_fields)``
"""
x = x + x.new_tensor(self.offsets).unsqueeze(0)
xs = [self.embeddings[i](x) for i in range(self.num_fields)]
ix = list()
for i in range(self.num_fields - 1):
for j in range(i + 1, self.num_fields):
ix.append(xs[j][:, i] * xs[i][:, j])
ix = torch.stack(ix, dim=1)
return ix
class FactorizationMachine(torch.nn.Module):
def __init__(self, reduce_sum=True):
super().__init__()
self.reduce_sum = reduce_sum
def forward(self, x):
"""
:param x: Float tensor of size ``(batch_size, num_fields, embed_dim)``
"""
square_of_sum = torch.sum(x, dim=1)**2
sum_of_square = torch.sum(x**2, dim=1)
ix = square_of_sum - sum_of_square
if self.reduce_sum:
ix = torch.sum(ix, dim=1, keepdim=True)
return 0.5 * ix
class MultiLayerPerceptron(torch.nn.Module):
def __init__(self, input_dim, embed_dims, dropout, output_layer=True):
super().__init__()
layers = list()
for embed_dim in embed_dims:
layers.append(torch.nn.Linear(input_dim, embed_dim))
layers.append(torch.nn.BatchNorm1d(embed_dim))
layers.append(torch.nn.ReLU())
layers.append(torch.nn.Dropout(p=dropout))
input_dim = embed_dim
if output_layer:
layers.append(torch.nn.Linear(input_dim, 1))
self.mlp = torch.nn.Sequential(*layers)
def forward(self, x):
"""
:param x: Float tensor of size ``(batch_size, embed_dim)``
"""
return self.mlp(x)
class InnerProductNetwork(torch.nn.Module):
def forward(self, x):
"""
:param x: Float tensor of size ``(batch_size, num_fields, embed_dim)``
"""
num_fields = x.shape[1]
row, col = list(), list()
for i in range(num_fields - 1):
for j in range(i + 1, num_fields):
row.append(i), col.append(j)
return torch.sum(x[:, row] * x[:, col], dim=2)
class OuterProductNetwork(torch.nn.Module):
def __init__(self, num_fields, embed_dim, kernel_type='mat'):
super().__init__()
num_ix = num_fields * (num_fields - 1) // 2
if kernel_type == 'mat':
kernel_shape = embed_dim, num_ix, embed_dim
elif kernel_type == 'vec':
kernel_shape = num_ix, embed_dim
elif kernel_type == 'num':
kernel_shape = num_ix, 1
else:
raise ValueError('unknown kernel type: ' + kernel_type)
self.kernel_type = kernel_type
self.kernel = torch.nn.Parameter(torch.zeros(kernel_shape))
torch.nn.init.xavier_uniform_(self.kernel.data)
def forward(self, x):
"""
:param x: Float tensor of size ``(batch_size, num_fields, embed_dim)``
"""
num_fields = x.shape[1]
row, col = list(), list()
for i in range(num_fields - 1):
for j in range(i + 1, num_fields):
row.append(i), col.append(j)
p, q = x[:, row], x[:, col]
if self.kernel_type == 'mat':
kp = torch.sum(
p.unsqueeze(1) * self.kernel, dim=-1).permute(0, 2, 1)
return torch.sum(kp * q, -1)
else:
return torch.sum(p * q * self.kernel.unsqueeze(0), -1)
class CrossNetwork(torch.nn.Module):
def __init__(self, input_dim, num_layers):
super().__init__()
self.num_layers = num_layers
self.w = torch.nn.ModuleList([
torch.nn.Linear(input_dim, 1, bias=False)
for _ in range(num_layers)
])
self.b = torch.nn.ParameterList([
torch.nn.Parameter(torch.zeros((input_dim, )))
for _ in range(num_layers)
])
def forward(self, x):
"""
:param x: Float tensor of size ``(batch_size, num_fields, embed_dim)``
"""
x0 = x
for i in range(self.num_layers):
xw = self.w[i](x)
x = x0 * xw + self.b[i] + x
return x
class AttentionalFactorizationMachine(torch.nn.Module):
def __init__(self, embed_dim, attn_size, dropouts):
super().__init__()
self.attention = torch.nn.Linear(embed_dim, attn_size)
self.projection = torch.nn.Linear(attn_size, 1)
self.fc = torch.nn.Linear(embed_dim, 1)
self.dropouts = dropouts
def forward(self, x):
"""
:param x: Float tensor of size ``(batch_size, num_fields, embed_dim)``
"""
num_fields = x.shape[1]
row, col = list(), list()
for i in range(num_fields - 1):
for j in range(i + 1, num_fields):
row.append(i), col.append(j)
p, q = x[:, row], x[:, col]
inner_product = p * q
attn_scores = F.relu(self.attention(inner_product))
attn_scores = F.softmax(self.projection(attn_scores), dim=1)
attn_scores = F.dropout(
attn_scores, p=self.dropouts[0], training=self.training)
attn_output = torch.sum(attn_scores * inner_product, dim=1)
attn_output = F.dropout(
attn_output, p=self.dropouts[1], training=self.training)
return self.fc(attn_output)
class CompressedInteractionNetwork(torch.nn.Module):
def __init__(self, input_dim, cross_layer_sizes, split_half=True):
super().__init__()
self.num_layers = len(cross_layer_sizes)
self.split_half = split_half
self.conv_layers = torch.nn.ModuleList()
prev_dim, fc_input_dim = input_dim, 0
for i in range(self.num_layers):
cross_layer_size = cross_layer_sizes[i]
self.conv_layers.append(
torch.nn.Conv1d(
input_dim * prev_dim,
cross_layer_size,
1,
stride=1,
dilation=1,
bias=True))
if self.split_half and i != self.num_layers - 1:
cross_layer_size //= 2
prev_dim = cross_layer_size
fc_input_dim += prev_dim
self.fc = torch.nn.Linear(fc_input_dim, 1)
def forward(self, x):
"""
:param x: Float tensor of size ``(batch_size, num_fields, embed_dim)``
"""
xs = list()
x0, h = x.unsqueeze(2), x
for i in range(self.num_layers):
x = x0 * h.unsqueeze(1)
batch_size, f0_dim, fin_dim, embed_dim = x.shape
x = x.view(batch_size, f0_dim * fin_dim, embed_dim)
x = F.relu(self.conv_layers[i](x))
if self.split_half and i != self.num_layers - 1:
x, h = torch.split(x, x.shape[1] // 2, dim=1)
else:
h = x
xs.append(x)
return self.fc(torch.sum(torch.cat(xs, dim=1), 2))
class AnovaKernel(torch.nn.Module):
def __init__(self, order, reduce_sum=True):
super().__init__()
self.order = order
self.reduce_sum = reduce_sum
def forward(self, x):
"""
:param x: Float tensor of size ``(batch_size, num_fields, embed_dim)``
"""
batch_size, num_fields, embed_dim = x.shape
a_prev = torch.ones((batch_size, num_fields + 1, embed_dim),
dtype=torch.float).to(x.device)
for t in range(self.order):
a = torch.zeros((batch_size, num_fields + 1, embed_dim),
dtype=torch.float).to(x.device)
a[:, t + 1:, :] += x[:, t:, :] * a_prev[:, t:-1, :]
a = torch.cumsum(a, dim=1)
a_prev = a
if self.reduce_sum:
return torch.sum(a[:, -1, :], dim=-1, keepdim=True)
else:
return a[:, -1, :]
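# Added sketch (not part of the original module): minimal wiring of the pieces
# above into a factorization-machine style score; field_dims and the batch are
# illustrative values only.
if __name__ == '__main__':
    field_dims = [10, 20, 30]          # cardinality of each categorical field
    x = torch.randint(0, 10, (4, 3))   # 4 samples, 3 fields; ids stay below every cardinality
    linear = FeaturesLinear(field_dims)
    embedding = FeaturesEmbedding(field_dims, embed_dim=16)
    fm = FactorizationMachine(reduce_sum=True)
    score = linear(x) + fm(embedding(x))
    print(score.shape)  # torch.Size([4, 1])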
```
#### File: metalearning/metalearning/create_datasets.py
```python
import itertools
import logging
import numpy as np
import pandas as pd
import scipy.stats
def create_regression_dataset(metafeatures, experiments):
X = []
X_indices = []
Y = []
for dataset_name in experiments:
experiment = experiments[dataset_name]
mf = metafeatures.loc[dataset_name]
for i, run in enumerate(experiment):
x1 = pd.Series(
data=[run.params[param] for param in run.params],
index=run.params.keys())
x2 = mf
X.append(x1.append(x2))
X_indices.append('%s_%d' % (dataset_name, i))
Y.append(run.result)
X = pd.DataFrame(X, index=X_indices)
Y = pd.DataFrame(Y, index=X_indices)
logging.info('X.shape %s', X.shape)
logging.info('Y.shape %s', Y.shape)
return X, Y
def create_predict_spearman_rank(metafeatures, experiments, iterator):
X = []
Y = []
Y_names = []
# Calculate the pairwise ranks between datasets
dataset_names = [name for name in metafeatures.index]
cross_product = []
if iterator == 'combination':
for cross in itertools.combinations_with_replacement(
dataset_names, r=2):
cross_product.append(cross)
elif iterator == 'permutation':
for cross in itertools.permutations(dataset_names, r=2):
cross_product.append(cross)
else:
raise NotImplementedError(iterator)
logging.info('Create spearman rank dataset without CV data and %s',
iterator)
logging.info('Using %d datasets', len(dataset_names))
    logging.info('This will result in %d training points', len(cross_product))
# Create inputs and targets
for cross in cross_product:
name = '%s_%s' % (cross[0], cross[1])
mf_1 = metafeatures.loc[cross[0]]
mf_2 = metafeatures.loc[cross[1]]
assert mf_1.dtype == np.float64
assert mf_2.dtype == np.float64
x = np.hstack((mf_1, mf_2))
columns = metafeatures.columns.values
index = np.hstack(('0_' + columns, '1_' + columns))
x = pd.Series(data=x, name=name, index=index)
X.append(x)
experiments_1 = experiments[cross[0]]
experiments_2 = experiments[cross[1]]
assert len(experiments_1) == len(experiments_2), name
responses_1 = np.zeros((len(experiments_1)), dtype=np.float64)
responses_2 = np.zeros((len(experiments_1)), dtype=np.float64)
for idx, zipped in enumerate(
zip(
sorted(experiments_1, key=lambda t: str(t.configuration)),
sorted(experiments_2,
key=lambda t: str(t.configuration)))):
# Test if the order of the params is the same
exp_1, exp_2 = zipped
print(exp_1.configuration, exp_2.configuration)
assert exp_1.configuration == exp_2.configuration,\
(experiments_1, experiments_2)
responses_1[idx] = exp_1.result if np.isfinite(exp_1.result) else 1
responses_2[idx] = exp_2.result if np.isfinite(exp_2.result) else 1
rho, p = scipy.stats.spearmanr(responses_1, responses_2)
# rho, p = scipy.stats.kendalltau(responses_1, responses_2)
if not np.isfinite(rho):
rho = 0
Y.append(rho)
Y_names.append(name)
X = pd.DataFrame(X)
Y = pd.Series(Y, index=Y_names)
logging.info('Metafeatures %s', metafeatures.shape)
logging.info('X.shape %s', X.shape)
logging.info('Y.shape %s', Y.shape)
assert X.shape == (len(cross_product), metafeatures.shape[1] * 2), \
        (X.shape, (len(cross_product), metafeatures.shape[1] * 2))
assert Y.shape == (len(cross_product), )
# train sklearn regressor (tree) with 10fold CV
    indices = np.arange(len(X))  # use an array: RandomState.shuffle cannot shuffle a range in place
np_rs = np.random.RandomState(42)
np_rs.shuffle(indices)
X = X.iloc[indices]
Y = Y.iloc[indices]
return X, Y
def create_predict_spearman_rank_with_cv(cv_metafeatures, cv_experiments,
iterator):
X = []
Y = []
Y_names = []
# Calculate the pairwise ranks between datasets
dataset_names = [name for name in cv_metafeatures]
cross_product = []
folds_product = []
if iterator == 'combination':
for cross in itertools.combinations_with_replacement(
dataset_names, r=2):
cross_product.append(cross)
for folds in itertools.combinations_with_replacement(range(10), r=2):
folds_product.append(folds)
elif iterator == 'permutation':
for cross in itertools.permutations(dataset_names, r=2):
cross_product.append(cross)
for folds in itertools.permutations(range(10), r=2):
folds_product.append(folds)
else:
raise NotImplementedError()
logging.info('Create spearman rank dataset with CV data %s', iterator)
logging.info('Using %d datasets', len(dataset_names))
    logging.info('This will result in %d training points',
len(cross_product) * len(folds_product))
logging.info('Length of dataset crossproduct %s', len(cross_product))
logging.info('Length of folds crossproduct %s', len(folds_product))
# Create inputs and targets
for i, cross in enumerate(cross_product):
print('%d/%d: %s' % (i, len(cross_product), cross), )
for folds in folds_product:
name = '%s-%d_%s-%d' % (cross[0], folds[0], cross[1], folds[1])
mf_1 = cv_metafeatures[cross[0]][folds[0]]
mf_2 = cv_metafeatures[cross[1]][folds[1]]
assert mf_1.dtype == np.float64
assert mf_2.dtype == np.float64
x = np.hstack((mf_1, mf_2))
columns = cv_metafeatures[cross[0]][folds[0]].index.values
index = np.hstack(('0_' + columns, '1_' + columns))
x = pd.Series(data=x, name=name, index=index)
X.append(x)
experiments_1 = cv_experiments[cross[0]][folds[0]]
experiments_2 = cv_experiments[cross[1]][folds[1]]
assert len(experiments_1) == len(experiments_2)
responses_1 = np.zeros((len(experiments_1)), dtype=np.float64)
responses_2 = np.zeros((len(experiments_1)), dtype=np.float64)
for idx, zipped in enumerate(zip(experiments_1, experiments_2)):
# Test if the order of the params is the same
exp_1, exp_2 = zipped
assert exp_1.params == exp_2.params
responses_1[idx] = exp_1.result
responses_2[idx] = exp_2.result
rho, p = scipy.stats.spearmanr(responses_1, responses_2)
# A nan is produced if all values of one of the response lists
# are equal. This results in a division by zero. Because there is
# no correlation if all values are the same, rho is replaced by
# zero...
# It would probably be better to assign random ranks for equal
# values, but scipy doesn't support this...
if not np.isfinite(rho):
rho = 0
Y.append(rho)
Y_names.append(name)
X = pd.DataFrame(X)
Y = pd.Series(Y, index=Y_names)
logging.info('CV_Metafeatures %s', cv_metafeatures.shape)
logging.info('X.shape %s', X.shape)
logging.info('Y.shape %s', Y.shape)
# train sklearn regressor (tree) with 10fold CV
    indices = np.arange(len(X))  # use an array: RandomState.shuffle cannot shuffle a range in place
np_rs = np.random.RandomState(42)
np_rs.shuffle(indices)
X = X.iloc[indices]
Y = Y.iloc[indices]
return X, Y
"""
def create_smac_warmstart_files(context, dataset, output_dir, num_warmstarts):
runs_and_results = StringIO.StringIO()
runs_and_results.write("Run Number,Run History Configuration ID,Instance ID,"
"Response Value (y),Censored?,Cutoff Time Used,Seed,"
"Runtime,Run Length,Run Result Code,Run Quality,SMAC"
" Iteration,SMAC Cumulative Runtime,Run Result,"
"Additional Algorithm Run Data,Wall Clock Time,\n")
paramstrings = StringIO.StringIO()
best_hyperparameters, distances = metalearner.metalearn_base(context)
hp_list, name_list, dist_list = metalearner.assemble_best_hyperparameters_list(
best_hyperparameters, distances)
for i in range(len(hp_list)):
print hp_list[i], name_list[i], dist_list[i]
def create_smac_files_file(cv_metafeatures, cv_experiments, dataset,
output_dir):
runs_and_results = StringIO.StringIO()
runs_and_results.write("Run Number,Run History Configuration ID,Instance ID,"
"Response Value (y),Censored?,Cutoff Time Used,Seed,"
"Runtime,Run Length,Run Result Code,Run Quality,SMAC"
" Iteration,SMAC Cumulative Runtime,Run Result,"
"Additional Algorithm Run Data,Wall Clock Time,\n")
paramstrings = StringIO.StringIO()
train_instances_file = StringIO.StringIO()
feature_file = StringIO.StringIO()
scenario_file = StringIO.StringIO()
run_number = 1
instance_number = 1
# TODO: is it possible to get_value the openml dataset id?
for dataset_number, name in enumerate(cv_experiments):
for fold in cv_experiments[name]:
configuration_id = 1
iteration = int(run_number/2)
# if name == dataset, we don't want to put the rundata in there
# because we want to optimize for name
if name != dataset:
for exp in cv_experiments[name][fold]:
str = "%s,%s,%s,%f,0,108000,-1,%f,1,1,%f,%d,%f,SAT,Aditional data,%f" \
% (run_number, configuration_id, instance_number, exp.result, 1.0,
exp.result, iteration, float(run_number), 1.0)
runs_and_results.write(str + "\n")
run_number += 1
configuration_id += 1
train_instances_file.write("%d-%d\n" % (dataset_number, fold))
instance_number += 1
if instance_number > 100:
break
configuration_id = 1
for exp in cv_experiments[name][0]:
paramstring = ", ".join(["%s='%s'" % (re.sub("^-", "",param),
exp.params[param]) for param in exp.params])
paramstrings.write("%d: %s\n" % (configuration_id, paramstring))
with open(os.path.join(output_dir, "runs_and_results-it%d.csv" %
iteration), "w") as fh:
runs_and_results.seek(0)
for line in runs_and_results:
fh.write(line)
with open(os.path.join(output_dir, "paramstrings-it%d.txt" % iteration),
"w") as fh:
paramstrings.seek(0)
for line in paramstrings:
fh.write(line)
with open(os.path.join(output_dir, "instances-train.txt"),
"w") as fh:
train_instances_file.seek(0)
for line in train_instances_file:
fh.write(line)
"""
if __name__ == '__main__':
pass
"""
# TODO: right now, this is only done for one split, namely the split of
# the directory we're inside...
# TODO: this only works in a directory, in which a metaexperiment was
# already run...
parser = ArgumentParser()
parser.add_argument("target_directory", type=str)
args = parser.parse_args()
target_directory = args.target_directory
if not os.path.exists(target_directory):
raise ValueError("Target directory %s does not exist." % target_directory)
# Important, change into some directory in which an experiment was already
# performed...
context = metalearner.setup(None)
metafeatures = context["metafeatures"]
#cv_metafeatures = context["cv_metafeatures"]
meta_base = context["meta_base"]
#cv_meta_base = context["cv_meta_base"]
savefile_prefix = "testfold_%d-%d" % (context["test_fold"],
context["test_folds"])
# Use only the pfahringer subset of the available metafeatures
#columns = list()
#columns.extend(mf.subsets["pfahringer_2000_experiment1"])
#print columns
#metafeatures = metafeatures.loc[:,columns]
#for key in cv_metafeatures:
# cv_metafeatures[key] = cv_metafeatures[key].loc[columns,:]
# savefile_prefix += "_pfahringer"
# Remove class_probability_max from the set of metafeatures
# columns = list()
# metafeature_list = mf.subsets["all"]
# metafeature_list.remove("class_probability_max")
# metafeature_list.remove("class_probability_min")
# metafeature_list.remove("class_probability_mean")
# metafeature_list.remove("class_probability_std")
# columns.extend(metafeature_list)
# metafeatures = metafeatures.loc[:,columns]
# for key in cv_metafeatures:
# cv_metafeatures[key] = cv_metafeatures[key].loc[columns,:]
# savefile_prefix += "_woclassprobs"
# Experiment is an OrderedDict, which has dataset names as keys
# The values are lists of experiments(OrderedDict of params, response)
experiments = meta_base.experiments
#cv_experiments = cv_meta_base.experiments
"""
"""
# Build the warmstart directory for SMAC, can be called with
# ./smac --scenario-file <file> --seed 0 --warmstart <foldername>
# needs paramstrings.txt and runs_and_results.txt
# plain
smac_bootstrap_output = "smac_bootstrap_plain"
for dataset in cv_metafeatures:
bootstraps = (2, 5, 10)
distance = ("l1", "l2", "learned_distance")
metafeature_subset = mf.subsets
for num_bootstrap, dist, subset in itertools.product(
bootstraps, distance, metafeature_subset, repeat=1):
context["distance_measure"] = dist
# TODO: somehow only get_value a metafeature subset
dataset_output_dir = os.path.join(target_directory,
smac_bootstrap_output, dataset +
"_bootstrapped%d_%s_%s" % (num_bootstrap, dist, subset))
if not os.path.exists(dataset_output_dir):
os.mkdirs(dataset_output_dir)
create_smac_warmstart_files(context, dataset, dataset_output_dir,
num_warmstarts=num_bootstrap)
break
# with the adjustment of Yogotama and Mann
"""
# X, Y = create_regression_dataset(metafeatures, experiments)
# with open("regression_dataset.pkl", "w") as fh:
# cPickle.dump((X, Y, metafeatures), fh, -1)
"""
# Calculate the metafeatures without the 10fold CV
X, Y = create_predict_spearman_rank(metafeatures, experiments,
iterator="permutation")
spearman_rank_file = os.path.join(target_directory,
savefile_prefix + "_spearman_rank_perm.pkl")
with open(spearman_rank_file, "w") as fh:
cPickle.dump((X, Y, metafeatures), fh, -1)
X, Y = create_predict_spearman_rank(metafeatures, experiments,
iterator="combination")
spearman_rank_file = os.path.join(target_directory,
savefile_prefix + "_spearman_rank_comb.pkl")
with open(spearman_rank_file, "w") as fh:
cPickle.dump((X, Y, metafeatures), fh, -1)
print
# Calculate the metafeatures for the 10fold CV...
X, Y = create_predict_spearman_rank_with_cv(cv_metafeatures,
cv_experiments,
iterator="combination")
spearman_rank_file = os.path.join(target_directory,
savefile_prefix + "_spearman_rank_cv_comb.pkl")
with open(spearman_rank_file, "w") as fh:
cPickle.dump((X, Y, metafeatures), fh, -1)
X, Y = create_predict_spearman_rank_with_cv(cv_metafeatures,
cv_experiments,
iterator="permutation")
spearman_rank_file = os.path.join(target_directory,
savefile_prefix + "_spearman_rank_cv_perm.pkl")
with open(spearman_rank_file, "w") as fh:
cPickle.dump((X, Y, metafeatures), fh, -1)
"""
```
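The guard that replaces a non-finite Spearman coefficient with zero (used in both functions above) covers the case where one response vector is constant, for which the rank correlation is undefined. A tiny, self-contained illustration of that corner case:
```python
import numpy as np
import scipy.stats

# One constant response vector: spearmanr has zero rank variance to work with.
rho, p = scipy.stats.spearmanr([0.5, 0.5, 0.5], [0.1, 0.2, 0.3])
print(rho)                             # nan
print(rho if np.isfinite(rho) else 0)  # 0, the fallback applied above
```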
#### File: components/classification/sgd.py
```python
from autotabular.pipeline.components.base import AutotabularClassificationAlgorithm, IterativeComponentWithSampleWeight
from autotabular.pipeline.constants import DENSE, PREDICTIONS, SPARSE, UNSIGNED_DATA
from autotabular.pipeline.implementations.util import softmax
from autotabular.util.common import check_for_bool
from ConfigSpace.conditions import EqualsCondition, InCondition
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import CategoricalHyperparameter, UniformFloatHyperparameter, UnParametrizedHyperparameter
class SGD(
IterativeComponentWithSampleWeight,
AutotabularClassificationAlgorithm,
):
def __init__(self,
loss,
penalty,
alpha,
fit_intercept,
tol,
learning_rate,
l1_ratio=0.15,
epsilon=0.1,
eta0=0.01,
power_t=0.5,
average=False,
random_state=None):
self.max_iter = self.get_max_iter()
self.loss = loss
self.penalty = penalty
self.alpha = alpha
self.fit_intercept = fit_intercept
self.tol = tol
self.learning_rate = learning_rate
self.l1_ratio = l1_ratio
self.epsilon = epsilon
self.eta0 = eta0
self.power_t = power_t
self.random_state = random_state
self.average = average
self.estimator = None
self.n_iter_ = None
@staticmethod
def get_max_iter():
return 1024
def get_current_iter(self):
return self.n_iter_
def iterative_fit(self, X, y, n_iter=2, refit=False, sample_weight=None):
from sklearn.linear_model import SGDClassifier
# Need to fit at least two iterations, otherwise early stopping will not
# work because we cannot determine whether the algorithm actually
# converged. The only way of finding this out is if the sgd spends less
# iterations than max_iter. If max_iter == 1, it has to spend at least
# one iteration and will always spend at least one iteration, so we
# cannot know about convergence.
if refit:
self.estimator = None
self.n_iter_ = None
if self.estimator is None:
self.fully_fit_ = False
self.alpha = float(self.alpha)
self.l1_ratio = float(self.l1_ratio) if self.l1_ratio is not None \
else 0.15
self.epsilon = float(self.epsilon) if self.epsilon is not None \
else 0.1
self.eta0 = float(self.eta0)
self.power_t = float(self.power_t) if self.power_t is not None \
else 0.5
self.average = check_for_bool(self.average)
self.fit_intercept = check_for_bool(self.fit_intercept)
self.tol = float(self.tol)
self.estimator = SGDClassifier(
loss=self.loss,
penalty=self.penalty,
alpha=self.alpha,
fit_intercept=self.fit_intercept,
max_iter=n_iter,
tol=self.tol,
learning_rate=self.learning_rate,
l1_ratio=self.l1_ratio,
epsilon=self.epsilon,
eta0=self.eta0,
power_t=self.power_t,
shuffle=True,
average=self.average,
random_state=self.random_state,
warm_start=True)
self.estimator.fit(X, y, sample_weight=sample_weight)
self.n_iter_ = self.estimator.n_iter_
else:
self.estimator.max_iter += n_iter
self.estimator.max_iter = min(self.estimator.max_iter,
self.max_iter)
self.estimator._validate_params()
self.estimator._partial_fit(
X,
y,
alpha=self.estimator.alpha,
C=1.0,
loss=self.estimator.loss,
learning_rate=self.estimator.learning_rate,
max_iter=n_iter,
sample_weight=sample_weight,
classes=None,
coef_init=None,
intercept_init=None)
self.n_iter_ += self.estimator.n_iter_
        # Converged if we hit the iteration budget or sklearn stopped early on its own.
        if self.estimator.max_iter >= self.max_iter or self.estimator.max_iter > self.n_iter_:
self.fully_fit_ = True
return self
def configuration_fully_fitted(self):
if self.estimator is None:
return False
elif not hasattr(self, 'fully_fit_'):
return False
else:
return self.fully_fit_
def predict(self, X):
if self.estimator is None:
raise NotImplementedError()
return self.estimator.predict(X)
def predict_proba(self, X):
if self.estimator is None:
raise NotImplementedError()
if self.loss in ['log', 'modified_huber']:
return self.estimator.predict_proba(X)
else:
df = self.estimator.decision_function(X)
return softmax(df)
@staticmethod
def get_properties(dataset_properties=None):
return {
'shortname': 'SGD Classifier',
'name': 'Stochastic Gradient Descent Classifier',
'handles_regression': False,
'handles_classification': True,
'handles_multiclass': True,
'handles_multilabel': False,
'handles_multioutput': False,
'is_deterministic': True,
'input': (DENSE, SPARSE, UNSIGNED_DATA),
'output': (PREDICTIONS, )
}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None):
cs = ConfigurationSpace()
loss = CategoricalHyperparameter(
'loss',
['hinge', 'log', 'modified_huber', 'squared_hinge', 'perceptron'],
default_value='log',
)
penalty = CategoricalHyperparameter(
'penalty', ['l1', 'l2', 'elasticnet'], default_value='l2')
alpha = UniformFloatHyperparameter(
'alpha', 1e-7, 1e-1, log=True, default_value=0.0001)
l1_ratio = UniformFloatHyperparameter(
'l1_ratio', 1e-9, 1, log=True, default_value=0.15)
fit_intercept = UnParametrizedHyperparameter('fit_intercept', 'True')
tol = UniformFloatHyperparameter(
'tol', 1e-5, 1e-1, log=True, default_value=1e-4)
epsilon = UniformFloatHyperparameter(
'epsilon', 1e-5, 1e-1, default_value=1e-4, log=True)
learning_rate = CategoricalHyperparameter(
'learning_rate', ['optimal', 'invscaling', 'constant'],
default_value='invscaling')
eta0 = UniformFloatHyperparameter(
'eta0', 1e-7, 1e-1, default_value=0.01, log=True)
power_t = UniformFloatHyperparameter(
'power_t', 1e-5, 1, default_value=0.5)
average = CategoricalHyperparameter(
'average', ['False', 'True'], default_value='False')
cs.add_hyperparameters([
loss, penalty, alpha, l1_ratio, fit_intercept, tol, epsilon,
learning_rate, eta0, power_t, average
])
# TODO add passive/aggressive here, although not properly documented?
elasticnet = EqualsCondition(l1_ratio, penalty, 'elasticnet')
epsilon_condition = EqualsCondition(epsilon, loss, 'modified_huber')
power_t_condition = EqualsCondition(power_t, learning_rate,
'invscaling')
# eta0 is only relevant if learning_rate!='optimal' according to code
# https://github.com/scikit-learn/scikit-learn/blob/0.19.X/sklearn/
# linear_model/sgd_fast.pyx#L603
eta0_in_inv_con = InCondition(eta0, learning_rate,
['invscaling', 'constant'])
cs.add_conditions([
elasticnet, epsilon_condition, power_t_condition, eta0_in_inv_con
])
return cs
```
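For context, the `ConfigurationSpace` returned by `get_hyperparameter_search_space` can be sampled directly, which is how hyperparameter values reach the `SGD` constructor. A rough sketch, assuming `autotabular` and `ConfigSpace` are installed and that the module path below (a guess based on the file location) is correct:
```python
# Hypothetical import path; adjust to wherever this module lives in the package.
from autotabular.pipeline.components.classification.sgd import SGD

cs = SGD.get_hyperparameter_search_space()
config = cs.sample_configuration()  # honours the conditions added above
# Only active hyperparameters appear in the sampled configuration.
clf = SGD(random_state=1, **{name: config[name] for name in config})
```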
#### File: data_preprocessing/exclude_miss_target/exclude_missing_target.py
```python
import warnings
from typing import Dict, Optional, Tuple, Union
import numpy as np
import pandas as pd
from autotabular.pipeline.base import DATASET_PROPERTIES_TYPE, PIPELINE_DATA_DTYPE
from autotabular.pipeline.components.base import AutotabularPreprocessingAlgorithm
from autotabular.pipeline.constants import DENSE, INPUT, SPARSE, UNSIGNED_DATA
from ConfigSpace.configuration_space import ConfigurationSpace
class ExcludeRowsMissingTargetTransformer(AutotabularPreprocessingAlgorithm):
def __init__(self,
sample_weight=None,
warn=False,
random_state: Optional[np.random.RandomState] = None):
self.sample_weight = sample_weight
self.warn = warn
self.random_state = random_state
def fit(
self,
X: PIPELINE_DATA_DTYPE,
y: Optional[PIPELINE_DATA_DTYPE] = None,
) -> 'ExcludeRowsMissingTargetTransformer':
self.preprocessor = ExcludeRowsMissingTarget()
return self
def transform(self, X: PIPELINE_DATA_DTYPE,
y: Optional[PIPELINE_DATA_DTYPE]) -> PIPELINE_DATA_DTYPE:
if self.preprocessor is None:
raise NotImplementedError()
return self.preprocessor.transform(
X, y, sample_weight=self.sample_weight, warn=self.warn)
@staticmethod
def get_properties(
dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None
) -> Dict[str, Optional[Union[str, int, bool, Tuple]]]:
return {
'shortname': 'ExcludeRowsMissingTargetTransformer',
'name': 'ExcludeRowsMissingTargetTransformer',
'handles_regression': True,
'handles_classification': True,
'handles_multiclass': True,
'handles_multilabel': True,
'handles_multioutput': True,
            # TODO find out if this is right!
'handles_sparse': True,
'handles_dense': True,
'input': (DENSE, SPARSE, UNSIGNED_DATA),
'output': (INPUT, ),
}
@staticmethod
def get_hyperparameter_search_space(
dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None,
) -> ConfigurationSpace:
return ConfigurationSpace()
class ExcludeRowsMissingTarget(object):
"""ExcludeRowsMissingTarget Transformer.
reference:
https://github.com/mljar/mljar-supervised
"""
@staticmethod
def transform(X=None, y=None, sample_weight=None, warn=False):
if y is None:
return X, y, sample_weight
y_missing = pd.isnull(y)
if np.sum(np.array(y_missing)) == 0:
return X, y, sample_weight
if warn:
warnings.warn(
'There are samples with missing target values in the data which will be excluded for further analysis'
)
y = y.drop(y.index[y_missing])
y.reset_index(drop=True, inplace=True)
if X is not None:
X = X.drop(X.index[y_missing])
X.reset_index(drop=True, inplace=True)
if sample_weight is not None:
sample_weight = sample_weight.drop(sample_weight.index[y_missing])
sample_weight.reset_index(drop=True, inplace=True)
return X, y, sample_weight
```
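A small, self-contained check of the static `transform` above (assuming the `ExcludeRowsMissingTarget` class is in scope; only pandas and numpy are needed): rows whose target is missing are dropped from `X`, `y`, and the sample weights alike, and all indices are reset.
```python
import numpy as np
import pandas as pd

X = pd.DataFrame({'f1': [1, 2, 3, 4], 'f2': [10.0, 20.0, 30.0, 40.0]})
y = pd.Series([0.0, np.nan, 1.0, np.nan])
w = pd.Series([1.0, 1.0, 2.0, 2.0])

X2, y2, w2 = ExcludeRowsMissingTarget.transform(X, y, sample_weight=w, warn=True)
print(len(X2), len(y2), len(w2))  # 2 2 2
print(y2.tolist())                # [0.0, 1.0]
```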
#### File: data_preprocessing/imputation/numerical_imputation.py
```python
from typing import Dict, Optional, Tuple, Union
import numpy as np
from autotabular.pipeline.base import DATASET_PROPERTIES_TYPE, PIPELINE_DATA_DTYPE
from autotabular.pipeline.components.base import AutotabularPreprocessingAlgorithm
from autotabular.pipeline.constants import DENSE, INPUT, SPARSE, UNSIGNED_DATA
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import CategoricalHyperparameter
class NumericalImputation(AutotabularPreprocessingAlgorithm):
def __init__(self,
strategy: str = 'mean',
random_state: Optional[np.random.RandomState] = None):
self.strategy = strategy
self.random_state = random_state
def fit(self,
X: PIPELINE_DATA_DTYPE,
y: Optional[PIPELINE_DATA_DTYPE] = None) -> 'NumericalImputation':
import sklearn.impute
self.preprocessor = sklearn.impute.SimpleImputer(
strategy=self.strategy, copy=False)
self.preprocessor.fit(X)
return self
def transform(self, X: PIPELINE_DATA_DTYPE) -> PIPELINE_DATA_DTYPE:
if self.preprocessor is None:
raise NotImplementedError()
return self.preprocessor.transform(X)
@staticmethod
def get_properties(
dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None
) -> Dict[str, Optional[Union[str, int, bool, Tuple]]]:
return {
'shortname': 'NumericalImputation',
'name': 'Numerical Imputation',
'handles_missing_values': True,
'handles_nominal_values': True,
'handles_numerical_features': True,
'prefers_data_scaled': False,
'prefers_data_normalized': False,
'handles_regression': True,
'handles_classification': True,
'handles_multiclass': True,
'handles_multilabel': True,
'handles_multioutput': True,
'is_deterministic': True,
# TODO find out if this is right!
'handles_sparse': True,
'handles_dense': True,
'input': (DENSE, SPARSE, UNSIGNED_DATA),
'output': (INPUT, ),
'preferred_dtype': None
}
@staticmethod
def get_hyperparameter_search_space(
dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None
) -> ConfigurationSpace:
# TODO add replace by zero!
strategy = CategoricalHyperparameter(
'strategy', ['mean', 'median', 'most_frequent'],
default_value='mean')
cs = ConfigurationSpace()
cs.add_hyperparameter(strategy)
return cs
```
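The component above is a thin wrapper around `sklearn.impute.SimpleImputer`. A minimal sketch of the behaviour it delegates to, assuming the `NumericalImputation` class is in scope and scikit-learn is installed:
```python
import numpy as np

X = np.array([[1.0, 2.0],
              [np.nan, 4.0],
              [7.0, np.nan]])

imputer = NumericalImputation(strategy='mean')
print(imputer.fit(X).transform(X))
# [[1. 2.]
#  [4. 4.]   <- filled with the column mean of [1, 7]
#  [7. 3.]]  <- filled with the column mean of [2, 4]
```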
#### File: components/feature_preprocessing/densifier.py
```python
from autotabular.pipeline.components.base import AutotabularPreprocessingAlgorithm
from autotabular.pipeline.constants import DENSE, INPUT, SPARSE, UNSIGNED_DATA
from ConfigSpace.configuration_space import ConfigurationSpace
class Densifier(AutotabularPreprocessingAlgorithm):
def __init__(self, random_state=None):
pass
def fit(self, X, y=None):
self.fitted_ = True
return self
def transform(self, X):
from scipy import sparse
if sparse.issparse(X):
return X.todense().getA()
else:
return X
@staticmethod
def get_properties(dataset_properties=None):
return {
            'shortname': 'Densifier',
            'name': 'Densifier',
'handles_regression': True,
'handles_classification': True,
'handles_multiclass': True,
'handles_multilabel': True,
'handles_multioutput': True,
'is_deterministic': True,
'input': (SPARSE, UNSIGNED_DATA),
'output': (DENSE, INPUT)
}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None):
cs = ConfigurationSpace()
return cs
```
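`Densifier` does nothing beyond turning a sparse input into a dense `ndarray`, as the short check below illustrates (assuming the class above is in scope; only numpy and scipy are required):
```python
import numpy as np
from scipy import sparse

X_sparse = sparse.csr_matrix(np.eye(3))
X_dense = Densifier().fit(X_sparse).transform(X_sparse)
print(type(X_dense).__name__, X_dense.shape)  # ndarray (3, 3)
```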
#### File: components/feature_preprocessing/select_percentile_regression.py
```python
from autotabular.pipeline.components.base import AutotabularPreprocessingAlgorithm
from autotabular.pipeline.components.feature_preprocessing.select_percentile import SelectPercentileBase
from autotabular.pipeline.constants import DENSE, INPUT, SPARSE, UNSIGNED_DATA
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import CategoricalHyperparameter, UniformFloatHyperparameter
class SelectPercentileRegression(SelectPercentileBase,
AutotabularPreprocessingAlgorithm):
def __init__(self,
percentile,
score_func='f_regression',
random_state=None):
""" Parameters:
random state : ignored
score_func : callable, Function taking two arrays X and y, and
returning a pair of arrays (scores, pvalues).
"""
import sklearn.feature_selection
self.random_state = random_state # We don't use this
self.percentile = int(float(percentile))
if score_func == 'f_regression':
self.score_func = sklearn.feature_selection.f_regression
elif score_func == 'mutual_info':
self.score_func = sklearn.feature_selection.mutual_info_regression
else:
raise ValueError("Don't know this scoring function: %s" %
score_func)
@staticmethod
def get_properties(dataset_properties=None):
return {
'shortname': 'SPR',
'name': 'Select Percentile Regression',
'handles_regression': True,
'handles_classification': False,
'handles_multiclass': False,
'handles_multilabel': False,
'handles_multioutput': False,
'is_deterministic': True,
'input': (DENSE, SPARSE, UNSIGNED_DATA),
'output': (INPUT, )
}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None):
percentile = UniformFloatHyperparameter(
'percentile', lower=1, upper=99, default_value=50)
score_func = CategoricalHyperparameter(
name='score_func', choices=['f_regression', 'mutual_info'])
cs = ConfigurationSpace()
cs.add_hyperparameters([percentile, score_func])
return cs
```
#### File: automlbechmark/prepare_datasets/prepare_ny_taxi_trip_dutation.py
```python
import os
from multiprocessing import Pool
from pathlib import Path
from time import time
import numpy as np
import pandas as pd
from geopy.distance import geodesic
pd.options.display.max_columns = 100
ROOT_DIR = Path('/home/robin/jianzh/autotabular/examples/automlbechmark')
RAW_DATA_DIR = ROOT_DIR / 'data/raw_data/nyc_taxi'
PROCESSED_DATA_DIR = ROOT_DIR / 'processed_data/nyc_taxi/'
if not os.path.isdir(PROCESSED_DATA_DIR):
os.makedirs(PROCESSED_DATA_DIR)
nyc_taxi = pd.read_csv(
RAW_DATA_DIR / 'train_extended.csv',
parse_dates=['pickup_datetime', 'dropoff_datetime'],
)
nyc_taxi = nyc_taxi[nyc_taxi.passenger_count.between(1,
6)].reset_index(drop=True)
nyc_taxi.drop('id', axis=1, inplace=True)
# Chronological split
nyc_taxi = nyc_taxi.sort_values('pickup_datetime').reset_index(drop=True)
test_size = int(np.ceil(nyc_taxi.shape[0] * 0.1))
train_size = nyc_taxi.shape[0] - test_size * 2
# train
nyc_taxi_train = nyc_taxi.iloc[:train_size].reset_index(drop=True)
tmp = nyc_taxi.iloc[train_size:].reset_index(drop=True)
# valid and test
nyc_taxi_val = tmp.iloc[:test_size].reset_index(drop=True)
nyc_taxi_test = tmp.iloc[test_size:].reset_index(drop=True)
nyc_taxi_train['dset'] = 0
nyc_taxi_val['dset'] = 1
nyc_taxi_test['dset'] = 2
nyc_taxi = pd.concat([nyc_taxi_train, nyc_taxi_val, nyc_taxi_test])
del (nyc_taxi_train, nyc_taxi_val, nyc_taxi_test)
remove_index_cols = ['day_period', 'month', 'season', 'day_name']
for col in remove_index_cols:
nyc_taxi[col] = nyc_taxi[col].apply(lambda x: x.split('.')[-1])
txt_cols = [
'pickup_neighbourhood',
'dropoff_district',
'dropoff_neighbourhood',
'day_period',
'month',
'season',
'weekday_or_weekend',
'regular_day_or_holiday',
'day_name',
]
for col in txt_cols:
nyc_taxi[col] = nyc_taxi[col].str.lower()
neighbourhood_cols = ['pickup_neighbourhood', 'dropoff_neighbourhood']
for col in neighbourhood_cols:
nyc_taxi[col] = nyc_taxi[col].apply(
lambda x: x.replace(' ', '_').replace('-', '_'))
nyc_taxi['day_of_month'] = nyc_taxi.pickup_datetime.dt.day
def distance_travelled(coords):
return geodesic((coords[0], coords[1]), (coords[2], coords[3])).km
start_lats = nyc_taxi.pickup_latitude.tolist()
start_lons = nyc_taxi.pickup_longitude.tolist()
end_lats = nyc_taxi.dropoff_latitude.tolist()
end_lons = nyc_taxi.dropoff_longitude.tolist()
s = time()
with Pool(8) as p:
distances = p.map(distance_travelled,
zip(start_lats, start_lons, end_lats, end_lons))
e = time() - s
print('computing distances took {} secs'.format(e))
nyc_taxi['distance_travelled'] = distances
nyc_taxi['pickup_x'] = np.cos(nyc_taxi.pickup_latitude) * np.cos(
nyc_taxi.pickup_longitude)
nyc_taxi['dropoff_x'] = np.cos(nyc_taxi.dropoff_latitude) * np.cos(
nyc_taxi.dropoff_longitude)
nyc_taxi['pickup_y'] = np.cos(nyc_taxi.pickup_longitude) * np.sin(
nyc_taxi.pickup_longitude)
nyc_taxi['dropoff_y'] = np.cos(nyc_taxi.dropoff_longitude) * np.sin(
nyc_taxi.dropoff_longitude)
nyc_taxi['pickup_z'] = np.sin(nyc_taxi.pickup_latitude)
nyc_taxi['dropoff_z'] = np.sin(nyc_taxi.dropoff_latitude)
nyc_taxi['pickup_latitude'] = nyc_taxi.pickup_latitude / 60
nyc_taxi['dropoff_latitude'] = nyc_taxi.dropoff_latitude / 60
nyc_taxi['pickup_longitude'] = nyc_taxi.pickup_longitude / 180
nyc_taxi['dropoff_longitude'] = nyc_taxi.dropoff_longitude / 180
# I know we have trip_duration in the data, but just for sanity
nyc_taxi['target'] = (nyc_taxi.dropoff_datetime -
nyc_taxi.pickup_datetime).astype('timedelta64[s]')
nyc_taxi_train = nyc_taxi[nyc_taxi.dset == 0].drop('dset', axis=1)
nyc_taxi_val = nyc_taxi[nyc_taxi.dset == 1].drop('dset', axis=1)
nyc_taxi_test = nyc_taxi[nyc_taxi.dset == 2].drop('dset', axis=1)
nyc_taxi_train.to_pickle(PROCESSED_DATA_DIR / 'nyc_taxi_train.p')
nyc_taxi_val.to_pickle(PROCESSED_DATA_DIR / 'nyc_taxi_val.p')
nyc_taxi_test.to_pickle(PROCESSED_DATA_DIR / 'nyc_taxi_test.p')
```
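One caveat worth flagging in the script above: the x/y/z projection feeds latitude and longitude in degrees straight into `np.cos`/`np.sin`, and the `*_y` columns use longitude in both factors, so these columns are rough engineered features rather than true Cartesian coordinates. If exact unit-sphere coordinates were wanted, the conventional conversion would look like the sketch below (illustrative only; the coordinates are a made-up pickup point, and this is not what the script computes):
```python
import numpy as np

lat_deg, lon_deg = 40.7580, -73.9855  # hypothetical pickup point
lat, lon = np.radians(lat_deg), np.radians(lon_deg)

x = np.cos(lat) * np.cos(lon)
y = np.cos(lat) * np.sin(lon)
z = np.sin(lat)
print(x**2 + y**2 + z**2)             # 1.0: a point on the unit sphere
```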
#### File: run_experiments/bank_marketing/bankm_lr.py
```python
import os
from pathlib import Path
import pandas as pd
from autofe.get_feature import get_baseline_total_data, get_GBDT_total_data, get_groupby_total_data, train_and_evaluate
from sklearn.linear_model import LogisticRegression
SEED = 42
def generate_cross_cols(df: pd.DataFrame, crossed_cols):
df_cc = df.copy()
crossed_colnames = []
for cols in crossed_cols:
for c in cols:
df_cc[c] = df_cc[c].astype('str')
colname = '_'.join(cols)
df_cc[colname] = df_cc[list(cols)].apply(lambda x: '-'.join(x), axis=1)
crossed_colnames.append(colname)
return df_cc[crossed_colnames]
if __name__ == '__main__':
ROOTDIR = Path('./')
PROCESSED_DATA_DIR = ROOTDIR / 'data/processed_data/bank_marketing/'
RESULTS_DIR = ROOTDIR / 'results/bank_marketing/logistic_regression'
if not RESULTS_DIR.is_dir():
os.makedirs(RESULTS_DIR)
train_datafile = PROCESSED_DATA_DIR / 'train_data.csv'
test_datafile = PROCESSED_DATA_DIR / 'test_data.csv'
train_data = pd.read_csv(PROCESSED_DATA_DIR / 'train_data.csv')
test_data = pd.read_csv(PROCESSED_DATA_DIR / 'test_data.csv')
len_train = len(train_data)
total_data = pd.concat([train_data, test_data]).reset_index(drop=True)
target_name = 'target'
print(total_data.info())
"""lr baseline"""
classfier = LogisticRegression(random_state=0)
total_data_base = get_baseline_total_data(total_data)
acc, auc = train_and_evaluate(total_data_base, target_name, len_train,
classfier)
"""groupby + lr"""
threshold = 0.9
k = 5
methods = ['min', 'max', 'sum', 'mean', 'std', 'count']
total_data_groupby = get_groupby_total_data(total_data, target_name,
threshold, k, methods)
total_data_groupby = pd.get_dummies(total_data_groupby).fillna(0)
total_data_groupby.to_csv(
PROCESSED_DATA_DIR / 'adult_groupby.csv', index=False)
acc, auc = train_and_evaluate(total_data_groupby, target_name, len_train,
classfier)
"""GBDT + lr"""
total_data_GBDT = get_GBDT_total_data(total_data, target_name)
total_data_GBDT.to_csv(PROCESSED_DATA_DIR / 'adult_gbdt.csv', index=False)
acc, auc = train_and_evaluate(total_data_GBDT, target_name, len_train,
classfier)
```
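`generate_cross_cols` is defined but never called in this script; a toy illustration of what it produces, assuming the function above is in scope:
```python
import pandas as pd

df = pd.DataFrame({'job': ['admin', 'technician'],
                   'marital': ['married', 'single']})
crossed = generate_cross_cols(df, crossed_cols=[('job', 'marital')])
print(crossed['job_marital'].tolist())
# ['admin-married', 'technician-single']
```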
#### File: run_experiments/bank_marketing/lightgbm_optimizer.py
```python
import warnings
from typing import Any, Dict, Optional
import lightgbm as lgb
import pandas as pd
from hyperopt import Trials, fmin, hp, space_eval, tpe
from lightgbm import Dataset as lgbDataset
from optuna.integration.lightgbm import LightGBMTunerCV
from sklearn.metrics import log_loss, mean_squared_error
warnings.filterwarnings('ignore')
class LGBOptimizerHyperopt(object):
def __init__(
self,
objective: str = 'binary',
is_unbalance: bool = False,
verbose: bool = False,
num_class: Optional[int] = None,
):
self.objective = objective
if objective == 'multiclass' and not num_class:
raise ValueError(
'num_class must be provided for multiclass problems')
self.num_class = num_class
self.is_unbalance = is_unbalance
self.verbose = verbose
self.early_stop_dict: Dict = {}
def optimize(
self,
dtrain: lgbDataset,
deval: lgbDataset,
maxevals: int = 200,
):
if self.objective == 'regression':
self.best = lgb.LGBMRegressor().get_params()
else:
self.best = lgb.LGBMClassifier().get_params()
del (self.best['silent'], self.best['importance_type'])
param_space = self.hyperparameter_space()
objective = self.get_objective(dtrain, deval)
objective.i = 0
trials = Trials()
best = fmin(
fn=objective,
space=param_space,
algo=tpe.suggest,
max_evals=maxevals,
trials=trials,
verbose=self.verbose,
)
self.trials = trials
best = space_eval(param_space, trials.argmin)
best['n_estimators'] = int(best['n_estimators'])
best['num_leaves'] = int(best['num_leaves'])
best['min_child_samples'] = int(best['min_child_samples'])
best['verbose'] = -1
best['objective'] = self.objective
self.best.update(best)
def get_objective(self, dtrain: lgbDataset, deval: lgbDataset):
def objective(params: Dict[str, Any]) -> float:
# hyperopt casts as float
params['n_estimators'] = int(params['n_estimators'])
params['num_leaves'] = int(params['num_leaves'])
params['min_child_samples'] = int(params['min_child_samples'])
params['verbose'] = -1
params['seed'] = 1
params['feature_pre_filter'] = False
params['objective'] = self.objective
if self.objective != 'regression':
params['is_unbalance'] = self.is_unbalance
if self.objective == 'multiclass':
params['num_class'] = self.num_class
model = lgb.train(
params,
dtrain,
valid_sets=[deval],
early_stopping_rounds=50,
verbose_eval=False,
)
preds = model.predict(deval.data)
if self.objective != 'regression':
score = log_loss(deval.label, preds)
elif self.objective == 'regression':
score = mean_squared_error(deval.label, preds)
objective.i += 1 # type: ignore
return score
return objective
def hyperparameter_space(self,
param_space: Dict[str,
Any] = None) -> Dict[str, Any]:
space = {
'learning_rate':
hp.uniform('learning_rate', 0.01, 0.3),
'n_estimators':
hp.quniform('n_estimators', 100, 1000, 50),
'num_leaves':
hp.quniform('num_leaves', 20, 200, 10),
'min_child_samples':
hp.quniform('min_child_samples', 20, 100, 20),
'colsample_bytree':
hp.uniform('colsample_bytree', 0.5, 1.0),
'reg_alpha':
hp.choice('reg_alpha',
[0.01, 0.05, 0.1, 0.2, 0.4, 1.0, 2.0, 4.0, 10.0]),
'reg_lambda':
hp.choice('reg_lambda',
[0.01, 0.05, 0.1, 0.2, 0.4, 1.0, 2.0, 4.0, 10.0]),
}
if param_space:
return param_space
else:
return space
class LGBOptimizerOptuna(object):
def __init__(
self,
objective: str = 'binary',
is_unbalance: bool = False,
verbose: bool = False,
num_class: Optional[int] = None,
):
self.objective = objective
if objective == 'multiclass' and not num_class:
raise ValueError(
'num_class must be provided for multiclass problems')
self.num_class = num_class
self.is_unbalance = is_unbalance
self.verbose = verbose
self.best: Dict[str, Any] = {} # Best hyper-parameters
def optimize(self, dtrain: lgbDataset, deval: lgbDataset):
# Define the base parameters
if self.objective == 'binary':
params: Dict = {'objective': self.objective}
elif self.objective == 'multiclass':
params: Dict = {
'objective': self.objective,
'metric': 'multi_logloss'
}
elif self.objective == 'regression':
params: Dict = {'objective': self.objective, 'metric': 'rmse'}
if self.verbose:
params['verbosity'] = 1
else:
params['verbosity'] = -1
if self.objective != 'regression':
params['is_unbalance'] = self.is_unbalance
if self.objective == 'multiclass':
params['num_class'] = self.num_class
# Reformat the data for LightGBM cross validation method
train_set = lgb.Dataset(
data=pd.concat([dtrain.data, deval.data]).reset_index(drop=True),
label=pd.concat([dtrain.label,
deval.label]).reset_index(drop=True),
categorical_feature=dtrain.categorical_feature,
free_raw_data=False,
)
train_index = range(len(dtrain.data))
valid_index = range(len(dtrain.data), len(train_set.data))
# Run the hyper-parameter tuning
self.tuner = LightGBMTunerCV(
params=params,
train_set=train_set,
folds=[(train_index, valid_index)],
verbose_eval=False,
num_boost_round=1000,
early_stopping_rounds=50,
)
self.tuner.run()
self.best = self.tuner.best_params
# since n_estimators is not among the params that Optuna optimizes we
# need to add it manually. We add a high value since it will be used
# with early_stopping_rounds
self.best['n_estimators'] = 1000 # type: ignore
```
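A rough usage sketch for the Hyperopt-based optimizer above on a synthetic binary task. This assumes the class is in scope and that the installed `lightgbm`/`hyperopt` versions are compatible with it (for instance, a `lightgbm` release whose `LGBMClassifier` still exposes the `silent` parameter that `optimize` deletes); dataset sizes and the evaluation budget are arbitrary.
```python
from lightgbm import Dataset as lgbDataset
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=500, n_features=10, random_state=0)
X_tr, X_val, y_tr, y_val = train_test_split(X, y, test_size=0.2, random_state=0)

# free_raw_data=False keeps .data/.label accessible, which the objective relies on.
dtrain = lgbDataset(X_tr, label=y_tr, free_raw_data=False)
deval = lgbDataset(X_val, label=y_val, reference=dtrain, free_raw_data=False)

opt = LGBOptimizerHyperopt(objective='binary', verbose=False)
opt.optimize(dtrain, deval, maxevals=10)  # tiny budget, just for illustration
print(opt.best)                           # tuned LGBMClassifier-style params
```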
#### File: examples/tabular_embeddings/pytorchlightning_tabular_test.py
```python
import os
import numpy as np
import pandas as pd
import pytorch_lightning as pl
from autofe.tabular_embedding.pytorchlightning_tabular import TabularDataModule, TabularDataset, TabularNet, compute_score, predict
from pytorch_lightning.callbacks import EarlyStopping
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from torch.utils.data import DataLoader
if __name__ == '__main__':
"""http://ethen8181.github.io/machine-
learning/deep_learning/tabular/tabular.html#Deep-Learning-For-Tabular-
Data."""
input_path = '/media/robin/DATA/datatsets/structure_data/UCI_Credit_Card/UCI_Credit_Card.csv'
df = pd.read_csv(input_path)
print(df.shape)
print(df.head())
id_cols = ['ID']
cat_cols = ['EDUCATION', 'SEX', 'MARRIAGE']
num_cols = [
'LIMIT_BAL', 'AGE', 'PAY_0', 'PAY_2', 'PAY_3', 'PAY_4', 'PAY_5',
'PAY_6', 'BILL_AMT1', 'BILL_AMT2', 'BILL_AMT3', 'BILL_AMT4',
'BILL_AMT5', 'BILL_AMT6', 'PAY_AMT1', 'PAY_AMT2', 'PAY_AMT3',
'PAY_AMT4', 'PAY_AMT5', 'PAY_AMT6'
]
label_col = 'default.payment.next.month'
print('number of categorical columns: ', len(cat_cols))
print('number of numerical columns: ', len(num_cols))
test_size = 0.1
val_size = 0.3
random_state = 1234
df_train, df_test = train_test_split(
df,
test_size=test_size,
random_state=random_state,
stratify=df[label_col])
df_train, df_val = train_test_split(
df_train,
test_size=val_size,
random_state=random_state,
stratify=df_train[label_col])
print('train shape: ', df_train.shape)
print('validation shape: ', df_val.shape)
print('test shape: ', df_test.shape)
cat_code_dict = {}
for col in cat_cols:
category_col = df_train[col].astype('category')
cat_code_dict[col] = {
value: idx
for idx, value in enumerate(category_col.cat.categories)
}
print(cat_code_dict)
def preprocess(df,
scaler=None,
num_cols=None,
cat_cols=None,
label_col=None):
df = df.copy()
# numeric fields
scaler = StandardScaler()
scaler.fit(df_train[num_cols])
df[num_cols] = scaler.transform(df[num_cols])
df[num_cols] = df[num_cols].astype(np.float32)
# categorical fields
# store the category code mapping, so we can encode any new incoming data
# other than our training set
cat_code_dict = {}
for col in cat_cols:
category_col = df_train[col].astype('category')
cat_code_dict[col] = {
value: idx
for idx, value in enumerate(category_col.cat.categories)
}
for col in cat_cols:
code_dict = cat_code_dict[col]
code_fillna_value = len(code_dict)
df[col] = df[col].map(code_dict).fillna(code_fillna_value).astype(
np.int64)
# label
df[label_col] = df[label_col].astype(np.float32)
return df
df_groups = {'train': df_train, 'val': df_val, 'test': df_test}
data_dir = 'onnx_data'
os.makedirs(data_dir, exist_ok=True)
for name, df_group in df_groups.items():
filename = os.path.join(data_dir, f'{name}.csv')
df_preprocessed = preprocess(
df_group,
scaler=StandardScaler,
num_cols=num_cols,
cat_cols=cat_cols,
label_col=label_col)
df_preprocessed.to_csv(filename, index=False)
print(df_preprocessed.head())
print(df_preprocessed.dtypes)
batch_size = 64
path_train = os.path.join(data_dir, 'train.csv')
dataset = TabularDataset(path_train, num_cols, cat_cols, label_col)
data_loader = DataLoader(dataset, batch_size)
# our data loader now returns batches of numerical/categorical/label tensor
num_tensor, cat_tensor, label_tensor = next(iter(data_loader))
print('numerical value tensor:\n', num_tensor)
print('categorical value tensor:\n', cat_tensor)
print('label tensor:\n', label_tensor)
n_classes = 1
embedding_size_dict = {
col: len(code)
for col, code in cat_code_dict.items()
}
embedding_dim_dict = {
col: embedding_size // 2
for col, embedding_size in embedding_size_dict.items()
}
embedding_dim_dict
tabular_data_module = TabularDataModule(data_dir, num_cols, cat_cols,
label_col)
# we can print out the network architecture for inspection
tabular_model = TabularNet(num_cols, cat_cols, embedding_size_dict,
n_classes, embedding_dim_dict)
print(tabular_model)
callbacks = [EarlyStopping(monitor='val_loss')]
trainer = pl.Trainer(max_epochs=8, callbacks=callbacks, gpus=1)
trainer.fit(tabular_model, tabular_data_module)
y_true, y_pred = predict(tabular_model, tabular_data_module)
score = compute_score(y_true, y_pred)
print(score)
```
#### File: components/data_preprocessing/test_text_transformer.py
```python
import unittest
import pandas as pd
from autotabular.pipeline.components.data_preprocessing.text_transformer.text_transformer import TextTFIDFTransformer
from numpy.testing import assert_almost_equal
class TextTransformerTest(unittest.TestCase):
def test_transformer(self):
d = {
'col1': [
'This is the first document.',
'This document is the second document.',
'And this is the third one.',
None,
'Is this the first document?',
]
}
df = pd.DataFrame(data=d)
df_org = df.copy()
transf = TextTFIDFTransformer()
transf.fit(df, 'col1')
df = transf.transform(df)
self.assertTrue(df.shape[0] == 5)
self.assertTrue('col1' not in df.columns)
transf2 = TextTFIDFTransformer()
transf2.from_json(transf.to_json())
df2 = transf2.transform(df_org)
self.assertTrue('col1' not in df2.columns)
assert_almost_equal(df.iloc[0, 0], df2.iloc[0, 0])
```
#### File: components/feature_preprocessing/test_fast_ica.py
```python
import unittest
import sklearn.metrics
from autotabular.pipeline.components.feature_preprocessing.fast_ica import FastICA
from autotabular.pipeline.util import PreprocessingTestCase, _test_preprocessing, get_dataset
from sklearn.linear_model import Ridge
class FastICAComponentTest(PreprocessingTestCase):
def test_default_configuration(self):
transformation, original = _test_preprocessing(
FastICA, dataset='diabetes')
self.assertEqual(transformation.shape[0], original.shape[0])
self.assertFalse((transformation == 0).all())
def test_default_configuration_regression(self):
for i in range(5):
X_train, Y_train, X_test, Y_test = get_dataset(dataset='diabetes')
configuration_space = FastICA.get_hyperparameter_search_space()
default = configuration_space.get_default_configuration()
preprocessor = FastICA(
random_state=1,
**{hp_name: default[hp_name]
for hp_name in default})
preprocessor.fit(X_train, Y_train)
X_train_trans = preprocessor.transform(X_train)
X_test_trans = preprocessor.transform(X_test)
# fit a classifier on top
classifier = Ridge()
predictor = classifier.fit(X_train_trans, Y_train)
predictions = predictor.predict(X_test_trans)
accuracy = sklearn.metrics.r2_score(Y_test, predictions)
self.assertAlmostEqual(accuracy, 0.32614416980439365)
@unittest.skip('Always returns float64')
def test_preprocessing_dtype(self):
super(FastICAComponentTest, self)._test_preprocessing_dtype(
FastICA, dataset='diabetes')
```
#### File: components/feature_preprocessing/test_random_trees_embedding.py
```python
import unittest
import numpy as np
import scipy.sparse
from autotabular.pipeline.components.feature_preprocessing.random_trees_embedding import RandomTreesEmbedding
from autotabular.pipeline.util import _test_preprocessing, get_dataset
class RandomTreesEmbeddingComponentTest(unittest.TestCase):
def test_default_configuration(self):
transformation, original = _test_preprocessing(RandomTreesEmbedding)
self.assertEqual(transformation.shape[0], original.shape[0])
self.assertEqual(transformation.shape[1], 216)
self.assertIsInstance(original, np.ndarray)
self.assertTrue(scipy.sparse.issparse(transformation))
self.assertTrue(all(transformation.data == 1))
@unittest.skip('Right now, the RTE returns a float64 array!')
def test_preprocessing_dtype(self):
# Dense
# np.float32
X_train, Y_train, X_test, Y_test = get_dataset('iris')
self.assertEqual(X_train.dtype, np.float32)
configuration_space = RandomTreesEmbedding.get_hyperparameter_search_space(
)
default = configuration_space.get_default_configuration()
preprocessor = RandomTreesEmbedding(
random_state=1,
**{hp_name: default[hp_name]
for hp_name in default})
preprocessor.fit(X_train)
Xt = preprocessor.transform(X_train)
self.assertEqual(Xt.dtype, np.float32)
# np.float64
X_train, Y_train, X_test, Y_test = get_dataset('iris')
X_train = X_train.astype(np.float64)
configuration_space = RandomTreesEmbedding.get_hyperparameter_search_space(
)
default = configuration_space.get_default_configuration()
preprocessor = RandomTreesEmbedding(
random_state=1,
**{hp_name: default[hp_name]
for hp_name in default})
preprocessor.fit(X_train, Y_train)
Xt = preprocessor.transform(X_train)
self.assertEqual(Xt.dtype, np.float64)
``` |
{
"source": "jianzhnie/d2nlp",
"score": 2
} |
#### File: examples/d2l.ai_examples/bert.py
```python
import torch
from d2l import torch as d2l
from torch import nn
def _get_batch_loss_bert(net, loss, vocab_size, tokens_X, segments_X,
valid_lens_x, pred_positions_X, mlm_weights_X, mlm_Y,
nsp_y):
    # Forward pass
_, mlm_Y_hat, nsp_Y_hat = net(tokens_X, segments_X,
valid_lens_x.reshape(-1), pred_positions_X)
    # Compute the masked language modeling loss
mlm_l = loss(mlm_Y_hat.reshape(-1, vocab_size),
mlm_Y.reshape(-1)) * mlm_weights_X.reshape(-1, 1)
mlm_l = mlm_l.sum() / (mlm_weights_X.sum() + 1e-8)
    # Compute the next sentence prediction loss
nsp_l = loss(nsp_Y_hat, nsp_y)
l = mlm_l + nsp_l
return mlm_l, nsp_l, l
def train_bert(train_iter, net, loss, vocab_size, devices, num_steps):
net = nn.DataParallel(net, device_ids=devices).to(devices[0])
trainer = torch.optim.Adam(net.parameters(), lr=0.01)
step, timer = 0, d2l.Timer()
animator = d2l.Animator(xlabel='step',
ylabel='loss',
xlim=[1, num_steps],
legend=['mlm', 'nsp'])
    # Sum of masked LM losses, sum of NSP losses, number of sentence pairs, count
metric = d2l.Accumulator(4)
num_steps_reached = False
while step < num_steps and not num_steps_reached:
for tokens_X, segments_X, valid_lens_x, pred_positions_X, mlm_weights_X, mlm_Y, nsp_y in train_iter:
tokens_X = tokens_X.to(devices[0])
segments_X = segments_X.to(devices[0])
valid_lens_x = valid_lens_x.to(devices[0])
pred_positions_X = pred_positions_X.to(devices[0])
mlm_weights_X = mlm_weights_X.to(devices[0])
mlm_Y, nsp_y = mlm_Y.to(devices[0]), nsp_y.to(devices[0])
trainer.zero_grad()
timer.start()
mlm_l, nsp_l, l = _get_batch_loss_bert(net, loss, vocab_size,
tokens_X, segments_X,
valid_lens_x,
pred_positions_X,
mlm_weights_X, mlm_Y, nsp_y)
l.backward()
trainer.step()
metric.add(mlm_l, nsp_l, tokens_X.shape[0], 1)
timer.stop()
animator.add(step + 1,
(metric[0] / metric[3], metric[1] / metric[3]))
step += 1
if step == num_steps:
num_steps_reached = True
break
print(f'MLM loss {metric[0] / metric[3]:.3f}, '
f'NSP loss {metric[1] / metric[3]:.3f}')
print(f'{metric[2] / timer.sum():.1f} sentence pairs/sec on '
f'{str(devices)}')
def get_bert_encoding(net, tokens_a, tokens_b=None):
tokens, segments = d2l.get_tokens_and_segments(tokens_a, tokens_b)
token_ids = torch.tensor(vocab[tokens], device=devices[0]).unsqueeze(0)
segments = torch.tensor(segments, device=devices[0]).unsqueeze(0)
valid_len = torch.tensor(len(tokens), device=devices[0]).unsqueeze(0)
encoded_X, _, _ = net(token_ids, segments, valid_len)
return encoded_X
if __name__ == '__main__':
batch_size, max_len = 512, 64
train_iter, vocab = d2l.load_data_wiki(batch_size, max_len)
net = d2l.BERTModel(len(vocab),
num_hiddens=128,
norm_shape=[128],
ffn_num_input=128,
ffn_num_hiddens=256,
num_heads=2,
num_layers=2,
dropout=0.2,
key_size=128,
query_size=128,
value_size=128,
hid_in_features=128,
mlm_in_features=128,
nsp_in_features=128)
devices = d2l.try_all_gpus()
loss = nn.CrossEntropyLoss()
train_bert(train_iter, net, loss, len(vocab), devices, 50)
tokens_a = ['a', 'crane', 'is', 'flying']
encoded_text = get_bert_encoding(net, tokens_a)
    # Tokens: '<cls>', 'a', 'crane', 'is', 'flying', '<sep>'
encoded_text_cls = encoded_text[:, 0, :]
encoded_text_crane = encoded_text[:, 2, :]
encoded_text.shape, encoded_text_cls.shape, encoded_text_crane[0][:3]
```
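The masked-LM term in `_get_batch_loss_bert` is a weighted average: `mlm_weights_X` is 1 at real prediction positions and 0 at padded slots, so padding contributes nothing. A toy illustration of that weighting, written with an explicitly element-wise cross-entropy (illustrative only; shapes and values are made up, and this is separate from the training script above):
```python
import torch
from torch import nn

vocab_size, num_preds = 5, 3
logits = torch.randn(2, num_preds, vocab_size)           # (batch, preds, vocab)
targets = torch.randint(0, vocab_size, (2, num_preds))   # masked-token ids
weights = torch.tensor([[1., 1., 0.],                    # last slot of example 0 is padding
                        [1., 1., 1.]])

ce = nn.CrossEntropyLoss(reduction='none')
per_pos = ce(logits.reshape(-1, vocab_size), targets.reshape(-1))
mlm_l = (per_pos * weights.reshape(-1)).sum() / (weights.sum() + 1e-8)
print(mlm_l)  # average loss over the 5 non-padded positions only
```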
#### File: examples/d2l.ai_examples/rnn_model_highlevel.py
```python
import torch
from d2l import torch as d2l
from torch import nn
from torch.nn import functional as F
class RNNModel(nn.Module):
"""The RNN model."""
def __init__(self, rnn_layer, vocab_size, **kwargs):
super(RNNModel, self).__init__(**kwargs)
self.rnn = rnn_layer
self.vocab_size = vocab_size
self.num_hiddens = self.rnn.hidden_size
# If the RNN is bidirectional (to be introduced later),
# `num_directions` should be 2, else it should be 1.
if not self.rnn.bidirectional:
self.num_directions = 1
self.linear = nn.Linear(self.num_hiddens, self.vocab_size)
else:
self.num_directions = 2
self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size)
def forward(self, inputs, state):
X = F.one_hot(inputs.T.long(), self.vocab_size)
X = X.to(torch.float32)
Y, state = self.rnn(X, state)
# The fully connected layer will first change the shape of `Y` to
# (`num_steps` * `batch_size`, `num_hiddens`). Its output shape is
# (`num_steps` * `batch_size`, `vocab_size`).
output = self.linear(Y.reshape((-1, Y.shape[-1])))
return output, state
def begin_state(self, device, batch_size=1):
if not isinstance(self.rnn, nn.LSTM):
# `nn.GRU` takes a tensor as hidden state
return torch.zeros((self.num_directions * self.rnn.num_layers,
batch_size, self.num_hiddens),
device=device)
else:
# `nn.LSTM` takes a tuple of hidden states
return (torch.zeros((self.num_directions * self.rnn.num_layers,
batch_size, self.num_hiddens),
device=device),
torch.zeros((self.num_directions * self.rnn.num_layers,
batch_size, self.num_hiddens),
device=device))
if __name__ == '__main__':
batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)
num_hiddens = 256
rnn_layer = nn.RNN(len(vocab), num_hiddens)
state = torch.zeros((1, batch_size, num_hiddens))
state.shape
X = torch.rand(size=(num_steps, batch_size, len(vocab)))
Y, state_new = rnn_layer(X, state)
print(Y.shape, state_new.shape)
device = d2l.try_gpu()
net = RNNModel(rnn_layer, vocab_size=len(vocab))
net = net.to(device)
res = d2l.predict_ch8('time traveller', 10, net, vocab, device)
print(res)
num_epochs, lr = 10, 1
d2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)
```
#### File: examples/d2l.ai_examples/sentiment_analysis_pre.py
```python
import os
import torch
from d2l import torch as d2l
def read_imdb(data_dir, is_train):
"""读取IMDb评论数据集文本序列和标签."""
data, labels = [], []
for label in ('pos', 'neg'):
folder_name = os.path.join(data_dir, 'train' if is_train else 'test',
label)
for file in os.listdir(folder_name):
with open(os.path.join(folder_name, file), 'rb') as f:
review = f.read().decode('utf-8').replace('\n', '')
data.append(review)
labels.append(1 if label == 'pos' else 0)
return data, labels
def load_data_imdb(batch_size, num_steps=500):
"""返回数据迭代器和IMDb评论数据集的词表."""
data_dir = d2l.download_extract('aclImdb', 'aclImdb')
train_data = read_imdb(data_dir, True)
test_data = read_imdb(data_dir, False)
train_tokens = d2l.tokenize(train_data[0], token='word')
test_tokens = d2l.tokenize(test_data[0], token='word')
vocab = d2l.Vocab(train_tokens, min_freq=5)
train_features = torch.tensor([
d2l.truncate_pad(vocab[line], num_steps, vocab['<pad>'])
for line in train_tokens
])
test_features = torch.tensor([
d2l.truncate_pad(vocab[line], num_steps, vocab['<pad>'])
for line in test_tokens
])
train_iter = d2l.load_array((train_features, torch.tensor(train_data[1])),
batch_size)
test_iter = d2l.load_array((test_features, torch.tensor(test_data[1])),
batch_size,
is_train=False)
return train_iter, test_iter, vocab
if __name__ == '__main__':
d2l.DATA_HUB['aclImdb'] = (
'http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz',
'01ada507287d82875905620988597833ad4e0903')
data_dir = d2l.download_extract('aclImdb', 'aclImdb')
train_data = read_imdb(data_dir, is_train=True)
    print('# training examples:', len(train_data[0]))
    for x, y in zip(train_data[0][:3], train_data[1][:3]):
        print('label:', y, 'review:', x)
train_tokens = d2l.tokenize(train_data[0], token='word')
vocab = d2l.Vocab(train_tokens, min_freq=5, reserved_tokens=['<pad>'])
    num_steps = 500  # sequence length
train_features = torch.tensor([
d2l.truncate_pad(vocab[line], num_steps, vocab['<pad>'])
for line in train_tokens
])
print(train_features.shape)
train_iter = d2l.load_array((train_features, torch.tensor(train_data[1])),
64)
for X, y in train_iter:
print('X:', X.shape, ', y:', y.shape)
break
    print('# minibatches:', len(train_iter))
```
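`d2l.truncate_pad` does the fixed-length conversion here: sequences longer than `num_steps` are cut, shorter ones are right-padded with the `<pad>` id. A minimal stand-alone equivalent (a sketch of the behaviour relied on above, not the d2l implementation itself):
```python
def truncate_pad(line, num_steps, padding_token):
    """Truncate or pad a token-id list to exactly num_steps entries."""
    if len(line) > num_steps:
        return line[:num_steps]
    return line + [padding_token] * (num_steps - len(line))

print(truncate_pad([7, 8, 9], 5, 0))              # [7, 8, 9, 0, 0]
print(truncate_pad([7, 8, 9, 10, 11, 12], 5, 0))  # [7, 8, 9, 10, 11]
```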
#### File: examples/d2l.ai_examples/word2vec_preprocess.py
```python
import math
import os
import random
import torch
from d2l import torch as d2l
d2l.DATA_HUB['ptb'] = (d2l.DATA_URL + 'ptb.zip',
'319d85e578af0cdc590547f26231e4e31cdf1e42')
def read_ptb():
"""将PTB数据集加载到文本行的列表中."""
data_dir = d2l.download_extract('ptb')
    # Read the training set.
with open(os.path.join(data_dir, 'ptb.train.txt')) as f:
raw_text = f.read()
return [line.split() for line in raw_text.split('\n')]
def subsample(sentences, vocab):
"""下采样高频词."""
# 排除未知词元'<unk>'
sentences = [[token for token in line if vocab[token] != vocab.unk]
for line in sentences]
counter = d2l.count_corpus(sentences)
print(counter['a'])
num_tokens = sum(counter.values())
print(num_tokens)
    # Return True if the token is kept during subsampling
def keep(token):
return (random.uniform(0, 1) < math.sqrt(
1e-4 / counter[token] * num_tokens))
return ([[token for token in line if keep(token)]
for line in sentences], counter)
def compare_counts(token):
return (f'"{token}"的数量:'
f'之前={sum([l.count(token) for l in sentences])}, '
f'之后={sum([l.count(token) for l in subsampled])}')
def get_centers_and_contexts(corpus, max_window_size):
"""返回跳元模型中的中心词和上下文词."""
centers, contexts = [], []
for line in corpus:
        # To form "center word - context word" pairs, each sentence needs at least 2 words
if len(line) < 2:
continue
centers += line
        for i in range(len(line)):  # i is the center of the context window
            # Draw a random window size so that any word within max_window_size may be used
window_size = random.randint(1, max_window_size)
indices = list(
range(max(0, i - window_size),
min(len(line), i + 1 + window_size)))
            # Exclude the center word from the context words
indices.remove(i)
contexts.append([line[idx] for idx in indices])
return centers, contexts
class RandomGenerator:
"""根据n个采样权重在{1,...,n}中随机抽取."""
def __init__(self, sampling_weights):
# Exclude
self.population = list(range(1, len(sampling_weights) + 1))
self.sampling_weights = sampling_weights
self.candidates = []
self.i = 0
def draw(self):
if self.i == len(self.candidates):
            # Cache k random sampling results
self.candidates = random.choices(self.population,
self.sampling_weights,
k=10000)
self.i = 0
self.i += 1
return self.candidates[self.i - 1]
def get_negatives(all_contexts, vocab, counter, K):
"""返回负采样中的噪声词."""
# 索引为1、2、...(索引0是词表中排除的未知标记)
sampling_weights = [
counter[vocab.to_tokens(i)]**0.75 for i in range(1, len(vocab))
]
all_negatives, generator = [], RandomGenerator(sampling_weights)
for contexts in all_contexts:
negatives = []
while len(negatives) < len(contexts) * K:
neg = generator.draw()
            # Noise words cannot be context words
if neg not in contexts:
negatives.append(neg)
all_negatives.append(negatives)
return all_negatives
def batchify(data):
"""返回带有负采样的跳元模型的小批量样本."""
max_len = max(len(c) + len(n) for _, c, n in data)
centers, contexts_negatives, masks, labels = [], [], [], []
for center, context, negative in data:
cur_len = len(context) + len(negative)
centers += [center]
contexts_negatives += \
[context + negative + [0] * (max_len - cur_len)]
masks += [[1] * cur_len + [0] * (max_len - cur_len)]
labels += [[1] * len(context) + [0] * (max_len - len(context))]
return (torch.tensor(centers).reshape(
(-1, 1)), torch.tensor(contexts_negatives), torch.tensor(masks),
torch.tensor(labels))
class PTBDataset(torch.utils.data.Dataset):
def __init__(self, centers, contexts, negatives):
assert len(centers) == len(contexts) == len(negatives)
self.centers = centers
self.contexts = contexts
self.negatives = negatives
def __getitem__(self, index):
return (self.centers[index], self.contexts[index],
self.negatives[index])
def __len__(self):
return len(self.centers)
def load_data_ptb(batch_size, max_window_size, num_noise_words):
"""下载PTB数据集,然后将其加载到内存中."""
num_workers = d2l.get_dataloader_workers()
sentences = read_ptb()
vocab = d2l.Vocab(sentences, min_freq=10)
subsampled, counter = subsample(sentences, vocab)
corpus = [vocab[line] for line in subsampled]
all_centers, all_contexts = get_centers_and_contexts(
corpus, max_window_size)
all_negatives = get_negatives(all_contexts, vocab, counter,
num_noise_words)
dataset = PTBDataset(all_centers, all_contexts, all_negatives)
data_iter = torch.utils.data.DataLoader(dataset,
batch_size,
shuffle=True,
collate_fn=batchify,
num_workers=num_workers)
return data_iter, vocab
if __name__ == '__main__':
sentences = read_ptb()
print(f'# sentences数: {len(sentences)}')
vocab = d2l.Vocab(sentences, min_freq=2)
print(f'vocab size: {len(vocab)}')
subsampled, counter = subsample(sentences, vocab)
print(subsampled[1])
print(sentences[1])
print(compare_counts('like'))
corpus = [vocab[line] for line in subsampled]
tiny_dataset = [list(range(7)), list(range(7, 10))]
print('数据集', tiny_dataset)
for center, context in zip(*get_centers_and_contexts(tiny_dataset, 2)):
print('中心词', center, '的上下文词是', context)
all_centers, all_contexts = get_centers_and_contexts(corpus, 5)
    print(f'# “中心词-上下文词对”的数量: {sum([len(contexts) for contexts in all_contexts])}')
generator = RandomGenerator([1, 4, 1])
print([generator.draw() for _ in range(12)])
x_1 = (1, [2, 2], [3, 3, 3, 3])
x_2 = (1, [2, 2, 2], [3, 3])
batch = batchify((x_1, x_2))
names = ['centers', 'contexts_negatives', 'masks', 'labels']
for name, data in zip(names, batch):
print(name, '=', data)
data_iter, vocab = load_data_ptb(512, 5, 5)
for batch in data_iter:
for name, data in zip(names, batch):
print(name, 'shape:', data.shape)
break
```
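The `keep` test inside `subsample` implements the word2vec heuristic P(keep w) = min(1, sqrt(t / f(w))) with t = 1e-4, where f(w) is the relative frequency of w. A small numeric sketch with made-up counts:
```python
import math

t = 1e-4
num_tokens = 887_100                  # hypothetical corpus size
counts = {'the': 50_000, 'join': 45}  # hypothetical raw counts
for token, count in counts.items():
    p_keep = min(1.0, math.sqrt(t / (count / num_tokens)))
    print(f'{token}: keep with probability {p_keep:.3f}')
# the:  keep with probability 0.042  (frequent words are aggressively dropped)
# join: keep with probability 1.000  (rare words are always kept)
```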
#### File: examples/sentence_polarity/cnn_sent_polarity.py
```python
import sys
import torch
from torch import nn, optim
from torch.nn import functional as F
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Dataset
from tqdm.auto import tqdm
from nlptoolkit.data.utils.utils import load_sentence_polarity
sys.path.append('../../')
class CnnDataset(Dataset):
def __init__(self, data):
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, i):
return self.data[i]
def collate_fn(examples):
inputs = [torch.tensor(ex[0]) for ex in examples]
targets = torch.tensor([ex[1] for ex in examples], dtype=torch.long)
# 对batch内的样本进行padding,使其具有相同长度
inputs = pad_sequence(inputs, batch_first=True)
return inputs, targets
class CNN(nn.Module):
def __init__(self, vocab_size, embedding_dim, filter_size, num_filter,
num_class):
super(CNN, self).__init__()
self.embedding = nn.Embedding(vocab_size, embedding_dim)
self.conv1d = nn.Conv1d(embedding_dim,
num_filter,
filter_size,
padding=1)
self.activate = F.relu
self.linear = nn.Linear(num_filter, num_class)
def forward(self, inputs):
embedding = self.embedding(inputs)
convolution = self.activate(self.conv1d(embedding.permute(0, 2, 1)))
pooling = F.max_pool1d(convolution, kernel_size=convolution.shape[2])
outputs = self.linear(pooling.squeeze(dim=2))
log_probs = F.log_softmax(outputs, dim=1)
return log_probs
if __name__ == '__main__':
# 超参数设置
embedding_dim = 128
hidden_dim = 256
num_class = 2
batch_size = 32
num_epoch = 5
filter_size = 3
num_filter = 100
# 加载数据
train_data, test_data, vocab = load_sentence_polarity()
train_dataset = CnnDataset(train_data)
test_dataset = CnnDataset(test_data)
train_data_loader = DataLoader(train_dataset,
batch_size=batch_size,
collate_fn=collate_fn,
shuffle=True)
test_data_loader = DataLoader(test_dataset,
batch_size=1,
collate_fn=collate_fn,
shuffle=False)
# 加载模型
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = CNN(len(vocab), embedding_dim, filter_size, num_filter, num_class)
model.to(device) # 将模型加载到CPU或GPU设备
# 训练过程
nll_loss = nn.NLLLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001) # 使用Adam优化器
model.train()
for epoch in range(num_epoch):
total_loss = 0
for batch in tqdm(train_data_loader, desc=f'Training Epoch {epoch}'):
inputs, targets = [x.to(device) for x in batch]
log_probs = model(inputs)
loss = nll_loss(log_probs, targets)
optimizer.zero_grad()
loss.backward()
optimizer.step()
total_loss += loss.item()
print(f'Loss: {total_loss:.2f}')
# 测试过程
acc = 0
for batch in tqdm(test_data_loader, desc='Testing'):
inputs, targets = [x.to(device) for x in batch]
with torch.no_grad():
output = model(inputs)
acc += (output.argmax(dim=1) == targets).sum().item()
# 输出在测试集上的准确率
print(f'Acc: {acc / len(test_data_loader):.2f}')
```
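`nn.Conv1d` expects input of shape (batch, channels, length), which is why `forward` permutes the embedding from (batch, seq_len, embedding_dim). A minimal shape sketch with made-up sizes:
```python
import torch
import torch.nn as nn
import torch.nn.functional as F

emb = torch.randn(2, 7, 128)                       # (batch, seq_len, embedding_dim)
conv = nn.Conv1d(128, 100, kernel_size=3, padding=1)
feat = F.relu(conv(emb.permute(0, 2, 1)))          # (2, 100, 7): one channel per filter
pooled = F.max_pool1d(feat, kernel_size=feat.shape[2])  # (2, 100, 1): max over time
print(pooled.squeeze(dim=2).shape)                 # torch.Size([2, 100]) -> linear -> classes
```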
#### File: transformer_model/fintune/fintune_with_trainer.py
```python
import torch
from datasets import load_dataset
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from transformers import (BertForSequenceClassification, BertTokenizerFast,
Trainer, TrainingArguments)
def tokenize_function(examples):
tokenized = tokenizer(examples['text'],
padding='max_length',
truncation=True,
max_length=256)
return tokenized
def compute_metrics(pred):
labels = pred.label_ids
preds = pred.predictions.argmax(-1)
precision, recall, f1, _ = precision_recall_fscore_support(labels,
preds,
average='macro')
acc = accuracy_score(labels, preds)
return {
'accuracy': acc,
'f1': f1,
'precision': precision,
'recall': recall
}
if __name__ == '__main__':
# 加载数据集
root_dir = 'data/aclImdb/'
imdb_dataset = load_dataset('imdb')
print(imdb_dataset)
    print('Length of training set: ', len(imdb_dataset['train']))
print('First example from the dataset: \n')
print(imdb_dataset['train'][:2])
tokenizer = BertTokenizerFast.from_pretrained('bert-base-cased')
imdb_dataset = imdb_dataset.map(
lambda examples: {'labels': examples['label']}, batched=True)
tokenized_datasets = imdb_dataset.map(tokenize_function, batched=True)
tokenized_datasets.set_format(
type='torch',
columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
train_dataset = tokenized_datasets['train'].shuffle(seed=42).select(
range(100))
eval_dataset = tokenized_datasets['test'].shuffle(seed=42).select(
range(100))
dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=32)
next(iter(dataloader))
print(train_dataset.features)
model = BertForSequenceClassification.from_pretrained('bert-base-cased')
training_args = TrainingArguments(
output_dir='./results', # output directory
learning_rate=3e-4,
num_train_epochs=3, # total number of training epochs
per_device_train_batch_size=2, # batch size per device during training
per_device_eval_batch_size=2, # batch size for evaluation
logging_dir='./logs', # directory for storing logs
logging_steps=1,
do_train=True,
do_eval=True,
eval_steps=100,
evaluation_strategy='epoch')
trainer = Trainer(
        model=model,  # the instantiated 🤗 Transformers model to be trained
args=training_args, # training arguments, defined above
train_dataset=train_dataset, # training dataset
eval_dataset=eval_dataset, # evaluation dataset
compute_metrics=compute_metrics)
train_out = trainer.train()
```
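`compute_metrics` receives an `EvalPrediction` carrying raw logits and label ids; a minimal sketch with made-up values (run after the function is defined):
```python
import numpy as np
from transformers import EvalPrediction

fake = EvalPrediction(predictions=np.array([[2.0, -1.0], [0.1, 0.3],
                                            [-1.5, 2.2], [0.7, 0.2]]),
                      label_ids=np.array([0, 1, 1, 0]))
print(compute_metrics(fake))  # {'accuracy': 1.0, 'f1': 1.0, 'precision': 1.0, 'recall': 1.0}
```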
#### File: data/embeddings/pos_encding.py
```python
import math
import torch
import torch.nn as nn
from torch import Tensor
class PositionalEncoding(nn.Module):
def __init__(self,
emb_size: int = 512,
dropout: float = 0.1,
max_len: int = 5000):
super().__init__()
if emb_size % 2 != 0:
raise ValueError(
                'Cannot use sin/cos positional encoding with odd dim (got dim={:d})'
.format(emb_size))
self.emb_size = emb_size
self.dropout = nn.Dropout(p=dropout)
# torch.Size([max_len, 1])
position = torch.arange(max_len).unsqueeze(1)
# torch.Size([emb_size//2])
div_term = torch.exp(
torch.arange(0, emb_size, 2) * (-math.log(10000.0) / emb_size))
# torch.Size([max_len, emb_size])
pos_embedding = torch.zeros(max_len, emb_size)
# 偶数位置编码
pos_embedding[:, 0::2] = torch.sin(position * div_term)
# 奇数位置编码
pos_embedding[:, 1::2] = torch.cos(position * div_term)
# [max_len, emb_size] ===> [max_len, 1, emb_size]
pos_embedding = pos_embedding.unsqueeze(-2)
# 不对位置编码求梯度
self.register_buffer('pos_embedding', pos_embedding)
def forward(self, token_embedding: Tensor) -> Tensor:
"""
Args:
token_embedding: Tensor, shape [seq_len, batch_size, embedding_dim]
"""
# 输入的词向量与位置编码相加
pos_embed = token_embedding + self.pos_embedding[:token_embedding.
size(0), :]
return self.dropout(pos_embed)
class PositionalEncodingD2L(nn.Module):
"""位置编码."""
def __init__(self, num_hiddens, dropout, max_len=1000):
super(PositionalEncodingD2L, self).__init__()
self.dropout = nn.Dropout(dropout)
# 创建一个足够长的P
self.P = torch.zeros((1, max_len, num_hiddens))
position = torch.arange(max_len, dtype=torch.float32).reshape(-1, 1)
div_term = torch.pow(
10000,
torch.arange(0, num_hiddens, 2, dtype=torch.float32) / num_hiddens)
X = position / div_term
self.P[:, :, 0::2] = torch.sin(X)
self.P[:, :, 1::2] = torch.cos(X)
def forward(self, X):
X = X + self.P[:, :X.shape[1], :].to(X.device)
return self.dropout(X)
if __name__ == '__main__':
import numpy as np
import matplotlib.pyplot as plt
plt.figure(figsize=(15, 10))
vocab_size = 1000
batch_size = 32
seq_len = 512
d_model = 128
drop_out = 0
max_len = 5000
pe = PositionalEncoding(emb_size=d_model, dropout=drop_out)
x = torch.from_numpy(
np.random.randint(1, vocab_size, size=(batch_size, seq_len, d_model)))
print(x.shape)
x = x.transpose(0, 1)
print(x.shape)
y = pe.forward(x)
print(y.shape)
```
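A quick sanity check of `PositionalEncodingD2L` (dropout set to 0 so the output is exactly the input plus the encoding table `P`):
```python
import torch

pe = PositionalEncodingD2L(num_hiddens=32, dropout=0.0)
X = torch.zeros(1, 60, 32)      # (batch, seq_len, num_hiddens)
Y = pe(X)                       # Y[0, t] is the positional encoding of position t
print(Y.shape)                  # torch.Size([1, 60, 32])
print(torch.all(Y[0, 0, 0::2] == 0).item())  # True: sin(0) = 0 at position 0
```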
#### File: nlptoolkit/layers/layers.py
```python
import torch
import torch.nn as nn
from torch.nn import functional as F
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, num_class):
super(MLP, self).__init__()
# 线性变换:输入层->隐含层
self.linear1 = nn.Linear(input_dim, hidden_dim)
# 使用ReLU激活函数
self.activate = F.relu
# 线性变换:隐含层->输出层
self.linear2 = nn.Linear(hidden_dim, num_class)
def forward(self, inputs):
hidden = self.linear1(inputs)
activation = self.activate(hidden)
outputs = self.linear2(activation)
probs = F.softmax(outputs, dim=1) # 获得每个输入属于某一类别的概率
return probs
class MLPEmbedding(nn.Module):
def __init__(self, vocab_size, embedding_dim, hidden_dim, num_class):
        super(MLPEmbedding, self).__init__()
# 词嵌入层
self.embedding = nn.Embedding(vocab_size, embedding_dim)
# 线性变换:词嵌入层->隐含层
self.linear1 = nn.Linear(embedding_dim, hidden_dim)
# 使用ReLU激活函数
self.activate = F.relu
# 线性变换:激活层->输出层
self.linear2 = nn.Linear(hidden_dim, num_class)
def forward(self, inputs):
embeddings = self.embedding(inputs)
# 将序列中多个embedding进行聚合(此处是求平均值)
embedding = embeddings.mean(dim=1)
hidden = self.activate(self.linear1(embedding))
outputs = self.linear2(hidden)
# 获得每个序列属于某一类别概率的对数值
probs = F.log_softmax(outputs, dim=1)
return probs
class PositionWiseFFN(nn.Module):
"""基于位置的前馈网络."""
def __init__(self,
ffn_num_input,
ffn_num_hiddens,
ffn_num_outputs,
dropout=0.1,
**kwargs):
super(PositionWiseFFN, self).__init__(**kwargs)
self.dense1 = nn.Linear(ffn_num_input, ffn_num_hiddens)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(dropout)
self.dense2 = nn.Linear(ffn_num_hiddens, ffn_num_outputs)
def forward(self, X):
# x = [batch size, seq len, ffn_num_input]
X = self.relu(self.dense1(X))
# x = [batch size, seq len, ffn_num_hiddens]
X = self.dropout(X)
X = self.dense2(X)
# x = [batch size, seq len, hid dim]
return X
class FeedForwardNetwork(nn.Module):
"""Implements FFN equation."""
def __init__(self, d_model, d_ffn, dropout=0.1):
super().__init__()
self.dense1 = nn.Linear(d_model, d_ffn)
self.relu = nn.ReLU(inplace=True)
self.dropout = nn.Dropout(dropout)
self.dense2 = nn.Linear(d_ffn, d_model)
def forward(self, x):
x = self.relu(self.dense1(x))
x = self.dropout(x)
x = self.dense2(x)
return x
if __name__ == '__main__':
mlp = MLP(input_dim=4, hidden_dim=5, num_class=2)
inputs = torch.rand(3, 4) # 输入形状为(3, 4)的张量,其中3表示有3个输入,4表示每个输入的维度
probs = mlp(inputs) # 自动调用forward函数
print(probs) # 输出3个输入对应输出的概率
ffn = PositionWiseFFN(4, 4, 8)
print(ffn(torch.ones((2, 3, 4))).shape)
```
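`MLPEmbedding` works on token ids rather than dense features; a minimal usage sketch (vocabulary size and sequence length are made up):
```python
import torch

mlp_emb = MLPEmbedding(vocab_size=100, embedding_dim=16, hidden_dim=32, num_class=2)
token_ids = torch.randint(0, 100, (3, 7))  # batch of 3 sequences, 7 tokens each
log_probs = mlp_emb(token_ids)             # (3, 2) log-probabilities per class
print(log_probs.shape)
```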
#### File: models/lm/glove.py
```python
import os
import d2l.torch as d2l
import torch
import torch.nn as nn
d2l.DATA_HUB['glove.6b.50d'] = (d2l.DATA_URL + 'glove.6B.50d.zip',
'0b8703943ccdb6eb788e6f091b8946e82231bc4d')
d2l.DATA_HUB['glove.6b.100d'] = (d2l.DATA_URL + 'glove.6B.100d.zip',
'cd43bfb07e44e6f27cbcc7bc9ae3d80284fdaf5a')
d2l.DATA_HUB['glove.42b.300d'] = (d2l.DATA_URL + 'glove.42B.300d.zip',
'b5116e234e9eb9076672cfeabf5469f3eec904fa')
class GloveModel(nn.Module):
def __init__(self, vocab_size, embedding_dim):
super(GloveModel, self).__init__()
# 词嵌入及偏置向量
self.w_embeddings = nn.Embedding(vocab_size, embedding_dim)
self.w_biases = nn.Embedding(vocab_size, 1)
# 上下文嵌入及偏置向量
self.c_embeddings = nn.Embedding(vocab_size, embedding_dim)
self.c_biases = nn.Embedding(vocab_size, 1)
def forward_w(self, words):
w_embeds = self.w_embeddings(words)
w_biases = self.w_biases(words)
return w_embeds, w_biases
def forward_c(self, contexts):
c_embeds = self.c_embeddings(contexts)
c_biases = self.c_biases(contexts)
return c_embeds, c_biases
def forward(self, words, contexts):
w_embeds = self.w_embeddings(words)
w_biases = self.w_biases(words)
c_embeds = self.c_embeddings(contexts)
c_biases = self.c_biases(contexts)
return w_embeds, w_biases, c_embeds, c_biases
def init_weights(self):
for param in self.parameters():
torch.nn.init.uniform_(param, a=-0.1, b=0.1)
class GloveTokenEmbedding(object):
"""Token Embedding."""
def __init__(self, embedding_name):
"""Defined in :numref:`sec_synonyms`"""
self.idx_to_token, self.idx_to_vec = self._load_embedding(
embedding_name)
self.unknown_idx = 0
self.token_to_idx = {
token: idx
for idx, token in enumerate(self.idx_to_token)
}
def _load_embedding(self, embedding_name):
idx_to_token, idx_to_vec = ['<unk>'], []
data_dir = d2l.download_extract(embedding_name)
# GloVe website: https://nlp.stanford.edu/projects/glove/
# fastText website: https://fasttext.cc/
with open(os.path.join(data_dir, 'vec.txt'), 'r') as f:
for line in f:
elems = line.rstrip().split(' ')
token, elems = elems[0], [float(elem) for elem in elems[1:]]
# Skip header information, such as the top row in fastText
if len(elems) > 1:
idx_to_token.append(token)
idx_to_vec.append(elems)
idx_to_vec = [[0] * len(idx_to_vec[0])] + idx_to_vec
return idx_to_token, d2l.tensor(idx_to_vec)
def __getitem__(self, tokens):
indices = [
self.token_to_idx.get(token, self.unknown_idx) for token in tokens
]
vecs = self.idx_to_vec[d2l.tensor(indices)]
return vecs
def __len__(self):
return len(self.idx_to_token)
```
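`GloveModel.forward` only returns the embeddings and biases; the weighted least-squares GloVe objective has to be built outside the module. A minimal training-step sketch (x_max = 100 and the 0.75 exponent follow the GloVe paper; ids and counts are made up):
```python
import torch

model = GloveModel(vocab_size=1000, embedding_dim=16)
words = torch.tensor([3, 5, 7])           # center-word ids
contexts = torch.tensor([4, 6, 8])        # context-word ids
counts = torch.tensor([10.0, 3.0, 1.0])   # co-occurrence counts X_ij

w_embeds, w_biases, c_embeds, c_biases = model(words, contexts)
weights = torch.clamp((counts / 100.0) ** 0.75, max=1.0)      # weighting function f(X_ij)
pred = (w_embeds * c_embeds).sum(dim=1) + w_biases.squeeze(1) + c_biases.squeeze(1)
loss = (weights * (pred - torch.log(counts)) ** 2).mean()
loss.backward()
```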
#### File: models/lm/word_similarity.py
```python
import torch
from nlptoolkit.data.utils.utils import load_pretrained
def knn(W, x, k):
similarities = torch.matmul(x, W.transpose(
1, 0)) / (torch.norm(W, dim=1) * torch.norm(x) + 1e-9)
knn = similarities.topk(k=k)
return knn.values.tolist(), knn.indices.tolist()
def find_similar_words(embeds, vocab, query, k=5):
knn_values, knn_indices = knn(embeds, embeds[vocab[query]], k + 1)
knn_words = vocab.to_tokens(knn_indices)
print(f'>>> Query word: {query}')
for i in range(k):
print(f'cosine similarity={knn_values[i + 1]:.4f}: {knn_words[i + 1]}')
def find_analogy(embeds, vocab, word_a, word_b, word_c):
vecs = embeds[vocab.to_ids([word_a, word_b, word_c])]
x = vecs[2] + vecs[1] - vecs[0]
knn_values, knn_indices = knn(embeds, x, k=1)
analogies = vocab.to_tokens(knn_indices)
print(f'>>> Query: {word_a}, {word_b}, {word_c}')
print(f'{analogies}')
if __name__ == '__main__':
word_sim_queries = ['china', 'august', 'good', 'paris']
vocab, embeds = load_pretrained('glove.vec')
for w in word_sim_queries:
find_similar_words(embeds, vocab, w)
word_analogy_queries = [['brother', 'sister', 'man'],
['paris', 'france', 'berlin']]
vocab, embeds = load_pretrained('glove.vec')
for w_a, w_b, w_c in word_analogy_queries:
find_analogy(embeds, vocab, w_a, w_b, w_c)
```
#### File: models/seq2seq/rnn_attn.py
```python
import random
from typing import Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
class RNNEncoder(nn.Module):
def __init__(self, vocab_size: int, embeb_dim: int, enc_hidden_size: int,
num_layers: int, dec_hidden_size: int, dropout: float):
super().__init__()
self.vocab_size = vocab_size
self.embeb_dim = embeb_dim
self.enc_hidden_size = enc_hidden_size
self.dec_hidden_size = dec_hidden_size
self.dropout = dropout
self.embedding = nn.Embedding(vocab_size, embeb_dim)
self.rnn = nn.GRU(input_size=embeb_dim,
hidden_size=enc_hidden_size,
num_layers=num_layers,
bidirectional=True,
dropout=dropout if num_layers > 1 else 0)
self.fc = nn.Linear(enc_hidden_size * 2, dec_hidden_size)
self.dropout = nn.Dropout(dropout)
def forward(self, src):
# src = [src len, batch size]
embedded = self.dropout(self.embedding(src))
# embedded = [src len, batch size, emb dim]
outputs, hidden = self.rnn(embedded)
# outputs = [src len, batch size, hid dim * num directions]
# hidden = [n layers * num directions, batch size, hid dim]
# hidden is stacked [forward_1, backward_1, forward_2, backward_2, ...]
# outputs are always from the last layer
# hidden [-2, :, : ] is the last of the forwards RNN
# hidden [-1, :, : ] is the last of the backwards RNN
# initial decoder hidden is final hidden state of the forwards and backwards
# encoder RNNs fed through a linear layer
hidden = torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1)
hidden = self.fc(hidden)
hidden = torch.tanh(hidden)
# outputs = [src len, batch size, enc hid dim * 2]
# hidden = [batch size, dec hid dim]
return outputs, hidden
class Attention(nn.Module):
def __init__(self, enc_hidden_size: int, dec_hidden_size: int,
attn_dim: int):
super().__init__()
self.enc_hid_dim = enc_hidden_size
self.dec_hid_dim = dec_hidden_size
self.attn = nn.Linear((enc_hidden_size * 2) + dec_hidden_size,
attn_dim)
self.fc = nn.Linear(attn_dim, 1, bias=False)
def forward(self, hidden, encoder_outputs):
# hidden = [batch size, dec hid dim]
# encoder_outputs = [src len, batch size, enc hid dim * 2]
src_len = encoder_outputs.shape[0]
# repeat decoder hidden state src_len times
hidden = hidden.unsqueeze(1).repeat(1, src_len, 1)
encoder_outputs = encoder_outputs.permute(1, 0, 2)
# hidden = [batch size, src len, dec hid dim]
# encoder_outputs = [batch size, src len, enc hid dim * 2]
energy = torch.cat((hidden, encoder_outputs), dim=2)
energy = self.attn(energy)
energy = torch.tanh(energy)
# energy = [batch size, src len, dec hid dim]
attention = self.fc(energy).squeeze(2)
# attention = torch.sum(energy, dim=2)
# attention= [batch size, src len]
return F.softmax(attention, dim=1)
class RNNDecoder(nn.Module):
def __init__(self, vocab_size: int, embed_dim: int, enc_hidden_size: int,
dec_hidden_size: int, attn_dim: int, dropout: int):
super().__init__()
self.embed_dim = embed_dim
self.enc_hidden_dim = enc_hidden_size
self.dec_hidden_dim = dec_hidden_size
self.vocab_size = vocab_size
self.dropout = dropout
self.embedding = nn.Embedding(vocab_size, embed_dim)
self.rnn = nn.GRU((enc_hidden_size * 2) + embed_dim, dec_hidden_size)
self.attention = Attention(enc_hidden_size=enc_hidden_size,
dec_hidden_size=dec_hidden_size,
attn_dim=attn_dim)
self.fc_out = nn.Linear(
(enc_hidden_size * 2) + dec_hidden_size + embed_dim, vocab_size)
self.dropout = nn.Dropout(dropout)
def _weighted_attn(self, decoder_hidden: Tensor,
encoder_outputs: Tensor) -> Tensor:
# attn: batch_size * seq_len
attn = self.attention(decoder_hidden, encoder_outputs)
attn = attn.unsqueeze(1)
# attn = [batch size, 1, src len]
# encoder_outputs = [batch size, src len, enc hid dim * 2]
encoder_outputs = encoder_outputs.permute(1, 0, 2)
# weighted = [batch size, 1, enc hid dim * 2]
weighted = torch.bmm(attn, encoder_outputs)
# weighted = [1, batch size, enc hid dim * 2]
weighted = weighted.permute(1, 0, 2)
return weighted
def forward(self, input: Tensor, decoder_hidden: Tensor,
encoder_outputs: Tensor) -> Tuple[Tensor]:
# input = [batch size]
# hidden = [batch size, dec hid dim]
# encoder_outputs = [src len, batch size, enc hid dim * 2]
input = input.unsqueeze(0)
# input = [1, batch size]
embedded = self.dropout(self.embedding(input))
# embedded = [1, batch size, emb dim]
# weighted = [1, batch size, enc_hid_dim * 2]
weighted = self._weighted_attn(decoder_hidden, encoder_outputs)
# rnn_input = [1, batch size, (enc_hid_dim * 2) + emb dim]
rnn_input = torch.cat((embedded, weighted), dim=2)
output, decoder_hidden = self.rnn(rnn_input,
decoder_hidden.unsqueeze(0))
# output = [seq len, batch size, dec hid dim * n directions]
# hidden = [n layers * n directions, batch size, dec hid dim]
# seq len, n layers and n directions will always be 1 in this decoder, therefore:
# output = [1, batch size, dec hid dim]
# hidden = [1, batch size, dec hid dim]
# this also means that output == hidden
embedded = embedded.squeeze(0)
output = output.squeeze(0)
weighted = weighted.squeeze(0)
output = self.fc_out(torch.cat((output, weighted, embedded), dim=1))
return output, decoder_hidden.squeeze(0)
class RNNSeq2SeqAttnModel(nn.Module):
def __init__(self,
src_vocab_size,
trg_vocab_size,
embed_dim,
enc_hidden_size,
dec_hidden_size,
attm_dim,
num_layers,
dropout=0.,
device='cpu'):
super().__init__()
self.src_vocab_size = src_vocab_size
self.trg_vocab_size = trg_vocab_size
self.enc_hidden_size = enc_hidden_size
self.dec_hidden_size = dec_hidden_size
self.num_layers = num_layers
self.device = device
        self.encoder = RNNEncoder(src_vocab_size, embed_dim, enc_hidden_size,
                                  num_layers, enc_hidden_size, dropout)
        self.decoder = RNNDecoder(trg_vocab_size, embed_dim, enc_hidden_size,
                                  dec_hidden_size, attm_dim, dropout)
        # Initialize weights after the sub-modules are created,
        # otherwise named_parameters() is still empty and nothing gets initialized.
        self.init_weights()
def forward(self,
src: Tensor,
trg: Tensor,
teacher_forcing_ratio: float = 0.5) -> Tensor:
# src = [src len, batch size]
# trg = [trg len, batch size]
# teacher_forcing_ratio is probability to use teacher forcing
# e.g. if teacher_forcing_ratio is 0.75 we use teacher forcing 75% of the time
batch_size = src.shape[1]
max_len = trg.shape[0]
trg_vocab_size = self.trg_vocab_size
# tensor to store decoder outputs
outputs = torch.zeros(max_len, batch_size,
trg_vocab_size).to(self.device)
# encoder_outputs is all hidden states of the input sequence, back and forwards
# hidden is the final forward and backward hidden states, passed through a linear layer
encoder_outputs, hidden = self.encoder(src)
# first input to the decoder is the <sos> token
input = trg[0, :]
for t in range(1, max_len):
# insert input token embedding, previous hidden state and all encoder hidden states
# receive output tensor (predictions) and new hidden state
output, hidden = self.decoder(input, hidden, encoder_outputs)
# place predictions in a tensor holding predictions for each token
outputs[t] = output
# decide if we are going to use teacher forcing or not
teacher_force = random.random() < teacher_forcing_ratio
# get the highest predicted token from our predictions
top1 = output.max(1)[1]
# if teacher forcing, use actual next token as next input
# if not, use predicted token
input = (trg[t] if teacher_force else top1)
return outputs
def init_weights(self):
for name, param in self.named_parameters():
if 'weight' in name:
nn.init.normal_(param.data, mean=0, std=0.01)
else:
nn.init.constant_(param.data, 0)
```
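A minimal forward pass through `RNNSeq2SeqAttnModel`; note the [seq_len, batch] layout and that `enc_hidden_size` must equal `dec_hidden_size` the way the encoder is wired here (all sizes below are made up):
```python
import torch

model = RNNSeq2SeqAttnModel(src_vocab_size=30, trg_vocab_size=40, embed_dim=16,
                            enc_hidden_size=32, dec_hidden_size=32, attm_dim=8,
                            num_layers=1)
src = torch.randint(0, 30, (7, 2))   # [src len, batch size]
trg = torch.randint(0, 40, (5, 2))   # [trg len, batch size]
outputs = model(src, trg)            # [trg len, batch size, trg_vocab_size]
print(outputs.shape)                 # torch.Size([5, 2, 40])
```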
#### File: models/seq2vec/cbow.py
```python
import torch
import torch.nn as nn
__all__ = ['BoWEncoder']
class BoWEncoder(nn.Module):
r"""
A `BoWEncoder` takes as input a sequence of vectors and returns a
single vector, which simply sums the embeddings of a sequence across the time dimension.
The input to this encoder is of shape `(batch_size, num_tokens, emb_dim)`,
and the output is of shape `(batch_size, emb_dim)`.
Args:
emb_dim(int):
The dimension of each vector in the input sequence.
"""
def __init__(self, emb_dim):
super().__init__()
self.emb_dim = emb_dim
def get_input_dim(self):
r"""
Returns the dimension of the vector input for each element in the sequence input
to a `BoWEncoder`. This is not the shape of the input tensor, but the
last element of that shape.
"""
return self.emb_dim
def get_output_dim(self):
r"""
Returns the dimension of the final vector output by this `BoWEncoder`. This is not
the shape of the returned tensor, but the last element of that shape.
"""
return self.emb_dim
def forward(self, inputs, mask=None):
r"""
It simply sums the embeddings of a sequence across the time dimension.
Args:
inputs (Tensor):
Shape as `(batch_size, num_tokens, emb_dim)` and dtype as `float32` or `float64`.
The sequence length of the input sequence.
mask (Tensor, optional):
Shape same as `inputs`.
Its each elements identify whether the corresponding input token is padding or not.
If True, not padding token. If False, padding token.
Defaults to `None`.
Returns:
Tensor:
Returns tensor `summed`, the result vector of BagOfEmbedding.
Its data type is same as `inputs` and its shape is `[batch_size, emb_dim]`.
"""
if mask is not None:
inputs = inputs * mask
# Shape: (batch_size, embedding_dim)
summed = inputs.sum(dim=1)
return summed
class BoWModel(nn.Module):
"""This class implements the Bag of Words Classification Network model to
classify texts.
At a high level, the model starts by embedding the tokens and running them through a word embedding. Then, we encode these representations with a
`BoWEncoder`. Lastly, we take the output of the encoder to create a final representation, which is passed through some feed-forward layers to output a
logits (`output_layer`).
"""
def __init__(self,
vocab_size,
num_classes,
emb_dim=128,
padding_idx=0,
hidden_size=128):
super().__init__()
self.embedder = nn.Embedding(vocab_size,
emb_dim,
padding_idx=padding_idx)
self.bow_encoder = BoWEncoder(emb_dim)
self.fc1 = nn.Linear(self.bow_encoder.get_output_dim(), hidden_size)
self.fc2 = nn.Linear(hidden_size, num_classes)
def forward(self, inputs, seq_len=None):
# Shape: (batch_size, num_tokens, embedding_dim)
embedded_text = self.embedder(inputs)
# Shape: (batch_size, embedding_dim)
summed = self.bow_encoder(embedded_text)
encoded_text = torch.tanh(summed)
# Shape: (batch_size, hidden_size)
output = torch.tanh(self.fc1(encoded_text))
        # Shape: (batch_size, num_classes)
output = torch.tanh(self.fc2(output))
return output
```
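A minimal usage sketch of `BoWModel` (vocabulary size and batch shape are made up; index 0 is reserved for padding):
```python
import torch

model = BoWModel(vocab_size=100, num_classes=2, emb_dim=16, hidden_size=32)
token_ids = torch.randint(1, 100, (4, 9))  # (batch_size, num_tokens)
output = model(token_ids)                  # (4, 2)
print(output.shape)
```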
#### File: models/seq2vec/tcn.py
```python
import torch.nn as nn
from torch.nn.utils.weight_norm import weight_norm
class Chomp1d(nn.Module):
"""Remove the elements on the right.
Args:
chomp_size (int): The number of elements removed.
"""
def __init__(self, chomp_size):
super(Chomp1d, self).__init__()
self.chomp_size = chomp_size
def forward(self, x):
return x[:, :, :-self.chomp_size]
class TemporalBlock(nn.Module):
"""The TCN block, consists of dilated causal conv, relu and residual block.
See the Figure 1(b) in https://arxiv.org/pdf/1803.01271.pdf for more
details.
Args:
n_inputs ([int]): The number of channels in the input tensor.
n_outputs ([int]): The number of filters.
kernel_size ([int]): The filter size.
stride ([int]): The stride size.
dilation ([int]): The dilation size.
padding ([int]): The size of zeros to be padded.
dropout (float, optional): Probability of dropout the units. Defaults to 0.2.
"""
def __init__(self,
n_inputs,
n_outputs,
kernel_size,
stride,
dilation,
padding,
dropout=0.2):
super(TemporalBlock, self).__init__()
self.conv1 = weight_norm(
nn.Conv1d(n_inputs,
n_outputs,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation))
# Chomp1d is used to make sure the network is causal.
# We pad by (k-1)*d on the two sides of the input for convolution,
# and then use Chomp1d to remove the (k-1)*d output elements on the right.
self.chomp1 = Chomp1d(padding)
self.relu1 = nn.ReLU()
self.dropout1 = nn.Dropout(dropout)
self.conv2 = weight_norm(
nn.Conv1d(n_outputs,
n_outputs,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation))
self.chomp2 = Chomp1d(padding)
self.relu2 = nn.ReLU()
self.dropout2 = nn.Dropout(dropout)
self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1,
self.dropout1, self.conv2, self.chomp2,
self.relu2, self.dropout2)
self.downsample = nn.Conv1d(n_inputs, n_outputs,
1) if n_inputs != n_outputs else None
self.relu = nn.ReLU()
def forward(self, x):
out = self.net(x)
res = x if self.downsample is None else self.downsample(x)
return self.relu(out + res)
class TCNEncoder(nn.Module):
r"""
A `TCNEncoder` takes as input a sequence of vectors and returns a
single vector, which is the last one time step in the feature map.
The input to this encoder is of shape `(batch_size, num_tokens, input_size)`,
and the output is of shape `(batch_size, num_channels[-1])` with a receptive
    field:
    .. math::
        receptive\ field = 2 * \sum_{i=0}^{len(num\_channels)-1}2^i(kernel\_size-1).
Temporal Convolutional Networks is a simple convolutional architecture. It outperforms canonical recurrent networks
such as LSTMs in many tasks. See https://arxiv.org/pdf/1803.01271.pdf for more details.
Args:
input_size (int): The number of expected features in the input (the last dimension).
num_channels (list): The number of channels in different layer.
kernel_size (int): The kernel size. Defaults to 2.
dropout (float): The dropout probability. Defaults to 0.2.
"""
def __init__(self, input_size, num_channels, kernel_size=2, dropout=0.2):
super(TCNEncoder, self).__init__()
self.input_size = input_size
self.output_dim = num_channels[-1]
layers = nn.ModuleList()
num_levels = len(num_channels)
for i in range(num_levels):
dilation_size = 2**i
in_channels = input_size if i == 0 else num_channels[i - 1]
out_channels = num_channels[i]
layers.append(
TemporalBlock(in_channels,
out_channels,
kernel_size,
stride=1,
dilation=dilation_size,
padding=(kernel_size - 1) * dilation_size,
dropout=dropout))
self.network = nn.Sequential(*layers)
def get_input_dim(self):
"""Returns the dimension of the vector input for each element in the
sequence input to a `TCNEncoder`.
This is not the shape of the input tensor, but the last element of that shape.
"""
return self.input_size
def get_output_dim(self):
"""Returns the dimension of the final vector output by this
`TCNEncoder`.
This is not the shape of the returned tensor, but the last element of that shape.
"""
return self.output_dim
def forward(self, inputs):
r"""
TCNEncoder takes as input a sequence of vectors and returns a
single vector, which is the last one time step in the feature map.
The input to this encoder is of shape `(batch_size, num_tokens, input_size)`,
and the output is of shape `(batch_size, num_channels[-1])` with a receptive
        field:
        .. math::
            receptive\ field = 2 * \sum_{i=0}^{len(num\_channels)-1}2^i(kernel\_size-1).
Args:
inputs (Tensor): The input tensor with shape `[batch_size, num_tokens, input_size]`.
Returns:
Tensor: Returns tensor `output` with shape `[batch_size, num_channels[-1]]`.
"""
        # nn.Conv1d expects (batch, channels, length), so swap the last two dims,
        # then keep the feature map of the last time step.
        inputs_t = inputs.transpose(1, 2)
        output = self.network(inputs_t).permute(2, 0, 1)[-1]
return output
```
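Two quick checks for `TCNEncoder`: the receptive-field formula from the docstring, and the output shape of a forward pass (sizes are made up; the forward pass assumes the PyTorch-style transpose used above):
```python
import torch

kernel_size, num_channels = 2, [32, 32, 64]
receptive_field = 2 * sum((2 ** i) * (kernel_size - 1)
                          for i in range(len(num_channels)))
print(receptive_field)               # 14 time steps

encoder = TCNEncoder(input_size=16, num_channels=num_channels, kernel_size=kernel_size)
x = torch.randn(8, 50, 16)           # (batch_size, num_tokens, input_size)
print(encoder(x).shape)              # torch.Size([8, 64]) == (batch_size, num_channels[-1])
```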
#### File: transformers/bert/bert_model.py
```python
import torch
import torch.nn as nn
from nlptoolkit.transformers.common import EncoderBlock
class Embedding(nn.Module):
def __init__(self, vocab_size, num_hiddens, n_segments, max_len=1000):
super().__init__()
self.token_embedding = nn.Embedding(vocab_size, num_hiddens)
self.segment_embedding = nn.Embedding(n_segments, num_hiddens)
self.pos_embedding = nn.Embedding(max_len, num_hiddens)
self.norm = nn.LayerNorm(num_hiddens)
def forward(self, x, seg):
seq_len = x.size(1)
        pos = torch.arange(seq_len, dtype=torch.long, device=x.device)
pos = pos.unsqueeze(0).expand_as(
x) # (seq_len, ) --> (batch_size, seq_len)
embedding = self.token_embedding(x) + self.pos_embedding(
pos) + self.segment_embedding(seg)
return self.norm(embedding)
class BERTEncoder(nn.Module):
"""BERT encoder.
- 与原始 TransformerEncoder不同,BERTEncoder使用片段嵌入和可学习的位置嵌入。
Defined in :numref:`subsec_bert_input_rep`
"""
def __init__(self,
vocab_size,
num_hiddens,
norm_shape,
ffn_num_input,
ffn_num_hiddens,
num_heads,
num_layers,
dropout,
max_len=1000,
key_size=768,
query_size=768,
value_size=768,
**kwargs):
super(BERTEncoder, self).__init__(**kwargs)
self.token_embedding = nn.Embedding(vocab_size, num_hiddens)
self.segment_embedding = nn.Embedding(2, num_hiddens)
self.blks = nn.Sequential()
for i in range(num_layers):
self.blks.add_module(
f'{i}',
EncoderBlock(key_size=key_size,
query_size=query_size,
value_size=value_size,
num_hiddens=num_hiddens,
norm_shape=norm_shape,
ffn_num_input=ffn_num_input,
ffn_num_hiddens=ffn_num_hiddens,
num_heads=num_heads,
dropout=dropout,
use_bias=True))
# In BERT, positional embeddings are learnable, thus we create a
# parameter of positional embeddings that are long enough
self.pos_embedding = nn.Parameter(torch.randn(1, max_len, num_hiddens))
def forward(self, tokens, segments, valid_lens):
# Shape of `X` remains unchanged in the following code snippet:
# (batch size, max sequence length, `num_hiddens`)
X = self.token_embedding(tokens) + self.segment_embedding(segments)
X = X + self.pos_embedding.data[:, :X.shape[1], :]
for blk in self.blks:
X = blk(X, valid_lens)
return X
class MaskLM(nn.Module):
"""The masked language model task of BERT.
- 80%时间为特殊的“<mask>“词元(例如,“this movie is great”变为“this movie is<mask>”;
- 10%时间为随机词元(例如,“this movie is great”变为“this movie is drink”);
- 10%时间内为不变的标签词元(例如,“this movie is great”变为“this movie is great”)
实现了MaskLM类来预测BERT预训练的掩蔽语言模型任务中的掩蔽标记。
- 预测使用单隐藏层的多层感知机(self.mlp)。在前向推断中,它需要两个输入:
- BERTEncoder的编码结果和用于预测的词元位置。
- 输出是这些位置的预测结果。
Defined in :numref:`subsec_bert_input_rep`
"""
def __init__(self, vocab_size, num_hiddens, num_inputs=768, **kwargs):
super(MaskLM, self).__init__(**kwargs)
self.mlp = nn.Sequential(nn.Linear(num_inputs, num_hiddens), nn.ReLU(),
nn.LayerNorm(num_hiddens),
nn.Linear(num_hiddens, vocab_size))
def forward(self, X, pred_positions):
num_pred_positions = pred_positions.shape[1]
pred_positions = pred_positions.reshape(-1)
batch_size = X.shape[0]
batch_idx = torch.arange(0, batch_size)
# Suppose that `batch_size` = 2, `num_pred_positions` = 3, then
# `batch_idx` is `torch.tensor([0, 0, 0, 1, 1, 1])`
batch_idx = torch.repeat_interleave(batch_idx, num_pred_positions)
masked_X = X[batch_idx, pred_positions]
masked_X = masked_X.reshape((batch_size, num_pred_positions, -1))
mlm_Y_hat = self.mlp(masked_X)
return mlm_Y_hat
class NextSentencePred(nn.Module):
"""The next sentence prediction task of BERT.
- 为了帮助理解两个文本序列之间的关系,BERT在预训练中考虑了一个二元分类任务——下一句预测。
- 在为预训练生成句子对时,有一半的时间它们确实是标签为“真”的连续句子;
- 在另一半的时间里,第二个句子是从语料库中随机抽取的,标记为“假”。
- NextSentencePred类使用单隐藏层的多层感知机来预测第二个句子是否是BERT输入序列中第一个句子的下一个句子。
- 由于Transformer编码器中的自注意力,特殊词元“<cls>”的BERT表示已经对输入的两个句子进行了编码。过程如下:
- step1: 带 “<cls>”标记的词元 X ;
- step2: encoded_X = BertEncoder(X) 编码后的词元
- step3: output = MLP(encoded_X[:, 0, :]) BertModel 的 Head, 0 是“<cls>”标记的索引
- step4: output = NextSentencePred(output) 单隐藏层的 MLP 预测下一个句子.
Defined in :numref:`subsec_mlm`
"""
def __init__(self, num_inputs, **kwargs):
super(NextSentencePred, self).__init__(**kwargs)
self.output = nn.Linear(num_inputs, 2)
def forward(self, X):
# `X` shape: (batch size, `num_hiddens`)
return self.output(X)
class BERTModel(nn.Module):
"""The BERT model.
- 定义BERTModel类, 实例化三个类:
- BERTEncoder
- MaskLM
- NextSentencePred
- 前向推断返回编码后的BERT表示encoded_X、掩蔽语言模型预测mlm_Y_hat和下一句预测nsp_Y_hat。
Defined in :numref:`subsec_nsp`
"""
def __init__(self,
vocab_size,
num_hiddens,
norm_shape,
ffn_num_input,
ffn_num_hiddens,
num_heads,
num_layers,
dropout,
max_len=1000,
key_size=768,
query_size=768,
value_size=768,
hid_in_features=768,
mlm_in_features=768,
nsp_in_features=768):
super(BERTModel, self).__init__()
self.encoder = BERTEncoder(vocab_size,
num_hiddens,
norm_shape,
ffn_num_input,
ffn_num_hiddens,
num_heads,
num_layers,
dropout,
max_len=max_len,
key_size=key_size,
query_size=query_size,
value_size=value_size)
self.hidden = nn.Sequential(nn.Linear(hid_in_features, num_hiddens),
nn.Tanh())
self.mlm = MaskLM(vocab_size, num_hiddens, mlm_in_features)
self.nsp = NextSentencePred(nsp_in_features)
def forward(self, tokens, segments, valid_lens=None, pred_positions=None):
encoded_X = self.encoder(tokens, segments, valid_lens)
if pred_positions is not None:
mlm_Y_hat = self.mlm(encoded_X, pred_positions)
else:
mlm_Y_hat = None
# The hidden layer of the MLP classifier for next sentence prediction.
# 0 is the index of the '<cls>' token
nsp_Y_hat = self.nsp(self.hidden(encoded_X[:, 0, :]))
return encoded_X, mlm_Y_hat, nsp_Y_hat
if __name__ == '__main__':
vocab_size, num_hiddens, ffn_num_hiddens, num_heads = 10000, 768, 1024, 4
norm_shape, ffn_num_input, num_layers, dropout = [768], 768, 2, 0.2
encoder = BERTEncoder(vocab_size, num_hiddens, norm_shape, ffn_num_input,
ffn_num_hiddens, num_heads, num_layers, dropout)
print(encoder)
tokens = torch.randint(0, vocab_size, (2, 8))
segments = torch.tensor([[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 1, 1]])
encoded_X = encoder(tokens, segments, None)
print(encoded_X.shape)
mlm = MaskLM(vocab_size, num_hiddens)
mlm_positions = torch.tensor([[1, 5, 2], [6, 1, 5]])
mlm_Y_hat = mlm(encoded_X, mlm_positions)
print(mlm_Y_hat.shape)
mlm_Y = torch.tensor([[7, 8, 9], [10, 20, 30]])
loss = nn.CrossEntropyLoss(reduction='none')
mlm_l = loss(mlm_Y_hat.reshape((-1, vocab_size)), mlm_Y.reshape(-1))
print(mlm_l.shape)
```
#### File: nlptoolkit/transformers/common.py
```python
import math
import d2l.torch as d2l
import torch
import torch.nn as nn
from torch import Tensor
from torch.nn import Transformer
from nlptoolkit.data.embeddings.pos_encding import (PositionalEncoding,
TokenEmbedding)
from nlptoolkit.layers.layers import PositionWiseFFN
from nlptoolkit.transformers.seq2seq import Encoder
class AddNorm(nn.Module):
"""残差连接后进行层规范化."""
def __init__(self, normalized_shape, dropout, **kwargs):
super(AddNorm, self).__init__(**kwargs)
self.dropout = nn.Dropout(dropout)
self.ln = nn.LayerNorm(normalized_shape)
def forward(self, X, Y):
return self.ln(self.dropout(Y) + X)
class EncoderBlock(nn.Module):
"""transformer编码器块."""
def __init__(self,
key_size,
query_size,
value_size,
num_hiddens,
norm_shape,
ffn_num_input,
ffn_num_hiddens,
num_heads,
dropout=0.1,
use_bias=False,
**kwargs):
super(EncoderBlock, self).__init__(**kwargs)
self.attention = d2l.MultiHeadAttention(key_size, query_size,
value_size, num_hiddens,
num_heads, dropout, use_bias)
self.addnorm1 = AddNorm(norm_shape, dropout)
self.ffn = PositionWiseFFN(ffn_num_input, ffn_num_hiddens, num_hiddens)
self.addnorm2 = AddNorm(norm_shape, dropout)
def forward(self, X, valid_lens):
Y = self.addnorm1(X, self.attention(X, X, X, valid_lens))
return self.addnorm2(Y, self.ffn(Y))
class TransformerEncoder(Encoder):
"""Transformer encoder.
Defined in :numref:`sec_transformer`
"""
def __init__(self,
vocab_size,
key_size,
query_size,
value_size,
num_hiddens,
norm_shape,
ffn_num_input,
ffn_num_hiddens,
num_heads,
num_layers,
dropout,
use_bias=False,
**kwargs):
super(TransformerEncoder, self).__init__(**kwargs)
self.num_hiddens = num_hiddens
self.embedding = nn.Embedding(vocab_size, num_hiddens)
self.pos_encoding = d2l.PositionalEncoding(num_hiddens, dropout)
self.blks = nn.Sequential()
for i in range(num_layers):
self.blks.add_module(
'block' + str(i),
EncoderBlock(key_size, query_size, value_size, num_hiddens,
norm_shape, ffn_num_input, ffn_num_hiddens,
num_heads, dropout, use_bias))
def forward(self, X, valid_lens, *args):
# Since positional encoding values are between -1 and 1, the embedding
# values are multiplied by the square root of the embedding dimension
# to rescale before they are summed up
X = self.pos_encoding(self.embedding(X) * math.sqrt(self.num_hiddens))
self.attention_weights = [None] * len(self.blks)
for i, blk in enumerate(self.blks):
X = blk(X, valid_lens)
self.attention_weights[
i] = blk.attention.attention.attention_weights
return X
class DecoderBlock(nn.Module):
"""解码器中第i个块."""
def __init__(self, key_size, query_size, value_size, num_hiddens,
norm_shape, ffn_num_input, ffn_num_hiddens, num_heads,
dropout, i, **kwargs):
super(DecoderBlock, self).__init__(**kwargs)
self.i = i
self.attention1 = d2l.MultiHeadAttention(key_size, query_size,
value_size, num_hiddens,
num_heads, dropout)
self.addnorm1 = AddNorm(norm_shape, dropout)
self.attention2 = d2l.MultiHeadAttention(key_size, query_size,
value_size, num_hiddens,
num_heads, dropout)
self.addnorm2 = AddNorm(norm_shape, dropout)
self.ffn = PositionWiseFFN(ffn_num_input, ffn_num_hiddens, num_hiddens)
self.addnorm3 = AddNorm(norm_shape, dropout)
def forward(self, X, state):
enc_outputs, enc_valid_lens = state[0], state[1]
# 训练阶段,输出序列的所有词元都在同一时间处理,
# 因此state[2][self.i]初始化为None。
# 预测阶段,输出序列是通过词元一个接着一个解码的,
# 因此state[2][self.i]包含着直到当前时间步第i个块解码的输出表示
if state[2][self.i] is None:
key_values = X
else:
key_values = torch.cat((state[2][self.i], X), axis=1)
state[2][self.i] = key_values
if self.training:
batch_size, num_steps, _ = X.shape
# dec_valid_lens的开头:(batch_size,num_steps),
# 其中每一行是[1,2,...,num_steps]
dec_valid_lens = torch.arange(1, num_steps + 1,
device=X.device).repeat(
batch_size, 1)
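            # For example, batch_size=2 and num_steps=4 give
            # dec_valid_lens = tensor([[1, 2, 3, 4],
            #                          [1, 2, 3, 4]]),
            # i.e. query position i may only attend to key positions 0..i (causal mask).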
else:
dec_valid_lens = None
# 自注意力
X2 = self.attention1(X, key_values, key_values, dec_valid_lens)
Y = self.addnorm1(X, X2)
# 编码器-解码器注意力。
# enc_outputs的开头:(batch_size,num_steps,num_hiddens)
Y2 = self.attention2(Y, enc_outputs, enc_outputs, enc_valid_lens)
Z = self.addnorm2(Y, Y2)
return self.addnorm3(Z, self.ffn(Z)), state
class TransformerDecoder(d2l.AttentionDecoder):
def __init__(self, vocab_size, key_size, query_size, value_size,
num_hiddens, norm_shape, ffn_num_input, ffn_num_hiddens,
num_heads, num_layers, dropout, **kwargs):
super(TransformerDecoder, self).__init__(**kwargs)
self.num_hiddens = num_hiddens
self.num_layers = num_layers
self.embedding = nn.Embedding(vocab_size, num_hiddens)
self.pos_encoding = d2l.PositionalEncoding(num_hiddens, dropout)
self.blks = nn.Sequential()
for i in range(num_layers):
self.blks.add_module(
'block' + str(i),
DecoderBlock(key_size, query_size, value_size, num_hiddens,
norm_shape, ffn_num_input, ffn_num_hiddens,
num_heads, dropout, i))
self.dense = nn.Linear(num_hiddens, vocab_size)
def init_state(self, enc_outputs, enc_valid_lens, *args):
return [enc_outputs, enc_valid_lens, [None] * self.num_layers]
def forward(self, X, state):
X = self.pos_encoding(self.embedding(X) * math.sqrt(self.num_hiddens))
self._attention_weights = [[None] * len(self.blks) for _ in range(2)]
for i, blk in enumerate(self.blks):
X, state = blk(X, state)
# 解码器自注意力权重
self._attention_weights[0][
i] = blk.attention1.attention.attention_weights
# “编码器-解码器”自注意力权重
self._attention_weights[1][
i] = blk.attention2.attention.attention_weights
return self.dense(X), state
@property
def attention_weights(self):
return self._attention_weights
# Seq2Seq Network
class Seq2SeqTransformer(nn.Module):
def __init__(self,
num_encoder_layers: int,
num_decoder_layers: int,
emb_size: int,
nhead: int,
src_vocab_size: int,
tgt_vocab_size: int,
dim_feedforward: int = 512,
dropout: float = 0.1):
super(Seq2SeqTransformer, self).__init__()
self.transformer = Transformer(d_model=emb_size,
nhead=nhead,
num_encoder_layers=num_encoder_layers,
num_decoder_layers=num_decoder_layers,
dim_feedforward=dim_feedforward,
dropout=dropout)
self.generator = nn.Linear(emb_size, tgt_vocab_size)
self.src_tok_emb = TokenEmbedding(src_vocab_size, emb_size)
self.tgt_tok_emb = TokenEmbedding(tgt_vocab_size, emb_size)
self.positional_encoding = PositionalEncoding(emb_size,
dropout=dropout)
def forward(self, src: Tensor, trg: Tensor, src_mask: Tensor,
tgt_mask: Tensor, src_padding_mask: Tensor,
tgt_padding_mask: Tensor, memory_key_padding_mask: Tensor):
src_emb = self.positional_encoding(self.src_tok_emb(src))
tgt_emb = self.positional_encoding(self.tgt_tok_emb(trg))
outs = self.transformer(src_emb, tgt_emb, src_mask, tgt_mask, None,
src_padding_mask, tgt_padding_mask,
memory_key_padding_mask)
return self.generator(outs)
def encode(self, src: Tensor, src_mask: Tensor):
return self.transformer.encoder(
self.positional_encoding(self.src_tok_emb(src)), src_mask)
def decode(self, tgt: Tensor, memory: Tensor, tgt_mask: Tensor):
return self.transformer.decoder(
self.positional_encoding(self.tgt_tok_emb(tgt)), memory, tgt_mask)
if __name__ == '__main__':
X = torch.ones((2, 100, 24))
valid_lens = torch.tensor([3, 2])
encoder_blk = EncoderBlock(24, 24, 24, 24, [100, 24], 24, 48, 8, 0.5)
encoder_blk.eval()
res = encoder_blk(X, valid_lens).shape
print(res)
encoder = TransformerEncoder(200, 24, 24, 24, 24, [100, 24], 24, 48, 8, 2,
0.5)
encoder.eval()
res = encoder(torch.ones((2, 100), dtype=torch.long), valid_lens).shape
print(res)
``` |
{
"source": "jianzhnie/Happywhale",
"score": 3
} |
#### File: happywhale/data/happywhale.py
```python
from typing import Callable, Dict, Optional
import pandas as pd
import torch
from PIL import Image
from torch.utils.data import Dataset
class HappyWhaleDataset(Dataset):
def __init__(self, df: pd.DataFrame, transform: Optional[Callable] = None):
self.df = df
self.transform = transform
self.image_names = self.df['image'].values
self.image_paths = self.df['image_path'].values
self.targets = self.df['individual_id'].values
def __getitem__(self, index: int) -> Dict[str, torch.Tensor]:
# 图片名字
image_name = self.image_names[index]
# 图片路径
image_path = self.image_paths[index]
image = Image.open(image_path)
if self.transform:
image = self.transform(image)
target = self.targets[index]
target = torch.tensor(target, dtype=torch.long)
return {'image_name': image_name, 'image': image, 'target': target}
def __len__(self) -> int:
return len(self.df)
```
#### File: happywhale/models/lighting_model.py
```python
import pytorch_lightning as pl
import timm
import torch
import torch.nn as nn
from timm.optim import create_optimizer_v2
from happywhale.losses.focalloss import FocalLoss
from .layers.arcmargin import ArcMarginProduct
class LitModule(pl.LightningModule):
def __init__(self, model_name, pretrained, drop_rate, embedding_size,
num_classes, arc_s, arc_m, arc_easy_margin, arc_ls_eps,
optimizer, learning_rate, weight_decay, len_train_dl, epochs):
super().__init__()
self.save_hyperparameters()
self.model = timm.create_model(model_name,
pretrained=pretrained,
drop_rate=drop_rate)
# self.model = models.__dict__[model_name](pretrained=True)
self.embedding = nn.Linear(self.model.get_classifier().in_features,
embedding_size)
self.model.reset_classifier(num_classes=0, global_pool='avg')
self.arc = ArcMarginProduct(
in_features=embedding_size,
out_features=num_classes,
s=arc_s,
m=arc_m,
easy_margin=arc_easy_margin,
ls_eps=arc_ls_eps,
)
# self.loss_fn = F.cross_entropy
self.loss_fn = FocalLoss()
def forward(self, images):
features = self.model(images)
embeddings = self.embedding(features)
return embeddings
def configure_optimizers(self):
# 优化器
optimizer = create_optimizer_v2(
self.parameters(),
opt=self.hparams.optimizer,
lr=self.hparams.learning_rate,
weight_decay=self.hparams.weight_decay,
)
# optimizer = torch.optim.SGD()
# 学习率调整
scheduler = torch.optim.lr_scheduler.OneCycleLR(
optimizer,
self.hparams.learning_rate,
steps_per_epoch=self.hparams.len_train_dl,
epochs=self.hparams.epochs,
)
scheduler = {'scheduler': scheduler, 'interval': 'step'}
return [optimizer], [scheduler]
def training_step(self, batch, batch_idx):
return self._step(batch, 'train')
def validation_step(self, batch, batch_idx):
return self._step(batch, 'val')
def _step(self, batch, step):
images, targets = batch['image'], batch['target']
embeddings = self(images)
outputs = self.arc(embeddings, targets, self.device)
loss = self.loss_fn(outputs, targets)
# 标记该loss,用于保存模型时监控该量
self.log(f'{step}_loss', loss)
return loss
```
#### File: jianzhnie/Happywhale/train.py
```python
import argparse
import pytorch_lightning as pl
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from happywhale.data.lightingdata import LitDataModule
from happywhale.models.lighting_model import LitModule
def parse_args():
parser = argparse.ArgumentParser(
description='Model-based Asynchronous HPO')
parser.add_argument('--data_path',
default='',
type=str,
help='path to dataset')
parser.add_argument('--train_csv_file',
default='',
type=str,
help='path to train dataset')
parser.add_argument('--test_csv_file',
default='',
type=str,
help='path to test dataset')
parser.add_argument('--model_name',
metavar='MODEL',
default='resnet18',
help='model architecture: (default: resnet18)')
parser.add_argument('--pretrained',
dest='pretrained',
action='store_true',
help='use pre-trained model')
parser.add_argument('-j',
'--workers',
type=int,
default=4,
metavar='N',
help='how many training processes to use (default: 1)')
parser.add_argument('--epochs',
default=90,
type=int,
metavar='N',
help='number of total epochs to run')
parser.add_argument('--image-size',
default=256,
type=int,
help='resolution of image')
parser.add_argument('-b',
'--batch-size',
default=256,
type=int,
metavar='N',
help='mini-batch size (default: 256) per gpu')
parser.add_argument('--lr',
'--learning-rate',
default=0.1,
type=float,
metavar='LR',
help='initial learning rate',
dest='lr')
parser.add_argument('--end-lr',
'--minimum learning-rate',
default=1e-8,
type=float,
metavar='END-LR',
help='initial learning rate')
parser.add_argument(
'--lr-schedule',
default='step',
type=str,
metavar='SCHEDULE',
choices=['step', 'linear', 'cosine', 'exponential'],
help='Type of LR schedule: {}, {}, {} , {}'.format(
'step', 'linear', 'cosine', 'exponential'),
)
parser.add_argument('--warmup',
default=0,
type=int,
metavar='E',
help='number of warmup epochs')
parser.add_argument('--optimizer',
default='sgd',
type=str,
choices=('sgd', 'rmsprop', 'adamw'))
parser.add_argument('--momentum',
default=0.9,
type=float,
metavar='M',
help='momentum')
parser.add_argument('--wd',
'--weight-decay',
default=1e-4,
type=float,
metavar='W',
help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument(
'--augmentation',
type=str,
default=None,
choices=[None, 'autoaugment'],
help='augmentation method',
)
parser.add_argument('--log_interval',
default=10,
type=int,
metavar='N',
help='print frequency (default: 10)')
parser.add_argument('--resume',
default=None,
type=str,
metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--evaluate',
dest='evaluate',
action='store_true',
help='evaluate model on validation set')
parser.add_argument('--checkpoint-filename',
default='checkpoint.pth',
type=str)
parser.add_argument('--seed',
type=int,
default=42,
metavar='S',
help='random seed (default: 42)')
parser.add_argument('--checkpoint_dir',
default='work_dirs',
type=str,
help='output directory for model and log')
args = parser.parse_args()
return args
def train(train_csv_file,
test_csv_file,
val_fold=0.0,
image_size=256,
batch_size=64,
num_workers=4,
model_name='resnet50',
pretrained=True,
drop_rate=0.0,
embedding_size=512,
num_classes=15587,
arc_s=30.0,
arc_m=0.5,
arc_easy_margin=False,
arc_ls_eps=0.0,
optimizer='adam',
learning_rate=3e-4,
weight_decay=1e-6,
checkpoint_dir=None,
accumulate_grad_batches=1,
auto_lr_find=False,
auto_scale_batch_size=False,
fast_dev_run=False,
gpus=1,
max_epochs=100,
precision=16,
stochastic_weight_avg=True,
debug=True):
pl.seed_everything(42)
# 定义数据集
datamodule = LitDataModule(
train_csv_file=train_csv_file,
test_csv_file=test_csv_file,
val_fold=val_fold,
image_size=image_size,
batch_size=batch_size,
num_workers=num_workers,
)
datamodule.setup()
len_train_dl = len(datamodule.train_dataloader())
# 定义模型
module = LitModule(model_name=model_name,
pretrained=pretrained,
drop_rate=drop_rate,
embedding_size=embedding_size,
num_classes=num_classes,
arc_s=arc_s,
arc_m=arc_m,
arc_easy_margin=arc_easy_margin,
arc_ls_eps=arc_ls_eps,
optimizer=optimizer,
learning_rate=learning_rate,
weight_decay=weight_decay,
len_train_dl=len_train_dl,
epochs=max_epochs)
# 初始化ModelCheckpoint回调,并设置要监控的量。
# monitor:需要监控的量,string类型。
# 例如'val_loss'(在training_step() or validation_step()函数中通过self.log('val_loss', loss)进行标记);
# 默认为None,只保存最后一个epoch的模型参数,
model_checkpoint = ModelCheckpoint(checkpoint_dir,
filename=f'{model_name}_{image_size}',
monitor='val_loss',
save_top_k=5)
# 定义trainer
trainer = pl.Trainer(
accumulate_grad_batches=accumulate_grad_batches, # 每k次batches累计一次梯度
auto_lr_find=auto_lr_find,
auto_scale_batch_size=auto_scale_batch_size,
benchmark=True,
callbacks=[model_checkpoint], # 添加回调函数或回调函数列表
deterministic=True,
fast_dev_run=fast_dev_run,
gpus=gpus, # 使用的gpu数量(int)或gpu节点列表(list或str)
max_epochs=2 if debug else max_epochs, # 最多训练轮数
precision=precision,
stochastic_weight_avg=stochastic_weight_avg,
limit_train_batches=0.1
if debug else 1.0, # 使用训练/测试/验证/预测数据的百分比。如果数据过多,或正在调试可以使用。
limit_val_batches=0.1 if debug else 1.0,
)
# Trainer.tune()对模型超参数进行调整
trainer.tune(module, datamodule=datamodule)
# 开始训练
# Trainer.fit() 参数详解
# model->LightningModule实例;
# train_dataloaders->训练数据加载器
# val_dataloaders->验证数据加载器
# ckpt_path->ckpt文件路径(从这里文件恢复训练)
# datamodule->LightningDataModule实例
trainer.fit(module, datamodule=datamodule)
if __name__ == '__main__':
args = parse_args()
train(train_csv_file=args.train_csv_file,
test_csv_file=args.test_csv_file,
model_name=args.model_name,
image_size=args.image_size,
batch_size=args.batch_size,
pretrained=args.pretrained,
checkpoint_dir=args.checkpoint_dir)
``` |
{
"source": "jianzhnie/models",
"score": 2
} |
#### File: slim/serving/exporter.py
```python
import os
import tensorflow.compat.v1 as tf
from tensorflow.python.tools import freeze_graph
from tensorflow.contrib import quantize as contrib_quantize
from tensorflow.python.platform import gfile
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import (signature_constants,
                                           signature_def_utils, tag_constants,
                                           utils)
from datasets import dataset_factory
from nets import nets_factory
tf.app.flags.DEFINE_string(
'model_name', 'inception_v3', 'The name of the architecture to save.')
tf.app.flags.DEFINE_boolean(
'is_training', False,
'Whether to save out a training-focused version of the model.')
tf.app.flags.DEFINE_integer(
'image_size', None,
'The image size to use, otherwise use the model default_image_size.')
tf.app.flags.DEFINE_integer(
'batch_size', None,
'Batch size for the exported model. Defaulted to "None" so batch size can '
'be specified at model runtime.')
tf.app.flags.DEFINE_string('dataset_name', 'imagenet',
'The name of the dataset to use with the model.')
tf.app.flags.DEFINE_integer(
'labels_offset', 0,
'An offset for the labels in the dataset. This flag is primarily used to '
'evaluate the VGG and ResNet architectures which do not use a background '
'class for the ImageNet dataset.')
tf.app.flags.DEFINE_string(
'dataset_dir', '', 'Directory to save intermediate dataset files to')
tf.app.flags.DEFINE_bool(
'quantize', False, 'whether to use quantized graph or not.')
tf.app.flags.DEFINE_bool('write_text_graphdef', True,
'Whether to write a text version of graphdef.')
tf.app.flags.DEFINE_bool('use_grayscale', False,
'Whether to convert input images to grayscale.')
tf.app.flags.DEFINE_string(
'output_dir', '', 'Where to save the resulting file to.')
tf.app.flags.DEFINE_string(
'ckpt_file', '', 'The checkpoint file uses for convert.')
tf.app.flags.DEFINE_string(
'output_prototxt_file', 'export_model.prototxt', 'Where to save the resulting file to.')
tf.app.flags.DEFINE_string(
'output_pb_file', 'export_model.pb', 'Where to save the resulting file to.')
FLAGS = tf.app.flags.FLAGS
output_node_names = dict(resnet_v1_50='resnet_v1_50/predictions/Reshape_1',
inception_v3='InceptionV3/Predictions/Reshape_1',
vgg_16='vgg_16/Predictions/Reshape_1')
def main(_):
    tf.logging.set_verbosity(tf.logging.INFO)
    # Resolve the output paths only after the flags have been parsed.
    FLAGS.output_prototxt_file = os.path.join(FLAGS.output_dir,
                                              FLAGS.output_prototxt_file)
    FLAGS.output_pb_file = os.path.join(FLAGS.output_dir, FLAGS.output_pb_file)
with tf.Graph().as_default() as graph:
dataset = dataset_factory.get_dataset(FLAGS.dataset_name, 'train',
FLAGS.dataset_dir)
network_fn = nets_factory.get_network_fn(
FLAGS.model_name,
num_classes=(dataset.num_classes - FLAGS.labels_offset),
is_training=FLAGS.is_training)
image_size = FLAGS.image_size or network_fn.default_image_size
num_channels = 1 if FLAGS.use_grayscale else 3
input_shape = [FLAGS.batch_size, image_size, image_size, num_channels]
placeholder = tf.placeholder(name='input', dtype=tf.float32,
shape=input_shape)
network_fn(placeholder)
with tf.Session(graph=tf.Graph()) as sess:
input_tensor = tf.placeholder(name='input', dtype=tf.float32,
shape=input_shape)
# perform inference on the input image
logits_tf = network_fn(input_tensor)
# extract the segmentation mask
predictions_tf = tf.argmax(logits_tf, axis=3)
# specify the directory where the pre-trained model weights are stored
    # NOTE: the original line was missing its first argument; FLAGS.output_dir is an assumed base directory
    pre_trained_model_dir = os.path.join(FLAGS.output_dir, FLAGS.model_name, "train")
saver = tf.train.Saver()
# Restore variables from disk.
saver.restore(sess, os.path.join(pre_trained_model_dir, "model.ckpt"))
print("Model", model_name, "restored.")
# Create SavedModelBuilder class
# defines where the model will be exported
export_path_base = FLAGS.export_model_dir
export_path = os.path.join(
tf.compat.as_bytes(export_path_base),
tf.compat.as_bytes(str(FLAGS.model_version)))
print('Exporting trained model to', export_path)
builder = tf.saved_model.builder.SavedModelBuilder(export_path)
def export_model(session, m):
            # Only this block needs to change: define the model inputs and outputs; keep the rest at the defaults
model_signature = signature_def_utils.build_signature_def(
inputs={"input": utils.build_tensor_info(m.a)},
outputs={
"output": utils.build_tensor_info(m.y)},
method_name=signature_constants.PREDICT_METHOD_NAME)
export_path = "pb_model/1"
if os.path.exists(export_path):
os.system("rm -rf "+ export_path)
print("Export the model to {}".format(export_path))
try:
legacy_init_op = tf.group(
tf.tables_initializer(), name='legacy_init_op')
builder = saved_model_builder.SavedModelBuilder(export_path)
builder.add_meta_graph_and_variables(
session, [tag_constants.SERVING],
clear_devices=True,
signature_def_map={
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
model_signature,
},
legacy_init_op=legacy_init_op)
builder.save()
except Exception as e:
print("Fail to export saved model, exception: {}".format(e))
def save_model():
input_shape = [FLAGS.batch_size, image_size, image_size, num_channels]
placeholder = tf.placeholder(name='input', dtype=tf.float32,
shape=input_shape)
output = tf.placeholder(name='output', dtype=tf.float32)
with tf.Session() as sess:
model_path = './model/saved_model_builder/1/'
builder = tf.saved_model.builder.SavedModelBuilder(model_path)
inputs = {'input': tf.saved_model.utils.build_tensor_info(placeholder)}
outputs = {'output': tf.saved_model.utils.build_tensor_info(output)}
method_name = tf.saved_model.signature_constants.PREDICT_METHOD_NAME
prediction_signature = tf.saved_model.signature_def_utils.build_signature_def(inputs, outputs, method_name)
builder.add_meta_graph_and_variables(
sess, [tf.saved_model.tag_constants.SERVING],
signature_def_map={'predict_signature': prediction_signature})
builder.save()
def main():
save_model()
if __name__ == '__main__':
main()
``` |
{
"source": "jianzhnie/MultimodalTransformer",
"score": 2
} |
#### File: jianzhnie/MultimodalTransformer/evaluation.py
```python
import math
from typing import Callable, Dict
import numpy as np
from scipy.special import softmax
from sklearn.metrics import auc, confusion_matrix, f1_score, matthews_corrcoef, mean_absolute_error, mean_squared_error, precision_recall_curve, roc_auc_score
from transformers import EvalPrediction
def build_compute_metrics_fn(
task_name: str) -> Callable[[EvalPrediction], Dict]:
def compute_metrics_fn(p: EvalPrediction):
if task_name == 'classification':
preds_labels = np.argmax(p.predictions, axis=1)
if p.predictions.shape[-1] == 2:
pred_scores = softmax(p.predictions, axis=1)[:, 1]
else:
pred_scores = softmax(p.predictions, axis=1)
return calc_classification_metrics(pred_scores, preds_labels,
p.label_ids)
elif task_name == 'regression':
preds = np.squeeze(p.predictions)
return calc_regression_metrics(preds, p.label_ids)
else:
return {}
return compute_metrics_fn
def calc_classification_metrics(pred_scores, pred_labels, labels):
if len(np.unique(labels)) == 2: # binary classification
roc_auc_pred_score = roc_auc_score(labels, pred_scores)
precisions, recalls, thresholds = precision_recall_curve(
labels, pred_scores)
fscore = (2 * precisions * recalls) / (precisions + recalls)
fscore[np.isnan(fscore)] = 0
ix = np.argmax(fscore)
threshold = thresholds[ix].item()
pr_auc = auc(recalls, precisions)
tn, fp, fn, tp = confusion_matrix(
labels, pred_labels, labels=[0, 1]).ravel()
result = {
'roc_auc': roc_auc_pred_score,
'threshold': threshold,
'pr_auc': pr_auc,
'recall': recalls[ix].item(),
'precision': precisions[ix].item(),
'f1': fscore[ix].item(),
'tn': tn.item(),
'fp': fp.item(),
'fn': fn.item(),
'tp': tp.item()
}
else:
acc = (pred_labels == labels).mean()
f1_micro = f1_score(y_true=labels, y_pred=pred_labels, average='micro')
f1_macro = f1_score(y_true=labels, y_pred=pred_labels, average='macro')
f1_weighted = f1_score(
y_true=labels, y_pred=pred_labels, average='weighted')
result = {
'acc': acc,
'f1_micro': f1_micro,
'f1_macro': f1_macro,
'f1_weighted': f1_weighted,
'mcc': matthews_corrcoef(labels, pred_labels),
}
return result
def calc_regression_metrics(preds, labels):
mse = mean_squared_error(labels, preds)
rmse = math.sqrt(mse)
mae = mean_absolute_error(labels, preds)
return {
'mse': mse,
'rmse': rmse,
'mae': mae,
}
```
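A minimal usage sketch for the metric helpers above, on made-up binary-classification and regression outputs; the import path is an assumption based on the file location:

```python
import numpy as np
from evaluation import calc_classification_metrics, calc_regression_metrics  # assumed module path

# dummy binary-classification outputs
labels = np.array([0, 1, 1, 0, 1])
pred_scores = np.array([0.2, 0.8, 0.6, 0.4, 0.9])   # probability of the positive class
pred_labels = (pred_scores > 0.5).astype(int)
print(calc_classification_metrics(pred_scores, pred_labels, labels))

# dummy regression outputs
preds = np.array([1.1, 2.0, 2.9])
targets = np.array([1.0, 2.0, 3.0])
print(calc_regression_metrics(preds, targets))  # {'mse': ..., 'rmse': ..., 'mae': ...}
```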
#### File: examples/clip/inference.py
```python
import sys
import cv2
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
from multimodal_transformers.models.clip.config_clip import ClipConfig as cfg
from multimodal_transformers.models.clip.datasets_clip import build_loaders, make_train_valid_dfs
from multimodal_transformers.models.clip.modeling_clip import CLIPModel
from tqdm import tqdm
from transformers import DistilBertTokenizer
sys.path.append('../../')
def get_image_embeddings(valid_df, model_path):
tokenizer = DistilBertTokenizer.from_pretrained(cfg.text_tokenizer)
valid_loader = build_loaders(valid_df, tokenizer, mode='valid')
model = CLIPModel().to(cfg.device)
model.load_state_dict(torch.load(model_path, map_location=cfg.device))
model.eval()
valid_image_embeddings = []
with torch.no_grad():
for batch in tqdm(valid_loader):
image_features = model.image_encoder(batch['image'].to(cfg.device))
image_embeddings = model.image_projection(image_features)
valid_image_embeddings.append(image_embeddings)
return model, torch.cat(valid_image_embeddings)
def find_matches(model, image_embeddings, query, image_filenames, n=9):
tokenizer = DistilBertTokenizer.from_pretrained(cfg.text_tokenizer)
encoded_query = tokenizer([query])
batch = {
key: torch.tensor(values).to(cfg.device)
for key, values in encoded_query.items()
}
with torch.no_grad():
text_features = model.text_encoder(
input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'])
text_embeddings = model.text_projection(text_features)
image_embeddings_n = F.normalize(image_embeddings, p=2, dim=-1)
text_embeddings_n = F.normalize(text_embeddings, p=2, dim=-1)
dot_similarity = text_embeddings_n @ image_embeddings_n.T
_, indices = torch.topk(dot_similarity.squeeze(0), n * 5)
matches = [image_filenames[idx] for idx in indices[::5]]
_, axes = plt.subplots(3, 3, figsize=(10, 10))
for match, ax in zip(matches, axes.flatten()):
image = cv2.imread(f'{cfg.image_path}/{match}')
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
ax.imshow(image)
ax.axis('off')
plt.savefig('demo.png')
plt.show()
if __name__ == '__main__':
model_file = 'best.pt'
train_df, valid_df = make_train_valid_dfs()
model, image_embeddings = get_image_embeddings(
valid_df, model_path=model_file)
find_matches(
model,
image_embeddings,
query='one dog setting on the grass',
image_filenames=valid_df['image'].values,
n=9)
```
#### File: data/preprocessor/text_preprocessor.py
```python
from typing import Optional
import numpy as np
import pandas as pd
from pytorch_widedeep.utils.fastai_transforms import Vocab
from pytorch_widedeep.utils.text_utils import build_embeddings_matrix, get_texts, pad_sequences
from .base_preprocessor import BasePreprocessor, check_is_fitted
class TextPreprocessor(BasePreprocessor):
r"""Preprocessor to prepare the ``deeptext`` input dataset
Parameters
----------
text_col: str
column in the input dataframe containing the texts
max_vocab: int, default=30000
Maximum number of tokens in the vocabulary
min_freq: int, default=5
Minimum frequency for a token to be part of the vocabulary
maxlen: int, default=80
Maximum length of the tokenized sequences
pad_first: bool, default = True
Indicates whether the padding index will be added at the beginning or the
end of the sequences
pad_idx: int, default = 1
padding index. Fastai's Tokenizer leaves 0 for the 'unknown' token.
word_vectors_path: str, Optional
Path to the pretrained word vectors
verbose: int, default 1
Enable verbose output.
Attributes
----------
vocab: Vocab
an instance of :class:`pytorch_widedeep.utils.fastai_transforms.Vocab`
embedding_matrix: np.ndarray
Array with the pretrained embeddings
tokens: List
List with Lists of str containing the tokenized texts
Examples
---------
>>> import pandas as pd
>>> from pytorch_widedeep.preprocessing import TextPreprocessor
>>> df_train = pd.DataFrame({'text_column': ["life is like a box of chocolates",
... "You never know what you're gonna get"]})
>>> text_preprocessor = TextPreprocessor(text_col='text_column', max_vocab=25, min_freq=1, maxlen=10)
>>> text_preprocessor.fit_transform(df_train)
The vocabulary contains 24 tokens
array([[ 1, 1, 1, 1, 10, 11, 12, 13, 14, 15],
[ 5, 9, 16, 17, 18, 9, 19, 20, 21, 22]], dtype=int32)
>>> df_te = pd.DataFrame({'text_column': ['you never know what is in the box']})
>>> text_preprocessor.transform(df_te)
array([[ 1, 1, 9, 16, 17, 18, 11, 0, 0, 13]], dtype=int32)
"""
def __init__(
self,
text_col: str,
max_vocab: int = 30000,
min_freq: int = 5,
maxlen: int = 80,
pad_first: bool = True,
pad_idx: int = 1,
word_vectors_path: Optional[str] = None,
verbose: int = 1,
):
super(TextPreprocessor, self).__init__()
self.text_col = text_col
self.max_vocab = max_vocab
self.min_freq = min_freq
self.maxlen = maxlen
self.pad_first = pad_first
self.pad_idx = pad_idx
self.word_vectors_path = word_vectors_path
self.verbose = verbose
def fit(self, df: pd.DataFrame) -> BasePreprocessor:
"""Builds the vocabulary."""
texts = df[self.text_col].tolist()
tokens = get_texts(texts)
self.vocab = Vocab.create(
tokens, max_vocab=self.max_vocab, min_freq=self.min_freq)
if self.verbose:
print('The vocabulary contains {} tokens'.format(
len(self.vocab.stoi)))
if self.word_vectors_path is not None:
self.embedding_matrix = build_embeddings_matrix(
self.vocab, self.word_vectors_path, self.min_freq)
return self
def transform(self, df: pd.DataFrame) -> np.ndarray:
"""Returns the padded, `numericalised` sequences."""
check_is_fitted(self, attributes=['vocab'])
texts = df[self.text_col].tolist()
self.tokens = get_texts(texts)
sequences = [self.vocab.numericalize(t) for t in self.tokens]
padded_seq = np.array([
pad_sequences(
s,
maxlen=self.maxlen,
pad_first=self.pad_first,
pad_idx=self.pad_idx,
) for s in sequences
])
return padded_seq
def fit_transform(self, df: pd.DataFrame) -> np.ndarray:
"""Combines ``fit`` and ``transform``"""
return self.fit(df).transform(df)
def inverse_transform(self, padded_seq: np.ndarray) -> pd.DataFrame:
"""Returns the original text plus the added 'special' tokens."""
texts = [self.vocab.textify(num) for num in padded_seq]
return pd.DataFrame({self.text_col: texts})
```
#### File: data/utils/text_token.py
```python
from functools import partial
import pandas as pd
from .utils import agg_text_columns_func, convert_to_func, get_matching_cols
def get_text_token(data_df,
text_cols,
tokenizer,
sep_text_token_str=' ',
empty_text_values=None,
replace_empty_text=None,
max_token_length=None):
if empty_text_values is None:
empty_text_values = ['nan', 'None']
text_cols_func = convert_to_func(text_cols)
agg_func = partial(agg_text_columns_func, empty_text_values,
replace_empty_text)
texts_cols = get_matching_cols(data_df, text_cols_func)
texts_list = data_df[texts_cols].agg(agg_func, axis=1).tolist()
for i, text in enumerate(texts_list):
texts_list[i] = f' {sep_text_token_str} '.join(text)
hf_model_text_input = tokenizer(
texts_list, padding=True, truncation=True, max_length=max_token_length)
return hf_model_text_input
if __name__ == '__main__':
import torch
from torch import tensor
from transformers import AutoConfig, AutoTokenizer
config = AutoConfig.from_pretrained('bert-base-uncased')
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
df = pd.read_csv(
'/home/robin/jianzh/multimodal/Multimodal-Toolkit/datasets/Womens_Clothing_E-Commerce_Reviews/test.csv'
)
text_cols = ['Title', 'Review Text']
text_cols = ['Division Name', 'Department Name', 'Class Name']
print(df[text_cols])
text_cols_func = convert_to_func(text_cols)
empty_text_values = ['nan', 'None']
replace_empty_text = None
agg_func = partial(agg_text_columns_func, empty_text_values,
replace_empty_text)
print(agg_func)
texts_cols = get_matching_cols(df, text_cols_func)
print(text_cols)
texts_list = df[texts_cols].agg(agg_func, axis=1).tolist()
text_encoder = get_text_token(
df,
text_cols=['Title', 'Review Text'],
tokenizer=tokenizer,
sep_text_token_str=tokenizer.sep_token,
max_token_length=16,
)
item = {key: torch.tensor(val[0]) for key, val in text_encoder.items()}
print(item)
item = {
'input_ids':
tensor([
101, 2307, 2801, 1010, 3532, 7781, 102, 1045, 7078, 3866, 1996,
2801, 1997, 2019, 17876, 102
]),
'token_type_ids':
tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
'attention_mask':
tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
}
``` |
{
"source": "jianzhnie/self_supervised",
"score": 2
} |
#### File: self_supervised/data/data_simmim.py
```python
import numpy as np
import torch
import torch.distributed as dist
import torchvision.transforms as T
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from torch.utils.data import DataLoader, DistributedSampler
from torch.utils.data._utils.collate import default_collate
from torchvision.datasets import ImageFolder
class MaskGenerator:
def __init__(self,
input_size=192,
mask_patch_size=32,
model_patch_size=4,
mask_ratio=0.6):
self.input_size = input_size
self.mask_patch_size = mask_patch_size
self.model_patch_size = model_patch_size
self.mask_ratio = mask_ratio
assert self.input_size % self.mask_patch_size == 0
assert self.mask_patch_size % self.model_patch_size == 0
self.rand_size = self.input_size // self.mask_patch_size
self.scale = self.mask_patch_size // self.model_patch_size
self.token_count = self.rand_size**2
self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))
def __call__(self):
mask_idx = np.random.permutation(self.token_count)[:self.mask_count]
mask = np.zeros(self.token_count, dtype=int)
mask[mask_idx] = 1
        # resulting mask shape: 48 * 48 (rand_size * scale on each side)
        # i.e. the mask resolution matches the model patch grid (input_size // model_patch_size)
mask = mask.reshape((self.rand_size, self.rand_size))
mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)
return mask
class SimMIMTransform:
def __init__(self, config):
self.transform_img = T.Compose([
T.Lambda(lambda img: img.convert('RGB')
if img.mode != 'RGB' else img),
T.RandomResizedCrop(config.DATA.IMG_SIZE,
scale=(0.67, 1.),
ratio=(3. / 4., 4. / 3.)),
T.RandomHorizontalFlip(),
T.ToTensor(),
T.Normalize(mean=torch.tensor(IMAGENET_DEFAULT_MEAN),
std=torch.tensor(IMAGENET_DEFAULT_STD)),
])
if config.MODEL.TYPE == 'swin':
model_patch_size = config.MODEL.SWIN.PATCH_SIZE
elif config.MODEL.TYPE == 'vit':
model_patch_size = config.MODEL.VIT.PATCH_SIZE
else:
raise NotImplementedError
self.mask_generator = MaskGenerator(
input_size=config.DATA.IMG_SIZE,
mask_patch_size=config.DATA.MASK_PATCH_SIZE,
model_patch_size=model_patch_size,
mask_ratio=config.DATA.MASK_RATIO,
)
def __call__(self, img):
img = self.transform_img(img)
mask = self.mask_generator()
return img, mask
def collate_fn(batch):
if not isinstance(batch[0][0], tuple):
return default_collate(batch)
else:
batch_num = len(batch)
ret = []
for item_idx in range(len(batch[0][0])):
if batch[0][0][item_idx] is None:
ret.append(None)
else:
ret.append(
default_collate(
[batch[i][0][item_idx] for i in range(batch_num)]))
ret.append(default_collate([batch[i][1] for i in range(batch_num)]))
return ret
def build_loader_simmim(config, logger):
transform = SimMIMTransform(config)
logger.info(f'Pre-train data transform:\n{transform}')
dataset = ImageFolder(config.DATA.DATA_PATH, transform)
logger.info(f'Build dataset: train images = {len(dataset)}')
sampler = DistributedSampler(dataset,
num_replicas=dist.get_world_size(),
rank=dist.get_rank(),
shuffle=True)
dataloader = DataLoader(dataset,
config.DATA.BATCH_SIZE,
sampler=sampler,
num_workers=config.DATA.NUM_WORKERS,
pin_memory=True,
drop_last=True,
collate_fn=collate_fn)
return dataloader
if __name__ == '__main__':
mask_generator = MaskGenerator()
mask = mask_generator()
print(mask.shape)
```
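As a quick sanity check of the masking logic above (a sketch, assuming MaskGenerator is importable from this module): with the default sizes the mask covers the 48x48 model-patch grid and masks roughly `mask_ratio` of the patch tokens.

```python
# assumed: MaskGenerator comes from the module above
gen = MaskGenerator(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6)
mask = gen()
print(mask.shape)   # (48, 48): one entry per 4x4 model patch
print(mask.mean())  # ~0.61: ceil(36 * 0.6) = 22 of the 36 mask patches are set
```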
#### File: data/transforms/gaussian_blur.py
```python
import numpy as np
from PIL import ImageFilter
class GaussianBlur(object):
"""Implementation of random Gaussian blur.
Utilizes the built-in ImageFilter method from PIL to apply a Gaussian
blur to the input image with a certain probability. The blur is further
randomized as the kernel size is chosen randomly around a mean specified
by the user.
Attributes:
kernel_size:
Mean kernel size for the Gaussian blur.
prob:
Probability with which the blur is applied.
scale:
Fraction of the kernel size which is used for upper and lower
limits of the randomized kernel size.
"""
def __init__(self,
kernel_size: float,
prob: float = 0.5,
scale: float = 0.2):
self.prob = prob
self.scale = scale
# limits for random kernel sizes
self.min_size = (1 - scale) * kernel_size
self.max_size = (1 + scale) * kernel_size
self.kernel_size = kernel_size
def __call__(self, sample):
"""Blurs the image with a given probability.
Args:
sample:
PIL image to which blur will be applied.
Returns:
Blurred image or original image.
"""
prob = np.random.random_sample()
if prob < self.prob:
# choose randomized kernel size
kernel_size = np.random.normal(self.kernel_size,
self.scale * self.kernel_size)
kernel_size = max(self.min_size, kernel_size)
kernel_size = min(self.max_size, kernel_size)
radius = int(kernel_size / 2)
return sample.filter(ImageFilter.GaussianBlur(radius=radius))
# return original image
return sample
```
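A small usage sketch for the transform above (the image and parameters are made up); in practice it would typically sit inside a torchvision `Compose` before `ToTensor`:

```python
from PIL import Image

# assumed: GaussianBlur comes from the module above
blur = GaussianBlur(kernel_size=5.0, prob=1.0, scale=0.2)  # prob=1.0: always apply the blur
img = Image.new('RGB', (224, 224), color=(128, 64, 32))
blurred = blur(img)
print(blurred.size)  # (224, 224) -- blurring preserves the spatial size
```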
#### File: data/transforms/jigsaw.py
```python
import numpy as np
import torch
from PIL import Image
from torchvision import transforms
class Jigsaw(object):
"""Implementation of Jigsaw image augmentation, inspired from PyContrast
library.
Generates n_grid**2 random crops and returns a list.
This augmentation is instrumental to PIRL.
Attributes:
n_grid:
Side length of the meshgrid, sqrt of the number of crops.
img_size:
Size of image.
crop_size:
Size of crops.
transform:
Transformation to apply on each crop.
Examples:
>>> from lightly.transforms import Jigsaw
>>>
>>> jigsaw_crop = Jigsaw(n_grid=3, img_size=255, crop_size=64, transform=transforms.ToTensor())
>>>
>>> # img is a PIL image
>>> crops = jigsaw_crops(img)
"""
def __init__(self,
n_grid=3,
img_size=255,
crop_size=64,
transform=transforms.ToTensor()):
self.n_grid = n_grid
self.img_size = img_size
self.crop_size = crop_size
self.grid_size = int(img_size / self.n_grid)
self.side = self.grid_size - self.crop_size
self.transform = transform
yy, xx = np.meshgrid(np.arange(n_grid), np.arange(n_grid))
self.yy = np.reshape(yy * self.grid_size, (n_grid * n_grid, ))
self.xx = np.reshape(xx * self.grid_size, (n_grid * n_grid, ))
def __call__(self, img):
"""Performs the Jigsaw augmentation
Args:
img:
PIL image to perform Jigsaw augmentation on.
Returns:
Torch tensor with stacked crops.
"""
r_x = np.random.randint(0, self.side + 1, self.n_grid * self.n_grid)
r_y = np.random.randint(0, self.side + 1, self.n_grid * self.n_grid)
img = np.asarray(img, np.uint8)
crops = []
for i in range(self.n_grid * self.n_grid):
crops.append(img[self.xx[i] + r_x[i]:self.xx[i] + r_x[i] +
self.crop_size, self.yy[i] + r_y[i]:self.yy[i] +
r_y[i] + self.crop_size, :])
crops = [Image.fromarray(crop) for crop in crops]
crops = torch.stack([self.transform(crop) for crop in crops])
return crops
```
#### File: self_supervised/models/byol.py
```python
from copy import deepcopy
from typing import Any, Union
import torch
from pl_bolts.callbacks.byol_updates import BYOLMAWeightUpdate
from pl_bolts.models.self_supervised.byol.models import SiameseArm
from pl_bolts.optimizers.lr_scheduler import LinearWarmupCosineAnnealingLR
from pytorch_lightning import LightningModule
from torch.nn import functional as F
from torch.optim import Adam
class BYOL(LightningModule):
"""PyTorch Lightning implementation of Bootstrap Your Own Latent (BYOL_)_
Paper authors: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, \
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, \
<NAME>, <NAME>, <NAME>, <NAME>.
Model implemented by:
- `<NAME> <https://github.com/annikabrundyn>`_
.. warning:: Work in progress. This implementation is still being verified.
TODOs:
- verify on CIFAR-10
- verify on STL-10
- pre-train on imagenet
Example::
model = BYOL(num_classes=10)
dm = CIFAR10DataModule(num_workers=0)
dm.train_transforms = SimCLRTrainDataTransform(32)
dm.val_transforms = SimCLREvalDataTransform(32)
trainer = pl.Trainer()
trainer.fit(model, datamodule=dm)
Train::
trainer = Trainer()
trainer.fit(model)
CLI command::
# cifar10
python byol_module.py --gpus 1
# imagenet
python byol_module.py
--gpus 8
--dataset imagenet2012
--data_dir /path/to/imagenet/
--meta_dir /path/to/folder/with/meta.bin/
--batch_size 32
.. _BYOL: https://arxiv.org/pdf/2006.07733.pdf
"""
def __init__(self,
num_classes,
learning_rate: float = 0.2,
weight_decay: float = 1.5e-6,
input_height: int = 32,
batch_size: int = 32,
num_workers: int = 0,
warmup_epochs: int = 10,
max_epochs: int = 1000,
base_encoder: Union[str, torch.nn.Module] = 'resnet50',
encoder_out_dim: int = 2048,
projector_hidden_size: int = 4096,
projector_out_dim: int = 256,
**kwargs):
"""
Args:
datamodule: The datamodule
learning_rate: the learning rate
weight_decay: optimizer weight decay
input_height: image input height
batch_size: the batch size
num_workers: number of workers
warmup_epochs: num of epochs for scheduler warm up
max_epochs: max epochs for scheduler
base_encoder: the base encoder module or resnet name
encoder_out_dim: output dimension of base_encoder
projector_hidden_size: hidden layer size of projector MLP
projector_out_dim: output size of projector MLP
"""
super().__init__()
self.save_hyperparameters(ignore='base_encoder')
self.online_network = SiameseArm(base_encoder, encoder_out_dim,
projector_hidden_size,
projector_out_dim)
self.target_network = deepcopy(self.online_network)
self.weight_callback = BYOLMAWeightUpdate()
def on_train_batch_end(self, outputs, batch: Any, batch_idx: int,
dataloader_idx: int) -> None:
# Add callback for user automatically since it's key to BYOL weight update
self.weight_callback.on_train_batch_end(self.trainer, self, outputs,
batch, batch_idx,
dataloader_idx)
def forward(self, x):
y, _, _ = self.online_network(x)
return y
def shared_step(self, batch, batch_idx):
imgs, y = batch
img_1, img_2 = imgs[:2]
# Image 1 to image 2 loss
y1, z1, h1 = self.online_network(img_1)
with torch.no_grad():
y2, z2, h2 = self.target_network(img_2)
loss_a = -2 * F.cosine_similarity(h1, z2).mean()
# Image 2 to image 1 loss
y1, z1, h1 = self.online_network(img_2)
with torch.no_grad():
y2, z2, h2 = self.target_network(img_1)
# L2 normalize
loss_b = -2 * F.cosine_similarity(h1, z2).mean()
# Final loss
total_loss = loss_a + loss_b
return loss_a, loss_b, total_loss
def training_step(self, batch, batch_idx):
loss_a, loss_b, total_loss = self.shared_step(batch, batch_idx)
# log results
self.log_dict({
'1_2_loss': loss_a,
'2_1_loss': loss_b,
'train_loss': total_loss
})
return total_loss
def validation_step(self, batch, batch_idx):
loss_a, loss_b, total_loss = self.shared_step(batch, batch_idx)
# log results
self.log_dict({
'1_2_loss': loss_a,
'2_1_loss': loss_b,
'val_loss': total_loss
})
return total_loss
def configure_optimizers(self):
optimizer = Adam(self.parameters(),
lr=self.hparams.learning_rate,
weight_decay=self.hparams.weight_decay)
scheduler = LinearWarmupCosineAnnealingLR(
optimizer,
warmup_epochs=self.hparams.warmup_epochs,
max_epochs=self.hparams.max_epochs)
return [optimizer], [scheduler]
```
#### File: models/layers/layers.py
```python
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from pl_bolts.utils.self_supervised import torchvision_ssl_encoder
class MLP(nn.Module):
def __init__(self, input_dim=2048, hidden_size=4096, output_dim=256):
super().__init__()
self.output_dim = output_dim
self.input_dim = input_dim
self.model = nn.Sequential(
nn.Linear(input_dim, hidden_size, bias=False),
nn.BatchNorm1d(hidden_size),
nn.ReLU(inplace=True),
nn.Linear(hidden_size, output_dim, bias=True),
)
def forward(self, x):
x = self.model(x)
return x
class SiameseArm(nn.Module):
def __init__(self,
encoder='resnet50',
encoder_out_dim=2048,
projector_hidden_size=4096,
projector_out_dim=256):
super().__init__()
if isinstance(encoder, str):
encoder = torchvision_ssl_encoder(encoder)
# Encoder
self.encoder = encoder
# Projector
self.projector = MLP(encoder_out_dim, projector_hidden_size,
projector_out_dim)
# Predictor
self.predictor = MLP(projector_out_dim, projector_hidden_size,
projector_out_dim)
def forward(self, x):
y = self.encoder(x)[0]
z = self.projector(y)
h = self.predictor(z)
return y, z, h
class ProjectorHead(nn.Module):
def __init__(self, input_dim=2048, hidden_size=4096, output_dim=256):
super().__init__()
self.out_channels = 256
self.projection = MLP(input_dim, hidden_size, output_dim)
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
def forward(self, x):
x_pooled = self.avg_pool(x)
h = x_pooled.view(x_pooled.shape[0],
x_pooled.shape[1]) # removing the last dimension
return self.projection(h)
class SwaVPrototypes(nn.Module):
"""Prototypes used for SwaV.
Each output feature is assigned to a prototype, SwaV solves the swapped
predicition problem where the features of one augmentation are used to
predict the assigned prototypes of the other augmentation.
Examples:
>>> # use features with 128 dimensions and 512 prototypes
>>> prototypes = SwaVPrototypes(128, 512)
>>>
>>> # pass batch through backbone and projection head.
>>> features = model(x)
>>> features = nn.functional.normalize(features, dim=1, p=2)
>>>
>>> # logits has shape bsz x 512
>>> logits = prototypes(features)
"""
def __init__(self, input_dim: int, n_prototypes: int):
super().__init__()
self.layers = nn.Linear(input_dim, n_prototypes, bias=False)
    def forward(self, x):
out = self.layers(x)
return out
class BYOL(nn.Module):
def __init__(self, backbone: nn.Module, target_momentum=0.996):
super().__init__()
self.online_network = backbone
self.target_network = copy.deepcopy(backbone)
        # Projection Head
        self.online_projector = ProjectorHead()
        # target projector mirrors the online projector; needed by the EMA update methods below
        self.target_projector = copy.deepcopy(self.online_projector)
# Predictor Head
self.predictor = MLP(self.online_projector.out_channels, 4096, 256)
self.m = target_momentum
def initialize_target_network(self):
for param_q, param_k in zip(self.online_network.parameters(),
self.target_network.parameters()):
param_k.data.copy_(param_q.data)
param_k.requires_grad = False
for param_q, param_k in zip(self.online_projector.parameters(),
self.target_projector.parameters()):
param_k.data.copy_(param_q.data)
param_k.requires_grad = False
@torch.no_grad()
def update_target_network(self):
for param_q, param_k in zip(self.online_network.parameters(),
self.target_network.parameters()):
param_k.data = self.m * param_k.data + (1 - self.m) * param_q.data
for param_q, param_k in zip(self.online_projector.parameters(),
self.target_projector.parameters()):
param_k.data = self.m * param_k.data + (1 - self.m) * param_q.data
@staticmethod
def regression_loss(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
x_norm = F.normalize(x, dim=1) # L2-normalize
y_norm = F.normalize(y, dim=1) # L2-normalize
loss = 2 - 2 * (x_norm * y_norm).sum(dim=-1) # dot product
return loss.mean()
```
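A shape-check sketch for the building blocks above (sizes are arbitrary); `regression_loss` is a staticmethod, so it can be exercised without building the full BYOL module:

```python
import torch

# assumed: MLP and BYOL come from the layers module above
mlp = MLP(input_dim=2048, hidden_size=4096, output_dim=256)
x = torch.randn(8, 2048)
print(mlp(x).shape)  # torch.Size([8, 256])

a, b = torch.randn(8, 256), torch.randn(8, 256)
print(BYOL.regression_loss(a, b))  # scalar in [0, 4]; 0 when a and b point in the same direction
```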
#### File: self_supervised/models/simsiam.py
```python
import torch
import torch.nn as nn
from pl_bolts.models.self_supervised.resnets import resnet18, resnet50
from pl_bolts.models.self_supervised.simsiam.models import SiameseArm
from pl_bolts.optimizers.lars import LARS
from pl_bolts.optimizers.lr_scheduler import linear_warmup_decay
from pytorch_lightning import LightningModule
class SimSiam(LightningModule):
"""PyTorch Lightning implementation of Exploring Simple Siamese
Representation Learning (SimSiam_)
Paper authors: <NAME>, <NAME>.
Model implemented by:
- `<NAME> <https://github.com/zlapp>`_
.. warning:: Work in progress. This implementation is still being verified.
TODOs:
- verify on CIFAR-10
- verify on STL-10
- pre-train on imagenet
Example::
model = SimSiam()
dm = CIFAR10DataModule(num_workers=0)
dm.train_transforms = SimCLRTrainDataTransform(32)
dm.val_transforms = SimCLREvalDataTransform(32)
trainer = Trainer()
trainer.fit(model, datamodule=dm)
Train::
trainer = Trainer()
trainer.fit(model)
CLI command::
# cifar10
python simsiam_module.py --gpus 1
# imagenet
python simsiam_module.py
--gpus 8
--dataset imagenet2012
--data_dir /path/to/imagenet/
--meta_dir /path/to/folder/with/meta.bin/
--batch_size 32
.. _SimSiam: https://arxiv.org/pdf/2011.10566v1.pdf
"""
def __init__(self,
gpus: int,
num_samples: int,
batch_size: int,
dataset: str,
num_nodes: int = 1,
arch: str = 'resnet50',
hidden_mlp: int = 2048,
feat_dim: int = 128,
warmup_epochs: int = 10,
max_epochs: int = 100,
temperature: float = 0.1,
first_conv: bool = True,
maxpool1: bool = True,
optimizer: str = 'adam',
exclude_bn_bias: bool = False,
start_lr: float = 0.0,
learning_rate: float = 1e-3,
final_lr: float = 0.0,
weight_decay: float = 1e-6,
**kwargs):
"""
Args:
datamodule: The datamodule
learning_rate: the learning rate
weight_decay: optimizer weight decay
input_height: image input height
batch_size: the batch size
num_workers: number of workers
warmup_epochs: num of epochs for scheduler warm up
max_epochs: max epochs for scheduler
"""
super().__init__()
self.save_hyperparameters()
self.gpus = gpus
self.num_nodes = num_nodes
self.arch = arch
self.dataset = dataset
self.num_samples = num_samples
self.batch_size = batch_size
self.hidden_mlp = hidden_mlp
self.feat_dim = feat_dim
self.first_conv = first_conv
self.maxpool1 = maxpool1
self.optim = optimizer
self.exclude_bn_bias = exclude_bn_bias
self.weight_decay = weight_decay
self.temperature = temperature
self.start_lr = start_lr
self.final_lr = final_lr
self.learning_rate = learning_rate
self.warmup_epochs = warmup_epochs
self.max_epochs = max_epochs
self.init_model()
# compute iters per epoch
nb_gpus = len(self.gpus) if isinstance(gpus,
(list, tuple)) else self.gpus
assert isinstance(nb_gpus, int)
global_batch_size = self.num_nodes * nb_gpus * self.batch_size if nb_gpus > 0 else self.batch_size
self.train_iters_per_epoch = self.num_samples // global_batch_size
self.cosine_similarity_ = nn.CosineSimilarity(dim=1).cuda(self.gpus)
def init_model(self):
if self.arch == 'resnet18':
backbone = resnet18
elif self.arch == 'resnet50':
backbone = resnet50
encoder = backbone(first_conv=self.first_conv,
maxpool1=self.maxpool1,
return_all_feature_maps=False)
self.online_network = SiameseArm(encoder,
input_dim=self.hidden_mlp,
hidden_size=self.hidden_mlp,
output_dim=self.feat_dim)
def forward(self, x):
y, _, _ = self.online_network(x)
return y
def shard_step_(self, batch, batch_idx):
(img_1, img_2, _), y = batch
# Image 1 to image 2 loss
_, z1, h1 = self.online_network(img_1)
_, z2, h2 = self.online_network(img_2)
loss = -(self.cosine_similarity_(h1, z2).mean() +
self.cosine_similarity_(h2, z1).mean()) * 0.5
return loss
def shard_step(self, batch, batch_idx):
(img_1, img_2, _), y = batch
# Image 1 to image 2 loss
_, z1, h1 = self.online_network(img_1)
_, z2, h2 = self.online_network(img_2)
        # negative cosine similarity, as in SimSiam; use the attribute defined in __init__
        loss = -(self.cosine_similarity_(h1, z2).mean() +
                 self.cosine_similarity_(h2, z1).mean()) * 0.5
return loss
def training_step(self, batch, batch_idx):
loss = self.shard_step_(batch, batch_idx)
# log results
self.log_dict({'train_loss': loss})
return loss
def validation_step(self, batch, batch_idx):
loss = self.shard_step_(batch, batch_idx)
# log results
self.log_dict({'val_loss': loss})
return loss
def exclude_from_wt_decay(self,
named_params,
weight_decay,
skip_list=['bias', 'bn']):
params = []
excluded_params = []
for name, param in named_params:
if not param.requires_grad:
continue
elif any(layer_name in name for layer_name in skip_list):
excluded_params.append(param)
else:
params.append(param)
return [
{
'params': params,
'weight_decay': weight_decay
},
{
'params': excluded_params,
'weight_decay': 0.0
},
]
def configure_optimizers(self):
if self.exclude_bn_bias:
params = self.exclude_from_wt_decay(self.named_parameters(),
weight_decay=self.weight_decay)
else:
params = self.parameters()
if self.optim == 'lars':
optimizer = LARS(
params,
lr=self.learning_rate,
momentum=0.9,
weight_decay=self.weight_decay,
trust_coefficient=0.001,
)
elif self.optim == 'adam':
optimizer = torch.optim.Adam(params,
lr=self.learning_rate,
weight_decay=self.weight_decay)
warmup_steps = self.train_iters_per_epoch * self.warmup_epochs
total_steps = self.train_iters_per_epoch * self.max_epochs
scheduler = {
'scheduler':
torch.optim.lr_scheduler.LambdaLR(
optimizer,
linear_warmup_decay(warmup_steps, total_steps, cosine=True),
),
'interval':
'step',
'frequency':
1,
}
return [optimizer], [scheduler]
``` |
{
"source": "jianzhnie/TsFormer",
"score": 2
} |
#### File: tsformer/datasets/raw_mill.py
```python
import logging
import os
import numpy as np
import pandas as pd
from uci_single_households import impute_missing, hourly_aggregate
logger = logging.getLogger('log')
SAMPLES_PER_DAY = 96
FREQ = 'min'
TARGET = 'target1'
DATETIME = 'date'
colnames_map = {
'21_SERVER_SERVER::C410211/C410211MFSG.U(A喂料量给定)': 'target',
'datetime': 'date'
}
colnames_map = {
'datetime': 'date',
'21_SERVER_SERVER::C41041P/C41041P03F.OUT(A磨磨机压差)': 'feature1',
'21_SERVER_SERVER::C410413/C410413MIIF.OUT(A磨磨机电流)': 'feature2',
'21_SERVER_SERVER::C411411/C411411MIIF.OUT(A出磨斗提电流)': 'feature3',
'21_SERVER_SERVER::C410211/C410211MFSG.U(A喂料量给定)': 'target1',
'21_SERVER_SERVER::C410211/C410211MFIF.OUT(A喂料量反馈)': 'target2',
'21_SERVER_SERVER::C41041T/C41041T02F.OUT(A磨出磨温度)': 'feature4',
'21_SERVER_SERVER::C5408to12/C5410ZS01G.SP(A磨热风阀给定)': 'feature5',
'21_SERVER_SERVER::C5408to12/C5410ZI01F.OUT(A磨热风阀反馈)': 'feature6',
'21_SERVER_SERVER::C5408to12/C5412ZS02G3.SP(A磨冷风阀给定)': 'feature7',
'21_SERVER_SERVER::C5408to12/C5412ZI02F3.OUT(A磨冷风阀反馈)': 'feature8',
'21_SERVER_SERVER::C4104AIAO/C4104AO3.U(A研磨压力给定)': 'feature9',
'21_SERVER_SERVER::C4104AIAO/C4104AI6.OUT(A研磨压力反馈)': 'feature10',
'21_SERVER_SERVER::C4104AIAO/C4104AI1.OUT(A主减垂直振动)': 'feature11',
'21_SERVER_SERVER::C4104AIAO/C4104AI2.OUT(A主减水平振动)': 'feature12',
'21_SERVER_SERVER::C4104/C4104M11VEVB.OUT(A磨主减输入垂直振动)': 'feature13',
'21_SERVER_SERVER::C4104/C4104M11LEVB.OUT(A磨主减输入水平振动)': 'feature14',
'21_SERVER_SERVER::C4104AIAO/C4104AO1.U(A磨选粉机频率给定)': 'feature15',
'21_SERVER_SERVER::C4104AIAO/C4104AI7.OUT(A磨选粉机频率反馈)': 'feature16',
'21_SERVER_SERVER::C4107aZ/C4107aZS01G.SP(A磨循环风机风门开度给定)': 'feature17',
'21_SERVER_SERVER::C4107aZ/C4107aZI01F.OUT(A磨循环风机风门开度反馈)': 'feature18',
'21_SERVER_SERVER::C41041T/C41041T01F.OUT(A磨入口温度)': 'feature19',
'21_SERVER_SERVER::C41041P/C41041P01F.OUT(A磨入口压力)': 'feature20',
'21_SERVER_SERVER::C41041P/C41041P02F.OUT(A磨出口压力)': 'feature21'
}
def process_csv(config):
"""Parse the datetime field, Sort the values accordingly and save the new
dataframe to disk."""
df = pd.read_csv(os.path.join(config['data']), sep=',', encoding="gb18030")
colnames = list(colnames_map.keys())
df = df[colnames]
df.rename(columns=colnames_map, inplace=True)
df[DATETIME] = pd.to_datetime(df[DATETIME], utc=False)
df = hourly_aggregate(df, freq=FREQ, datetime_col=DATETIME)
def parse(x):
try:
return np.float64(x)
except ValueError:
return np.nan
df = df[df[TARGET] > 0]
df[TARGET] = df[TARGET].apply(lambda x: parse(x))
df = impute_missing(
df,
method=config['fill_nan'],
values_col=TARGET,
datetime_col=DATETIME)
return df
if __name__ == '__main__':
data_dir = 'data/raw_milla/data1.csv'
config = {'data': data_dir, 'fill_nan': 'median'}
df = process_csv(config)
df.to_csv('data/raw_milla/data1_process.csv', index=False)
```
#### File: TsFormer/tsformer/exp_main.py
```python
import os
import time
import warnings
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch import optim
from torch.optim.lr_scheduler import CosineAnnealingLR
from tsformer.datasets.data_factory import data_provider
from tsformer.exp_basic import Exp_Basic
from tsformer.models.rnn_model import CNN, GRU, LSTM, RNN, AttentionalLSTM
from tsformer.models.transformer import Transformer
from tsformer.utils.metrics import metric
from tsformer.utils.tools import EarlyStopping, visual
warnings.filterwarnings('ignore')
class Exp_Main(Exp_Basic):
def __init__(self, args):
super(Exp_Main, self).__init__(args)
def _build_model(self):
input_size = self.args.input_size
hidden_size = self.args.hidden_size
num_layers = self.args.num_layers
output_size = self.args.output_size
if self.args.model == 'cnn':
model = CNN(input_size, hidden_size, output_size)
if self.args.model == 'rnn':
model = RNN(input_size, hidden_size, num_layers, output_size)
elif self.args.model == 'lstm':
model = LSTM(input_size, hidden_size, num_layers, output_size)
elif self.args.model == 'gru':
model = GRU(input_size, hidden_size, num_layers, output_size)
elif self.args.model == 'attlstm':
model = AttentionalLSTM(
input_size=input_size,
qkv=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
output_size=output_size)
elif self.args.model == 'transformer':
model = Transformer(
input_features=input_size,
input_seq_len=96,
hidden_dim=768,
output_seq_len=output_size,
dim_feedforward=512,
num_head=12,
num_layers=2,
dropout=0.1,
)
if self.args.use_multi_gpu and self.args.use_gpu:
model = nn.DataParallel(model, device_ids=self.args.device_ids)
return model
def _get_lr_scheduler(self, epochs):
optimizer = self._select_optimizer()
scheduler = CosineAnnealingLR(
optimizer, T_max=epochs, eta_min=0, last_epoch=-1)
return scheduler
def _get_data(self, flag):
data_set, data_loader = data_provider(self.args, flag)
return data_set, data_loader
def _select_optimizer(self):
model_optim = optim.Adam(
self.model.parameters(), lr=self.args.learning_rate)
return model_optim
def _select_criterion(self):
# criterion = nn.MSELoss()
criterion = nn.SmoothL1Loss()
return criterion
def vali(self, vali_data, vali_loader, criterion):
total_loss = []
self.model.eval()
with torch.no_grad():
for i, (batch_x, batch_y) in enumerate(vali_loader):
batch_x = batch_x.float().to(self.device)
batch_y = batch_y.float()
if self.args.use_amp:
with torch.cuda.amp.autocast():
outputs = self.model(batch_x)
else:
outputs = self.model(batch_x)
outputs = outputs.unsqueeze(-1)
f_dim = -1 if self.args.features == 'MS' else 0
batch_y = batch_y[:, -self.args.pred_len:,
f_dim:].to(self.device)
pred = outputs.detach().cpu()
true = batch_y.detach().cpu()
loss = criterion(pred, true)
total_loss.append(loss)
total_loss = np.average(total_loss)
self.model.train()
return total_loss
def train(self, setting):
train_data, train_loader = self._get_data(flag='train')
vali_data, vali_loader = self._get_data(flag='val')
test_data, test_loader = self._get_data(flag='test')
path = os.path.join(self.args.results_dir, 'checkpoints', setting)
if not os.path.exists(path):
os.makedirs(path)
time_now = time.time()
train_steps = len(train_loader)
early_stopping = EarlyStopping(
patience=self.args.patience, verbose=True)
model_optim = self._select_optimizer()
criterion = self._select_criterion()
lr_scheduler = self._get_lr_scheduler(self.args.train_epochs)
if self.args.use_amp:
scaler = torch.cuda.amp.GradScaler()
for epoch in range(self.args.train_epochs):
iter_count = 0
train_loss = []
self.model.train()
epoch_time = time.time()
for i, (batch_x, batch_y) in enumerate(train_loader):
iter_count += 1
model_optim.zero_grad()
batch_x = batch_x.float().to(self.device)
batch_y = batch_y.float().to(self.device)
# encoder - decoder
if self.args.use_amp:
with torch.cuda.amp.autocast():
outputs = self.model(batch_x)
else:
outputs = self.model(batch_x)
outputs = outputs.unsqueeze(-1)
f_dim = -1 if self.args.features == 'MS' else 0
batch_y = batch_y[:, -self.args.pred_len:,
f_dim:].to(self.device)
loss = criterion(outputs, batch_y)
train_loss.append(loss.item())
if (i + 1) % 100 == 0:
print('\titers: {0}, epoch: {1} | loss: {2:.7f}'.format(
i + 1, epoch + 1, loss.item()))
speed = (time.time() - time_now) / iter_count
left_time = speed * (
(self.args.train_epochs - epoch) * train_steps - i)
print('\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(
speed, left_time))
iter_count = 0
time_now = time.time()
if self.args.use_amp:
scaler.scale(loss).backward()
scaler.step(model_optim)
scaler.update()
else:
loss.backward()
model_optim.step()
print('Epoch: {} cost time: {}'.format(epoch + 1,
time.time() - epoch_time))
train_loss = np.average(train_loss)
vali_loss = self.vali(vali_data, vali_loader, criterion)
test_loss = self.vali(test_data, test_loader, criterion)
lr = lr_scheduler.get_last_lr()
print(
'Epoch: {0}, Lr:{1} | Steps: {2} | Train Loss: {3:.7f} Vali Loss: {4:.7f} Test Loss: {5:.7f}'
.format(epoch + 1, lr, train_steps, train_loss, vali_loss,
test_loss))
early_stopping(vali_loss, self.model, path)
if early_stopping.early_stop:
print('Early stopping')
break
# adjust_learning_rate(model_optim, epoch + 1, self.args)
lr_scheduler.step()
best_model_path = os.path.join(path, 'checkpoint.pth')
self.model.load_state_dict(torch.load(best_model_path))
return self.model
def test(self, setting, test=0):
test_data, test_loader = self._get_data(flag='test')
if test:
print('loading model')
self.model.load_state_dict(
torch.load(
os.path.join(self.args.results_dir, 'checkpoints', setting,
'checkpoint.pth')))
preds = []
trues = []
folder_path = os.path.join(self.args.results_dir, 'test_results',
setting)
if not os.path.exists(folder_path):
os.makedirs(folder_path)
self.model.eval()
with torch.no_grad():
for i, (batch_x, batch_y) in enumerate(test_loader):
batch_x = batch_x.float().to(self.device)
batch_y = batch_y.float().to(self.device)
# encoder - decoder
if self.args.use_amp:
with torch.cuda.amp.autocast():
outputs = self.model(batch_x)
else:
outputs = self.model(batch_x)
outputs = outputs.unsqueeze(-1)
f_dim = -1 if self.args.features == 'MS' else 0
batch_y = batch_y[:, -self.args.pred_len:,
f_dim:].to(self.device)
outputs = outputs.detach().cpu().numpy()
batch_y = batch_y.detach().cpu().numpy()
pred = outputs # outputs.detach().cpu().numpy() # .squeeze()
true = batch_y # batch_y.detach().cpu().numpy() # .squeeze()
preds.append(pred)
trues.append(true)
if i % 20 == 0:
input = batch_x.detach().cpu().numpy()
gt = np.concatenate((input[0, :, -1], true[0, :, -1]),
axis=0)
                    # renamed from `pd` to avoid shadowing the pandas alias
                    pd_plot = np.concatenate((input[0, :, -1], pred[0, :, -1]),
                                             axis=0)
                    visual(gt, pd_plot, os.path.join(folder_path, str(i) + '.png'))
# result save
preds = np.array(preds)
trues = np.array(trues)
print('test shape:', preds.shape, trues.shape)
preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])
trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1])
print('test shape:', preds.shape, trues.shape)
folder_path = os.path.join(self.args.results_dir, 'results', setting)
if not os.path.exists(folder_path):
os.makedirs(folder_path)
mae, mse, rmse, mape, mspe = metric(preds, trues)
print('mse:{}, mae:{},rmse:{}, mape:{}, mspe:{}'.format(
mse, mae, rmse, mape, mspe))
test_result_file = os.path.join(folder_path, 'result.txt')
with open(test_result_file, 'w+') as f:
f.write(setting + ' \n')
f.write('mse:{}, mae:{}, rmse:{}, mape:{}, mspe{}'.format(
mse, mae, rmse, mape, mspe))
f.write('\n')
f.write('\n')
f.close()
def predict(self, setting, load=False):
pred_data, pred_loader = self._get_data(flag='pred')
if load:
path = os.path.join(self.args.results_dir, 'checkpoints', setting)
best_model_path = os.path.join(path, 'checkpoint.pth')
self.model.load_state_dict(torch.load(best_model_path))
preds = []
self.model.eval()
with torch.no_grad():
for i, (batch_x, batch_y) in enumerate(pred_loader):
batch_x = batch_x.float().to(self.device)
batch_y = batch_y.float()
if self.args.use_amp:
with torch.cuda.amp.autocast():
outputs = self.model(batch_x)
else:
outputs = self.model(batch_x)
pred = outputs.detach().cpu().numpy() # .squeeze()
preds.append(pred)
preds = np.array(preds)
preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])
df = pd.DataFrame(preds)
# result save
        folder_path = os.path.join(self.args.results_dir, setting)
if not os.path.exists(folder_path):
os.makedirs(folder_path)
        df.to_csv(os.path.join(folder_path, 'real_prediction.csv'), index=False)
return
```
#### File: tsformer/models/custom_informer.py
```python
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from .layers.attn import AttentionLayer, FullAttention, ProbAttention
from .layers.embed import DataEmbedding
class Informer(nn.Module):
def __init__(self,
enc_in: int,
dec_in: int,
c_out: int,
seq_len: int,
label_len: int,
pred_len: int,
factor: int = 5,
d_model: int = 512,
n_heads: int = 8,
e_layers: int = 3,
d_layers: int = 2,
d_ffn: int = 512,
dropout=0.0,
embed='fixed',
freq='h',
activation='gelu'):
super(Informer, self).__init__()
self.seq_len = seq_len
self.pred_len = pred_len
self.label_len = label_len
self.c_out = c_out
# Encoding
self.enc_embedding = DataEmbedding(enc_in, d_model, embed, freq,
dropout)
self.dec_embedding = DataEmbedding(dec_in, d_model, embed, freq,
dropout)
# Attention
enc_prob_attn = ProbAttention(False, factor, attention_dropout=dropout)
# Encoder
conv_layer = ConvLayer(d_model)
encoder_norm = nn.LayerNorm(d_model)
enc_attn_layer = AttentionLayer(enc_prob_attn, d_model, n_heads)
encoder_layer = EncoderLayer(enc_attn_layer, d_model, d_ffn, dropout,
activation)
self.encoder = Encoder(encoder_layer, conv_layer, e_layers,
encoder_norm)
# Decoder
dec_prob_attn = ProbAttention(True, factor, attention_dropout=dropout)
dec_full_attn = FullAttention(False, factor, attention_dropout=dropout)
dec_attn_layer1 = AttentionLayer(dec_prob_attn, d_model, n_heads)
dec_attn_layer2 = AttentionLayer(dec_full_attn, d_model, n_heads)
decoder_layer = DecoderLayer(
self_attn_layer=dec_attn_layer1,
cross_attn_layer=dec_attn_layer2,
d_model=d_model,
d_ffn=d_ffn,
dropout=dropout,
activation=activation)
decoder_norm = nn.LayerNorm(d_model)
self.decoder = Decoder(
decoder_layer, num_layers=d_layers, norm_layer=decoder_norm)
self.projection = nn.Linear(d_model, c_out, bias=True)
def forward(self,
x_enc: torch.Tensor,
x_mark_enc: torch.Tensor,
x_dec: torch.Tensor,
x_mark_dec: torch.Tensor,
enc_self_mask=None,
dec_self_mask=None,
dec_enc_mask=None):
"""
:param x_enc: The core tensor going into the model. Of dimension (batch_size, seq_len, enc_in)
:type x_enc: torch.Tensor
:param x_mark_enc: A tensor with the relevant datetime information. (batch_size, seq_len, n_datetime_feats)
:type x_mark_enc: torch.Tensor
:param x_dec: The datetime tensor information. Has dimension batch_size, seq_len, enc_in
:type x_dec: torch.Tensor
:param x_mark_dec: A tensor with the relevant datetime information. (batch_size, seq_len, n_datetime_feats)
:type x_mark_dec: torch.Tensor
:param enc_self_mask: The mask of the encoder model has size (), defaults to None
:type enc_self_mask: [type], optional
:param dec_self_mask: [description], defaults to None
:type dec_self_mask: [type], optional
:param dec_enc_mask: torch.Tensor, defaults to None
:type dec_enc_mask: torch.Tensor, optional
:return: Returns a PyTorch tensor of shape (batch_size, out_len, n_targets)
:rtype: torch.Tensor
"""
enc_out = self.enc_embedding(x_enc, x_mark_enc)
enc_out = self.encoder(enc_out, attn_mask=enc_self_mask)
dec_out = self.dec_embedding(x_dec, x_mark_dec)
dec_out = self.decoder(
dec_out, enc_out, tgt_mask=dec_self_mask, memory_mask=dec_enc_mask)
dec_out = self.projection(dec_out)
return dec_out[:, -self.pred_len:, :] # [B, L, D]
class ConvLayer(nn.Module):
def __init__(self, c_in):
super(ConvLayer, self).__init__()
self.downConv = nn.Conv1d(
in_channels=c_in,
out_channels=c_in,
kernel_size=3,
padding=2,
padding_mode='circular')
self.norm = nn.BatchNorm1d(c_in)
self.activation = nn.ELU()
self.maxPool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)
def forward(self, x):
x = self.downConv(x.permute(0, 2, 1))
x = self.norm(x)
x = self.activation(x)
x = self.maxPool(x)
x = x.transpose(1, 2)
return x
class EncoderLayer(nn.Module):
def __init__(self,
attention_layer,
d_model,
d_ffn=None,
dropout=0.1,
activation='relu'):
"""[summary]
:param attention: [description]
:type attention: [type]
:param d_model: [description]
:type d_model: [type]
:param d_ff: [description], defaults to None
:type d_ff: [type], optional
:param dropout: [description], defaults to 0.1
:type dropout: float, optional
:param activation: [description], defaults to "relu"
:type activation: str, optional
"""
super(EncoderLayer, self).__init__()
d_ffn = d_ffn or 4 * d_model
self.attention = attention_layer
self.conv1 = nn.Conv1d(
in_channels=d_model, out_channels=d_ffn, kernel_size=1)
self.conv2 = nn.Conv1d(
in_channels=d_ffn, out_channels=d_model, kernel_size=1)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout = nn.Dropout(dropout)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = F.relu if activation == 'relu' else F.gelu
def forward(self, src, attn_mask=None):
# x [B, L, D]
src2 = self.attention(src, src, src, attn_mask=attn_mask)[0]
src = src + self.dropout(src2)
src = self.norm1(src)
src2 = self.dropout1(self.activation(self.conv1(src.transpose(1, 2))))
src2 = self.dropout2(self.conv2(src2).transpose(1, 2))
src = self.norm2(src + src2)
return src
class Encoder(nn.Module):
def __init__(
self,
encoder_layer,
conv_layer=None,
num_layers=2,
norm_layer=None,
):
super(Encoder, self).__init__()
self.attn_layers = _get_clones(encoder_layer, num_layers)
self.conv_layers = _get_clones(conv_layer, num_layers -
1) if conv_layer is not None else None
self.norm = norm_layer
def forward(self, src, attn_mask=None) -> torch.Tensor:
# x [B, L, D]
if self.conv_layers is not None:
for attn_layer, conv_layer in zip(self.attn_layers,
self.conv_layers):
src = attn_layer(src, attn_mask=attn_mask)
src = conv_layer(src)
src = self.attn_layers[-1](src, attn_mask=attn_mask)
else:
for attn_layer in self.attn_layers:
src = attn_layer(src, attn_mask=attn_mask)
if self.norm is not None:
src = self.norm(src)
return src
class DecoderLayer(nn.Module):
def __init__(self,
self_attn_layer,
cross_attn_layer,
d_model,
d_ffn=None,
dropout=0.1,
activation='relu'):
super(DecoderLayer, self).__init__()
d_ffn = d_ffn or 4 * d_model
self.self_attention = self_attn_layer
self.cross_attention = cross_attn_layer
self.conv1 = nn.Conv1d(
in_channels=d_model, out_channels=d_ffn, kernel_size=1)
self.conv2 = nn.Conv1d(
in_channels=d_ffn, out_channels=d_model, kernel_size=1)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.dropout4 = nn.Dropout(dropout)
self.activation = F.relu if activation == 'relu' else F.gelu
def forward(self,
tgt,
memory,
tgt_mask=None,
memory_mask=None) -> torch.Tensor:
tgt2 = self.self_attention(tgt, tgt, tgt, attn_mask=tgt_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2 = self.cross_attention(
tgt, memory, memory, attn_mask=memory_mask)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.dropout3(self.activation(self.conv1(tgt.transpose(1, 2))))
tgt2 = self.dropout4(self.conv2(tgt2).transpose(1, 2))
tgt = self.norm3(tgt + tgt2)
return tgt
class Decoder(nn.Module):
def __init__(self, decoder_layer, num_layers, norm_layer=None):
super(Decoder, self).__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm_layer
def forward(self,
tgt,
memory,
tgt_mask=None,
memory_mask=None) -> torch.Tensor:
for layer in self.layers:
output = layer(
tgt, memory, tgt_mask=tgt_mask, memory_mask=memory_mask)
if self.norm is not None:
output = self.norm(output)
return output
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
``` |
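A quick shape check for the distilling `ConvLayer` used between encoder layers (a sketch, assuming the class above is importable); the max-pool roughly halves the sequence length:

```python
import torch

# assumed: ConvLayer comes from the module above
layer = ConvLayer(c_in=512)
x = torch.randn(2, 96, 512)  # (batch, seq_len, d_model)
y = layer(x)
print(y.shape)               # torch.Size([2, 49, 512])
```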
{
"source": "jianzhoufeng/dolphin-doc",
"score": 4
} |
#### File: dolphin_doc_lib/base/rect.py
```python
"Base classes for geometrics related operations"
from typing import Generic, TypeVar, Dict
T = TypeVar("T", int, float)
class Rect(Generic[T]):
"""Class to represent a rectangle.
X-axis direction: from left to right.
Y-axis direction: from top to bottom.
T should be int or float.
When T is int: right and bottom are inside the Rect.
When T is float: right and bottom are on the edge.
"""
_x: T
_y: T
_w: T
_h: T
def __init__(self, x: T, y: T, w: T, h: T):
if w <= 0:
raise ValueError("|w| should be positive, got {}".format(w))
if h <= 0:
raise ValueError("|h| should be positive, got {}".format(h))
self._x = x
self._y = y
self._w = w
self._h = h
def set_position(self, x: T, y: T):
self._x = x
self._y = y
def left(self) -> T:
"Return left."
return self._x
def right(self) -> T:
"Return right."
if isinstance(self._x, int):
return self._x + self._w - 1
return self._x + self._w
def top(self) -> T:
"Return top."
return self._y
def bottom(self) -> T:
"Return bottom."
if isinstance(self._x, int):
return self._y + self._h - 1
return self._y + self._h
def width(self) -> T:
"Return width."
return self._w
def height(self) -> T:
"Return height."
return self._h
def area(self) -> T:
"Return the area."
return self._w * self._h
def contains(self, other: "Rect[T]") -> bool:
"Return whether a rect is inside the current rect."
return self.left() <= other.left() \
and self.right() >= other.right() \
and self.top() <= other.top() \
and self.bottom() >= other.bottom()
def contains_point(self, x: T, y: T) -> bool:
"Return whether a point is inside the current rect."
return self.left() <= x <= self.right() \
and self.top() <= y <= self.bottom()
def to_dict(self) -> Dict[str, T]:
return {
"left": self.left(),
"top": self.top(),
"width": self.width(),
"height": self.height()
}
```
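A short sketch of the int vs. float semantics spelled out in the class docstring (values are illustrative):

```python
from dolphin_doc_lib.base.rect import Rect

r_int = Rect(0, 0, 10, 5)            # int rect: right/bottom are the last cells inside
assert (r_int.right(), r_int.bottom()) == (9, 4)
assert r_int.contains_point(9, 4)

r_float = Rect(0.0, 0.0, 10.0, 5.0)  # float rect: right/bottom lie on the edge
assert (r_float.right(), r_float.bottom()) == (10.0, 5.0)
```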
#### File: dolphin_doc_lib/base/text_test.py
```python
"Unit test for text"
from dolphin_doc_lib.base.text import TextSegment, TextParagraph
def test_merge_segments():
par = TextParagraph() \
.append_text_segment(TextSegment("Hello ")) \
.append_text_segment(TextSegment("World!"))
assert len(par.segments()) == 1
assert par.segments()[0].text() == "Hello World!"
def test_merge_segments_with_link():
    # This is a link: <a href="http://www.example.com">example</a>.
par = TextParagraph() \
.append_text_segment(TextSegment("This is a link: ")) \
.append_text_segment(TextSegment("example", "http://www.example.com")) \
.append_text_segment(TextSegment("."))
assert len(par.segments()) == 3
assert par.segments()[0].text() == "This is a link: "
assert par.segments()[1].text() == "example"
assert par.segments()[1].link() == "http://www.example.com"
assert par.segments()[2].text() == "."
```
#### File: dolphin-doc/dolphin_doc_lib/process_test.py
```python
"Unit test for process"
from typing import cast
from dolphin_doc_lib.base.text import TextParagraph, TextSegment
from dolphin_doc_lib.process import process, Content, ContentSource
from dolphin_doc_lib.base.doc import Doc
def test_plain_text():
text = "paragraph 1\nparagraph 2\n\n \n \nparagraph 3\n"
doc = process(Content(data=text))
par1 = TextParagraph().append_text_segment(TextSegment("paragraph 1"))
par2 = TextParagraph().append_text_segment(TextSegment("paragraph 2"))
par3 = TextParagraph().append_text_segment(TextSegment("paragraph 3"))
expect_doc = Doc().append_blocks([par1, par2, par3])
assert doc.to_dict() == expect_doc.to_dict()
def test_plain_text_from_file():
doc = process(
Content(source=ContentSource.FILE,
path="dolphin_doc_lib/testdata/plain_text.txt"))
par1 = TextParagraph().append_text_segment(TextSegment("paragraph 1"))
par2 = TextParagraph().append_text_segment(TextSegment("paragraph 2"))
par3 = TextParagraph().append_text_segment(TextSegment("paragraph 3"))
par4 = TextParagraph().append_text_segment(TextSegment("paragraph 4"))
expect_doc = Doc().append_blocks([par1, par2, par3, par4])
assert doc.to_dict() == expect_doc.to_dict()
``` |
{
"source": "JianzhouZhan/Awesome-RecSystem-Models",
"score": 3
} |
#### File: Awesome-RecSystem-Models/Model/FM_PyTorch.py
```python
import pickle
import torch
import torch.nn as nn
from util.train_model_util_PyTorch import train_test_model_demo
AID_DATA_DIR = '../data/Criteo/forOtherModels/'  # path to auxiliary data files
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
"""
PyTorch implementation of Factorization Machine[1]
Reference:
[1] Factorization Machines,
<NAME>, Osaka
https://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf
[2] Tensorflow implementation of FM
https://github.com/babakx/fm_tensorflow/blob/master/fm_tensorflow.ipynb
"""
class FM_layer(nn.Module):
def __init__(self, num_feat, num_field, reg_l1=0.01, reg_l2=0.01, embedding_size=16):
        super().__init__()  # in Python 2, use super(FM_layer, self).__init__()
        self.reg_l1 = reg_l1
        self.reg_l2 = reg_l2  # the L1/L2 regularization terms are stored but not used
self.num_feat = num_feat # denote as M
self.num_field = num_field # denote as F
self.embedding_size = embedding_size # denote as K
# first order term parameters embedding
self.first_weights = nn.Embedding(num_feat, 1) # None * M * 1
nn.init.xavier_uniform_(self.first_weights.weight)
self.bias = nn.Parameter(torch.randn(1))
        # an embedding table is needed for the second-order term
self.feat_embeddings = nn.Embedding(num_feat, embedding_size) # None * M * K
nn.init.xavier_uniform_(self.feat_embeddings.weight)
def forward(self, feat_index, feat_value):
        # Step 1: compute the first-order (linear) part
feat_value = torch.unsqueeze(feat_value, dim=2) # None * F * 1
first_weights = self.first_weights(feat_index) # None * F * 1
first_weight_value = torch.mul(first_weights, feat_value) # None * F * 1
first_weight_value = torch.squeeze(first_weight_value, dim=2) # None * F
y_first_order = torch.sum(first_weight_value, dim=1) # None
        # Step 2: compute the second-order (pairwise interaction) part
        secd_feat_emb = self.feat_embeddings(feat_index)  # None * F * K
        feat_emd_value = torch.mul(secd_feat_emb, feat_value)  # None * F * K (broadcast)
# sum_square part
summed_feat_emb = torch.sum(feat_emd_value, 1) # None * K
interaction_part1 = torch.pow(summed_feat_emb, 2) # None * K
# squared_sum part
        squared_feat_emd_value = torch.pow(feat_emd_value, 2)  # None * F * K
interaction_part2 = torch.sum(squared_feat_emd_value, dim=1) # None * K
y_secd_order = 0.5 * torch.sub(interaction_part1, interaction_part2)
y_secd_order = torch.sum(y_secd_order, dim=1)
output = self.bias + y_first_order + y_secd_order
output = torch.unsqueeze(output, dim=1)
return output
if __name__ == '__main__':
train_data_path, test_data_path = AID_DATA_DIR + 'train_data/', AID_DATA_DIR + 'test_data/'
feat_dict_ = pickle.load(open(AID_DATA_DIR + 'aid_data/feat_dict_10.pkl2', 'rb'))
fm = FM_layer(num_feat=len(feat_dict_) + 1, num_field=39, reg_l2=1e-5, embedding_size=10).to(DEVICE)
train_test_model_demo(fm, DEVICE, train_data_path, test_data_path, feat_dict_)
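    # A minimal, hypothetical shape check of FM_layer.forward (kept as comments;
    # not part of the original Criteo training flow driven above). The made-up
    # num_feat/batch values only illustrate the None * F index/value convention.
    # fm_demo = FM_layer(num_feat=100, num_field=39, embedding_size=10).to(DEVICE)
    # demo_index = torch.randint(0, 100, (4, 39)).to(DEVICE)  # None * F feature ids
    # demo_value = torch.ones(4, 39).to(DEVICE)               # None * F feature values
    # print(fm_demo(demo_index, demo_value).shape)            # torch.Size([4, 1])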
``` |
{
"source": "jianzi123/contrail-vrouter",
"score": 3
} |
#### File: utils/pylib/packet.py
```python
import constants
from scapy.all import *
class PacketBase(object):
    def __init__(self):
        pass
class Packet(PacketBase):
"""Base class for packet"""
    def __init__(self):
        pass
class EtherPacket(Packet):
"""
EtherPacket class for creating ethernet packet
Mandatory Parameters:
--------------------
smac : str
Source mac address
dmac : str:
Destination mac address
ether_type : int
Ethernet type
"""
def __init__(self, smac, dmac, ether_type):
self.eth = None
if smac and dmac:
self.eth = Ether(src=smac, dst=dmac, type=ether_type)
def get_packet(self):
return self.eth
class ArpPacket(EtherPacket):
"""
ArpPacket class for creating arp packet
Mandatory Parameters:
--------------------
    None (If nothing is provided, it will create an arp packet without \
        Ethernet header)
Optional Parameters:
-------------------
src : str
Source mac address
dst : str:
Destination mac address
op : int
Arp operation code
hwtype : int
hardware type
hwlen : int
hardware length
"""
def __init__(
self,
src=None,
dst=None,
op=1,
hwtype=0x1,
hwlen=7,
**kwargs):
super(ArpPacket, self).__init__(src, dst, 0x0806, **kwargs)
self.arp = ARP(op=op, hwtype=hwtype,
hwlen=hwlen)
def get_packet(self):
if self.eth:
return self.eth / self.arp
else:
return self.arp
class VlanPacket(EtherPacket):
"""
VlanPacket class for creating vlan packet
Mandatory Parameters:
--------------------
None (If nothing provided then it will create vlan packet without \
Ethernet header)
Optional Parameters:
-------------------
src : str
Source mac address
dst : str:
Destination mac address
    vlan : int
        Vlan id
"""
def __init__(
self,
src=None,
dst=None,
vlan=1,
**kwargs):
super(VlanPacket, self).__init__(src, dst, ether_type=0x8100, **kwargs)
self.vlan = Dot1Q(vlan=vlan)
def get_packet(self):
if self.eth:
return self.eth / self.vlan
else:
return self.vlan
class IpPacket(EtherPacket):
"""
IpPacket class for creating IPv4 packet
Mandatory Parameters:
--------------------
proto : str
IP protocol
sip : str
Source IP address
dip : str:
Destination IP address
Optional Parameters:
-------------------
smac : str
Source mac address
dmac : str
Destination mac address
ihl : int
Internet header length
id : int
Identification field
ttl : int
Time to live
"""
def __init__(self, proto, sip, dip, smac=None, dmac=None,
ihl=5, id=1, ttl=64, **kwargs):
super(IpPacket, self).__init__(smac, dmac, 0x800, **kwargs)
self.ip = IP(version=4, ihl=ihl, id=id,
ttl=ttl, proto=proto, dst=dip, src=sip)
def get_packet(self):
if self.eth and self.ip:
return self.eth / self.ip
else:
return self.ip
class IpVlanPacket(VlanPacket):
"""
IpPacket class for creating IPv4 packet
Mandatory Parameters:
--------------------
proto : str
IP protocol
sip : str
Source IP address
dip : str:
Destination IP address
Optional Parameters:
-------------------
smac : str
Source mac address
dmac : str
Destination mac address
ihl : int
Internet header length
id : int
Identification field
ttl : int
Time to live
"""
def __init__(self, proto, sip, dip, smac=None, dmac=None,
ihl=5, id=1, ttl=64, **kwargs):
super(IpVlanPacket, self).__init__(smac, dmac, 0x800, **kwargs)
self.ip = IP(version=4, ihl=ihl, id=id,
ttl=ttl, proto=proto, dst=dip, src=sip)
def get_packet(self):
if self.eth:
return self.eth / self.vlan / self.ip
else:
return self.vlan / self.ip
class Ipv6Packet(EtherPacket):
"""
Ipv6Packet class for creating IPv6 packet
Mandatory Parameters:
--------------------
sipv6 : str
Source IP address
dipv6 : str:
Destination IP address
Optional Parameters:
-------------------
smac : str
Source mac address
dmac : str
Destination mac address
nh : int
Next header
"""
def __init__(self, sipv6, dipv6, smac=None, dmac=None, nh=0, **kwargs):
super(Ipv6Packet, self).__init__(smac, dmac, 0x86dd, **kwargs)
self.ipv6 = IPv6(src=sipv6, dst=dipv6, nh=nh, version=6,
tc=0, fl=0, plen=None, hlim=64)
def get_packet(self):
if self.eth and self.ipv6:
return self.eth / self.ipv6
else:
return self.ipv6
class IcmpPacket(IpPacket):
"""
IcmpPacket class for creating ICMP packet
Mandatory Parameters:
--------------------
sip : str
Source IP address
dip : str
Destination IP address
Optional Parameters:
-------------------
smac : str
Source mac address
dmac : str
Destination mac address
icmp_type : int
Icmp type
id : int
Identifier
"""
def __init__(
self,
sip,
dip,
smac=None,
dmac=None,
icmp_type=constants.ECHO_REQUEST,
id=1,
**kwargs):
super(IcmpPacket, self).__init__(
'icmp',
sip,
dip,
smac,
dmac,
**kwargs)
self.icmp = ICMP(type=icmp_type, code=0, id=id)
def get_packet(self):
if self.eth:
return self.eth / self.ip / self.icmp
else:
return self.ip / self.icmp
class UdpPacket(IpPacket):
"""
UdpPacket class for creating udp packet
Mandatory Parameters:
--------------------
sip : str
Source IP address
dip : str:
Destination IP address
sport : int
Source port
dport : int
Destination port
Optional Parameters:
-------------------
smac : str
Source mac address
dmac : str
Destination mac address
jumbo : bool
Jumbo packet size
"""
def __init__(self, sip, dip, sport, dport, smac=None,
dmac=None, jumbo=False, **kwargs):
super(UdpPacket, self).__init__('udp', sip, dip, smac, dmac, **kwargs)
self.udp = UDP(sport=sport, dport=dport)
self.jumbo = jumbo
def get_packet(self):
if self.jumbo and self.eth:
payload = "x" * 9000
pkt = self.eth / self.ip / self.udp / payload
elif self.eth:
pkt = self.eth / self.ip / self.udp
else:
pkt = self.ip / self.udp
return pkt
class UdpVlanPacket(IpVlanPacket):
"""
UdpPacket class for creating udp packet
Mandatory Parameters:
--------------------
sip : str
Source IP address
dip : str:
Destination IP address
sport : int
Source port
dport : int
Destination port
Optional Parameters:
-------------------
smac : str
Source mac address
dmac : str
Destination mac address
"""
def __init__(self, sip, dip, sport, dport, smac=None, dmac=None, **kwargs):
super(UdpVlanPacket, self).__init__(
'udp', sip, dip, smac, dmac, **kwargs)
self.udp = UDP(sport=sport, dport=dport)
def get_packet(self):
if self.eth:
pkt = self.eth / self.vlan / self.ip / self.udp
else:
pkt = self.vlan / self.ip / self.udp
return pkt
class DnsPacket(UdpPacket):
"""
DnsPacket class for creating dns packet
Mandatory Parameters:
--------------------
sip : str
Source IP address
dip : str:
Destination IP address
sport : int
Source port
dport : int
Destination port
smac : str
Source mac address
dmac : str
Destination mac address
"""
def __init__(self, sip, dip, sport, dport, smac, dmac, **kwargs):
super(DnsPacket, self).__init__(
sip,
dip,
sport,
dport,
smac,
dmac,
**kwargs)
self.dns = DNS()
def get_packet(self):
pkt = self.eth / self.ip / self.udp / self.dns
return pkt
class GrePacket(IpPacket):
"""
GrePacket class for creating gre packet
Mandatory Parameters:
--------------------
sip : str
Source IP address
dip : str:
Destination IP address
Optional Parameters:
-------------------
smac : str
Source mac address
dmac : str
Destination mac address
gre_proto : int
Gre protocol
gre_version : int
Gre version
gre_flags : int
Gre flags
"""
def __init__(self, sip, dip, smac=None, dmac=None,
gre_proto=0x8847, gre_version=0, gre_flags=0, **kwargs):
super(GrePacket, self).__init__('gre', sip, dip, smac, dmac, **kwargs)
self.gre = GRE(proto=gre_proto, version=gre_version, flags=gre_flags)
def get_packet(self):
pkt = self.eth / self.ip / self.gre
return pkt
class GreVlanPacket(IpVlanPacket):
"""
GrePacket class for creating gre packet
Mandatory Parameters:
--------------------
sip : str
Source IP address
dip : str:
Destination IP address
Optional Parameters:
-------------------
smac : str
Source mac address
dmac : str
Destination mac address
gre_proto : int
Gre protocol
gre_version : int
Gre version
gre_flags : int
Gre flags
"""
def __init__(self, sip, dip, smac=None, dmac=None,
gre_proto=0x8847, gre_version=0, gre_flags=0, **kwargs):
super(GreVlanPacket, self).__init__(
'gre', sip, dip, smac, dmac, **kwargs)
self.gre = GRE(proto=gre_proto, version=gre_version, flags=gre_flags)
def get_packet(self):
if self.eth:
pkt = self.eth / self.vlan / self.ip / self.gre
else:
pkt = self.vlan / self.ip / self.gre
return pkt
class MplsPacket(Packet):
"""
MplsPacket class for creating mpls packet
Mandatory Parameters:
--------------------
label : int
Mpls label
Optional Parameters:
-------------------
mpls_ttl : int
mpls ttl value
"""
def __init__(self, label, mpls_ttl=64):
load_contrib("mpls")
self.mpls = MPLS(label=label, ttl=mpls_ttl)
def get_packet(self):
return self.mpls
class MplsoUdpPacket(UdpPacket):
"""
MplsoUdpPacket class for creating mpls over udp packet
Mandatory Parameters:
--------------------
label : int
Mpls label
sip : str
Source IP address
dip : str
Destination IP address
smac : str
Source MAC address
dmac : str
Destination MAC address
sport :
Source port address
dport:
Destination port address
Optional Parameters:
-------------------
inner_pkt : any other packet type
Inner packet
mpls_ttl : int
mpls ttl value
"""
def __init__(self, label, sip, dip, smac, dmac, sport, dport,
inner_pkt=None, mpls_ttl=64, **kwargs):
super(MplsoUdpPacket, self).__init__(
sip,
dip,
sport,
dport,
smac,
dmac,
**kwargs)
load_contrib("mpls")
self.mpls = MPLS(label=label, ttl=mpls_ttl)
self.inner_pkt = inner_pkt
def get_packet(self):
if self.inner_pkt:
pkt = self.eth / self.ip / self.udp / self.mpls / self.inner_pkt
else:
pkt = self.eth / self.ip / self.udp / self.mpls
return pkt
class MplsoUdpoVlanPacket(UdpVlanPacket):
"""
MplsoUdpPacket class for creating mpls over udp packet
Mandatory Parameters:
--------------------
label : int
Mpls label
sip : str
Source IP address
dip : str
Destination IP address
smac : str
Source MAC address
dmac : str
Destination MAC address
sport :
Source port address
dport:
Destination port address
Optional Parameters:
-------------------
inner_pkt : any other packet type
Inner packet
mpls_ttl : int
mpls ttl value
"""
def __init__(self, label, sip, dip, smac, dmac, sport, dport,
inner_pkt=None, mpls_ttl=64, **kwargs):
super(MplsoUdpoVlanPacket, self).__init__(
sip,
dip,
sport,
dport,
smac,
dmac,
**kwargs)
load_contrib("mpls")
self.mpls = MPLS(label=label, ttl=mpls_ttl)
self.inner_pkt = inner_pkt
def get_packet(self):
if self.inner_pkt:
pkt = self.eth / self.vlan / self.ip / self.udp / self.mpls \
/ self.inner_pkt
else:
pkt = self.eth / self.vlan / self.ip / self.udp / self.mpls
return pkt
class MplsoGrePacket(GrePacket):
"""
MplsoGrePacket class for creating mpls over gre packet
Mandatory Parameters:
--------------------
label : int
Mpls label
sip : str
Source IP address
dip : str
Destination IP address
smac : str
Source MAC address
dmac : str
Destination MAC address
Optional Parameters:
-------------------
inner_pkt : any other packet type
Inner packet
mpls_ttl : int
mpls ttl value
"""
def __init__(
self,
label,
sip,
dip,
smac,
dmac,
inner_pkt=None,
mpls_ttl=64,
**kwargs):
super(MplsoGrePacket, self).__init__(
sip=sip,
            dip=dip,
smac=smac,
dmac=dmac,
**kwargs)
load_contrib("mpls")
self.mpls = MPLS(label=label, ttl=mpls_ttl)
self.inner_pkt = inner_pkt
def get_packet(self):
if self.inner_pkt:
pkt = self.eth / self.ip / self.gre / self.mpls / self.inner_pkt
else:
pkt = self.eth / self.ip / self.gre / self.mpls
return pkt
class MplsoGreoVlanPacket(GreVlanPacket):
"""
MplsoGrePacket class for creating mpls over gre packet
Mandatory Parameters:
--------------------
label : int
Mpls label
sip : str
Source IP address
dip : str
Destination IP address
smac : str
Source MAC address
dmac : str
Destination MAC address
Optional Parameters:
-------------------
inner_pkt : any other packet type
Inner packet
mpls_ttl : int
mpls ttl value
"""
def __init__(
self,
label,
sip,
dip,
smac,
dmac,
inner_pkt=None,
mpls_ttl=64,
**kwargs):
super(MplsoGreoVlanPacket, self).__init__(
sip=sip,
            dip=dip,
smac=smac,
dmac=dmac,
**kwargs)
load_contrib("mpls")
self.mpls = MPLS(label=label, ttl=mpls_ttl)
self.inner_pkt = inner_pkt
def get_packet(self):
if self.inner_pkt:
pkt = self.eth / self.vlan / self.ip / self.gre / self.mpls \
/ self.inner_pkt
else:
pkt = self.eth / self.vlan / self.ip / self.gre / self.mpls
return pkt
class VxlanPacket(UdpPacket):
"""
VxlanPacket class for creating mpls over Vxlan packet
Mandatory Parameters:
--------------------
vnid : int
Vxlan network identifier
sip : str
Source IP address
dip : str
Destination IP address
smac : str
Source MAC address
dmac : str
Destination MAC address
sport : int
Source port address
dport: int
Destination port address
Optional Parameters:
-------------------
inner_pkt : any other packet type
Inner packet
flags : int
VXLAN flags
reserved1 : int
VXLAN reserved1
nxt_protocol :int
VXLAN nxt_protocol
"""
def __init__(
self,
vnid,
sip,
dip,
smac,
dmac,
sport,
dport,
flags=0x08,
reserved1=0x00,
nxt_protocol=0,
inner_pkt=None,
**kwargs):
super(
VxlanPacket,
self).__init__(
sip,
dip,
sport,
dport,
smac,
dmac,
**kwargs)
self.vxlan = VXLAN(
vni=vnid,
flags=flags,
reserved1=reserved1,
NextProtocol=nxt_protocol,
**kwargs)
self.inner_pkt = inner_pkt
    def get_packet(self):
        if self.inner_pkt:
            pkt = self.eth / self.ip / self.udp / self.vxlan / self.inner_pkt
        else:
            pkt = self.eth / self.ip / self.udp / self.vxlan
        return pkt
class Udpv6Packet(Ipv6Packet):
"""
Udpv6Packet class for creating Udp packet with Ipv6 packet
Mandatory Parameters:
--------------------
sport :
Source port address
dport:
Destination port address
Optional Parameters:
-------------------
sipv6 : str
Source IP address
dipv6 : str:
Destination IP address
smac : str
Source mac address
dmac : str
Destination mac address
nh : int
Next header
"""
def __init__(
self,
sport,
dport,
sipv6=None,
dipv6=None,
smac=None,
dmac=None,
nh=0,
**kwargs):
super(
Udpv6Packet,
self).__init__(
sipv6,
dipv6,
smac,
dmac,
nh,
**kwargs)
self.udp = UDP(sport=sport, dport=dport)
def get_packet(self):
if self.eth:
return self.eth / self.ipv6 / self.udp
else:
return self.ipv6 / self.udp
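# Usage sketch (hypothetical addresses, ports and labels, not taken from the test
# suite) showing how the classes above compose scapy layers; kept as comments so
# importing this module stays side-effect free.
# inner = IcmpPacket(sip='1.1.1.1', dip='2.2.2.2',
#                    smac='00:01:02:03:04:05', dmac='00:0a:0b:0c:0d:0e').get_packet()
# tunnel = MplsoUdpPacket(label=42, sip='10.0.0.1', dip='10.0.0.2',
#                         smac='00:11:22:33:44:55', dmac='00:66:77:88:99:aa',
#                         sport=6635, dport=6635, inner_pkt=inner)
# pkt = tunnel.get_packet()  # Ether / IP / UDP / MPLS / inner ICMP packet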
```
#### File: utils/pylib/vif.py
```python
import os
import sys
import constants
from object_base import *
from vr_py_sandesh.vr_py.ttypes import *
from scapy.all import *
class Vif(ObjectBase, vr_interface_req):
"""Base class to create virtual interfaces"""
# Index to allocate in case of auto index allocation
_auto_alloc_idx = 0
def __init__(
self,
idx,
name,
ipv4_str,
mac_str,
ipv6_str,
h_op=constants.SANDESH_OPER_ADD,
**kwargs):
super(Vif, self).__init__()
vr_interface_req.__init__(self)
if ObjectBase.auto_vif_idx_alloc:
Vif._auto_alloc_idx += 1
self.vifr_idx = Vif._auto_alloc_idx
else:
self.vifr_idx = idx
self.h_op = h_op
self.vifr_name = name
if ipv4_str:
self.vifr_ip = self.vt_ipv4(ipv4_str)
if mac_str:
self.vifr_mac = self.vt_mac(mac_str)
self.vifr_transport = constants.VIF_TRANSPORT_PMD
if ipv6_str is not None:
self.vifr_ip6_u, self.vifr_ip6_l = self.vt_ipv6(ipv6_str)
self.sreq_class = vr_interface_req.__name__
def __repr__(self):
"""Display basic details of the Vif"""
return "Vif(idx:{}, name:{})".format(self.vifr_idx, self.vifr_name)
def __str__(self):
"""Display basic details of the Vif"""
return "Vif(idx:{}, name:{})".format(self.vifr_idx, self.vifr_name)
def send_packet(self, tx_pkt_list):
"""Sends a packet in the vif"""
req_file = self.create_pcap_req(tx_pkt_list, self.vifr_name,
None, None)
# run the vtest cmd
self.run_vtest_command(True, req_file)
def send_and_receive_packet(self, tx_pkt_list, receive_vif, rx_pkt_list):
"""Sends a packet and receive the reply packet"""
req_file = self.create_pcap_req(tx_pkt_list, self.vifr_name,
rx_pkt_list, receive_vif.vifr_name)
output_pcap = self.get_output_pcap_file(req_file)
scapy_cap = None
if output_pcap:
scapy_cap = scapy.all.rdpcap(output_pcap)
if scapy_cap:
return scapy_cap[0]
else:
return None
def idx(self):
"""Returns vif index"""
return self.vifr_idx
def get(self, key):
"""
Queries vrouter and return the key value from the response xml file
"""
self.h_op = constants.SANDESH_OPER_GET
self.vifr_flags = 0
return super(Vif, self).get(key)
def get_vif_name(self):
"""
Queries vrouter and returns vifr_name value from the response xml file
"""
return self.get('vifr_name').strip('\n')
def get_vif_idx(self):
"""
Queries vrouter and returns vifr_idx value from the response xml file
"""
return int(self.get('vifr_idx'))
def get_vif_ip(self):
"""
Queries vrouter and returns vifr_ip value from the response xml file
"""
return int(self.get('vifr_ip'))
def get_vif_ipackets(self):
"""
Queries vrouter and returns vif_ipackets value from the response xml \
file
"""
return int(self.get('vifr_ipackets'))
def get_vif_opackets(self):
"""
Queries vrouter and returns vif_opackets value from the response xml \
file
"""
return int(self.get('vifr_opackets'))
def get_vif_nh_id(self):
"""
        Queries vrouter and returns vifr_nh_id value from the response xml \
file
"""
return int(self.get('vifr_nh_id'))
class VirtualVif(Vif):
"""
VirtualVif class to create virtual vif
Mandatory Parameters:
--------------------
name : str
Interface name
ipv4_str : str
IPv4 address
mac_str: str
MAC address
idx(if auto_alloc is not set) : int
Interface index
Optional Parameters:
-------------------
ipv6_str : str
IPv6 address
nh_idx : str
Nexthop index
vrf : int
Vrf id
mcast_vrf : int
Multicast vrf id
mtu : int
MTU size
flags : int
Vif flags
h_op : int
Sandesh operation
"""
def __init__(self,
name,
ipv4_str,
mac_str,
idx=0,
ipv6_str=None,
nh_idx=None,
vrf=0,
mcast_vrf=None,
mtu=1514,
flags=(constants.VIF_FLAG_POLICY_ENABLED |
constants.VIF_FLAG_DHCP_ENABLED),
h_op=constants.SANDESH_OPER_ADD,
**kwargs):
super(VirtualVif, self).__init__(idx, name, ipv4_str, mac_str,
ipv6_str, h_op, **kwargs)
self.vifr_type = constants.VIF_TYPE_VIRTUAL
self.vifr_nh_id = nh_idx
self.vifr_transport = constants.VIF_TRANSPORT_PMD
self.vifr_vrf = vrf
self.vifr_mcast_vrf = mcast_vrf
self.vifr_mtu = mtu
self.vifr_flags = flags
class AgentVif(Vif):
"""
AgentVif Class to create agent vif
Mandatory Parameters:
--------------------
idx(if auto_alloc is not set) : int
Interface index
Optional Parameters:
-------------------
vrf : int
Vrf id
mcast_vrf : int
Multicast vrf id
mtu : int
MTU size
flags : int
Vif flags
"""
def __init__(self, idx=0, vrf=65535, mcast_vrf=65535, mtu=1514,
flags=constants.VIF_FLAG_L3_ENABLED, **kwargs):
name = 'unix'
super(AgentVif, self).__init__(idx, name, None, None, None,
**kwargs)
self.vifr_name = name
self.vifr_type = constants.VIF_TYPE_AGENT
self.vifr_transport = constants.VIF_TRANSPORT_SOCKET
self.vifr_vrf = vrf
self.vifr_mcast_vrf = mcast_vrf
self.vifr_mtu = mtu
self.vifr_mac = self.vt_mac("00:00:5e:00:01:00")
self.vifr_flags = flags
class VhostVif(Vif):
"""
VhostVif class to create vhost vif
Mandatory Parameters:
--------------------
ipv4_str : str
IPv4 address
mac_str: str
MAC address
idx(if auto_alloc is not set) : int
Interface index
Optional Parameters:
-------------------
ipv6_str : str
IPv6 address
nh_idx : str
Nexthop index
vrf : int
Vrf id
mcast_vrf : int
Multicast vrf id
mtu : int
MTU size
flags : int
Vif flags
"""
def __init__(
self,
ipv4_str,
mac_str,
ipv6_str=None,
idx=0,
nh_idx=None,
vrf=0,
mcast_vrf=65535,
mtu=1514,
flags=(constants.VIF_FLAG_L3_ENABLED |
constants.VIF_FLAG_DHCP_ENABLED),
**kwargs):
name = 'vhost0'
super(VhostVif, self).__init__(idx, name, ipv4_str, mac_str, ipv6_str,
**kwargs)
self.vifr_type = constants.VIF_TYPE_HOST
self.vifr_nh_id = nh_idx
self.vifr_transport = constants.VIF_TRANSPORT_PMD
self.vifr_vrf = vrf
self.vifr_mcast_vrf = mcast_vrf
self.vifr_mtu = mtu
self.vifr_flags = flags
class FabricVif(Vif):
"""
FabricVif class to create fabric vif
Mandatory Parameters:
--------------------
name : str
Interface name
mac_str: str
MAC address
idx(if auto_alloc is not set) : int
Interface index
Optional Parameters:
-------------------
ipv4_str : str
IPv4 address
ipv6_str : str
IPv6 address
vrf : int
Vrf id
mcast_vrf : int
Multicast vrf id
mtu : int
MTU size
flags : int
Vif flags
"""
def __init__(
self,
name,
mac_str,
ipv4_str=None,
ipv6_str=None,
idx=0,
vrf=0,
mcast_vrf=65535,
mtu=1514,
flags=constants.VIF_FLAG_VHOST_PHYS,
**kwargs):
super(FabricVif, self).__init__(idx, name, ipv4_str, mac_str, ipv6_str,
**kwargs)
self.vifr_type = constants.VIF_TYPE_PHYSICAL
self.vifr_flags = flags
self.vifr_transport = constants.VIF_TRANSPORT_PMD
self.vifr_vrf = vrf
self.vifr_mcast_vrf = mcast_vrf
self.vifr_mtu = mtu
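# Usage sketch (hypothetical interface name, addresses and nexthop id, not taken
# from the test suite); kept as comments because vif objects are normally driven
# through the vtest framework.
# vif = VirtualVif(name='tap_demo', ipv4_str='1.1.1.4',
#                  mac_str='00:00:5e:00:01:00', idx=5, nh_idx=38)
# vif.send_packet([tx_pkt])        # tx_pkt: any scapy packet built via packet.py
# print(vif.get_vif_opackets())    # query vrouter for the output packet counter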
``` |
{
"source": "jianzuoyi/rna-seq-pipeline",
"score": 2
} |
#### File: rna-seq-pipeline/src/mad_qc.py
```python
__author__ = '<NAME>'
__version__ = '0.1.0'
__license__ = 'MIT'
import argparse
import subprocess
import shlex
import os
import json
MADQC_CMD = 'Rscript {path_to_madR} {quants_1} {quants_2}'
def remove_quantfile_extensions(quant_fn):
first_extension_start_index = quant_fn.find('.')
if first_extension_start_index == -1:
return quant_fn
else:
return quant_fn[:first_extension_start_index]
def main(args):
run_cmd = MADQC_CMD.format(
path_to_madR=args.MAD_R_path,
quants_1=args.quants1,
quants_2=args.quants2)
quant_basename1 = remove_quantfile_extensions(
os.path.basename(args.quants1))
quant_basename2 = remove_quantfile_extensions(
os.path.basename(args.quants2))
plot_output_filename = '{basename_1}-{basename_2}_mad_plot.png'.format(
basename_1=quant_basename1, basename_2=quant_basename2)
# capture the output string from the run
mad_output = subprocess.check_output(shlex.split(run_cmd))
os.rename('MAplot.png', plot_output_filename)
qc_metrics = dict()
qc_metrics['MAD.R'] = json.loads(mad_output.decode())
qc_output_fn = '{basename_1}-{basename_2}_mad_qc_metrics.json'.format(
basename_1=quant_basename1, basename_2=quant_basename2)
with open(qc_output_fn, 'w') as f:
json.dump(qc_metrics, f)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--quants1', type=str, help='first quantification file from RSEM')
parser.add_argument(
'--quants2', type=str, help='second quantification file from RSEM')
parser.add_argument('--MAD_R_path', type=str, help='path to MAD.R')
args = parser.parse_args()
main(args)
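    # Example invocation (illustrative file paths only; argument names match the
    # argparse definitions above):
    #   python mad_qc.py --quants1 rep1.genes.results --quants2 rep2.genes.results \
    #       --MAD_R_path /path/to/MAD.R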
```
#### File: rna-seq-pipeline/src/rsem_quant.py
```python
__author__ = '<NAME>'
__version__ = '0.1.0'
__license__ = 'MIT'
import argparse
from align import make_modified_TarInfo
import subprocess
import shlex
import tarfile
import re
import os
RSEM_COMMAND = '''rsem-calculate-expression --bam \
--estimate-rspd \
--calc-ci \
--seed {rnd_seed} \
-p {ncpus} \
--no-bam-output \
--ci-memory {ramGB}000 \
--forward-prob {fwd_prob} \
{paired_end} \
{anno_bam} \
rsem_index/rsem \
{bam_root}_rsem'''
def strand_to_fwd_prob(strand):
if strand == 'forward':
return 1
if strand == 'reverse':
return 0
if strand == 'unstranded':
return 0.5
raise ValueError('Strand must be forward, reverse or unstranded')
def format_endedness(endedness):
if endedness == 'paired':
return '--paired-end'
else:
return ''
def main(args):
    remove_bam_from_end_re = re.compile(r'\.bam$')
bam_root = remove_bam_from_end_re.sub('', os.path.basename(args.anno_bam))
with tarfile.open(args.rsem_index, 'r:gz') as archive:
archive.extractall(
'.', members=make_modified_TarInfo(archive, 'rsem_index'))
rsem_call = shlex.split(
RSEM_COMMAND.format(
rnd_seed=args.rnd_seed,
ncpus=args.ncpus,
ramGB=args.ramGB,
fwd_prob=strand_to_fwd_prob(args.read_strand),
paired_end=format_endedness(args.endedness),
anno_bam=args.anno_bam,
bam_root=bam_root))
subprocess.call(rsem_call)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--rsem_index', type=str, help='RSEM index gzipped tar')
parser.add_argument(
'--anno_bam', type=str, help='STAR alignment to annotation.')
parser.add_argument('--endedness', type=str, choices=['paired', 'single'])
parser.add_argument(
'--read_strand',
type=str,
choices=['forward', 'reverse', 'unstranded'])
parser.add_argument(
'--rnd_seed', type=int, help='random seed', default=12345)
parser.add_argument('--ncpus', type=int, help='number of cpus available')
parser.add_argument('--ramGB', type=int, help='memory available in GB')
args = parser.parse_args()
main(args)
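    # Example invocation (illustrative file paths only; argument names match the
    # argparse definitions above):
    #   python rsem_quant.py --rsem_index rsem_index.tgz --anno_bam sample_anno.bam \
    #       --endedness paired --read_strand unstranded --ncpus 8 --ramGB 32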
``` |
{
"source": "jiaoc1/aim",
"score": 2
} |
#### File: aim/backend/server.py
```python
import logging
import os
import time
from pathlib import Path
from typing import Any, Dict, Tuple
# Third-party modules
import motor
import tornado.ioloop
import tornado.log
import tornado.options
import tornado.web
import tornado.websocket
from dotenv import load_dotenv
from loguru import logger
from motor.motor_tornado import MotorClient, MotorDatabase
from tornado.log import LogFormatter
from tornado.options import define, options
# First-party modules
from aim.common import configmanager, utils
from aim.handlers import AIMWebSocketHandler
# ----------------------------------------------------------------------------
# Metadata
# ----------------------------------------------------------------------------
__author__ = "<NAME>"
__date__ = "2021-07-11"
__email__ = "<EMAIL>"
__version__ = "1.0"
# ----------------------------------------------------------------------------
# Definitions
# ----------------------------------------------------------------------------
define(
"environment", default="development", help="Runtime environment", type=str
)
define("name", default="aim-dev", help="Instance name", type=str)
define("port", default=8888, help="Port to listen on", type=int)
define(
"data_inputs_dir",
default=None,
help="Directory to store input files",
type=Path,
)
define(
"data_results_dir",
default=None,
help="Directory to store result files",
type=Path,
)
define("database_uri", default=None, help="Database URI", type=str)
# In addition, Tornado provides built-in support for the "logging" (level) option
# ----------------------------------------------------------------------------
# Take environment variables from .env
# ----------------------------------------------------------------------------
load_dotenv()
# ----------------------------------------------------------------------------
# Functions
# ----------------------------------------------------------------------------
def parse_environ_options() -> None:
port = os.environ.get("PORT")
data_inputs_dir = os.environ.get("DATA_INPUTS_DIR")
data_results_dir = os.environ.get("DATA_RESULTS_DIR")
if os.environ.get("ENVIRONMENT"):
options["environment"] = os.environ.get("ENVIRONMENT")
if os.environ.get("NAME"):
options["name"] = os.environ.get("NAME")
if port:
options["port"] = int(port)
if data_inputs_dir:
options["data_inputs_dir"] = Path(data_inputs_dir)
if data_results_dir:
options["data_results_dir"] = Path(data_results_dir)
DB_USER = os.environ.get("DB_USER")
DB_PASS = os.environ.get("DB_PASS")
DB_HOST = os.environ.get("DB_HOST")
DB_PORT = os.environ.get("DB_PORT")
DB_NAME = os.environ.get("DB_NAME")
if DB_USER and DB_PASS and DB_HOST and DB_PORT and DB_NAME:
options[
"database_uri"
] = f"mongodb://{DB_USER}:{DB_PASS}@{DB_HOST}:{DB_PORT}/{DB_NAME}?authSource=admin"
def make_app() -> Tuple[MotorDatabase, tornado.web.Application]:
client: MotorClient = motor.motor_tornado.MotorClient(options.database_uri)
db: MotorDatabase = client.get_database()
settings: Dict[str, Any] = {
"db": db,
"debug": True if options.environment == "development" else False,
"websocket_max_message_size": 5242880, # 5 MB
}
return (
db,
tornado.web.Application(
handlers=[
(r"/", AIMWebSocketHandler),
],
**settings,
),
)
def set_tornado_logging() -> None:
for handler in logging.getLogger().handlers:
formatter: LogFormatter = LogFormatter(
fmt="%(color)s%(asctime)s.%(msecs)03dZ | %(levelname)s | %(module)s:%(funcName)s:%(lineno)d | %(end_color)s%(message)s",
datefmt="%Y-%m-%dT%H:%M:%S",
color=True,
)
setattr(formatter, "converter", time.gmtime)
handler.setFormatter(formatter)
def main() -> None:
configmanager.options = configmanager.parser.parse_known_args()[
0
] # Get known options, i.e., Namespace from the tuple
# Parse options
tornado.options.parse_command_line()
# Configure logger
configmanager.database_sink = lambda msg: db["errors"].insert_one(
{"error": msg}
)
utils.configure_logger()
# Tornado root formatter settings
set_tornado_logging()
# Use environment variables to override options
parse_environ_options()
# Make application
db, app = make_app()
app.listen(options.port)
logger.info(
"Server '{}' in {} environment is listening on http://localhost:{}".format(
options.name, options.environment, options.port
)
)
# Start application
tornado.ioloop.IOLoop.current().start()
# ----------------------------------------------------------------------------
# Application
# ----------------------------------------------------------------------------
if __name__ == "__main__":
main()
```
#### File: tests/metrics/test_m6.py
```python
import pathlib
from typing import Any, List, Optional, Union
# Third-party modules
import pytest
# First-party modules
from aim.common import image_utils
from aim.metrics.m6.m6_contour_congestion import Metric
from tests.common.constants import DATA_TESTS_DIR
# ----------------------------------------------------------------------------
# Metadata
# ----------------------------------------------------------------------------
__author__ = "<NAME>"
__date__ = "2021-03-19"
__email__ = "<EMAIL>"
__version__ = "1.0"
# ----------------------------------------------------------------------------
# Tests
# ----------------------------------------------------------------------------
@pytest.mark.parametrize(
["input_value", "expected_result"],
[
("aalto.fi_website.png", [0.5822306238185255]),
("transparent.png", [0.0]), # transparent -> white pixels
("white.png", [0.0]),
("black.png", [0.0]),
("gray.png", [0.0]),
("red.png", [0.0]),
("green.png", [0.0]),
("blue.png", [0.0]),
(
"white_50_transparent_50.png",
[0.0],
), # transparent -> white pixels
(
"black_50_transparent_50.png",
[0.0],
), # transparent -> white pixels
("white_50_black_50.png", [0.0]),
("red_50_green_50.png", [0.0]),
("green_50_blue_50.png", [0.0]),
("blue_50_red_50.png", [0.0]),
("4_high-contrast_shades_of_gray.png", [0.0]),
("4_low-contrast_shades_of_gray.png", [0.0]),
("10_black_stripes_20px_margin.png", [0.0]),
("3_black_stripes_19px_margin.png", [0.0]),
("2_narrow_black_stripes_19px_margin.png", [0.5]),
],
)
def test_contour_congestion_desktop(
input_value: str, expected_result: List[Any]
) -> None:
"""
Test contour congestion (desktop GUIs).
Args:
input_value: GUI image file name
expected_result: Expected result (list of measures)
"""
# Build GUI image file path
gui_image_filepath: pathlib.Path = (
pathlib.Path(DATA_TESTS_DIR) / input_value
)
# Read GUI image (PNG)
gui_image_png_base64: str = image_utils.read_image(gui_image_filepath)
# Execute metric
result: Optional[List[Union[int, float, str]]] = Metric.execute_metric(
gui_image_png_base64
)
# Test result
if result is not None:
assert result[0] == expected_result[0]
``` |
{
"source": "JiaoHuang1/reliability_analysis",
"score": 3
} |
#### File: src/main/Main.py
```python
from __future__ import division
import os
import sys
from main.Util import Util
from main.ObservedDisagreement import ObservedDisagreement
from main.ExpectedDisagreement import ExpectedDisagreement
base_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), "data")
def main():
if len(sys.argv) != 2:
print "python Main %s"
sys.exit(-1)
data = Util.read_data(os.path.join(base_path, sys.argv[1]))
for c, d in enumerate(data):
od = ObservedDisagreement(d)
ock = od.create_ock()
od_value = od.get_disagreement(ock)
ed = ExpectedDisagreement(od, ock)
ed_value = ed.get()
alpha = 1 - od_value / ed_value
print "data[{0}] = {1}".format(c, d)
print "alpha = {0}".format(alpha)
if __name__ == '__main__':
main()
```
#### File: src/test/ExpectedDisagreementTest.py
```python
import unittest
import os
from main.Util import Util
from main.ObservedDisagreement import ObservedDisagreement
from main.ExpectedDisagreement import ExpectedDisagreement
class ExpectedDisagreementTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
data = Util.read_data(os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), "data", "music.dat"))[0]
od = ObservedDisagreement(data)
ock = od.create_ock()
cls.ed = ExpectedDisagreement(od, ock)
def test_num_p(self):
print
print "{0}testing probability for the number of values contained in multi-value item{0}".format("*" * 6)
p = self.ed.get_num_p()
print p
self.assertAlmostEqual(p[0], 0.125, 3, "p[0] = 0.125")
self.assertAlmostEqual(p[1], 0.5, 1, "p[1] = 0.5")
self.assertAlmostEqual(p[2], 0.25, 2, "p[2] = 0.25")
self.assertAlmostEqual(p[3], 0.125, 3, "p[3] = 0.125")
print "{0}tested probability for the number of values contained in multi-value item{0}".format("*" * 6)
print
def test_single_item_n(self):
print
print "{0}testing frequency of single items{0}".format("*" * 6)
n = self.ed.get_single_item_n()
print n
        self.assertDictEqual(n, {(): 1, 'Jazz': 3, 'Funk': 4, 'Rock': 4}, "n = {():1, Jazz:3, Funk:4, Rock:4}")
print "{0}tested frequency of single items{0}".format("*" * 6)
print
def test_eck(self):
print
print "{0}testing expected frequency matrix{0}".format("*" * 6)
n = self.ed.get_single_item_n()
eck = self.ed.create_eck(n)
print "\t\t".join(["header"] + ["(%s)" % (",".join(x)) for x in self.ed.data_combinations])
for k in self.ed.data_combinations:
print "\t\t".join(["(%s)" % (",".join(k))] + [str(eck[k][x]) for x in self.ed.data_combinations])
self.assertDictEqual(eck[()], {(): 0, ('Funk',):4,
("Rock",):4, ('Funk', 'Rock'):16,
('Jazz',):3, ('Funk', 'Jazz'):12,
('Jazz', 'Rock'):12, ('Funk', 'Jazz', 'Rock'):48}, "eck(()) should be right")
self.assertDictEqual(eck[('Funk', 'Rock')], {('Funk',):48, ('Rock',):48,
('Funk', 'Rock'):144, ('Jazz',):48,
('Funk', 'Jazz'):144, ('Jazz', 'Rock'):144,
('Funk', 'Jazz', 'Rock'):432, (): 16}, "eck({Rock, Funk}) should be right")
        self.assertEquals(eck[('Funk',)][('Rock',)], eck[('Rock',)][('Funk',)], "eck should be symmetric")
self.assertEquals(eck[('Funk', 'Jazz')][('Funk', 'Jazz', 'Rock')], eck[('Funk', 'Jazz', 'Rock')][('Funk', 'Jazz')], "eck should still be symmetric")
print "{0}testing number of ways of pairing two labels{0}".format("*" * 6)
denominator_matrix = self.ed.get_ways_of_pair(eck)
denominator_keys = sorted(denominator_matrix.keys())
print "\t\t".join(['header'] + [str(n) for n in denominator_keys])
for d_key in denominator_keys:
print "\t\t".join([str(d_key)] + [str(denominator_matrix[d_key][x]) for x in denominator_matrix[d_key]])
print "{0}tested number of ways of pairing two labels{0}".format("*" * 6)
print "{0}tested expected frequency matrix{0}".format("*" * 6)
print
def test_ed(self):
p = self.ed.get_num_p()
n = self.ed.get_single_item_n()
eck = self.ed.create_eck(n)
d = self.ed.get_ways_of_pair(eck)
print self.ed.get_single_case_ed(('Funk', 'Jazz'), ('Funk', 'Jazz', 'Rock'), p, n, d)
print self.ed.get_disagreement(p, n, d)
``` |
{
"source": "jiaojiao1234/RISE",
"score": 2
} |
#### File: M_Touch/data/feature_ex.py
```python
import math
from copy import deepcopy
import numpy as np
from sklearn import svm
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import train_test_split, StratifiedShuffleSplit, cross_val_score, StratifiedKFold
#from sklearn.externals import joblib
import joblib
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier
def touch(path,nameall):
label=[]
label_num=0;
name_num=0
feature_flag=0
colornum=[]
for name_num in nameall:
num_name = 0;
name_u = name_num
flag_2 =0
n1 = []
x1 = []
y1 = []
s1 = []
t1 = []
n2 = []
x2 = []
y2 = []
s2 = []
t2 = []
n3 = []
x3 = []
y3 = []
s3 = []
t3 = []
x_zb1 = []
y_zb1 = []
x_zb2 = []
y_zb2 = []
x_zb3 = []
y_zb3 = []
x_zb4 = []
y_zb4 = []
        # sign=0  # marks the current position while processing the files
# end=len(name_u)
for num_name in range(len(name_u)):
filename = name_u[num_name]
n4 = []
x4 = []
y4 = []
s4 = []
t4 = []
with open(path+filename, 'r', encoding='UTF-8') as file_to_read:
while True:
                    lines = file_to_read.readline() # read one whole line
if not lines:
break
pass
n_tmp, x_tmp, y_tmp, s_tmp, t_tmp = [float(i) for i in
                        lines.split()] # split the line into fields; split() with no argument splits on whitespace, pass ',' for comma-separated data
n4.append(n_tmp) # 添加新读取的数据
x4.append(x_tmp)
y4.append(y_tmp)
s4.append(s_tmp)
t4.append(t_tmp)
pass
                n4 = np.array(n4) # convert the lists to numpy arrays
x4 = np.array(x4)
y4 = np.array(y4)
s4 = np.array(s4)
t4 = np.array(t4)
pass
if n1==[]:
n1 = n4
x1 = x4
y1 = y4
s1 = s4
t1 = t4
elif n1!=[] and n2==[]:
n2 = n4
x2 = x4
y2 = y4
s2 = s4
t2 = t4
elif n2!=[] and n3==[]:
n3 = n4
x3 = x4
y3 = y4
s3 = s4
t3 = t4
        # process the collected data
i = 0
j = 0
flag = 0;
A = []
B = []
C = []
D = []
E = []
color_num=0;
for inum in range(len(n1)):
if n1[inum] == 0:
color_num=color_num+1
colornum.append(color_num)
for i in range(len(n1)):
if flag == 0:
if n1[i] == 0:
if i == 0:
xzb1=x1[i] # x1
yzb1=y1[i] # y1
xzb2=x2[i] # x2
yzb2=y2[i] # y2
xzb3=x3[i] # x3
yzb3=y3[i] # y3
xzb4=x4[i] # x3
yzb4=y4[i] # y3
continue;
j = i
flag = 1
A = x1[0:j]
A = np.vstack((A, y1[0:j]))
A = np.vstack((A, t1[0:j]))
A = np.vstack((A, s1[0:j]))
B = x2[0:j]
B = np.vstack((B, y2[0:j]))
B = np.vstack((B, t2[0:j]))
B = np.vstack((B, s2[0:j]))
C = x3[0:j]
C = np.vstack((C, y3[0:j]))
C = np.vstack((C, t3[0:j]))
C = np.vstack((C, s3[0:j]))
D = x4[0:j]
D = np.vstack((D, y4[0:j]))
D = np.vstack((D, t4[0:j]))
D = np.vstack((D, s4[0:j]))
                    # feature calculation
length1 = 0
for x_num in range(len(A[0, :])):
if x_num == (len(A[0, :]) - 1):
break;
length1 = length1 + math.sqrt(
(A[0, x_num + 1] - A[0, x_num]) * (A[0, x_num + 1] - A[0, x_num]) + (
                            A[1, x_num + 1] - A[1, x_num]) * (A[1, x_num + 1] - A[1, x_num])) # path length
weiyi1 = math.sqrt((A[0, j - 1] - A[0, 0]) * (A[0, j - 1] - A[0, 0]) + (A[1, j - 1] - A[1, 0]) * (
                        A[1, j - 1] - A[1, 0])) # displacement
                    juli_weiyi1 = length1 / weiyi1 # path length / displacement
                    time1 = A[3, j - 1] # duration
                    speed1 = length1 / time1 # average speed
                    mean1 = np.mean(A[2, :]) # mean touch size
                    biaozhuncha1 = np.std(A[2, :], ddof=1) # touch size std
length2 = 0
for x_num2 in range(len(B[0, :])):
if x_num2 == (len(B[0, :]) - 1):
break;
length2 = length2 + math.sqrt(
(B[0, x_num2 + 1] - B[0, x_num2]) * (B[0, x_num2 + 1] - B[0, x_num2]) + (
                            B[1, x_num2 + 1] - B[1, x_num2]) * (B[1, x_num2 + 1] - B[1, x_num2])) # path length
weiyi2 = math.sqrt((B[0, j - 1] - B[0, 0]) * (B[0, j - 1] - B[0, 0]) + (B[1, j - 1] - B[1, 0]) * (
                        B[1, j - 1] - B[1, 0])) # displacement
                    juli_weiyi2 = length2 / weiyi2 # path length / displacement
                    time2 = B[3, j - 1] # duration
                    speed2 = length2 / time2 # average speed
                    mean2 = np.mean(B[2, :]) # mean touch size
                    biaozhuncha2 = np.std(B[2, :], ddof=1) # touch size std
length3 = 0
for x_num3 in range(len(C[0, :])):
if x_num3 == (len(C[0, :]) - 1):
break;
length3 = length3 + math.sqrt(
(C[0, x_num3 + 1] - C[0, x_num3]) * (C[0, x_num3 + 1] - C[0, x_num3]) + (
                            C[1, x_num3 + 1] - C[1, x_num3]) * (C[1, x_num3 + 1] - C[1, x_num3])) # path length
weiyi3 = math.sqrt((C[0, j - 1] - C[0, 0]) * (C[0, j - 1] - C[0, 0]) + (C[1, j - 1] - C[1, 0]) * (
                        C[1, j - 1] - C[1, 0])) # displacement
                    juli_weiyi3 = length3 / weiyi3 # path length / displacement
                    time3 = C[3, j - 1] # duration
                    speed3 = length3 / time3 # average speed
                    mean3 = np.mean(C[2, :]) # mean touch size
                    biaozhuncha3 = np.std(C[2, :], ddof=1) # touch size std
length4 = 0
for x_num4 in range(len(C[0, :])):
if x_num4 == (len(C[0, :]) - 1):
break;
length4 = length4 + math.sqrt(
(D[0, x_num4 + 1] - D[0, x_num4]) * (D[0, x_num4 + 1] - D[0, x_num4]) + (
                            D[1, x_num4 + 1] - D[1, x_num4]) * (D[1, x_num4 + 1] - D[1, x_num4])) # path length
weiyi4 = math.sqrt((D[0, j - 1] - D[0, 0]) * (D[0, j - 1] - D[0, 0]) + (D[1, j - 1] - D[1, 0]) * (
                        D[1, j - 1] - D[1, 0])) # displacement
                    juli_weiyi4 = length4 / weiyi4 # path length / displacement
                    time4 = D[3, j - 1] # duration
                    speed4 = length4 / time4 # average speed
                    mean4 = np.mean(C[2, :]) # mean touch size
                    biaozhuncha4 = np.std(C[2, :], ddof=1) # touch size std
# PY
Pf = []
Pf.append(xzb1)
Pf.append(yzb1)
Pf.append(xzb2)
Pf.append(yzb2)
Pf.append(xzb3)
Pf.append(yzb3)
Pf.append(xzb4)
Pf.append(yzb4)
Spf=[]
Spf2=[]
Spf=sorted([yzb1, yzb2, yzb3, yzb4])
for snum in range(4):
Spf2.append(Pf[Pf.index(Spf[snum]) - 1])
Spf2.append(Spf[snum])
Pf=Spf2
#
d1 = Pf[2] - Pf[0]
d2 = Pf[4] - Pf[2]
d3 = Pf[6] - Pf[4]
d4 = Pf[3] - Pf[1]
d5 = Pf[5] - Pf[3]
d6 = Pf[7] - Pf[5]
d7 = math.sqrt((Pf[2] - Pf[0]) * (Pf[2] - Pf[0]) + (Pf[3] - Pf[1]) * (Pf[3] - Pf[1]))
d8 = math.sqrt((Pf[4] - Pf[2]) * (Pf[4] - Pf[2]) + (Pf[5] - Pf[3]) * (Pf[5] - Pf[3]))
d9 = math.sqrt((Pf[4] - Pf[0]) * (Pf[4] - Pf[0]) + (Pf[5] - Pf[1]) * (Pf[5] - Pf[1]))
d10 = math.sqrt((Pf[6] - Pf[0]) * (Pf[6] - Pf[0]) + (Pf[7] - Pf[1]) * (Pf[7] - Pf[1]))
d11 = math.sqrt((Pf[6] - Pf[2]) * (Pf[6] - Pf[2]) + (Pf[7] - Pf[3]) * (Pf[7] - Pf[3]))
d12 = math.sqrt((Pf[6] - Pf[4]) * (Pf[6] - Pf[4]) + (Pf[7] - Pf[5]) * (Pf[7] - Pf[5]))
#
feature = []
feature.append((length1+length2+length3+length4)/4)
feature.append((weiyi1+weiyi2+weiyi3+weiyi4)/4)
feature.append((juli_weiyi1+juli_weiyi2+juli_weiyi3+juli_weiyi4)/4)
feature.append((time1+time2+time3+time4)/4)
feature.append((speed1+speed2+speed3+speed4)/4)
feature.append((mean1+mean2+mean3+mean4)/4)
feature.append((biaozhuncha1+biaozhuncha2+biaozhuncha3+biaozhuncha4)/4)
feature.append(d1)
feature.append(d2)
feature.append(d3)
feature.append(d4)
feature.append(d5)
feature.append(d6)
feature.append(d7)
feature.append(d8)
feature.append(d9)
feature.append(d10)
feature.append(d11)
feature.append(d12)
E = np.array(feature);
elif flag == 1:
if n1[i] == 0:
xzb1 = x1[i] # x1
yzb1 = y1[i] # y1
xzb2 = x2[i] # x2
yzb2 = y2[i] # y2
xzb3 = x3[i] # x3
yzb3 = y3[i] # y3
xzb4 = x4[i] # x3
yzb4 = y4[i] # y3
k = j;
j = i
A = x1[k:j]
A = np.vstack((A, y1[k:j]))
A = np.vstack((A, t1[k:j]))
A = np.vstack((A, s1[k:j]))
B = x2[k:j]
B = np.vstack((B, y2[k:j]))
B = np.vstack((B, t2[k:j]))
B = np.vstack((B, s2[k:j]))
C = x3[k:j]
C = np.vstack((C, y3[k:j]))
C = np.vstack((C, t3[k:j]))
C = np.vstack((C, s3[k:j]))
D = x4[k:j]
D = np.vstack((D, y4[k:j]))
D = np.vstack((D, t4[k:j]))
D = np.vstack((D, s4[k:j]))
                    # feature calculation
length1 = 0
for x_num1 in range(len(A[0, :])):
if x_num1 == (len(A[0, :]) - 1):
break;
length1 = length1 + math.sqrt(
(A[0, x_num1 + 1] - A[0, x_num1]) * (A[0, x_num1 + 1] - A[0, x_num1]) + (
                            A[1, x_num1 + 1] - A[1, x_num1]) * (A[1, x_num1 + 1] - A[1, x_num1])) # path length
weiyi1 = math.sqrt((A[0, len(A[0, :]) - 1] - A[0, 0]) * (A[0, len(A[0, :]) - 1] - A[0, 0]) + (
                        A[1, len(A[0, :]) - 1] - A[1, 0]) * (A[1, len(A[0, :]) - 1] - A[1, 0])) # displacement
                    juli_weiyi1 = length1 / weiyi1 # path length / displacement
                    time1 = A[3, len(A) - 1] # duration
                    speed1 = length1 / time1 # average speed
                    mean1 = np.mean(A[2, :]) # mean touch size
                    biaozhuncha1 = np.std(A[2, :], ddof=1) # touch size std
length2 = 0
for x_num2 in range(len(A[0, :])):
if x_num2 == (len(A[0, :]) - 1):
break;
length2 = length2 + math.sqrt(
(A[0, x_num2 + 1] - A[0, x_num2]) * (A[0, x_num2 + 1] - A[0, x_num2]) + (
                            A[1, x_num2 + 1] - A[1, x_num2]) * (A[1, x_num2 + 1] - A[1, x_num2])) # path length
weiyi2 = math.sqrt((A[0, len(A[0, :]) - 1] - A[0, 0]) * (A[0, len(A[0, :]) - 1] - A[0, 0]) + (
                        A[1, len(A[0, :]) - 1] - A[1, 0]) * (A[1, len(A[0, :]) - 1] - A[1, 0])) # displacement
                    juli_weiyi2 = length2 / weiyi2 # path length / displacement
                    time2 = A[3, len(A) - 1] # duration
                    speed2 = length2 / time2 # average speed
                    mean2 = np.mean(A[2, :]) # mean touch size
                    biaozhuncha2 = np.std(A[2, :], ddof=1) # touch size std
length3 = 0
for x_num3 in range(len(A[0, :])):
if x_num3 == (len(A[0, :]) - 1):
break;
length3 = length3 + math.sqrt(
(A[0, x_num3 + 1] - A[0, x_num3]) * (A[0, x_num3 + 1] - A[0, x_num3]) + (
                            A[1, x_num3 + 1] - A[1, x_num3]) * (A[1, x_num3 + 1] - A[1, x_num3])) # path length
weiyi3 = math.sqrt((A[0, len(A[0, :]) - 1] - A[0, 0]) * (A[0, len(A[0, :]) - 1] - A[0, 0]) + (
                        A[1, len(A[0, :]) - 1] - A[1, 0]) * (A[1, len(A[0, :]) - 1] - A[1, 0])) # displacement
                    juli_weiyi3 = length3 / weiyi3 # path length / displacement
                    time3 = A[3, len(A) - 1] # duration
                    speed3 = length3 / time3 # average speed
                    mean = np.mean(A[2, :]) # mean touch size
                    biaozhuncha3 = np.std(A[2, :], ddof=1) # touch size std
length4 = 0
for x_num4 in range(len(A[0, :])):
if x_num4 == (len(A[0, :]) - 1):
break;
length4 = length4 + math.sqrt(
(A[0, x_num4 + 1] - A[0, x_num4]) * (A[0, x_num4 + 1] - A[0, x_num4]) + (
                            A[1, x_num4 + 1] - A[1, x_num4]) * (A[1, x_num4 + 1] - A[1, x_num4])) # path length
weiyi4 = math.sqrt((A[0, len(A[0, :]) - 1] - A[0, 0]) * (A[0, len(A[0, :]) - 1] - A[0, 0]) + (
                        A[1, len(A[0, :]) - 1] - A[1, 0]) * (A[1, len(A[0, :]) - 1] - A[1, 0])) # displacement
                    juli_weiyi4 = length4 / weiyi4 # path length / displacement
                    time4 = A[3, len(A) - 1] # duration
                    speed4 = length4 / time4 # average speed
                    mean = np.mean(A[2, :]) # mean touch size
                    biaozhuncha4 = np.std(A[2, :], ddof=1) # touch size std
# PY
Pf = []
Pf.append(xzb1)
Pf.append(yzb1)
Pf.append(xzb2)
Pf.append(yzb2)
Pf.append(xzb3)
Pf.append(yzb3)
Pf.append(xzb4)
Pf.append(yzb4)
Spf=[]
Spf2=[]
Spf=sorted([yzb1, yzb2, yzb3, yzb4])
for snum in range(4):
Spf2.append(Pf[Pf.index(Spf[snum]) - 1])
Spf2.append(Spf[snum])
Pf=Spf2
#
d1 = Pf[2] - Pf[0]
d2 = Pf[4] - Pf[2]
d3 = Pf[6] - Pf[4]
d4 = Pf[3] - Pf[1]
d5 = Pf[5] - Pf[3]
d6 = Pf[7] - Pf[5]
d7 = math.sqrt((Pf[2] - Pf[0]) * (Pf[2] - Pf[0]) + (Pf[3] - Pf[1]) * (Pf[3] - Pf[1]))
d8 = math.sqrt((Pf[4] - Pf[2]) * (Pf[4] - Pf[2]) + (Pf[5] - Pf[3]) * (Pf[5] - Pf[3]))
d9 = math.sqrt((Pf[4] - Pf[0]) * (Pf[4] - Pf[0]) + (Pf[5] - Pf[1]) * (Pf[5] - Pf[1]))
d10 = math.sqrt((Pf[6] - Pf[0]) * (Pf[6] - Pf[0]) + (Pf[7] - Pf[1]) * (Pf[7] - Pf[1]))
d11 = math.sqrt((Pf[6] - Pf[2]) * (Pf[6] - Pf[2]) + (Pf[7] - Pf[3]) * (Pf[7] - Pf[3]))
d12 = math.sqrt((Pf[6] - Pf[4]) * (Pf[6] - Pf[4]) + (Pf[7] - Pf[5]) * (Pf[7] - Pf[5]))
feature = []
feature.append((length1 + length2 + length3 + length4) / 4)
feature.append((weiyi1 + weiyi2 + weiyi3 + weiyi4) / 4)
feature.append((juli_weiyi1 + juli_weiyi2 + juli_weiyi3 + juli_weiyi4) / 4)
feature.append((time1 + time2 + time3 + time4) / 4)
feature.append((speed1 + speed2 + speed3 + speed4) / 4)
feature.append((mean1 + mean2 + mean3 + mean4) / 4)
feature.append((biaozhuncha1 + biaozhuncha2 + biaozhuncha3 + biaozhuncha4) / 4)
feature.append(d1)
feature.append(d2)
feature.append(d3)
feature.append(d4)
feature.append(d5)
feature.append(d6)
feature.append(d7)
feature.append(d8)
feature.append(d9)
feature.append(d10)
feature.append(d11)
feature.append(d12)
E = np.vstack((E, feature))
#
xzb1 = x1[i] # x1
yzb1 = y1[i] # y1
xzb2 = x2[i] # x2
yzb2 = y2[i] # y2
xzb3 = x3[i] # x3
yzb3 = y3[i] # y3
xzb4 = x4[i] # x3
yzb4 = y4[i] # y3
k = j;
j = i
A = x1[k:j]
A = np.vstack((A, y1[k:j]))
A = np.vstack((A, t1[k:j]))
A = np.vstack((A, s1[k:j]))
B = x2[k:j]
B = np.vstack((B, y2[k:j]))
B = np.vstack((B, t2[k:j]))
B = np.vstack((B, s2[k:j]))
C = x3[k:j]
C = np.vstack((C, y3[k:j]))
C = np.vstack((C, t3[k:j]))
C = np.vstack((C, s3[k:j]))
D = x4[k:j]
D = np.vstack((D, y4[k:j]))
D = np.vstack((D, t4[k:j]))
D = np.vstack((D, s4[k:j]))
        # feature calculation
length1 = 0
for x_num1 in range(len(A[0, :])):
if x_num1 == (len(A[0, :]) - 1):
break;
length1 = length1 + math.sqrt(
(A[0, x_num1 + 1] - A[0, x_num1]) * (A[0, x_num1 + 1] - A[0, x_num1]) + (
                A[1, x_num1 + 1] - A[1, x_num1]) * (A[1, x_num1 + 1] - A[1, x_num1])) # path length
weiyi1 = math.sqrt((A[0, len(A[0, :]) - 1] - A[0, 0]) * (A[0, len(A[0, :]) - 1] - A[0, 0]) + (
            A[1, len(A[0, :]) - 1] - A[1, 0]) * (A[1, len(A[0, :]) - 1] - A[1, 0])) # displacement
        juli_weiyi1 = length1 / weiyi1 # path length / displacement
        time1 = A[3, len(A) - 1] # duration
        speed1 = length1 / time1 # average speed
        mean1 = np.mean(A[2, :]) # mean touch size
        biaozhuncha1 = np.std(A[2, :], ddof=1) # touch size std
length2 = 0
for x_num2 in range(len(A[0, :])):
if x_num2 == (len(A[0, :]) - 1):
break;
length2 = length2 + math.sqrt(
(A[0, x_num2 + 1] - A[0, x_num2]) * (A[0, x_num2 + 1] - A[0, x_num2]) + (
                A[1, x_num2 + 1] - A[1, x_num2]) * (A[1, x_num2 + 1] - A[1, x_num2])) # path length
weiyi2 = math.sqrt((A[0, len(A[0, :]) - 1] - A[0, 0]) * (A[0, len(A[0, :]) - 1] - A[0, 0]) + (
            A[1, len(A[0, :]) - 1] - A[1, 0]) * (A[1, len(A[0, :]) - 1] - A[1, 0])) # displacement
        juli_weiyi2 = length2 / weiyi2 # path length / displacement
        time2 = A[3, len(A) - 1] # duration
        speed2 = length2 / time2 # average speed
        mean2 = np.mean(A[2, :]) # mean touch size
        biaozhuncha2 = np.std(A[2, :], ddof=1) # touch size std
length3 = 0
for x_num3 in range(len(A[0, :])):
if x_num3 == (len(A[0, :]) - 1):
break;
length3 = length3 + math.sqrt(
(A[0, x_num3 + 1] - A[0, x_num3]) * (A[0, x_num3 + 1] - A[0, x_num3]) + (
                A[1, x_num3 + 1] - A[1, x_num3]) * (A[1, x_num3 + 1] - A[1, x_num3])) # path length
weiyi3 = math.sqrt((A[0, len(A[0, :]) - 1] - A[0, 0]) * (A[0, len(A[0, :]) - 1] - A[0, 0]) + (
            A[1, len(A[0, :]) - 1] - A[1, 0]) * (A[1, len(A[0, :]) - 1] - A[1, 0])) # displacement
        juli_weiyi3 = length3 / weiyi3 # path length / displacement
        time3 = A[3, len(A) - 1] # duration
        speed3 = length3 / time3 # average speed
        mean = np.mean(A[2, :]) # mean touch size
        biaozhuncha3 = np.std(A[2, :], ddof=1) # touch size std
length4 = 0
for x_num4 in range(len(A[0, :])):
if x_num4 == (len(A[0, :]) - 1):
break;
length4 = length4 + math.sqrt(
(A[0, x_num4 + 1] - A[0, x_num4]) * (A[0, x_num4 + 1] - A[0, x_num4]) + (
                A[1, x_num4 + 1] - A[1, x_num4]) * (A[1, x_num4 + 1] - A[1, x_num4])) # path length
weiyi4 = math.sqrt((A[0, len(A[0, :]) - 1] - A[0, 0]) * (A[0, len(A[0, :]) - 1] - A[0, 0]) + (
            A[1, len(A[0, :]) - 1] - A[1, 0]) * (A[1, len(A[0, :]) - 1] - A[1, 0])) # displacement
        juli_weiyi4 = length4 / weiyi4 # path length / displacement
        time4 = A[3, len(A) - 1] # duration
        speed4 = length4 / time4 # average speed
        mean = np.mean(A[2, :]) # mean touch size
        biaozhuncha4 = np.std(A[2, :], ddof=1) # touch size std
# PY
Pf = []
Pf.append(xzb1)
Pf.append(yzb1)
Pf.append(xzb2)
Pf.append(yzb2)
Pf.append(xzb3)
Pf.append(yzb3)
Pf.append(xzb4)
Pf.append(yzb4)
Spf = []
Spf2 = []
Spf = sorted([yzb1, yzb2, yzb3, yzb4])
for snum in range(4):
Spf2.append(Pf[Pf.index(Spf[snum]) - 1])
Spf2.append(Spf[snum])
Pf = Spf2
#
d1 = Pf[2] - Pf[0]
d2 = Pf[4] - Pf[2]
d3 = Pf[6] - Pf[4]
d4 = Pf[3] - Pf[1]
d5 = Pf[5] - Pf[3]
d6 = Pf[7] - Pf[5]
d7 = math.sqrt((Pf[2] - Pf[0]) * (Pf[2] - Pf[0]) + (Pf[3] - Pf[1]) * (Pf[3] - Pf[1]))
d8 = math.sqrt((Pf[4] - Pf[2]) * (Pf[4] - Pf[2]) + (Pf[5] - Pf[3]) * (Pf[5] - Pf[3]))
d9 = math.sqrt((Pf[4] - Pf[0]) * (Pf[4] - Pf[0]) + (Pf[5] - Pf[1]) * (Pf[5] - Pf[1]))
d10 = math.sqrt((Pf[6] - Pf[0]) * (Pf[6] - Pf[0]) + (Pf[7] - Pf[1]) * (Pf[7] - Pf[1]))
d11 = math.sqrt((Pf[6] - Pf[2]) * (Pf[6] - Pf[2]) + (Pf[7] - Pf[3]) * (Pf[7] - Pf[3]))
d12 = math.sqrt((Pf[6] - Pf[4]) * (Pf[6] - Pf[4]) + (Pf[7] - Pf[5]) * (Pf[7] - Pf[5]))
feature = []
feature.append((length1 + length2 + length3 + length4) / 4)
feature.append((weiyi1 + weiyi2 + weiyi3 + weiyi4) / 4)
feature.append((juli_weiyi1 + juli_weiyi2 + juli_weiyi3 + juli_weiyi4) / 4)
feature.append((time1 + time2 + time3 + time4) / 4)
feature.append((speed1 + speed2 + speed3 + speed4) / 4)
feature.append((mean1 + mean2 + mean3 + mean4) / 4)
feature.append((biaozhuncha1 + biaozhuncha2 + biaozhuncha3 + biaozhuncha4) / 4)
feature.append(d1)
feature.append(d2)
feature.append(d3)
feature.append(d4)
feature.append(d5)
feature.append(d6)
feature.append(d7)
feature.append(d8)
feature.append(d9)
feature.append(d10)
feature.append(d11)
feature.append(d12)
        E = np.vstack((E, feature)) # previously named B
if flag_2 == 0:
Feature = E;
flag_2 = 1;
elif flag_2 == 1:
Feature = np.vstack((E, Feature)) # BF
if feature_flag == 0:
FeatureAll = Feature
feature_flag = 1
elif feature_flag == 1:
FeatureAll = np.vstack((FeatureAll, Feature))
#label
for labelnum in range(len(Feature[:, 0])):
label.append(label_num)
label_num = label_num + 1;
return FeatureAll,label
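# Usage sketch (hypothetical directory and file names): each inner list of nameall
# groups the touch-log files recorded for one user, which becomes one class label.
# users = [['user1_rec1.txt', 'user1_rec2.txt', 'user1_rec3.txt'],
#          ['user2_rec1.txt', 'user2_rec2.txt', 'user2_rec3.txt']]
# features, labels = touch('data/', users)  # features: N x 19 array, labels: class id per row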
```
#### File: Src/Probability_vector/probability_vector.py
```python
def cal_proba_vector(sample, classification_model_fit):
"""Calculate the probability vector.
Usage:
:param: sample:A dataset that is a 2D numpy array
classification_model_fit: A classification model that has been fitted
:rtype:
"""
proba_vector = classification_model_fit.predict_proba(sample)
return proba_vector
"""
#Test Code
from sklearn import svm
import scipy.io as sio
original_sample = sio.loadmat('wig9_label_startend30p1_.mat')
data = original_sample['fe_wig']
label = original_sample['label'].flatten()
classification_model = svm.SVC(probability = True, break_ties=True, decision_function_shape='ovr', random_state=0)
classification_model_fit = classification_model.fit(data,label)
proba_vector = cal_proba_vector(data, classification_model_fit)
"""
```
#### File: Src/Statistical_vector/statistical_vector.py
```python
from nonconformist.base import ClassifierAdapter
from nonconformist.nc import ClassifierNc, MarginErrFunc
from nonconformist.icp import IcpClassifier
import numpy as np
def train_statistical_vector(data_train, label_train, data_cal, label_cal, classification_model, non_Func = MarginErrFunc(), significance=None):
"""Calculate the statistical vector of train set.
Usage:
:param: sample:A dataset that is a 2D numpy array
data_train, label_train:Training datasets and labels
data_cal, label_cal:Calibration datasets and labels
classification_model: A classification model
                non_Func: A nonconformity function
:rtype: train_statistical: The statistical vector of the training set
train_proba: The probability vector of the training set
"""
model_classi = ClassifierAdapter(classification_model)
nc_score = ClassifierNc(model_classi, non_Func)
icp_model = IcpClassifier(nc_score)
icp_model.fit(data_train, label_train)
icp_model.calibrate(data_cal,label_cal)
train_statistical = icp_model.predict(data_train,significance)
train_proba = model_classi.predict(data_train)
return train_statistical, train_proba
def non_condition_p(validation_c,proba_test):
"""
Given the nonconformity measurements of test samples and calibration samples,
calculate the statistical vector of test samples.
For label-conditional conformal prediction, validation_c refers to the nonconformity measurement
of a specific label in the calibration set.
Usage:
:param: validation_c:Nonconformity measurement of calibration samples
non_test:Nonconformity measurement of test samples
:rtype: p_test:Statistical vector of test samples
"""
p_test = np.empty((proba_test.shape)) ##Shape of the statistical vector
for i in range(proba_test.shape[1]): ##Each column represents a class
validation_c1 = np.array(validation_c)
n_cal = validation_c1.size
for j in range(proba_test[:,i].size):##Number of samples of each class
nc = proba_test[j,i]
idx_left = np.searchsorted(validation_c1, nc, 'left',sorter=np.argsort(validation_c1)) ##Note that validation_c1 should be sorted in ascending order
idx_right = np.searchsorted(validation_c1, nc,'right',sorter=np.argsort(validation_c1))
n_gt = n_cal - idx_right
n_eq = idx_right - idx_left + 1
p_test[j, i] = n_gt / (n_cal+1)
# p_test[j, i] += (n_eq+1) * np.random.uniform(0, 1, 1)) / (n_cal + 1) ##random
p_test[j, i] += (n_eq+1) / (n_cal + 1) ##no random
return p_test
def test_statistical_vector_param(data_train, label_train, data_cal, label_cal, data_test,
classification_model, non_Func = MarginErrFunc(), significance=None):
"""Calculate the statistical vector of test set.
Usage:
:param: sample:A dataset that is a 2D numpy array
data_train, label_train:Training datasets and labels
data_cal, label_cal:Calibration datasets and labels
data_test:Testing datasets
classification_model: A classification model
                non_Func: A nonconformity function
:rtype: validation_nonconformity: Nonconformity measurements of calibration samples
test_nc_score: Nonconformity measurements of test samples
test_proba: The probability vector of the test sample
"""
model_classi = ClassifierAdapter(classification_model)
nc_score = ClassifierNc(model_classi, non_Func)
icp_model = IcpClassifier(nc_score)
icp_model.fit(data_train, label_train)
validation_nonconformity = icp_model.calibrate(data_cal, label_cal)[0][::-1]
test_proba = model_classi.predict(data_test)
test_nc_score = icp_model.get_test_nc(data_test)
# test_statistical = non_condition_p(cal_scores2_,test_nc_score)
return validation_nonconformity, test_nc_score, test_proba
def test_validation_nonconformity(data_train, label_train, data_cal, label_cal, classification_model, non_Func = MarginErrFunc(), significance=None):
"""Calculate the nonconformity measurements of calibration samples.
Usage:
:param: data_train, label_train: Training dataset (a 2D numpy array) and labels
data_cal, label_cal: Calibration dataset and labels
classification_model: A classification model
non_Func: A nonconformity function
:rtype: validation_nonconformity: Nonconformity measurements of calibration samples
"""
model_classi = ClassifierAdapter(classification_model)
nc_score = ClassifierNc(model_classi, non_Func)
icp_model = IcpClassifier(nc_score)
icp_model.fit(data_train, label_train)
validation_nonconformity = icp_model.calibrate(data_cal, label_cal)[0][::-1]
return validation_nonconformity
"""
#Test Code
from sklearn import svm
import scipy.io as sio
import numpy as np
from sklearn.model_selection import train_test_split
original_sample = sio.loadmat('wig9_label_startend30p1_.mat')
data = original_sample['fe_wig']
label = original_sample['label'].flatten()
data_train, data_cal, label_train, label_cal = train_test_split(data, label, test_size=0.25, random_state=42)
classification_model = svm.SVC(probability = True, break_ties=True, decision_function_shape='ovr', random_state=0)
train_statistical, train_proba = train_statistical_vector(data_train, label_train, data_cal, label_cal, classification_model, non_Func = MarginErrFunc(), significance=None)
"""
``` |
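The commented-out snippet above only exercises the training-side helper. A minimal sketch of the test-side flow (`test_statistical_vector_param` followed by `non_condition_p`) is shown below; it assumes the functions above are importable in the current module and reuses the same `.mat` layout as the snippet, so the file name and split sizes are placeholders.
```python
# Sketch only: p-values for unseen samples via the helpers defined above.
# File name, split sizes and SVC settings mirror the commented test code.
from sklearn import svm
from sklearn.model_selection import train_test_split
import scipy.io as sio

original_sample = sio.loadmat('wig9_label_startend30p1_.mat')
data = original_sample['fe_wig']
label = original_sample['label'].flatten()

# train / calibration / test split (sizes are illustrative)
data_fit, data_test, label_fit, label_test = train_test_split(data, label, test_size=0.2, random_state=0)
data_train, data_cal, label_train, label_cal = train_test_split(data_fit, label_fit, test_size=0.25, random_state=42)

classification_model = svm.SVC(probability=True, break_ties=True, decision_function_shape='ovr', random_state=0)

# nonconformity scores of calibration and test samples, plus test probabilities
validation_nc, test_nc, test_proba = test_statistical_vector_param(
    data_train, label_train, data_cal, label_cal, data_test, classification_model)

# statistical (p-value) vector of the test samples
p_test = non_condition_p(validation_nc, test_nc)
```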
{
"source": "jiaojiaogou/UDSBC",
"score": 3
} |
#### File: UDSBC/Rivers/fast_connectivity.py
```python
import geopandas as gpd
import pandas as pd
import numpy as np
import pdb
from shapely.geometry import Point
def connectivity(line_file,out_file):
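# Build the river-network connectivity table: for every reach (COMID) find its
# downstream neighbour (NextID) and up to four upstream reaches (upid1..upid4,
# counted in maxup), then write the table to `out_file` as CSV.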
print("ok1")
#read line shapefile
print("ok2")
d = gpd.read_file(line_file)
print('**** File '+line_file+' *****')
lines = d['geometry'][:]
linecoords = lines[0].coords[:]
npt = len(linecoords)
point1 = Point(linecoords[npt-1])
point2 = Point(linecoords[0])
print(point1)
print(point2)
fromnode = []
tonode = []
fromnode.append(str(point1.x)+','+str(point1.y))
tonode.append(str(point2.x)+','+str(point2.y))
print(fromnode)
print(tonode)
fromnode = []
tonode = []
print('... generating fromnode and tonode ...')
for line in lines:
# coordinates of the reach polyline (all vertices)
linecoords = line.coords[:]
npt = len(linecoords)
# end points of the reach: last vertex -> fromnode, first vertex -> tonode
point1 = Point(linecoords[npt-1])
point2 = Point(linecoords[0])
fromnode.append(str(point1.x)+','+str(point1.y))
tonode.append(str(point2.x)+','+str(point2.y))
d['fromnode'] = fromnode
d['tonode'] = tonode
#creating renamed DataFrame for calculating NextDownID
print('--- creating DataFrame for NextDownID ---')
df1 = d[['COMID','fromnode','tonode']]
df2 = d[['COMID', 'fromnode']].copy().rename(columns={'COMID':'NextID','fromnode': 'tonode'})
print('--- merging to find NextDownID ---')
df_id = pd.merge(df1,df2,how='left',on='tonode') #merge
df_id.fillna(0,inplace=True) #fill missing
df_id.drop(['fromnode','tonode'],axis=1,inplace=True)
df_id['NextID'] = df_id['NextID'].astype(int) #convert to integer
#for calculating upstream IDs
print('--- finding upids ---')
df3 = df_id[['COMID','NextID']].copy()
df4 = df_id[['COMID','NextID']].copy().rename(columns={'COMID': 'fromID','NextID':'COMID'})
df_final = pd.merge(df3, df4, how='left', on='COMID')
df_final.fillna(0,inplace=True)
df_final['fromID'] = df_final['fromID'].astype(int)
grouped = df_final.groupby(['COMID','NextID'])
upid = grouped['fromID'].apply(lambda x: pd.Series(x.values[0:4])).unstack()
nup = len(upid.columns)
upid = upid.rename(columns={iup: 'upid{}'.format(iup + 1) for iup in range(nup)})
upid.fillna(0,inplace=True)
for iup in range(nup):
upid['upid'+str(iup+1)] = upid['upid'+str(iup+1)].astype(int)
for iup in range(nup,4):
upid['upid'+str(iup+1)] = 0
upid['maxup'] = grouped['fromID'].count()
upid['maxup'][upid['maxup']==1] = 0
tomerge = upid.reset_index()
print('--- final merging ---')
final = d[['COMID']].merge(tomerge[['COMID','NextID','maxup','upid1','upid2','upid3','upid4']],on='COMID',how='left')
print('--- writing to csv file... wait ... ---')
final.to_csv(out_file,index=False)
return
```
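A minimal driver for the routine above, assuming the repository root is on the Python path; the shapefile and output paths are placeholders, and the input layer must carry a `COMID` attribute as the code requires.
```python
# Hypothetical usage of connectivity(); file names are placeholders.
from UDSBC.Rivers.fast_connectivity import connectivity

line_file = 'rivers.shp'        # polyline shapefile with a COMID attribute
out_file = 'connectivity.csv'   # columns: COMID, NextID, maxup, upid1..upid4
connectivity(line_file, out_file)
```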
#### File: UDSBC/Rivers/revise.py
```python
import pandas as pd
import numpy as np
def river_revise(connect_file,out_file):
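# Recompute `maxup` as the number of nonzero upstream IDs (upid1..upid4) of each reach.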
data = pd.read_csv(connect_file)
COMID = data['COMID']
maxup = data['maxup']
for i in range(len(COMID)):
data['maxup'][i]=0
if data['upid1'][i] !=0:
data['maxup'][i] +=1
if data['upid2'][i] !=0:
data['maxup'][i] +=1
if data['upid3'][i] !=0:
data['maxup'][i] +=1
if data['upid4'][i] !=0:
data['maxup'][i] +=1
data.to_csv(out_file,index=False)
return
```
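The loop above is equivalent to counting the nonzero entries of the four upstream-ID columns; a vectorized pandas form of the same operation, with placeholder file names, is:
```python
# Vectorized equivalent of river_revise() (illustration only; paths are placeholders).
import pandas as pd

data = pd.read_csv('connectivity.csv')
up_cols = ['upid1', 'upid2', 'upid3', 'upid4']
data['maxup'] = (data[up_cols] != 0).sum(axis=1)
data.to_csv('connectivity_revised.csv', index=False)
```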
#### File: UDSBC/Rivers/upstream.py
```python
import pandas as pd
import numpy as np
def find_upstream(river,connet_file):
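# Breadth-first traversal of the connectivity table: starting from `river`, collect the
# COMIDs of all reaches that drain into it; the starting reach itself is removed from
# the returned list.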
connect = pd.read_csv(connet_file)
COMID = connect['COMID']
maxup = connect['maxup']
init = river
river1=[]
river1.append(river)
index = 0
while True:
k = np.where(COMID == river)[0][0]
p=maxup[k]
if p!=0 :
for m in range(p):
name = 'upid'+str(m+1)
river1.append(connect[name][k])
index= index+1
if index==len(river1):
break
else:
river = river1[index]
river1.remove(init)
return river1
```
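Given a connectivity table like the one written by `connectivity()`/`river_revise()`, the traversal can be exercised as below; the COMID value and file name are placeholders.
```python
# Hypothetical usage: list every reach upstream of a given COMID.
from UDSBC.Rivers.upstream import find_upstream

upstream_ids = find_upstream(123456, 'connectivity_revised.csv')  # placeholder COMID and path
print(len(upstream_ids), 'upstream reaches found')
```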
#### File: UDSBC/util/__init__.py
```python
__all__ = ["filter_nan"]
import numpy as np
def filter_nan(sim, obs):
count = len(obs) - np.isnan(obs).sum()
s1 = np.empty(count)
o1 = np.empty(count)
k=0
for i in range(len(obs)):
if np.isnan(obs[i]):
continue
else:
o1[k] = obs[i]
s1[k] = sim[i]
k = k+1
return s1, o1
``` |
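A quick check of `filter_nan` on synthetic values (illustrative only), assuming the package is importable:
```python
import numpy as np
from UDSBC.util import filter_nan

sim = np.array([1.0, 2.0, 3.0, 4.0])
obs = np.array([1.1, np.nan, 2.9, np.nan])
s1, o1 = filter_nan(sim, obs)
# s1 -> [1.0, 3.0], o1 -> [1.1, 2.9]: NaN observations and their paired simulations are dropped
```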
{
"source": "JiaojiaoYe1994/nni",
"score": 2
} |
#### File: examples/model_compress/model_prune_tf.py
```python
import argparse
import tensorflow as tf
import nni.compression.tensorflow
prune_config = {
'level': {
'dataset_name': 'mnist',
'model_name': 'naive',
'pruner_class': nni.compression.tensorflow.LevelPruner,
'config_list': [{
'sparsity': 0.9,
'op_types': ['default'],
}]
},
}
def get_dataset(dataset_name='mnist'):
assert dataset_name == 'mnist'
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train[..., tf.newaxis] / 255.0
x_test = x_test[..., tf.newaxis] / 255.0
return (x_train, y_train), (x_test, y_test)
def create_model(model_name='naive'):
assert model_name == 'naive'
return NaiveModel()
class NaiveModel(tf.keras.Model):
def __init__(self):
super().__init__()
self.seq_layers = [
tf.keras.layers.Conv2D(filters=20, kernel_size=5),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.ReLU(),
tf.keras.layers.MaxPool2D(pool_size=2),
tf.keras.layers.Conv2D(filters=20, kernel_size=5),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.ReLU(),
tf.keras.layers.MaxPool2D(pool_size=2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(units=500),
tf.keras.layers.ReLU(),
tf.keras.layers.Dense(units=10),
tf.keras.layers.Softmax()
]
def call(self, x):
for layer in self.seq_layers:
x = layer(x)
return x
def create_pruner(model, pruner_name):
pruner_class = prune_config[pruner_name]['pruner_class']
config_list = prune_config[pruner_name]['config_list']
return pruner_class(model, config_list)
def main(args):
model_name = prune_config[args.pruner_name]['model_name']
dataset_name = prune_config[args.pruner_name]['dataset_name']
train_set, test_set = get_dataset(dataset_name)
model = create_model(model_name)
print('start training')
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1, momentum=0.9, decay=1e-4)
model.compile(
optimizer=optimizer,
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
model.fit(
train_set[0],
train_set[1],
batch_size=args.batch_size,
epochs=args.pretrain_epochs,
validation_data=test_set
)
print('start model pruning')
optimizer_finetune = tf.keras.optimizers.SGD(learning_rate=0.001, momentum=0.9, decay=1e-4)
pruner = create_pruner(model, args.pruner_name)
model = pruner.compress()
model.compile(
optimizer=optimizer_finetune,
loss='sparse_categorical_crossentropy',
metrics=['accuracy'],
run_eagerly=True # NOTE: Important, model compression does not work in graph mode!
)
model.fit(
train_set[0],
train_set[1],
batch_size=args.batch_size,
epochs=args.prune_epochs,
validation_data=test_set
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--pruner_name', type=str, default='level')
parser.add_argument('--batch_size', type=int, default=256)
parser.add_argument('--pretrain_epochs', type=int, default=10)
parser.add_argument('--prune_epochs', type=int, default=10)
args = parser.parse_args()
main(args)
```
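For reference, the pruning step of `main()` boils down to the few calls below; epoch counts and optimizers are trimmed for brevity, so treat this as a sketch rather than a replacement for the script.
```python
# Condensed sketch of the pruning flow above, using only helpers from this file
# and nni's LevelPruner; hyper-parameters are illustrative.
import nni.compression.tensorflow

train_set, test_set = get_dataset('mnist')
model = create_model('naive')
model.compile(optimizer='sgd', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(train_set[0], train_set[1], batch_size=256, epochs=1, validation_data=test_set)

pruner = nni.compression.tensorflow.LevelPruner(model, [{'sparsity': 0.9, 'op_types': ['default']}])
model = pruner.compress()
model.compile(optimizer='sgd', loss='sparse_categorical_crossentropy',
              metrics=['accuracy'], run_eagerly=True)  # compressed model must run eagerly
model.fit(train_set[0], train_set[1], batch_size=256, epochs=1, validation_data=test_set)
```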
#### File: compression/tensorflow/compressor.py
```python
import logging
import tensorflow as tf
assert tf.__version__.startswith('2'), 'NNI model compression only supports TensorFlow v2.x'
from . import default_layers
_logger = logging.getLogger(__name__)
class LayerInfo:
"""
This structure contains all information needed to compress a TensorFlow ``Layer``.
Attributes
----------
layer : tf.keras.layers.Layer
The layer.
name : str
The layer's name. Note that it's local to sub-model and may differ from its attribute name.
type : str
Name of the layer's class.
path : list of str or tuple of (str, int)
The layer object's and its parents' attribute name / list index.
For example, if the path is `[('cells', 2), 'conv']`, then the layer can be accessed as `model.cells[2].conv`.
config : JSON object
Selected configuration for this layer. The format is detailed in tutorial.
Parameters
----------
layer : tf.keras.layers.Layer
See attributes section.
path : list of str or tuple of (str, int)
See attributes section.
"""
def __init__(self, layer, path=None):
self.layer = layer
self.name = layer.name
self.type = type(layer).__name__
self.path = path
self.config = None
class Compressor:
"""
Common base class for all compressors.
This class is designed for other base classes.
Algorithms should inherit ``Pruner`` or ``Quantizer`` instead.
Attributes
----------
bound_model : tf.keras.Model
Compressed user model.
wrappers : list of tf.keras.Model
A wrapper is an instrumented TF ``Layer``, in ``Model`` format.
The list is ordered by preorder traversal.
Parameters
----------
LayerWrapperClass : a class derived from Model
The class used to instrument layers.
model : tf.keras.Model
The user model to be compressed.
config_list : list of JSON object
User configuration. The format is detailed in tutorial.
"""
def __init__(self, LayerWrapperClass, model, config_list):
assert isinstance(model, tf.keras.Model)
if isinstance(model, tf.keras.Sequential):
raise ValueError('NNI model compression does not support `Sequential` model for now')
self.validate_config(model, config_list)
self.bound_model = model
self.wrappers = []
for layer_info in _detect_layers_to_compress(model, config_list):
self.wrappers.append(LayerWrapperClass(layer_info, self))
if not self.wrappers:
_logger.warning('Nothing is configured to compress, please check your model and config list')
_instrument_model(model, self.wrappers)
def set_wrappers_attribute(self, name, value):
"""
Call ``setattr`` on all wrappers.
"""
for wrapper in self.wrappers:
setattr(wrapper, name, value)
class Pruner(Compressor):
"""
Base class for pruning algorithms.
End users should use ``compress`` and callback APIs (WIP) to prune their models.
The underlying model is instrumented upon initialization of pruner object.
So if you want to pre-train the model, train it before creating pruner object.
The compressed model can only execute in eager mode.
Algorithm developers should override ``calc_masks`` method to specify pruning strategy.
Parameters
----------
model : tf.keras.Model
The user model to prune.
config_list : list of JSON object
User configuration. The format is detailed in tutorial.
"""
def __init__(self, model, config_list):
super().__init__(PrunerLayerWrapper, model, config_list)
#self.callback = PrunerCallback(self)
def compress(self):
"""
Apply compression on a pre-trained model.
If you want to prune the model during training, use callback API (WIP) instead.
Returns
-------
tf.keras.Model
The compressed model, for convenience. This is exactly the same object to constructor argument.
"""
self._update_mask()
return self.bound_model
def calc_masks(self, wrapper, **kwargs):
"""
Abstract method to be overridden by algorithm. End users should ignore it.
If the callback is set up, this method will be invoked at end of each training minibatch.
If not, it will only be called when end user invokes ``compress``.
Parameters
----------
wrapper : PrunerLayerWrapper
The instrumented layer.
**kwargs
Reserved for forward compatibility.
Returns
-------
dict of (str, tf.Tensor), or None
The key is weight ``Variable``'s name. The value is a mask ``Tensor`` of weight's shape and dtype.
If a weight's key does not appear in the return value, that weight will not be pruned.
Returning ``None`` means the mask is not changed since last time.
Weight names are globally unique, e.g. `model/conv_1/kernel:0`.
"""
# TODO: maybe it should be able to calc on weight-granularity, beside from layer-granularity
raise NotImplementedError("Pruners must overload calc_masks()")
def _update_mask(self):
for wrapper_idx, wrapper in enumerate(self.wrappers):
masks = self.calc_masks(wrapper, wrapper_idx=wrapper_idx)
if masks is not None:
wrapper.masks = masks
class PrunerLayerWrapper(tf.keras.Model):
"""
Instrumented TF layer.
Wrappers will be passed to pruner's ``calc_masks`` API,
and the pruning algorithm should use wrapper's attributes to calculate masks.
Once instrumented, underlying layer's weights will get **modified** by masks before forward pass.
Attributes
----------
layer_info : LayerInfo
All static information of the original layer.
layer : tf.keras.layers.Layer
The original layer.
config : JSON object
Selected configuration. The format is detailed in tutorial.
pruner : Pruner
Bound pruner object.
masks : dict of (str, tf.Tensor)
Current masks. The key is weight's name and the value is mask tensor.
On initialization, `masks` is an empty dict, which means no weight is pruned.
Afterwards, `masks` is the last return value of ``Pruner.calc_masks``.
See ``Pruner.calc_masks`` for details.
"""
def __init__(self, layer_info, pruner):
super().__init__()
self.layer_info = layer_info
self.layer = layer_info.layer
self.config = layer_info.config
self.pruner = pruner
self.masks = {}
_logger.info('Layer detected to compress: %s', self.layer.name)
def call(self, *inputs):
new_weights = []
for weight in self.layer.weights:
mask = self.masks.get(weight.name)
if mask is not None:
new_weights.append(tf.math.multiply(weight, mask))
else:
new_weights.append(weight)
if new_weights and not hasattr(new_weights[0], 'numpy'):
raise RuntimeError('NNI: Compressed model can only run in eager mode')
self.layer.set_weights([weight.numpy() for weight in new_weights])
return self.layer(*inputs)
# TODO: designed to replace `patch_optimizer`
#class PrunerCallback(tf.keras.callbacks.Callback):
# def __init__(self, pruner):
# super().__init__()
# self._pruner = pruner
#
# def on_train_batch_end(self, batch, logs=None):
# self._pruner.update_mask()
def _detect_layers_to_compress(model, config_list):
# Returns list of LayerInfo.
located_layers = _locate_layers(model)
ret = []
for layer in model.layers:
config = _select_config(LayerInfo(layer), config_list)
if config is not None:
if id(layer) not in located_layers:
_logger.error('Failed to locate layer %s in model. The layer will not be compressed. '
'This is a bug in NNI, feel free to fire an issue.', layer.name)
continue
layer_info = located_layers[id(layer)]
layer_info.config = config
ret.append(layer_info)
return ret
def _locate_layers(model, cur_path=[]):
# Find out how to access layers from model object.
# Returns dict of (layer's object ID, LayerInfo).
# This function is required because TF framework does not track layer's attribute name,
# and to my knowledge `Layer.name` is only useful for read-only access.
# `cur_path`s format is documented in `LayerInfo.path`.
# TODO: it can only find layers in `Model` and `list` for now.
assert isinstance(model, tf.keras.Model)
if isinstance(model, tf.keras.Sequential):
_logger.warning('`Sequential` model is not supported yet, ignored.')
ret = {}
for key, value in model.__dict__.items():
if isinstance(value, tf.keras.Model):
ret.update(_locate_layers(value, cur_path + [key]))
elif isinstance(value, tf.keras.layers.Layer):
ret[id(value)] = LayerInfo(value, cur_path + [key])
elif isinstance(value, list):
for i, item in enumerate(value):
if isinstance(item, tf.keras.Model):
ret.update(_locate_layers(item, cur_path + [(key, i)]))
elif isinstance(item, tf.keras.layers.Layer):
ret[id(item)] = LayerInfo(item, cur_path + [(key, i)])
return ret
def _select_config(layer_info, config_list):
# Find the last matching config block for given layer.
# Returns None if the layer should not be compressed.
ret = None
for config in config_list:
if 'op_types' in config:
match = layer_info.type in config['op_types']
match_default = 'default' in config['op_types'] and layer_info.type in default_layers.weighted_modules
if not match and not match_default:
continue
if 'op_names' in config and layer_info.name not in config['op_names']:
continue
ret = config
if ret is None or 'exclude' in ret:
return None
return ret
def _instrument_model(model, wrappers):
# Replace layers to wrappers
for wrapper in reversed(wrappers):
cur = model
for key in wrapper.layer_info.path[:-1]:
if isinstance(key, str):
cur = getattr(cur, key)
else:
name, index = key
cur = getattr(cur, name)[index]
key = wrapper.layer_info.path[-1]
if isinstance(key, str):
setattr(cur, key, wrapper)
else:
name, index = key
getattr(cur, name)[index] = wrapper
#if isinstance(cur, tf.keras.Sequential):
# cur._graph_initialized = False
# cur._layer_call_argspecs[wrapper] = cur._layer_call_argspecs[wrapper.layer]
``` |
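The docstrings above define the contract for `calc_masks`; a minimal one-shot magnitude pruner built on this base class might look like the sketch below. The threshold logic is illustrative and not an NNI built-in.
```python
import tensorflow as tf

class MinimalLevelPruner(Pruner):
    """Illustrative pruner: masks the smallest-magnitude entries of each kernel."""

    def calc_masks(self, wrapper, **kwargs):
        sparsity = wrapper.config['sparsity']
        masks = {}
        for weight in wrapper.layer.weights:
            if 'kernel' not in weight.name:      # leave biases / BN parameters untouched
                continue
            w = tf.math.abs(weight)
            k = int(tf.size(w).numpy() * sparsity)
            if k == 0:
                continue
            threshold = tf.sort(tf.reshape(w, [-1]))[k - 1]
            masks[weight.name] = tf.cast(w > threshold, weight.dtype)  # same shape/dtype as the weight
        return masks

# Hypothetical usage: MinimalLevelPruner(model, [{'sparsity': 0.5, 'op_types': ['default']}]).compress()
```
With the wrapper semantics above, the returned masks are re-applied to the underlying layer's weights on every forward pass.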
{
"source": "jiaojiening/pytorch-CycleGAN",
"score": 2
} |
#### File: pytorch-CycleGAN/models/reid_hybrid_cycle_gan_model.py
```python
import torch
import itertools
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
from . import networks_reid
class ReidHybridCycleGANModel(BaseModel):
def name(self):
return 'ReidHybridCycleGANModel'
@staticmethod
def modify_commandline_options(parser, is_train=True):
# default GAN did not use dropout
parser.set_defaults(no_dropout=True)
if is_train:
parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')
parser.add_argument('--lambda_B', type=float, default=10.0,
help='weight for cycle loss (B -> A -> B)')
parser.add_argument('--lambda_identity', type=float, default=0.5,
help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. '
'For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1')
parser.add_argument('--lambda_rec', type=float, default=10.0, help='weight for reconstruction loss')
parser.add_argument('--lambda_G', type=float, default=1.0, help='weight for Generator loss')
# reid parameters
parser.add_argument('--droprate', type=float, default=0.5, help='the dropout ratio in reid model')
return parser
def initialize(self, opt):
BaseModel.initialize(self, opt)
# specify the training losses you want to print out. The program will call base_model.get_current_losses
self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B', 'rec_A', 'rec_B', 'reid']
# self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B', 'reid']
# specify the images you want to save/display. The program will call base_model.get_current_visuals
visual_names_A = ['real_HR_A', 'fake_LR_A', 'rec_HR_A', 'real_LR_A']
visual_names_B = ['real_LR_B', 'fake_HR_B', 'rec_LR_B', 'real_HR_B']
if self.isTrain and self.opt.lambda_identity > 0.0:
visual_names_A.append('idt_A')
visual_names_B.append('idt_B')
self.visual_names = visual_names_A + visual_names_B
# specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks
if self.isTrain:
self.model_names = ['G_A', 'G_B', 'D_A', 'D_B', 'D_reid']
else: # during test time, only load Gs
self.model_names = ['G_A', 'G_B', 'D_reid']
# netG_A: HR -> LR, netG_B: LR -> HR
# load/define networks
self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
# Load a pretrained resnet model and reset the final connected layer
self.netD_reid = networks_reid.ft_net(opt.num_classes, opt.droprate)
# the reid network is trained on a single gpu because of the BatchNorm layer
self.netD_reid = self.netD_reid.to(self.device)
if self.isTrain:
use_sigmoid = opt.no_lsgan
self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, opt.init_gain,
self.gpu_ids)
self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, opt.init_gain,
self.gpu_ids)
if self.isTrain:
# GAN
self.fake_HR_A_pool = ImagePool(opt.pool_size)
# CycleGAN
self.fake_LR_A_pool = ImagePool(opt.pool_size) # fake_B_pool
self.fake_HR_B_pool = ImagePool(opt.pool_size) # fake_A_pool
# define loss functions
self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan).to(self.device)
self.criterionCycle = torch.nn.L1Loss()
self.criterionIdt = torch.nn.L1Loss()
self.criterionRec = torch.nn.L1Loss()
self.criterionReid = torch.nn.CrossEntropyLoss()
# initialize optimizers
self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()),
lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()),
lr=opt.lr, betas=(opt.beta1, 0.999))
# SR optimizer
# self.optimizers = []
self.optimizers.append(self.optimizer_G)
self.optimizers.append(self.optimizer_D)
# reid optimizer
ignored_params = list(map(id, self.netD_reid.model.fc.parameters())) + \
list(map(id, self.netD_reid.classifier.parameters()))
base_params = filter(lambda p: id(p) not in ignored_params, self.netD_reid.parameters())
self.optimizer_D_reid = torch.optim.SGD([
{'params': base_params, 'lr': 0.1 * opt.reid_lr},
{'params': self.netD_reid.model.fc.parameters(), 'lr': opt.reid_lr},
{'params': self.netD_reid.classifier.parameters(), 'lr': opt.reid_lr}
], weight_decay=5e-4, momentum=0.9, nesterov=True)
self.optimizer_reid.append(self.optimizer_D_reid)
def reset_model_status(self):
if self.opt.stage==1:
self.netG_A.train()
self.netG_B.train()
self.netD_A.train()
self.netD_B.train()
# for the BatchNorm
self.netD_reid.eval()
elif self.opt.stage==0 or self.opt.stage==2:
self.netG_A.train()
self.netG_B.train()
self.netD_A.train()
self.netD_B.train()
# for the BatchNorm
self.netD_reid.train()
def set_input(self, input):
self.real_HR_A = input['A'].to(self.device)
self.real_LR_B = input['B'].to(self.device)
# load the ground-truth low resolution A image
self.real_LR_A = input['GT_A'].to(self.device)
# load the ground-truth high resolution B image to test the SR quality
self.real_HR_B = input['GT_B'].to(self.device)
self.image_paths = input['A_paths']
# get the id label for person reid
self.A_label = input['A_label'].to(self.device)
self.B_label = input['B_label'].to(self.device)
def forward(self):
# GAN
self.fake_HR_A = self.netG_B(self.real_LR_A) # LR -> HR
# cycleGAN
# HR -> LR -> HR
self.fake_LR_A = self.netG_A(self.real_HR_A) # HR -> LR
self.rec_HR_A = self.netG_B(self.fake_LR_A) # LR -> HR
# LR -> HR -> LR
self.fake_HR_B = self.netG_B(self.real_LR_B) # LR -> HR
self.rec_LR_B = self.netG_A(self.fake_HR_B) # HR -> LR
# self.imags = torch.cat([self.real_HR_A, self.fake_HR_B], 0)
# self.labels = torch.cat([self.A_label, self.B_label], 0)
# all the HR images
self.imgs = torch.cat([self.real_HR_A, self.fake_HR_B, self.rec_HR_A, self.fake_HR_A], 0)
self.labels = torch.cat([self.A_label, self.B_label, self.A_label, self.A_label])
self.pred_imgs = self.netD_reid(self.imgs)
def psnr_eval(self):
# compute the PSNR for the test
self.bicubic_psnr = networks.compute_psnr(self.real_HR_A, self.real_LR_A)
self.psnr = networks.compute_psnr(self.real_HR_A, self.fake_HR_A)
def ssim_eval(self):
self.bicubic_ssim = networks.compute_ssim(self.real_HR_A, self.real_LR_A)
self.ssim = networks.compute_ssim(self.real_HR_A, self.fake_HR_A)
def backward_D_basic(self, netD, real, fake):
# Real
pred_real = netD(real)
loss_D_real = self.criterionGAN(pred_real, True)
# Fake
# fake.detach() the loss_D do not backward to the net_G
pred_fake = netD(fake.detach())
loss_D_fake = self.criterionGAN(pred_fake, False)
# Combined loss
loss_D = (loss_D_real + loss_D_fake) * 0.5
# backward
loss_D.backward()
return loss_D
def backward_D_A(self):
# real/fake LR image(G_A)
fake_LR_A = self.fake_LR_A_pool.query(self.fake_LR_A)
# # used for GAN
# self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_LR_A, fake_LR_A)
# # used for CycleGAN
# self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_LR_B, fake_LR_A)
real_LR = torch.cat([self.real_LR_A, self.real_LR_B], 0)
self.loss_D_A = self.backward_D_basic(self.netD_A, real_LR, fake_LR_A)
def backward_D_B(self):
fake_HR_A = self.fake_HR_A_pool.query(self.fake_HR_A) # GAN
fake_HR_B = self.fake_HR_B_pool.query(self.fake_HR_B)
# # used for GAN
# self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_HR_A, fake_HR_A)
# # used for CycleGAN
# self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_HR_A, fake_HR_B)
fake_HR = torch.cat([fake_HR_A, fake_HR_B], 0)
self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_HR_A, fake_HR)
def backward_G(self):
lambda_idt = self.opt.lambda_identity
lambda_A = self.opt.lambda_A
lambda_B = self.opt.lambda_B
lambda_rec = self.opt.lambda_rec
lambda_G = self.opt.lambda_G
# Identity loss
if lambda_idt > 0:
# G_A should be identity if real_B is fed.
self.idt_A = self.netG_A(self.real_LR_B)
self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_LR_B) * lambda_B * lambda_idt
# G_B should be identity if real_A is fed.
self.idt_B = self.netG_B(self.real_HR_A)
self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_HR_A) * lambda_A * lambda_idt
else:
self.loss_idt_A = 0
self.loss_idt_B = 0
# GAN loss D_A(G_A(A))
# self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_LR_A), True)
self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_LR_A), True) * lambda_G
# GAN loss D_B(G_B(B))
# used for GAN
# self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_HR_A), True)
# used for CycleGAN
# self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_HR_B), True)
fake_HR = torch.cat([self.fake_HR_A, self.fake_HR_B], 0)
# self.loss_G_B = self.criterionGAN(self.netD_B(fake_HR), True)
self.loss_G_B = self.criterionGAN(self.netD_B(fake_HR), True) * lambda_G
# Forward cycle loss
self.loss_cycle_A = self.criterionCycle(self.rec_HR_A, self.real_HR_A) * lambda_A
# Backward cycle loss
self.loss_cycle_B = self.criterionCycle(self.rec_LR_B, self.real_LR_B) * lambda_B
# combined loss
self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B
# reconstruct loss of low resolution fake_LR_A(G_A)
self.loss_rec_A = self.criterionRec(self.fake_LR_A, self.real_LR_A) * lambda_rec
# reconstruct loss of high resolution fake_HR_A(G_B)
self.loss_rec_B = self.criterionRec(self.fake_HR_A, self.real_HR_A) * lambda_rec
self.loss_rec = self.loss_rec_A + self.loss_rec_B
self.loss_G += self.loss_rec
_, pred_label_imgs = torch.max(self.pred_imgs, 1)
self.corrects += float(torch.sum(pred_label_imgs == self.labels))
self.loss_reid = self.criterionReid(self.pred_imgs, self.labels)
self.loss_G = self.loss_G + self.loss_reid
self.loss_G.backward()
def optimize_parameters(self):
# forward
self.forward()
if self.opt.stage == 1:
# G_A and G_B
# self.set_requires_grad([self.netD_A, self.netD_B], False)
self.set_requires_grad([self.netD_A, self.netD_B, self.netD_reid], False)
self.optimizer_G.zero_grad()
self.backward_G()
self.optimizer_G.step()
# D_A and D_B
self.set_requires_grad([self.netD_A, self.netD_B], True)
self.optimizer_D.zero_grad()
self.backward_D_A()
self.backward_D_B()
self.optimizer_D.step()
if self.opt.stage == 0 or self.opt.stage == 2:
# G_A and G_B
self.set_requires_grad([self.netD_A, self.netD_B], False)
self.optimizer_G.zero_grad()
self.optimizer_D_reid.zero_grad()
self.backward_G()
self.optimizer_G.step()
self.optimizer_D_reid.step()
# D_A and D_B
self.set_requires_grad([self.netD_A, self.netD_B], True)
self.optimizer_D.zero_grad()
self.backward_D_A()
self.backward_D_B()
self.optimizer_D.step()
``` |
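In the CycleGAN-style training scripts this model class plugs into, an epoch usually just alternates `set_input` and `optimize_parameters`. The sketch below is schematic: `create_model`, `setup`, `niter` and `niter_decay` come from the upstream pytorch-CycleGAN template and are assumptions here, while `reset_model_status` and the input keys are taken from the class above.
```python
# Schematic training loop (assumes `opt` and `dataset` are prepared by the usual
# option parser and data loader of the pytorch-CycleGAN codebase).
model = create_model(opt)        # assumed factory; returns ReidHybridCycleGANModel for the matching opt.model
model.setup(opt)                 # assumed base-class helper: load/print networks, build schedulers

for epoch in range(opt.niter + opt.niter_decay):   # epoch counts are assumptions
    model.reset_model_status()   # stage-dependent train/eval mode, as defined above
    for data in dataset:
        # expects keys 'A', 'B', 'GT_A', 'GT_B', 'A_paths', 'A_label', 'B_label'
        model.set_input(data)
        model.optimize_parameters()
```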