{
"source": "565353780/chart-plot",
"score": 2
}
#### File: chart-plot/BarMethod/bar_manager.py
```python
from BarMethod.bar import Bar
class BarManager(object):
def __init__(self):
self.bar_list = []
return
def reset(self):
self.bar_list.clear()
return True
```
#### File: chart-plot/LineMethod/line_manager.py
```python
from LineMethod.line import Line
class LineManager(object):
def __init__(self):
self.title = "Title"
self.x_label = "X Label"
self.y_label = "Y Label"
self.axis_font_size = 30
self.label_font_size = 20
self.tick_font_size = 20
# position can be : upper lower left right center auto
self.label_position = "lower right"
self.marker = ""
self.fill_alpha = 0.2
self.fig_size = [20, 15]
self.dpi = 80
self.show_line_label = True
self.show_confidence_interval_label = False
self.line_list = []
return
def reset(self):
self.line_list.clear()
return True
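# Create a Line with the next available index, apply the given style and confidence settings, and append it to line_list.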
def addLine(self,
line_type,
line_width,
label,
fit_polyline,
show_confidence_interval,
confidence_diff_min,
confidence_diff_max):
new_line = Line(len(self.line_list))
new_line.line_type = line_type
new_line.line_width = line_width
new_line.label = label
new_line.fit_polyline = fit_polyline
new_line.show_confidence_interval = show_confidence_interval
new_line.confidence_diff_min = confidence_diff_min
new_line.confidence_diff_max = confidence_diff_max
self.line_list.append(new_line)
return True
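# Union bounding box over all lines; all four values are None when no line has any points.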
def getBBoxXYXY(self):
x_min = None
y_min = None
x_max = None
y_max = None
if len(self.line_list) == 0:
return x_min, y_min, x_max, y_max
for line in self.line_list:
line_x_min , line_y_min, line_x_max, line_y_max = line.getBBoxXYXY()
if line_x_min is None:
continue
if x_min is None:
x_min = line_x_min
y_min = line_y_min
x_max = line_x_max
y_max = line_y_max
continue
x_min = min(x_min, line_x_min)
y_min = min(y_min, line_y_min)
x_max = max(x_max, line_x_max)
y_max = max(y_max, line_y_max)
return x_min, y_min, x_max, y_max
def getXYRange(self):
x_min, y_min, x_max, y_max = self.getBBoxXYXY()
if x_min is None:
return 0, 0
return x_max - x_min, y_max - y_min
def moveUp(self, move_dist):
if move_dist == 0:
return True
for line in self.line_list:
line.moveUp(move_dist)
return True
def moveDown(self, move_dist):
if move_dist == 0:
return True
for line in self.line_list:
line.moveDown(move_dist)
return True
def moveLeft(self, move_dist):
if move_dist == 0:
return True
for line in self.line_list:
line.moveLeft(move_dist)
return True
def moveRight(self, move_dist):
if move_dist == 0:
return True
for line in self.line_list:
line.moveRight(move_dist)
return True
def scaleX(self, scale):
x_min, _, _, _ = self.getBBoxXYXY()
if x_min is None:
return True
for line in self.line_list:
line.scaleX(x_min, scale)
return True
def scaleY(self, scale):
_, y_min, _, _ = self.getBBoxXYXY()
if y_min is None:
return True
for line in self.line_list:
line.scaleY(y_min, scale)
return True
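# Serialize the chart settings and every line (style, points, confidence intervals) into a plain dict ready for json.dumps.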
def getDataJson(self):
data_json = {}
data_json["title"] = self.title
data_json["x_label"] = self.x_label
data_json["y_label"] = self.y_label
data_json["axis_font_size"] = self.axis_font_size
data_json["label_font_size"] = self.label_font_size
data_json["tick_font_size"] = self.tick_font_size
data_json["label_position"] = self.label_position
data_json["marker"] = self.marker
data_json["fill_alpha"] = self.fill_alpha
data_json["fig_size"] = self.fig_size
data_json["dpi"] = self.dpi
data_json["show_line_label"] = self.show_line_label
data_json["show_confidence_interval_label"] = self.show_confidence_interval_label
data_json["Lines"] = {}
for line in self.line_list:
line_json = {}
line_json["line_type"] = line.line_type
line_json["line_color"] = line.line_color
line_json["line_width"] = line.line_width
line_json["label"] = line.label
line_json["point_list"] = line.getPointData()
line_json["fit_polyline"] = line.fit_polyline
line_json["show_confidence_interval"] = line.show_confidence_interval
line_json["confidence_diff_min"] = line.confidence_diff_min
line_json["confidence_diff_max"] = line.confidence_diff_max
line_json["confidence_interval_list"] = line.confidence_interval_list
data_json["Lines"][str(line.line_idx)] = line_json
return data_json
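# Inverse of getDataJson: reset the manager and rebuild every Line from the saved dict.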
def loadDataJson(self, data_json):
self.reset()
self.title = data_json["title"]
self.x_label = data_json["x_label"]
self.y_label = data_json["y_label"]
self.axis_font_size = data_json["axis_font_size"]
self.label_font_size = data_json["label_font_size"]
self.tick_font_size = data_json["tick_font_size"]
self.label_position = data_json["label_position"]
self.marker = data_json["marker"]
self.fill_alpha = data_json["fill_alpha"]
self.fig_size = data_json["fig_size"]
self.dpi = data_json["dpi"]
self.show_line_label = data_json["show_line_label"]
self.show_confidence_interval_label = data_json["show_confidence_interval_label"]
data_json["Lines"].keys()
for line_key in data_json["Lines"].keys():
new_line = Line(len(self.line_list))
line_json = data_json["Lines"][line_key]
new_line.line_type = line_json["line_type"]
new_line.line_color = line_json["line_color"]
new_line.line_width = line_json["line_width"]
new_line.label = line_json["label"]
new_line.loadPointData(line_json["point_list"])
new_line.fit_polyline = line_json["fit_polyline"]
new_line.show_confidence_interval = line_json["show_confidence_interval"]
new_line.confidence_diff_min = line_json["confidence_diff_min"]
new_line.confidence_diff_max = line_json["confidence_diff_max"]
new_line.confidence_interval_list = line_json["confidence_interval_list"]
self.line_list.append(new_line)
return True
```
#### File: 565353780/chart-plot/plot_markdown.py
```python
import json
from LineMethod.line_creater import LineCreater
def getData():
'''
chart_data_dict[metric_name]["x" or scene_level] -> value_list (one value per robot num)
'''
data_file_path = "/home/chli/chLi/coscan_data/different_robot_num.txt"
metric_name_list = ["TC", "DC", "D-LB", "T-LB"]
metric_col_idx_list = [5, 6, 11, 12]
data_list = []
with open(data_file_path, "r") as f:
for line in f.readlines():
line_split_list = line.replace(" ", "").split("\n")[0].split("|")[1:-1]
valid_line_data = \
[float(line_split_list[col_idx]) for col_idx in metric_col_idx_list]
data_list.append(valid_line_data)
chart_data_dict = {}
for metric_name in metric_name_list:
chart_data_dict[metric_name] = {}
chart_data_dict[metric_name]["x"] = [1.0 * i for i in range(3, 11)]
scene_level_list = ["Small", "Middle", "Large"]
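# data_list is indexed as 3 * data_idx + scene_level, i.e. rows come in blocks of three scene levels per robot count.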
for metric_idx in range(len(metric_name_list)):
for scene_level in range(3):
chart_y_list = []
for data_idx in range(8):
chart_y_list.append(
data_list[3 * data_idx + scene_level][metric_idx])
chart_data_dict[
metric_name_list[metric_idx]][
scene_level_list[scene_level]] = \
chart_y_list
return chart_data_dict
if __name__ == "__main__":
chart_data_dict = getData()
line_color_list = ["tomato", "teal", "orange"]
json_save_folder_path = "/home/chli/chLi/coscan_data/different_robot_num/"
fit_polyline = False
show_confidence_interval = True
confidence_diff_min = 10
confidence_diff_max = 20
show_line_label = True
show_confidence_interval_label = False
line_type = "-"
line_width = 2
for chart_name in chart_data_dict.keys():
if chart_name == "TC":
confidence_diff_min = 1
confidence_diff_max = 2
elif chart_name == "DC":
confidence_diff_min = 50
confidence_diff_max = 100
elif chart_name == "D-LB":
confidence_diff_min = 0.01
confidence_diff_max = 0.02
elif chart_name == "T-LB":
confidence_diff_min = 0.005
confidence_diff_max = 0.01
line_creater = LineCreater()
line_creater.setParam(fit_polyline,
show_confidence_interval,
confidence_diff_min,
confidence_diff_max)
line_creater.x_label = "#Robot"
line_creater.y_label = chart_name
line_creater.title = ""
line_creater.line_list = []
chart_data = chart_data_dict[chart_name]
chart_x_list = chart_data["x"]
line_color_idx = 0
for chart_y_name in chart_data.keys():
if chart_y_name == "x":
continue
chart_y_list = chart_data[chart_y_name]
new_line_idx = len(line_creater.line_list)
line_creater.addLine(line_type,
line_width,
chart_y_name,
fit_polyline,
show_confidence_interval,
confidence_diff_min,
confidence_diff_max)
line_creater.line_list[new_line_idx].line_color = \
line_color_list[line_color_idx]
line_color_idx += 1
for i in range(len(chart_x_list)):
line_creater.line_list[new_line_idx].addPoint(
chart_x_list[i], chart_y_list[i])
line_creater.line_list[new_line_idx].updateConfidenceInterval()
chart_save_path = json_save_folder_path + chart_name + "_data.json"
data_json = line_creater.getDataJson()
data_json_dump = json.dumps(data_json, indent=4)
with open(chart_save_path, "w") as f:
f.write(data_json_dump)
```
{
"source": "565353780/express-parcel-split",
"score": 3
}
#### File: q3d/bin/t.py
```python
import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtWebEngineWidgets import *
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.setWindowTitle('Example of loading an external web page')
self.setGeometry(5,30,1355,730)
self.browser=QWebEngineView()
# load the external web page
self.browser.load(QUrl('https://blog.csdn.net/jia666666'))
self.setCentralWidget(self.browser)
if __name__ == '__main__':
app=QApplication(sys.argv)
win=MainWindow()
win.show()
sys.exit(app.exec_())
```
{
"source": "565353780/filter-socket",
"score": 3
}
#### File: src/Python/datachange.py
```python
import os.path as osp
import os
import json as js
import numpy as np
from shutil import copyfile
from PIL import Image
import shutil
import xml.etree.ElementTree as ET
from os import getcwd
dataset_path = os.getcwd() + '/../Server_DataBase/train_dataset/darknet_dataset'
# my_labels = ['person', 'rock', 'broom']
# my_labels = ["Sprite", "Coca-Cola", "Fanta_Orange", "Fanta_Apple", "Gatorade_Orange", "Gatorade_Huang", "Gatorade_Blue",
# "Qin lemon water", "Beauty Juice Orange", "Honey Citron Tea", "Sour plum soup", "Unified Green Tea",
# "Unified Ice Black Tea", "Oolong Tea", "Lemon U grid", "Jasmine Honey Tea", "Jasmine Tea",
# "Master Green Tea", "Master Kong Ice Black Tea", "Ome Green Tea", "Osmanthus Sour Plum Soup Drink",
# "Unification of fresh oranges"]
sets = [('2012', 'train')]
class DataChange:
def __init__(self, rootpath, source_json_path, source_img_path, my_labels=['0'], use_my_labels=False,
need_to_change_image_size=False, image_width=300, image_height=300):
self.rootpath = rootpath
self.source_json_path = source_json_path
self.source_img_path = source_img_path
self.use_my_labels = use_my_labels
self.need_to_change_image_size = need_to_change_image_size
self.image_width = image_width
self.image_height = image_height
self.my_labels = my_labels
self.classes = my_labels.copy()
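# Convert a VOC box given as (xmin, xmax, ymin, ymax) into normalized YOLO (x_center, y_center, width, height).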
def convert(self, size, box):
dw = 1. / (size[0])
dh = 1. / (size[1])
x = (box[0] + box[1]) / 2.0 - 1
y = (box[2] + box[3]) / 2.0 - 1
w = box[1] - box[0]
h = box[3] - box[2]
x = x * dw
w = w * dw
y = y * dh
h = h * dh
return (x, y, w, h)
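# Read a VOC XML annotation and write the corresponding darknet label file (class id plus normalized box).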
def convert_annotation(self, year, image_id):
in_file = open(dataset_path + '/Annotations_/%s.xml' % (image_id))
out_file = open(dataset_path + '/labels/val2014/%s.txt' % (image_id), 'w')
tree = ET.parse(in_file)
root = tree.getroot()
size = root.find('size')
w = int(size.find('width').text)
h = int(size.find('height').text)
for obj in root.iter('object'):
difficult = obj.find('difficult').text
cls = obj.find('name').text
if cls not in self.classes or int(difficult) == 1:
continue
cls_id = self.classes.index(cls)
xmlbox = obj.find('bndbox')
b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text),
float(xmlbox.find('ymax').text))
bb = self.convert((w, h), b)
out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')
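# Parse one VOC XML file and append COCO-style "images" and "annotations" entries to the dataset dict.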
def readxml(self, dataset, xml, count, file_name):
tree = ET.parse(xml)
root = tree.getroot()
for child in root:
if child.tag == "size":
for s_ch in child:
if s_ch.tag == "width":
w = s_ch.text
else:
h = s_ch.text
elif child.tag == "object":
for s_ch in child:
if s_ch.tag == "bndbox":
for ss_ch in s_ch:
if ss_ch.tag == "xmin":
xmin = ss_ch.text
elif ss_ch.tag == "ymin":
ymin = ss_ch.text
elif ss_ch.tag == "xmax":
xmax = ss_ch.text
elif ss_ch.tag == "ymax":
ymax = ss_ch.text
else:
ca_name = s_ch.text
try:
dataset.setdefault("images", []).append({
'file_name': file_name,
'id': int(count),
'width': int(w),
'height': int(h)
})
dataset.setdefault("annotations", []).append({
'image_id': int(count),
'bbox': [int(xmin), int(ymin), int(xmax) - int(xmin), int(ymax) - int(ymin)],
'category_id': 6,
'area': int(w) * int(h),
'iscrowd': 0,
'id': int(count),
'segmentation': []
})
except:
pass
def produceImage(self, file_in, file_out):
image = Image.open(file_in)
resized_image = image.resize((self.image_width, self.image_height), Image.ANTIALIAS)
resized_image.save(file_out)
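# Match each annotation JSON with its image, copy/resize images into a VOC-style layout, and write the Annotations_, ImageSets and ground_truth files.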
def data_change(self):
xmlpath = self.rootpath + '/Annotations_'
imgpath = self.rootpath + '/JPEGImages'
imgsetpath = self.rootpath + '/ImageSets'
txtpath = imgsetpath + '/Main'
if not osp.exists(xmlpath):
os.makedirs(xmlpath)
if not osp.exists(imgpath):
os.makedirs(imgpath)
if not osp.exists(imgsetpath):
os.makedirs(imgsetpath)
if not osp.exists(txtpath):
os.makedirs(txtpath)
json_file_arr = os.listdir(self.source_json_path)
img_file_arr = os.listdir(self.source_img_path)
json_arr = []
img_arr = []
for name in json_file_arr:
if '.json' in name:
json_arr.append(name)
for name in img_file_arr:
if '.jpg' in name or '.png' in name:
img_arr.append(name)
fixed_file_arr = []
fixed_file_type = []
json_arr = sorted(json_arr)
img_arr = sorted(img_arr)
for i in range(len(json_arr)):
json = json_arr[i]
img = img_arr[i]
index_last_points_json = json.rfind(".")
index_last_points_img = img.rfind(".")
if json[:index_last_points_json] == img[:index_last_points_img]:
fixed_file_arr.append(json[0:index_last_points_json])
fixed_file_type.append('.' + img[index_last_points_img + 1:])
annotation_arr = np.array([])
for i in range(len(fixed_file_arr)):
if self.need_to_change_image_size:
self.produceImage(self.source_img_path + '/' + fixed_file_arr[i] + fixed_file_type[i],
imgpath + '/' + str(i) + fixed_file_type[i])
else:
copyfile(self.source_img_path + '/' + fixed_file_arr[i] + fixed_file_type[i],
imgpath + '/' + str(i) + fixed_file_type[i])
f = open(self.source_json_path + '/' + fixed_file_arr[i] + '.json', 'r', encoding='utf-8')
my_dic = js.load(f)
annotation_arr = np.append(annotation_arr, (fixed_file_arr[i], my_dic))
f.close()
print('\rRead dataset : ' + str(i + 1) + ' / ' + str(len(fixed_file_arr)), end='')
print('')
annotation_arr = annotation_arr.reshape(-1, 2)
f1 = open(txtpath + '/test.txt', 'w')
f2 = open(txtpath + '/trainval.txt', 'w')
f3 = open(txtpath + '/person_trainval.txt', 'w')
f4 = open(txtpath + '/train.txt', 'w')
f5 = open(txtpath + '/val.txt', 'w')
f6 = open(self.rootpath + '/ground_truth.txt', 'w')
if not os.path.exists(dataset_path + '/Main/'):
os.makedirs(dataset_path + '/Main/')
f7 = open(dataset_path + '/Main/' + 'train.txt', 'w')
for i in range(annotation_arr.shape[0]):
f1.write(annotation_arr[i][0] + '\n')
f2.write(annotation_arr[i][0] + '\n')
f3.write(annotation_arr[i][0] + ' 1\n')
f4.write(str(i) + '\n')
f5.write(annotation_arr[i][0] + '\n')
f6.write('\nGROUND TRUTH FOR: ' + annotation_arr[i][0] + '\n')
f7.write(str(i) + '.jpg\n')
f = open(xmlpath + '/' + str(i) + '.xml', 'w')
f.write('<annotation>\n')
f.write('\t<folder>VOC2007</folder>\n')
f.write('\t<filename>' + annotation_arr[i][0] + '</filename>\n')
f.write('\t<size>\n')
if self.need_to_change_image_size:
f.write('\t\t<width>%s</width>\n' % self.image_width)
f.write('\t\t<height>%s</height>\n' % self.image_height)
else:
f.write('\t\t<width>%s</width>\n' % annotation_arr[i][1]['Area']['shape'][0])
f.write('\t\t<height>%s</height>\n' % annotation_arr[i][1]['Area']['shape'][1])
f.write('\t\t<depth>3</depth>\n')
f.write('\t</size>\n')
f.write('\t<segmented>0</segmented>\n')
if len(annotation_arr[i][1]['Area']['labels']) > 0:
for j in range(len(annotation_arr[i][1]['Area']['labels'])):
f6.write('label: ')
f.write('\t<object>\n')
if self.use_my_labels:
f.write('\t\t<name>%s</name>\n' % self.my_labels[int(annotation_arr[i][1]['Area']['labels'][j][0])])
else:
f.write('\t\t<name>person</name>\n')
f.write('\t\t<pose>Unspecified</pose>\n')
f.write('\t\t<truncated>0</truncated>\n')
f.write('\t\t<difficult>0</difficult>\n')
f.write('\t\t<bndbox>\n')
if self.need_to_change_image_size:
f6.write('%d' % int(annotation_arr[i][1]['Area']['polygons'][j][0][0] * self.image_width /
annotation_arr[i][1]['Area']['shape'][0]))
f6.write(' || ')
f6.write('%d' % int(annotation_arr[i][1]['Area']['polygons'][j][0][1] * self.image_width /
annotation_arr[i][1]['Area']['shape'][1]))
f6.write(' || ')
f6.write('%d' % int(annotation_arr[i][1]['Area']['polygons'][j][2][0] * self.image_width /
annotation_arr[i][1]['Area']['shape'][0]))
f6.write(' || ')
f6.write('%d' % int(annotation_arr[i][1]['Area']['polygons'][j][2][1] * self.image_width /
annotation_arr[i][1]['Area']['shape'][1]))
f6.write(' || ')
f6.write(self.my_labels[int(annotation_arr[i][1]['Area']['labels'][j][0])])
f6.write('\n')
f.write('\t\t\t<xmin>%s</xmin>\n' % int(
annotation_arr[i][1]['Area']['polygons'][j][0][0] * self.image_width /
annotation_arr[i][1]['Area']['shape'][0]))
f.write('\t\t\t<ymin>%s</ymin>\n' % int(
annotation_arr[i][1]['Area']['polygons'][j][0][1] * self.image_height /
annotation_arr[i][1]['Area']['shape'][1]))
f.write('\t\t\t<xmax>%s</xmax>\n' % int(
annotation_arr[i][1]['Area']['polygons'][j][2][0] * self.image_width /
annotation_arr[i][1]['Area']['shape'][0]))
f.write('\t\t\t<ymax>%s</ymax>\n' % int(
annotation_arr[i][1]['Area']['polygons'][j][2][1] * self.image_height /
annotation_arr[i][1]['Area']['shape'][1]))
else:
f6.write('%d' % annotation_arr[i][1]['Area']['polygons'][j][0][0])
f6.write(' || ')
f6.write('%d' % annotation_arr[i][1]['Area']['polygons'][j][0][1])
f6.write(' || ')
f6.write('%d' % annotation_arr[i][1]['Area']['polygons'][j][2][0])
f6.write(' || ')
f6.write('%d' % annotation_arr[i][1]['Area']['polygons'][j][2][1])
f6.write(' || ')
f6.write('person\n')
f.write('\t\t\t<xmin>%s</xmin>\n' % annotation_arr[i][1]['Area']['polygons'][j][0][0])
f.write('\t\t\t<ymin>%s</ymin>\n' % annotation_arr[i][1]['Area']['polygons'][j][0][1])
f.write('\t\t\t<xmax>%s</xmax>\n' % annotation_arr[i][1]['Area']['polygons'][j][2][0])
f.write('\t\t\t<ymax>%s</ymax>\n' % annotation_arr[i][1]['Area']['polygons'][j][2][1])
f.write('\t\t</bndbox>\n')
f.write('\t</object>\n')
f.write('</annotation>')
f.close()
print('\rWrite dataset : ' + str(i + 1) + ' / ' + str(annotation_arr.shape[0]), end='')
print('')
f1.close()
f2.close()
f3.close()
f4.close()
f5.close()
f6.close()
f7.close()
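# Create (or overwrite) the darknet config files coco.data, coco.names and yolov3.cfg, patching class/filter counts for the current label set.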
def produce_darknet_cfg(self):
currentPath = os.getcwd()
with open('../Server_DataBase/train_dataset/darknet_dataset/coco.data', 'a+') as f:
pass
with open('../Server_DataBase/train_dataset/darknet_dataset/coco.data', 'w') as f:
f.write("classes= " + str(len(self.my_labels)) + "\n")
f.write("train = " + currentPath + "/../Server_DataBase/train_dataset/darknet_dataset/Main/train.txt\n")
f.write("valid = coco_testdev\n")
f.write("names = " + currentPath + "/../Server_DataBase/train_dataset/darknet_dataset/coco.names\n")
f.write("backup = " + currentPath + "/../darknet-master/backup\n")
f.write("eval=coco\n")
with open('../Server_DataBase/train_dataset/darknet_dataset/coco.names', 'a+') as f:
pass
with open('../Server_DataBase/train_dataset/darknet_dataset/coco.names', 'w') as f:
for label in self.my_labels:
f.write(label + '\n')
with open('../Server_DataBase/train_dataset/darknet_dataset/yolov3.cfg', 'a+') as f:
pass
with open('../Server_DataBase/train_dataset/darknet_dataset/yolov3.cfg', 'w') as f:
with open('../src/Python/backup/yolov3_backup.cfg', 'r') as f_backup:
meet_tag = False
for line in f_backup.readlines():
if meet_tag:
msg = line.split('=')[0]
if msg == 'filters':
f.write('filters=' + str(3 * (len(self.my_labels) + 5)) + '\n')
elif msg == 'classes':
f.write('classes=' + str(len(self.my_labels)) + '\n')
else:
f.write(line)
meet_tag = False
else:
f.write(line)
if line == '#change!\n':
meet_tag = True
def Main_DataChange():
if os.path.exists(dataset_path + '/annotations/'):
shutil.rmtree(dataset_path + '/annotations')
if os.path.exists(dataset_path + '/images/'):
shutil.rmtree(dataset_path + '/images')
if os.path.exists(dataset_path + '/labels/'):
shutil.rmtree(dataset_path + '/labels')
if os.path.exists(dataset_path + '/Main/'):
shutil.rmtree(dataset_path + '/Main')
dataset = {}
my_labels = []
with open('../Server_DataBase/train_dataset/darknet_dataset/my_labels.txt', 'r') as f:
for line in f.readlines():
my_labels.append(line.strip())
datachange = DataChange(dataset_path, dataset_path + '/enhance_dataset/', dataset_path + '/enhance_dataset/', my_labels, True)
datachange.produce_darknet_cfg()
datachange.data_change()
print('Change to VOC dataset succeed!')
wd = getcwd()
for year, image_set in sets:
if not os.path.exists(dataset_path + '/labels/val2014/'):
os.makedirs(dataset_path + '/labels/val2014/')
image_ids = open(dataset_path + '/ImageSets/Main/%s.txt' % (image_set)).read().strip().split()
# list_file = open(dataset_path + '/%s.txt' % (image_set), 'w')
img_num = len(image_ids)
converted_img_num = 0
for image_id in image_ids:
converted_img_num += 1
# list_file.write(dataset_path + '/JPEGImages/%s.jpg\n' % (image_id))
datachange.convert_annotation(year, image_id)
print('\rConvert dataset : ' + str(converted_img_num) + ' / ' + str(img_num), end='')
print('')
# list_file.close()
print('Create json dataset succeed!')
im_path = dataset_path + "/imgs/"
trainimg = dataset_path + "/images/val2014/"
if not os.path.exists(im_path):
os.makedirs(im_path)
if not os.path.exists(trainimg):
os.makedirs(trainimg)
if not os.path.exists(im_path + 'img0/'):
os.makedirs(im_path + 'img0/')
imgs_list = os.listdir(dataset_path + '/JPEGImages/')
copy_img_num = len(imgs_list)
copyed_num = 0
for file in imgs_list:
copyfile(dataset_path + '/JPEGImages/' + file, dataset_path + '/imgs/img0/' + file)
copyed_num += 1
print('\rCopy images : ' + str(copyed_num) + ' / ' + str(copy_img_num), end='')
print('')
xmls_list = os.listdir(dataset_path + '/Annotations_/')
copy_xml_num = len(xmls_list)
copyed_num = 0
for file in xmls_list:
copyfile(dataset_path + '/Annotations_/' + file, dataset_path + '/imgs/img0/' + file)
copyed_num += 1
print('\rCopy xmls : ' + str(copyed_num) + ' / ' + str(copy_xml_num), end='')
print('')
print('Prepare changing to COCO dataset succeed!')
cmax = 0
dirpath = os.listdir(im_path)
for imgdir in dirpath:
f1 = os.listdir(trainimg)
for file in f1:
cmax = max(cmax, int(file.split(".")[0]))
count = 1
total_img_num = 0
file_list = os.listdir(im_path + imgdir)
for file in os.listdir(im_path + imgdir):
if file[-3:] == 'jpg':
total_img_num += 1
for file in file_list:
if file.split(".")[1] == "jpg":
oldname = os.path.join(im_path + imgdir, file)
jpgname = os.path.join(trainimg, file)
shutil.copyfile(oldname, jpgname)
datachange.readxml(dataset, os.path.join(im_path + imgdir, file.split(".")[0] + ".xml"), count + cmax, file)
print('\rChange to COCO dataset : ' + str(count) + ' / ' + str(total_img_num), end='')
count += 1
print('')
print('Change to COCO dataset succeed!')
for i in range(1, 81):
dataset.setdefault("categories", []).append({
'id': i,
'name': 1,
'supercategory': 'No'
})
folder = os.path.join(dataset_path + '/annotations/')
if not os.path.exists(folder):
os.makedirs(folder)
json_name = os.path.join(folder + 'instances_minival2014.json')
with open(json_name, 'w') as f:
js.dump(dataset, f)
shutil.rmtree(dataset_path + '/Annotations_')
shutil.rmtree(dataset_path + '/ImageSets')
shutil.rmtree(dataset_path + '/JPEGImages')
shutil.rmtree(dataset_path + '/imgs')
os.remove(dataset_path + '/ground_truth.txt')
path_pictures = dataset_path + '/images/val2014/'
file_name = os.listdir(path_pictures)
txt_path = dataset_path + '/Main/train.txt'
with open(txt_path, 'w') as f:
for i in file_name:
f.write(path_pictures + i + '\n')
return
if __name__ == "__main__":
Main_DataChange()
```
#### File: Python/LapNet/test.py
```python
import torch
import numpy as np
import time
import os
# import argparse
import cv2
from LapNet import LAPNet
from loss import DiscriminativeLoss
from create_dataset import createDataset
from logger import Logger
from torch.nn import DataParallel
from collections import OrderedDict
from torch.nn.parameter import Parameter
import json
# from flask import Flask, request, Response
# app = Flask(__name__)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
test_mode = True
class LapNet_Test:
def __init__(self):
# torch.cuda.set_device(args.gpu_idx)
torch.cuda.set_device(0)
# self.INPUT_CHANNELS = 3
# self.OUTPUT_CHANNELS = 2
# self.LEARNING_RATE = args.lr #1e-5
# self.BATCH_SIZE = args.batch_size #20
# self.NUM_EPOCHS = args.epoch #100
# self.LOG_INTERVAL = 20
# self.INS_CH = 32
# self.SIZE = [args.img_size[0], args.img_size[1]] #[224, 224]
# self.NUM_WORKERS = args.num_workers #20
self.INPUT_CHANNELS = 3
self.OUTPUT_CHANNELS = 2
self.LEARNING_RATE = 3e-4
self.BATCH_SIZE = 32
self.NUM_EPOCHS = 10000000000000
self.LOG_INTERVAL = 20
self.INS_CH = 32
self.SIZE = [1024,512]
self.NUM_WORKERS = 32
self.root_path = os.getcwd() + '/../src/Python/LapNet'
# dataset_path = args.dataset_path
self.model = LAPNet(input_ch=self.INPUT_CHANNELS, output_ch=self.OUTPUT_CHANNELS,internal_ch = 8).cuda()
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.LEARNING_RATE, betas=(0.9, 0.99), amsgrad=True)
self.start_epoch = -1
# chkpt_filename = "LapNet_chkpt_default"+"_GPU"+str(args.gpu_idx)+".pth"
current_file_list = os.listdir(self.root_path + '/trained_model')
current_epoch_num = -1
for file_name in current_file_list:
print("filename:",file_name)
self.output(["filename:",file_name])
if 'LapNet_chkpt_better_epoch' in file_name and "_GPU" + str(0) + ".pth" in file_name:
temp_epoch_num = int(file_name.split('_')[3].split('h')[1])
if temp_epoch_num > current_epoch_num:
current_epoch_num = temp_epoch_num
# chkpt_filename = self.root_path + '/trained_model/'+"LapNet_chkpt_better_epoch" + str(current_epoch_num) + "_GPU" + str(args.gpu_idx) + ".pth"
chkpt_filename = self.root_path + '/trained_model/'+"LapNet_chkpt_better_epoch" + str(current_epoch_num) + "_GPU" + str(0) + ".pth"
if not os.path.exists(self.root_path + '/trained_model'):
os.mkdir(self.root_path + '/trained_model')
if os.path.isfile(chkpt_filename):
checkpoint = torch.load(chkpt_filename)
self.start_epoch = checkpoint['epoch']
print("Found Checkpoint file",chkpt_filename,".")
self.output(["Found Checkpoint file",chkpt_filename,"."])
print("The checkpoint was saved at epoch",checkpoint['epoch'],".")
self.output(["The checkpoint was saved at epoch",str(checkpoint['epoch']),"."])
print("Training stats is reset form epoch",self.start_epoch)
self.output(["Training stats is reset form epoch",str(self.start_epoch)])
# if(args.optimizer_reset != 1):
if(100 != 1):
self.optimizer.load_state_dict(checkpoint['optimizer'])
else:
print("Optimizer State Reset.")
self.output(["Optimizer State Reset."])
self.model.load_state_dict(checkpoint['net'])
self.load_state_dict(self.model, self.state_dict(self.model))
# scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[20,30,400,1000,6000,7000,8000], gamma=0.9)
def output(self, string):
with open(self.root_path + '/temp_output.txt', 'a+') as f:
for str_ in string:
f.write(str_)
f.write('\n')
return
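# Recursively collect parameters and buffers into an OrderedDict, unwrapping DataParallel if present.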
def state_dict(self, model, destination=None, prefix='', keep_vars=False):
own_state = model.module if isinstance(model, torch.nn.DataParallel) \
else model
if destination is None:
destination = OrderedDict()
for name, param in own_state._parameters.items():
if param is not None:
destination[prefix + name] = param if keep_vars else param.data
for name, buf in own_state._buffers.items():
if buf is not None:
destination[prefix + name] = buf
for name, module in own_state._modules.items():
if module is not None:
self.state_dict(module, destination, prefix + name + '.', keep_vars=keep_vars)
return destination
def load_state_dict(self, model, state_dict, strict=True):
own_state = model.module.state_dict() if isinstance(model, torch.nn.DataParallel) \
else model.state_dict()
for name, param in state_dict.items():
if name in own_state:
if isinstance(param, Parameter):
# backwards compatibility for serialized parameters
param = param.data
try:
own_state[name].copy_(param)
except Exception:
raise RuntimeError('While copying the parameter named {}, '
'whose dimensions in the model are {} and '
'whose dimensions in the checkpoint are {}.'
.format(name, own_state[name].size(), param.size()))
elif strict:
raise KeyError('unexpected key "{}" in state_dict'
.format(name))
if strict:
missing = set(own_state.keys()) - set(state_dict.keys())
if len(missing) > 0:
raise KeyError('missing keys in state_dict: "{}"'.format(missing))
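# In test_mode, run inference over a fixed image folder and save colorized results; otherwise poll the message_set folder for images and write the output next to them.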
def test(self):
self.model.eval()
img_in_path = os.getcwd() + "/../Server_DataBase/message_set/lapnet_eval.jpg"
img_out_path = os.getcwd() + "/../Server_DataBase/message_set/lapnet_eval_out.jpg"
img_send_finished = os.getcwd() + "/../Server_DataBase/message_set/lapnet_eval_finish.txt"
test_stopped = os.getcwd() + "/../Server_DataBase/message_set/lapnet_test_stopped.txt"
if test_mode:
img_list = os.listdir("Z:/public/MaskStation/2c_allImage/MaskSpace/LapNet/train_dataset/")
for img_name in img_list:
img_idx = int(img_name.split(".")[0])
if img_idx < 61:
continue
if ".jpg" in img_name:
img_in_path = "Z:/public/MaskStation/2c_allImage/MaskSpace/LapNet/train_dataset/" + img_name
img_in = cv2.imread(img_in_path)
img_resized = cv2.resize(img_in, (1024, 512))
train_dataset = createDataset(img_in_path, size=self.SIZE)
train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=24, pin_memory=True,
shuffle=False, num_workers=0)
img = list(enumerate(train_dataloader))[0][1]
img_tensor = torch.tensor(img).cuda()
# Predictions
sem_pred = self.model(img_tensor)
# sem_pred=torch.floor(sem_pred)
seg_map = torch.squeeze(sem_pred, 0).cpu().detach().numpy()
seg_show = seg_map[1]
(h,w)=seg_show.shape
# result_img1 = np.zeros((h, w, 3))
# for i in range(result_img1.shape[0]):
# for j in range(result_img1.shape[1]):
# if seg_show[i][j] > -2:
# result_img1[i][j] = [255, 255, 255]
_, seg_show2 = cv2.threshold(seg_show + 1, 0, 0, cv2.THRESH_TOZERO)
seg_show2 = cv2.normalize(seg_show2, seg_show2, 0, 1, cv2.NORM_MINMAX)
seg_show2 = cv2.convertScaleAbs(seg_show2, seg_show2, 255)
result_img = cv2.applyColorMap(seg_show2, cv2.COLORMAP_MAGMA)
# cv2.imshow(img_name, img_resized)
# cv2.imshow("result_img1", result_img1)
# cv2.imshow("result_img", result_img)
# cv2.waitKey(0)
if not os.path.exists("Z:/public/MaskStation/2c_allImage/MaskSpace/LapNet/output/"):
os.makedirs("Z:/public/MaskStation/2c_allImage/MaskSpace/LapNet/output/")
cv2.imwrite("Z:/public/MaskStation/2c_allImage/MaskSpace/LapNet/output/" + img_name, result_img)
return
while True:
if os.path.exists(test_stopped):
os.remove(test_stopped)
return
if not os.path.exists(img_send_finished):
continue
os.remove(img_send_finished)
train_dataset = createDataset(img_in_path, size=self.SIZE)
train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=24, pin_memory=True,
shuffle=False, num_workers=0)
img = list(enumerate(train_dataloader))[0][1]
img_tensor = torch.tensor(img).cuda()
# Predictions
sem_pred = self.model(img_tensor)
# sem_pred=torch.floor(sem_pred)
seg_map = torch.squeeze(sem_pred, 0).cpu().detach().numpy()
seg_show = seg_map[1]
# (h,w)=seg_show.shape
# result_img1 = np.zeros((h, w))
# for i in range(result_img1.shape[0]):
# for j in range(result_img1.shape[1]):
# if seg_show[i][j] > -4:
# result_img1[i][j] = 255
# cv2.imshow("source output to 2value-map", result_img1)
# cv2.waitKey(0)
_, seg_show2 = cv2.threshold(seg_show + 1, 0, 0, cv2.THRESH_TOZERO)
seg_show2 = cv2.normalize(seg_show2, seg_show2, 0, 1, cv2.NORM_MINMAX)
seg_show2 = cv2.convertScaleAbs(seg_show2, seg_show2, 255)
result_img = cv2.applyColorMap(seg_show2, cv2.COLORMAP_MAGMA)
cv2.imwrite(img_out_path, result_img)
os.remove(img_in_path)
def predict(self, image_path_input):
image_path_json = json.loads(image_path_input)
image_path = image_path_json["ImagePath"]
if ".jpg" not in image_path:
return image_path
self.model.eval()
img_out_path = image_path.split(".jpg")[0] + "_out.jpg"
train_dataset = createDataset(image_path, size=self.SIZE)
train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=24, pin_memory=True,
shuffle=False, num_workers=0)
img = list(enumerate(train_dataloader))[0][1]
img_tensor = torch.tensor(img).cuda()
sem_pred = self.model(img_tensor)
seg_map = torch.squeeze(sem_pred, 0).cpu().detach().numpy()
seg_show = seg_map[1]
_, seg_show2 = cv2.threshold(seg_show + 1, 0, 0, cv2.THRESH_TOZERO)
seg_show2 = cv2.normalize(seg_show2, seg_show2, 0, 1, cv2.NORM_MINMAX)
seg_show2 = cv2.convertScaleAbs(seg_show2, seg_show2, 255)
result_img = cv2.applyColorMap(seg_show2, cv2.COLORMAP_MAGMA)
cv2.imwrite(img_out_path, result_img)
image_path_output = {}
image_path_output["OutputImagePath"] = img_out_path
return image_path_output
# if test_mode:
# os.chdir("D:/chLi/Project/ABACI/FilterSocket/bin_win/")
# with open(os.getcwd() + '/../src/Python/LapNet/temp_train_mode.txt', 'w') as f:
# f.write('True')
# print('in ok')
# lapnet_test = LapNet_Test()
# lapnet_test.model.eval()
# print('init ok')
# @app.route("/predict", methods=["POST"])
# def predict():
# data= request.get_data()
# data_json = json.loads(data)
# image_path = data_json["ImagePath"]
# if ".jpg" not in image_path:
# return image_path
# img_out_path = image_path.split(".jpg")[0] + "_out.jpg"
# train_dataset = createDataset(image_path, size=lapnet_test.SIZE)
# train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=24, pin_memory=True,
# shuffle=False, num_workers=0)
# img = list(enumerate(train_dataloader))[0][1]
# img_tensor = torch.tensor(img).cuda()
# sem_pred = lapnet_test.model(img_tensor)
# seg_map = torch.squeeze(sem_pred, 0).cpu().detach().numpy()
# seg_show = seg_map[1]
# _, seg_show2 = cv2.threshold(seg_show + 1, 0, 0, cv2.THRESH_TOZERO)
# seg_show2 = cv2.normalize(seg_show2, seg_show2, 0, 1, cv2.NORM_MINMAX)
# seg_show2 = cv2.convertScaleAbs(seg_show2, seg_show2, 255)
# result_img = cv2.applyColorMap(seg_show2, cv2.COLORMAP_MAGMA)
# cv2.imwrite(img_out_path, result_img)
# image_path_output = {}
# image_path_output["OutputImagePath"] = img_out_path
# return image_path_output
def Main_LapNet_Test():
# logger = Logger('./logslite'+str(args.gpu_idx))
# parser = argparse.ArgumentParser(description="Train model")
# parser.add_argument('--dataset-path', default='Dataset/train_dataset')
# parser.add_argument('--lr', type=float, default=3e-4, help='learning rate')
# parser.add_argument('--batch-size', type=int, default=32, help='batch size')
# parser.add_argument('--img-size', type=int, nargs='+', default=[1024,512], help='image resolution: [width height]')
# parser.add_argument('--epoch', type=int, default=10000000000000)
# parser.add_argument('--gpu-idx',type = int,default= 0, help='using gpu(idx)')
# parser.add_argument('--optimizer-reset', type=int, default=100)
# parser.add_argument('--num-workers', type=int, default=1)
# args = parser.parse_args()
# if os.getcwd()[-6:] != "LapNet":
# os.chdir(os.getcwd() + '/../src/Python/LapNet')
if test_mode:
os.chdir("D:/chLi/Project/ABACI/FilterSocket/bin_win/")
with open(os.getcwd() + '/../src/Python/LapNet/temp_train_mode.txt', 'w') as f:
f.write('True')
print('in ok')
lapnet_test = LapNet_Test()
print('init ok')
# lapnet_test.predict("Z:/public/MaskStation/2c_allImage/MaskSpace/LapNet/train_line/135.jpg")
# return
lapnet_test.test()
print('test ok')
return
if __name__ == "__main__":
Main_LapNet_Test()
# app.run(host="0.0.0.0", port="9527",debug=True)
```
#### File: ResNet/resnet-test/model.py
```python
import torch.nn as nn
import torch.nn.functional as F
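# ResNet-34 head: reuse the convolutional layers of a torchvision ResNet and attach a fresh num_classes-way fully connected layer.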
class ResNet34(nn.Module):
def __init__(self,model,num_classes):
super(ResNet34,self).__init__()
self.conv1= model.conv1
self.bn1 = model.bn1
self.relu = model.relu
self.maxpool= model.maxpool
self.layer1 = model.layer1
self.layer2 = model.layer2
self.layer3 = model.layer3
self.layer4 = model.layer4
self.fc = nn.Linear(512,num_classes)
def forward(self,x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = F.avg_pool2d(x, kernel_size=x.shape[2])
x = x.view(x.size(0),-1)
x = self.fc(x)
return x
```
#### File: ResNet/resnet-test/run_training.py
```python
import requests
import json
import numpy as np
from trainmodel import Learning
import configparser
import datetime
def update_config(param, model_name):
config = configparser.ConfigParser()
config.read('./configs/default.ini')
configs = config["default"]
new_configs = configparser.ConfigParser()
new_configs["default"] = configs
new_configs["appointed"] = param
with open('./configs/{}.ini'.format(model_name), 'w') as f:
new_configs.write(f)
for key, value in param.items():
#print("key,value",key,value)
configs[key] = value
return configs
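# Train for num_epochs, track the best validation accuracy, and evaluate on the test set whenever validation improves.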
def training(configs, model_name, annotation_file):
aoi = Learning(configs, model_name, annotation_file)
num_epochs = int(configs["num_epochs"])
best_val_acc = 0.0
for epoch in range(num_epochs):
train_loss, train_acc = aoi.train()
valid_loss, valid_acc, valid_conf_maxtrix, conf_maxtrix_detail, set_info = aoi.valid()
# response
result_json={
"time":datetime.datetime.now().strftime('%Y/%m/%d-%H:%M:%S'),
"train_loss":{"epoch":epoch, "loss":train_loss},
"train_accuracy":{"epoch":epoch, "accuracy":train_acc},
"validation_loss":{"epoch":epoch, "loss":valid_loss},
"validation_accuracy":{"epoch":epoch, "accuracy":valid_acc}
}
print(result_json)
if valid_acc >= best_val_acc:
best_val_acc = valid_acc
best_valid_loss, best_valid_acc, best_valid_conf_maxtrix, best_valid_conf_maxtrix_detail, valid_set_info = valid_loss, valid_acc, valid_conf_maxtrix, conf_maxtrix_detail, set_info
best_test_loss, best_test_acc, best_test_conf_maxtrix, best_test_conf_maxtrix_detail, test_set_info= aoi.test()
# response
result={
"time":datetime.datetime.now().strftime('%Y/%m/%d-%H:%M:%S'),
"success":True,
"confusion_matrix_validation":best_valid_conf_maxtrix.tolist(),
"confusion_matrix_test":best_test_conf_maxtrix.tolist(),
"confusion_matrix_validation_detail":best_valid_conf_maxtrix_detail,
"confusion_matrix_yesy_detail":best_test_conf_maxtrix_detail,
"validation_set_info":valid_set_info,
"test_set_info":test_set_info
}
print(result)
#result_json=json.dumps(result)
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, np.int64):
return str(obj)
else:
return super(NpEncoder, self).default(obj)
if __name__ == "__main__":
model_name="SandResNet"
param={}
# param["num_epochs"]=100
f=open("./train.json",'r')
annotation_file = json.loads(f.read())
print(annotation_file)
# configs = update_config(param,model_name)
# training(configs, model_name, annotation_file)
```
#### File: Python/ResNet/resnet_train_2.py
```python
import os
import torch.utils.data as data
import torch
import torch.optim as optim
import torch.nn as nn
from torch.optim import lr_scheduler
from torchvision import datasets, models, transforms
from PIL import Image
import time
import copy
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
def default_loader(path):
with open(path, 'rb') as f:
with Image.open(f) as img:
return img.convert('RGB')
class CustomImageLoader(data.Dataset): # custom dataset class
## custom data loading
def __init__(self, RootImageFolderPath, dataset = '', data_transforms=None, loader = default_loader):
im_list = []
im_labels = []
if RootImageFolderPath[-1] != "/":
RootImageFolderPath += "/"
ImageFolderPath_list = os.listdir(RootImageFolderPath)
for ImageFolderPath in ImageFolderPath_list:
currentImageFolderPath = RootImageFolderPath + ImageFolderPath
if os.path.isdir(currentImageFolderPath):
ImagePath_list = os.listdir(currentImageFolderPath)
for ImagePath in ImagePath_list:
if "train" in ImagePath:
continue
if ImagePath.split(".")[1] == "jpg":
currentImagePath = currentImageFolderPath + "/" + ImagePath
im_list.append(currentImagePath)
im_labels.append(int(ImageFolderPath))
self.imgs = im_list
self.labels = im_labels
self.data_tranforms = data_transforms
self.loader = loader
self.dataset = dataset
def __len__(self):
return len(self.imgs)
def __getitem__(self, item):
img_name = self.imgs[item]
label = self.labels[item]
img = self.loader(img_name)
if self.data_tranforms is not None:
try:
img = self.data_tranforms[self.dataset](img)
except:
print("Cannot transform image: {}".format(img_name))
return img, label
data_tranforms={
'Train':transforms.Compose([
# transforms.RandomResizedCrop(224), # random crop with varying size and aspect ratio, then resize to the target size
transforms.RandomHorizontalFlip(0.5),
transforms.RandomVerticalFlip(0.5),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225]) # per-channel mean and std used for normalization
]),
'Test':transforms.Compose([
# transforms.Resize(256), # resize
# transforms.CenterCrop(224), # center crop
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])
])
}
def train_model(model, crtiation, optimizer, schedular, num_epochs, batch_size, device):
image_datasets = {x : CustomImageLoader(os.getcwd() + "/resnet-test/sand-pic/", # default root is the working directory; use full paths in config files
data_transforms=data_tranforms,
dataset=x) for x in ['Train', 'Test']
}
dataloders = {x: torch.utils.data.DataLoader(image_datasets[x],
batch_size=batch_size,
shuffle=True) for x in ['Train', 'Test']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['Train', 'Test']} # dataset sizes
begin_time = time.time()
best_weights = copy.deepcopy(model.state_dict())#copy the weights from the model
best_acc = 0.0
arr_acc = [] # accuracy history, used for plotting
for epoch in range(num_epochs):
print("-*-" * 20)
item_acc = []
for phase in ['Train', 'Test']:
if phase=='Train':
schedular.step()
model.train()
else:
model.eval()
running_loss = 0.0
running_acc = 0.0
for images, labels in dataloders[phase]:
images = images.to(device)
labels = labels.to(device)
optimizer.zero_grad()
with torch.set_grad_enabled(phase=='Train'):
opt = model(images.cuda())
# opt = model(images)
_,pred = torch.max(opt,1)
labels = labels.cuda()
loss = crtiation(opt, labels)
if phase=='Train':
loss.backward()
optimizer.step()
running_loss += loss.item()*images.size(0)
running_acc += torch.sum(pred==labels)
epoch_loss = running_loss/dataset_sizes[phase]
epoch_acc = running_acc.double()/dataset_sizes[phase]
print('epoch={}, Phase={}, Loss={:.4f}, ACC:{:.4f}'.format(epoch, phase,
epoch_loss, epoch_acc))
item_acc.append(epoch_acc)
if phase == 'Test' and epoch_acc>best_acc:
# Upgrade the weights
best_acc=epoch_acc
best_weights = copy.deepcopy(model.state_dict())
torch.save(model.state_dict(), os.getcwd() + "/resnet-test/models/chLi-ResNet-best-model.pkl")
arr_acc.append(item_acc)
time_elapsed = time.time() - begin_time
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
print('Best Val ACC: {:}'.format(best_acc))
model.load_state_dict(best_weights) # restore the best weights
return model,arr_acc
if __name__ == '__main__':
device = torch.device('cuda:0') # use the GPU by default
NUMCLASS = 7 # number of classes
model_ft = models.resnet50(pretrained=False)
num_fits = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_fits, NUMCLASS) # replace the final fully connected layer
model_ft = model_ft.to(device)
model_ft.cuda()
# try:
# model_ft.load_state_dict(torch.load(os.getcwd() + "/resnet-test/models/chLi-ResNet-best-model.pkl"))
# except:
# print("load model failed.")
criterion = nn.CrossEntropyLoss()
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=10, gamma=0.1)
model_ft,arr_acc = train_model(model=model_ft, \
crtiation=criterion, \
optimizer=optimizer_ft, \
schedular=exp_lr_scheduler, \
num_epochs=1000, \
batch_size=1, \
device=device)
# ll = np.array(arr_acc)
# plt.plot(ll[:,0])
# plt.plot(ll[:,1])
# plt.show()
```
{
"source": "565353780/gr-net",
"score": 2
}
#### File: GRNetDetector/core/train.py
```python
import logging
import os
import torch
import GRNetDetector.utils.data_loaders
import GRNetDetector.utils.helpers
from GRNetDetector import utils  # expose the subpackage under the short name used below
from datetime import datetime
from time import time
from tensorboardX import SummaryWriter
from GRNetDetector.core.test import test_net
from GRNetDetector.extensions.chamfer_dist import ChamferDistance
from GRNetDetector.extensions.gridding_loss import GriddingLoss
from GRNetDetector.models.grnet import GRNet
from GRNetDetector.utils.average_meter import AverageMeter
from GRNetDetector.utils.metrics import Metrics
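# Full GRNet training entry point: build the data loaders, optimizer and LR schedule, train with Chamfer distance losses, then validate and checkpoint every epoch.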
def train_net(cfg):
# Enable the inbuilt cudnn auto-tuner to find the best algorithm to use
torch.backends.cudnn.benchmark = True
# Set up data loader
train_dataset_loader = utils.data_loaders.DATASET_LOADER_MAPPING[cfg.DATASET.TRAIN_DATASET](cfg)
test_dataset_loader = utils.data_loaders.DATASET_LOADER_MAPPING[cfg.DATASET.TEST_DATASET](cfg)
train_data_loader = torch.utils.data.DataLoader(dataset=train_dataset_loader.get_dataset(
utils.data_loaders.DatasetSubset.TRAIN),
batch_size=cfg.TRAIN.BATCH_SIZE,
num_workers=cfg.CONST.NUM_WORKERS,
collate_fn=utils.data_loaders.collate_fn,
pin_memory=True,
shuffle=True,
drop_last=True)
val_data_loader = torch.utils.data.DataLoader(dataset=test_dataset_loader.get_dataset(
utils.data_loaders.DatasetSubset.VAL),
batch_size=1,
num_workers=cfg.CONST.NUM_WORKERS,
collate_fn=utils.data_loaders.collate_fn,
pin_memory=True,
shuffle=False)
# Set up folders for logs and checkpoints
output_dir = os.path.join(cfg.DIR.OUT_PATH, '%s', datetime.now().isoformat())
cfg.DIR.CHECKPOINTS = output_dir % 'checkpoints'
cfg.DIR.LOGS = output_dir % 'logs'
if not os.path.exists(cfg.DIR.CHECKPOINTS):
os.makedirs(cfg.DIR.CHECKPOINTS)
# Create tensorboard writers
train_writer = SummaryWriter(os.path.join(cfg.DIR.LOGS, 'train'))
val_writer = SummaryWriter(os.path.join(cfg.DIR.LOGS, 'test'))
# Create the networks
grnet = GRNet(cfg)
grnet.apply(utils.helpers.init_weights)
logging.debug('Parameters in GRNet: %d.' % utils.helpers.count_parameters(grnet))
# Move the network to GPU if possible
if torch.cuda.is_available():
grnet = torch.nn.DataParallel(grnet).cuda()
# Create the optimizers
grnet_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, grnet.parameters()),
lr=cfg.TRAIN.LEARNING_RATE,
weight_decay=cfg.TRAIN.WEIGHT_DECAY,
betas=cfg.TRAIN.BETAS)
grnet_lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(grnet_optimizer,
milestones=cfg.TRAIN.LR_MILESTONES,
gamma=cfg.TRAIN.GAMMA)
# Set up loss functions
chamfer_dist = ChamferDistance()
gridding_loss = GriddingLoss( # lgtm [py/unused-local-variable]
scales=cfg.NETWORK.GRIDDING_LOSS_SCALES,
alphas=cfg.NETWORK.GRIDDING_LOSS_ALPHAS)
# Load pretrained model if exists
init_epoch = 0
best_metrics = None
if 'WEIGHTS' in cfg.CONST:
logging.info('Recovering from %s ...' % (cfg.CONST.WEIGHTS))
checkpoint = torch.load(cfg.CONST.WEIGHTS)
best_metrics = Metrics(cfg.TEST.METRIC_NAME, checkpoint['best_metrics'])
grnet.load_state_dict(checkpoint['grnet'])
logging.info('Recover complete. Current epoch = #%d; best metrics = %s.' % (init_epoch, best_metrics))
# Training/Testing the network
for epoch_idx in range(init_epoch + 1, cfg.TRAIN.N_EPOCHS + 1):
epoch_start_time = time()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter(['SparseLoss', 'DenseLoss'])
grnet.train()
batch_end_time = time()
n_batches = len(train_data_loader)
for batch_idx, (taxonomy_ids, model_ids, data) in enumerate(train_data_loader):
data_time.update(time() - batch_end_time)
for k, v in data.items():
data[k] = utils.helpers.var_or_cuda(v)
sparse_ptcloud, dense_ptcloud = grnet(data)
sparse_loss = chamfer_dist(sparse_ptcloud, data['gtcloud'])
dense_loss = chamfer_dist(dense_ptcloud, data['gtcloud'])
_loss = sparse_loss + dense_loss
losses.update([sparse_loss.item() * 1000, dense_loss.item() * 1000])
grnet.zero_grad()
_loss.backward()
grnet_optimizer.step()
n_itr = (epoch_idx - 1) * n_batches + batch_idx
train_writer.add_scalar('Loss/Batch/Sparse', sparse_loss.item() * 1000, n_itr)
train_writer.add_scalar('Loss/Batch/Dense', dense_loss.item() * 1000, n_itr)
batch_time.update(time() - batch_end_time)
batch_end_time = time()
logging.info('[Epoch %d/%d][Batch %d/%d] BatchTime = %.3f (s) DataTime = %.3f (s) Losses = %s' %
(epoch_idx, cfg.TRAIN.N_EPOCHS, batch_idx + 1, n_batches, batch_time.val(), data_time.val(),
['%.4f' % l for l in losses.val()]))
grnet_lr_scheduler.step()
epoch_end_time = time()
train_writer.add_scalar('Loss/Epoch/Sparse', losses.avg(0), epoch_idx)
train_writer.add_scalar('Loss/Epoch/Dense', losses.avg(1), epoch_idx)
logging.info(
'[Epoch %d/%d] EpochTime = %.3f (s) Losses = %s' %
(epoch_idx, cfg.TRAIN.N_EPOCHS, epoch_end_time - epoch_start_time, ['%.4f' % l for l in losses.avg()]))
# Validate the current model
metrics = test_net(cfg, epoch_idx, val_data_loader, val_writer, grnet)
# Save checkpoints
if epoch_idx % cfg.TRAIN.SAVE_FREQ == 0 or metrics.better_than(best_metrics):
file_name = 'ckpt-best.pth' if metrics.better_than(best_metrics) else 'ckpt-epoch-%03d.pth' % epoch_idx
output_path = os.path.join(cfg.DIR.CHECKPOINTS, file_name)
torch.save({
'epoch_index': epoch_idx,
'best_metrics': metrics.state_dict(),
'grnet': grnet.state_dict()
}, output_path) # yapf: disable
logging.info('Saved checkpoint to %s ...' % output_path)
if metrics.better_than(best_metrics):
best_metrics = metrics
train_writer.close()
val_writer.close()
```
{
"source": "565353780/jittor-subdivnet",
"score": 3
}
#### File: jittor-subdivnet/maps/geometry.py
```python
from typing import List
import numpy as np
import triangle as tr
def to_barycentric(points, triangle):
"""
compute barycentric coordinates (u, v, w) for points w.r.t. triangle
"""
points = np.array(points)
triangle = np.array(triangle)
if triangle.shape[1] == 3:
a, b, c = np.linalg.solve(triangle.T, points)
elif triangle.shape[1] == 2:
A = np.vstack([triangle[1] - triangle[0], triangle[2] - triangle[0]])
b, c = np.linalg.solve(A.T, points - triangle[0])
a = 1 - b - c
else:
raise Exception("Invalid")
eps = 1e-5
return np.array([a, b, c])
def from_barycenteric(attr, bary):
"""
attr: [3, N] or [3,]
bary: [3]
"""
attr = np.array(attr)
bary = np.array(bary)
if len(attr.shape) == 1:
return (attr * bary).sum()
elif len(attr.shape) == 2:
return (attr * bary[:, None]).sum(axis=0)
else:
raise Exception("Invalid")
def CDT(vids, vertices):
V = vertices.shape[0]
ring = [(i, (i + 1) % V) for i in range(V)]
data = {"vertices": vertices, "segments": ring}
result = tr.triangulate(data, "pe")
new_edges = [
(k, v) for k, v in result["edges"] if not (k, v) in ring and not (v, k) in ring
]
new_faces = np.vectorize(lambda x: vids[x], otypes=[int])(result["triangles"])
new_edges = np.vectorize(lambda x: vids[x], otypes=[int])(new_edges)
return new_faces, new_edges
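# Re-triangulate the region around v as a fan of triangles from v to consecutive neighbors, tracking which edges are new.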
def MVT(v, neighbors):
edges = set()
for i in range(len(neighbors)):
j = i + 1 if i + 1 < len(neighbors) else 0
edges.add((neighbors[i], neighbors[j]))
edges.add((neighbors[j], neighbors[i]))
new_faces = []
new_edges = set()
for i, u in enumerate(neighbors):
j = i + 1 if i + 1 < len(neighbors) else 0
w = neighbors[j]
if u == v or w == v:
continue
new_faces.append([v, u, w])
if not (v, u) in edges:
new_edges.add((v, u))
if not (v, w) in edges:
new_edges.add((v, w))
new_faces = np.array(new_faces)
new_edges = np.array(list(new_edges))
return new_faces, new_edges
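# Map the one-ring neighborhood of vertex i to 2D by accumulating the angles at i and rescaling them to sum to 2*pi.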
def one_ring_neighbor_uv(
neighbors: List[int],
vertices: np.ndarray,
i: int,
return_angle=False,
return_alpha=False,
):
neighbors_p = neighbors
neighbors_s = np.roll(neighbors_p, -1)
vertices_p = vertices[neighbors_p]
vertices_s = vertices[neighbors_s]
direct_p = vertices_p - vertices[i]
direct_s = vertices_s - vertices[i]
length_p = np.sqrt((direct_p ** 2).sum(axis=1))
length_s = np.sqrt((direct_s ** 2).sum(axis=1))
direct_p = direct_p / length_p[:, np.newaxis]
direct_s = direct_s / length_s[:, np.newaxis]
angle_v = np.arccos((direct_p * direct_s).sum(axis=1))
alpha = angle_v.sum()
A = 2 * np.pi / alpha
angle_v[1:] = np.cumsum(angle_v)[:-1]
angle_v[0] = 0
angle_v = angle_v * A
u = np.power(length_p, A) * np.cos(angle_v)
v = np.power(length_p, A) * np.sin(angle_v)
uv = np.vstack([u, v]).transpose()
if np.isnan(uv).any():
raise Exception('Found NAN')
ret = (uv,)
if return_angle:
ret += (angle_v,)
if return_alpha:
ret += (alpha,)
if len(ret) == 1:
ret = ret[0]
return ret
def plane_from_points(points):
v1 = points[1] - points[0]
v2 = points[2] - points[0]
cp = np.cross(v1, v2)
d = -np.dot(cp, points[2])
l = np.linalg.norm(cp)
cp /= l
d /= l
a, b, c = cp
return np.array([a, b, c, d])
def vector_angle(A, B):
return np.arccos(np.dot(A, B) / np.linalg.norm(A) / np.linalg.norm(B))
def triangle_angles(triangle):
'''
triangle: (3, 3)
'''
a = vector_angle(triangle[1] - triangle[0], triangle[2] - triangle[0])
b = vector_angle(triangle[2] - triangle[1], triangle[0] - triangle[1])
c = np.pi - a - b
return np.array([a, b, c])
def min_triangle_angles(triangle):
return triangle_angles(triangle).min()
def face_areas(verts, faces):
areas = []
for face in faces:
t = np.cross(verts[face[1]] - verts[face[0]],
verts[face[2]] - verts[face[0]])
areas.append(np.linalg.norm(t) / 2)
return np.array(areas)
```
{
"source": "565353780/lapnet-detect",
"score": 2
}
#### File: lapnet-detect/src/detect.py
```python
import torch
from torch.nn.parameter import Parameter
import numpy as np
import os
import cv2
from collections import OrderedDict
from time import time
from LapNet import LAPNet
from create_dataset import createDataset
class LapNet_Test:
def __init__(self, model_name):
# torch.cuda.set_device(args.gpu_idx)
torch.cuda.set_device(0)
# self.INPUT_CHANNELS = 3
# self.OUTPUT_CHANNELS = 2
# self.LEARNING_RATE = args.lr #1e-5
# self.BATCH_SIZE = args.batch_size #20
# self.NUM_EPOCHS = args.epoch #100
# self.LOG_INTERVAL = 20
# self.INS_CH = 32
# self.SIZE = [args.img_size[0], args.img_size[1]] #[224, 224]
# self.NUM_WORKERS = args.num_workers #20
self.INPUT_CHANNELS = 3
self.OUTPUT_CHANNELS = 2
self.LEARNING_RATE = 3e-4
self.BATCH_SIZE = 32
self.NUM_EPOCHS = 10000000000000
self.LOG_INTERVAL = 20
self.INS_CH = 32
self.SIZE = [1024,512]
self.NUM_WORKERS = 32
self.model_name = model_name
self.root_path = '../'
self.model = LAPNet(input_ch=self.INPUT_CHANNELS, output_ch=self.OUTPUT_CHANNELS,internal_ch = 8).cuda()
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.LEARNING_RATE, betas=(0.9, 0.99), amsgrad=True)
chkpt_filename = self.root_path + 'model/' + self.model_name
if not os.path.exists(self.root_path + 'model/'):
os.mkdir(self.root_path + 'model/')
if os.path.isfile(chkpt_filename):
checkpoint = torch.load(chkpt_filename)
self.start_epoch = checkpoint['epoch']
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.model.load_state_dict(checkpoint['net'])
self.load_state_dict(self.model, self.state_dict(self.model))
def state_dict(self, model, destination=None, prefix='', keep_vars=False):
own_state = model.module if isinstance(model, torch.nn.DataParallel) \
else model
if destination is None:
destination = OrderedDict()
for name, param in own_state._parameters.items():
if param is not None:
destination[prefix + name] = param if keep_vars else param.data
for name, buf in own_state._buffers.items():
if buf is not None:
destination[prefix + name] = buf
for name, module in own_state._modules.items():
if module is not None:
self.state_dict(module, destination, prefix + name + '.', keep_vars=keep_vars)
return destination
def load_state_dict(self, model, state_dict, strict=True):
own_state = model.module.state_dict() if isinstance(model, torch.nn.DataParallel) \
else model.state_dict()
for name, param in state_dict.items():
if name in own_state:
if isinstance(param, Parameter):
# backwards compatibility for serialized parameters
param = param.data
try:
own_state[name].copy_(param)
except Exception:
raise RuntimeError('While copying the parameter named {}, '
'whose dimensions in the model are {} and '
'whose dimensions in the checkpoint are {}.'
.format(name, own_state[name].size(), param.size()))
elif strict:
raise KeyError('unexpected key "{}" in state_dict'
.format(name))
if strict:
missing = set(own_state.keys()) - set(state_dict.keys())
if len(missing) > 0:
raise KeyError('missing keys in state_dict: "{}"'.format(missing))
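# Run the network on a single BGR image and return the color-mapped edge map.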
def predict(self, image):
train_dataset = createDataset("", size=lapnet_test.SIZE, image=image)
train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=24, pin_memory=True,
shuffle=False, num_workers=0)
img = list(enumerate(train_dataloader))[0][1]
img_tensor = torch.tensor(img).cuda()
sem_pred = lapnet_test.model(img_tensor)
seg_map = torch.squeeze(sem_pred, 0).cpu().detach().numpy()
seg_show = seg_map[1]
_, seg_show2 = cv2.threshold(seg_show + 1, 0, 0, cv2.THRESH_TOZERO)
seg_show2 = cv2.normalize(seg_show2, seg_show2, 0, 1, cv2.NORM_MINMAX)
seg_show2 = cv2.convertScaleAbs(seg_show2, seg_show2, 255)
result_img = cv2.applyColorMap(seg_show2, cv2.COLORMAP_MAGMA)
return result_img
def predict_with_path(self, image_path):
image = cv2.imread(image_path)
return self.predict(image)
def predict_video(self, video_path, show_video, save_video):
cap = cv2.VideoCapture(video_path)
video_appendix = video_path.split(".")[-1]
video_base_name = video_path.split("." + video_appendix)[0]
if save_video:
fourcc = cv2.VideoWriter_fourcc(*"XVID")
            # cap.get(5) is the source FPS; the output canvas is 1024x1024 (source frame stacked above the edge map)
            out = cv2.VideoWriter(video_base_name + "_output." + video_appendix, fourcc, cap.get(5), (1024, 1024))
        frame_num = int(cap.get(7))  # cap.get(7) is the total frame count
solved_num = 0
start_time = time()
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
source = cv2.resize(frame, (1024, 512))
            edge = self.predict(source)
merge = np.vstack([source, edge])
if save_video:
out.write(merge)
solved_num += 1
if show_video:
cv2.imshow("merge", merge)
k = cv2.waitKey(20)
if k & 0xff == ord("q"):
break
fps = 1.0 * solved_num / (time() - start_time)
time_needed = int(1.0 * (frame_num - solved_num) / fps)
            minute_needed = int(time_needed / 60)
            second_needed = time_needed % 60
            print("\rProcess :", solved_num, "/", frame_num, "\tFPS =", int(fps), "\tTime needed :", minute_needed, "min", second_needed, "s ", end="")
print()
cap.release()
if save_video:
out.release()
cv2.destroyAllWindows()
def save_full_model(self, save_name):
example = torch.rand(1, 3, self.SIZE[1], self.SIZE[0]).cuda()
        traced_script_module = torch.jit.trace(self.model, example)
        traced_script_module.save(self.root_path + 'model/' + save_name)
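# A minimal single-image usage sketch (defined only, never called): the checkpoint name matches the
# one used in __main__ below, while "test.jpg" / "test_edge.jpg" are hypothetical paths.
def demo_single_image_inference(model_name="LapNet_chkpt_better_epoch6767_GPU0_HED_detect.pth",
                                image_path="test.jpg"):
    tester = LapNet_Test(model_name)
    tester.model.eval()
    edge_map = tester.predict_with_path(image_path)
    cv2.imwrite("test_edge.jpg", edge_map)
    return edge_map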
if __name__ == "__main__":
model_name = "LapNet_chkpt_better_epoch6767_GPU0_HED_detect.pth"
video_path = "NVR_ch2_main_20201111164000_20201111170000.avi"
save_name = "LapNet_Edge_Detect.pt"
show_video = False
save_video = False
save_full_model = False
lapnet_test = LapNet_Test(model_name)
lapnet_test.model.eval()
if save_full_model:
lapnet_test.save_full_model(save_name)
else:
lapnet_test.predict_video(video_path, show_video, save_video)
``` |
{
"source": "565353780/mmsegmentation-segformer",
"score": 2
} |
#### File: 565353780/mmsegmentation-segformer/MMSegmentationTrainer.py
```python
import copy
import os
import os.path as osp
import time
import mmcv
import torch
from mmcv.runner import init_dist
from mmcv.utils import Config, DictAction, get_git_hash
from mmseg import __version__
from mmseg.apis import set_random_seed, train_segmentor
from mmseg.datasets import build_dataset
from mmseg.models import build_segmentor
from mmseg.utils import collect_env, get_root_logger
import torch.distributed as dist
class MMSegmentationTrainer:
def __init__(self):
self.reset()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(self.local_rank)
if 'MASTER_ADDR' not in os.environ:
os.environ['MASTER_ADDR'] = "192.168.1.15"
if 'MASTER_PORT' not in os.environ:
os.environ['MASTER_PORT'] = "5678"
dist.init_process_group('gloo', init_method='env://', rank=0, world_size=1)
def reset(self):
self.config = None
self.checkpoint = None
self.work_dir = None
self.model = None
self.model_ready = False
self.time_start = None
self.total_time_sum = 0
self.detected_num = 0
self.local_rank = 0
self.gpu_ids = range(1)
self.distributed = False
self.launcher = False
self.cfg = None
self.seed = None
self.meta = None
self.deterministic = False
self.timestamp = None
self.logger = None
self.env_info = None
self.datasets = None
self.no_validate = False
def resetTimer(self):
self.time_start = None
self.total_time_sum = 0
self.detected_num = 0
def startTimer(self):
self.time_start = time.time()
def endTimer(self, save_time=True):
time_end = time.time()
if not save_time:
return
if self.time_start is None:
print("startTimer must run first!")
return
if time_end > self.time_start:
self.total_time_sum += time_end - self.time_start
self.detected_num += 1
else:
print("Time end must > time start!")
def getAverageTime(self):
if self.detected_num == 0:
return -1
return 1.0 * self.total_time_sum / self.detected_num
def getAverageFPS(self):
if self.detected_num == 0:
return -1
return int(1.0 * self.detected_num / self.total_time_sum)
def setConfig(self, config):
self.config = config
self.cfg = Config.fromfile(self.config)
if self.cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
return
def setCheckPoint(self, checkpoint):
self.checkpoint = checkpoint
if self.checkpoint is not None:
self.cfg.resume_from = self.checkpoint
return
def setWorkDir(self, work_dir):
if work_dir is not None:
self.work_dir = work_dir
else:
self.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(self.config))[0])
self.cfg.work_dir = self.work_dir
return
def setSeed(self, seed):
self.seed = seed
if self.seed is not None:
            # self.logger is not created until setEnv() runs, so report the seed via print here
            print(f'Set random seed to {self.seed}, deterministic: '
                  f'{self.deterministic}')
set_random_seed(self.seed, deterministic=self.deterministic)
self.cfg.seed = self.seed
return
def setEnv(self):
self.cfg.gpu_ids = self.gpu_ids
mmcv.mkdir_or_exist(osp.abspath(self.work_dir))
self.cfg.dump(osp.join(self.work_dir, osp.basename(self.config)))
self.timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(self.work_dir, f'{self.timestamp}.log')
self.logger = get_root_logger(log_file=log_file, log_level=self.cfg.log_level)
env_info_dict = collect_env()
self.env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
self.logger.info('Environment info:\n' + dash_line + self.env_info + '\n' +
dash_line)
return
def setMeta(self):
self.meta = dict()
self.meta['env_info'] = self.env_info
self.meta['seed'] = self.seed
self.meta['exp_name'] = osp.basename(self.config)
return
def initEnv(self, work_dir=None, seed=None):
self.setWorkDir(work_dir)
self.setSeed(seed)
self.setEnv()
self.setMeta()
self.logger.info(f'Distributed training: {self.distributed}')
self.logger.info(f'Config:\n{self.cfg.pretty_text}')
def loadModel(self, config, checkpoint):
self.setConfig(config)
self.setCheckPoint(checkpoint)
self.initEnv()
self.model = build_segmentor(
self.cfg.model,
train_cfg=self.cfg.get('train_cfg'),
test_cfg=self.cfg.get('test_cfg'))
self.logger.info(self.model)
return
def loadDatasets(self):
self.datasets = [build_dataset(self.cfg.data.train)]
if len(self.cfg.workflow) == 2:
val_dataset = copy.deepcopy(self.cfg.data.val)
val_dataset.pipeline = self.cfg.data.train.pipeline
self.datasets.append(build_dataset(val_dataset))
if self.cfg.checkpoint_config is not None:
self.cfg.checkpoint_config.meta = dict(
mmseg_version=f'{__version__}+{get_git_hash()[:7]}',
config=self.cfg.pretty_text,
CLASSES=self.datasets[0].CLASSES,
PALETTE=self.datasets[0].PALETTE)
self.model.CLASSES = self.datasets[0].CLASSES
return
def train(self):
train_segmentor(
self.model,
self.datasets,
self.cfg,
distributed=self.distributed,
validate=(not self.no_validate),
timestamp=self.timestamp,
meta=self.meta)
return
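# A small illustrative sketch (not executed) of how the timer helpers above are meant to be used;
# time.sleep stands in for whatever operation you actually want to profile.
def demo_timer_usage(trainer, repeat=10):
    trainer.resetTimer()
    for _ in range(repeat):
        trainer.startTimer()
        time.sleep(0.01)  # placeholder for the timed operation
        trainer.endTimer()
    print("average time:", trainer.getAverageTime(), "s, average FPS:", trainer.getAverageFPS())
    return True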
if __name__ == "__main__":
config = "../SegFormer/local_configs/segformer/B5/segformer.b5.640x640.ade.160k.py"
checkpoint = None
mm_segmentation_trainer = MMSegmentationTrainer()
mm_segmentation_trainer.loadModel(config, checkpoint)
mm_segmentation_trainer.loadDatasets()
# for spending less GPU memory
mm_segmentation_trainer.cfg.data['samples_per_gpu'] = 1
mm_segmentation_trainer.train()
``` |
{
"source": "565353780/pointcloud-manage",
"score": 2
} |
#### File: pointcloud-manage/PointCloudClass/channel_pointcloud.py
```python
import os
from scipy.spatial import KDTree
from tqdm import tqdm
from PointCloudClass.channel_point import ChannelPoint
class ChannelPointCloud(object):
def __init__(self):
self.point_list = []
self.kd_tree = None
self.xyz_changed = True
self.save_ignore_channel_name_list = ["r", "g", "b"]
return
def reset(self):
self.point_list.clear()
self.kd_tree = None
self.xyz_changed = True
return True
def getChannelValueList(self, channel_name):
channel_value_list = []
for point in self.point_list:
channel_value_list.append(point.getChannelValue(channel_name))
return channel_value_list
def getChannelListValueList(self, channel_name_list):
channel_list_value_list = []
for point in self.point_list:
channel_value_list = []
for channel_name in channel_name_list:
channel_value_list.append(point.getChannelValue(channel_name))
channel_list_value_list.append(channel_value_list)
return channel_list_value_list
def updateKDTree(self):
if not self.xyz_changed:
return True
self.kd_tree = None
xyz_list = self.getChannelListValueList(["x", "y", "z"])
if len(xyz_list) == 0:
return False
if None in xyz_list[0]:
return False
self.kd_tree = KDTree(xyz_list)
self.xyz_changed = False
return True
def loadData(self, pointcloud_file_path):
if not os.path.exists(pointcloud_file_path):
print("[ERROR][ChannelPointCloud::loadData]")
print("\t file not exist!")
return False
self.reset()
print("[INFO][ChannelPointCloud::loadData]")
print("\t start load pointcloud :")
print("\t pointcloud_file_path = " + pointcloud_file_path)
channel_name_list = []
lines = []
with open(pointcloud_file_path, "r") as f:
lines = f.readlines()
point_num = -1
loaded_point_num = 0
find_start_line = False
for line in tqdm(lines):
# load fields
if "property" in line:
channel_name = line.split("\n")[0].split(" ")[2]
channel_name_list.append(channel_name)
continue
if "FIELDS" in line:
channel_name_list = line.split("\n")[0].split("FIELDS ")[1].split(" ")
continue
# load point_num
if "element vertex" in line:
point_num = int(line.split("\n")[0].split(" ")[2])
continue
if "POINTS" in line:
point_num = int(line.split("\n")[0].split(" ")[1])
continue
            if not find_start_line and ("DATA ascii" in line or "end_header" in line):
find_start_line = True
continue
if not find_start_line:
continue
line_data = line.split("\n")[0].split(" ")
channel_value_list = []
for i in range(len(channel_name_list)):
channel_value_list.append(float(line_data[i]))
new_point = ChannelPoint()
new_point.setChannelValueList(channel_name_list, channel_value_list)
self.point_list.append(new_point)
loaded_point_num += 1
if loaded_point_num == point_num:
break
print("[INFO][ChannelPointCloud::loadData]")
print("\t loaded", loaded_point_num, "points from poitncloud!")
self.updateKDTree()
return True
def getNearestPointInfo(self, x, y, z):
self.updateKDTree()
if self.kd_tree is None:
return None, None
if len(self.point_list) == 0:
return None, None
nearest_dist, nearest_point_idx = self.kd_tree.query([x, y, z])
return nearest_dist, nearest_point_idx
def getNearestDist(self, x, y, z):
self.updateKDTree()
if self.kd_tree is None:
return None
if len(self.point_list) == 0:
return None
nearest_dist, _ = self.kd_tree.query([x, y, z])
return nearest_dist
def getSelfNearestDist(self, point_idx):
self.updateKDTree()
if self.kd_tree is None:
return None
if len(self.point_list) == 0:
return None
if point_idx >= len(self.point_list):
return None
xyz = self.point_list[point_idx].getChannelValueList(["x", "y", "z"])
if None in xyz:
return None
nearest_dist_list, _ = self.kd_tree.query([xyz[0], xyz[1], xyz[2]], 2)
return nearest_dist_list[1]
def getNearestPoint(self, x, y, z):
_, nearest_point_idx = self.getNearestPointInfo(x, y, z)
if nearest_point_idx is None:
return None
return self.point_list[nearest_point_idx]
def getNearestChannelValueListValue(self, x, y, z, channel_name_list):
nearest_channel_value_list = []
if len(channel_name_list) == 0:
return nearest_channel_value_list
nearest_point = self.getNearestPoint(x, y, z)
if nearest_point is None:
return None
for channel_name in channel_name_list:
nearest_channel_value_list.append(
nearest_point.getChannelValue(channel_name))
return nearest_channel_value_list
def copyChannelValue(self, target_pointcloud, channel_name_list):
pointcloud_size = len(self.point_list)
target_pointcloud_size = len(target_pointcloud.point_list)
if target_pointcloud_size == 0:
print("[ERROR][ChannelPointCloud::copyChannelValue]")
print("\t target pointcloud is empty!")
return False
if pointcloud_size > 0 and pointcloud_size != target_pointcloud_size:
print("[ERROR][ChannelPointCloud::copyChannelValue]")
print("\t pointcloud size not matched!")
return False
first_point_channel_value_list = \
target_pointcloud.point_list[0].getChannelValueList(channel_name_list)
if None in first_point_channel_value_list:
print("[ERROR][ChannelPointCloud::copyChannelValue]")
print("\t target_pointcloud doesn't have all channels needed!")
return False
print("[INFO][ChannelPointCloud::copyChannelValue]")
print("\t start copy channel value :")
print("\t channel_name_list = [", end="")
for channel_name in channel_name_list:
print(" " + channel_name, end="")
print(" ]...")
channel_list_value_list = \
target_pointcloud.getChannelListValueList(channel_name_list)
if pointcloud_size == 0:
for channel_value_list in tqdm(channel_list_value_list):
new_point = ChannelPoint()
new_point.setChannelValueList(channel_name_list, channel_value_list)
self.point_list.append(new_point)
self.updateKDTree()
return True
        for i in tqdm(range(pointcloud_size)):
channel_value_list = channel_list_value_list[i]
self.point_list[i].setChannelValueList(channel_name_list, channel_value_list)
self.updateKDTree()
return True
def setChannelValueByKDTree(self, target_pointcloud, channel_name_list):
self.updateKDTree()
if len(self.point_list) == 0:
return True
if len(target_pointcloud.point_list) == 0:
print("[ERROR][ChannelPointCloud::setChannelValueByKDTree]")
print("\t target pointcloud is empty!")
return False
first_point_xyz = self.point_list[0].getChannelValueList(["x", "y", "z"])
if None in first_point_xyz:
print("[ERROR][ChannelPointCloud::setChannelValueByKDTree]")
print("\t pointcloud xyz not found!")
return False
first_point_channel_value_list = \
target_pointcloud.getNearestChannelValueListValue(
first_point_xyz[0],
first_point_xyz[1],
first_point_xyz[2],
channel_name_list)
if None in first_point_channel_value_list:
print("[ERROR][ChannelPointCloud::setChannelValueByKDTree]")
print("\t target_pointcloud doesn't have all channels needed!")
return False
print("[INFO][ChannelPointCloud::setChannelValueByKDTree]")
print("\t start set channel value by KDTree :")
print("\t channel_name_list = [", end="")
for channel_name in channel_name_list:
print(" " + channel_name, end="")
print(" ]...")
for point in tqdm(self.point_list):
xyz = point.getChannelValueList(["x", "y", "z"])
channel_value_list = \
target_pointcloud.getNearestChannelValueListValue(xyz[0],
xyz[1],
xyz[2],
channel_name_list)
point.setChannelValueList(channel_name_list, channel_value_list)
return True
def removeOutlierPoints(self, outlier_dist_max):
self.updateKDTree()
if self.kd_tree is None:
print("[ERROR][ChannelPointCloud::removeOutlierPoints]")
print("\t kd_tree is None!")
return False
if outlier_dist_max == 0:
print("[ERROR][ChannelPointCloud::removeOutlierPoints]")
print("\t outlier_dist_max is 0!")
return False
print("[INFO][ChannelPointCloud::removeOutlierPoints]")
print("\t start remove outerlier points with outlier_dist_max = " + str(outlier_dist_max) + "...")
remove_point_idx_list = []
for i in tqdm(range(len(self.point_list))):
current_nearest_dist = self.getSelfNearestDist(i)
if current_nearest_dist > outlier_dist_max:
remove_point_idx_list.append(i)
if len(remove_point_idx_list) == 0:
return True
for i in range(len(remove_point_idx_list)):
self.point_list.pop(remove_point_idx_list[i] - i)
print("[INFO][ChannelPointCloud::removeOutlierPoints]")
print("\t removed " + str(len(remove_point_idx_list)) + " points...")
return True
def paintByLabel(self, label_channel_name, color_map):
if len(self.point_list) == 0:
return True
first_point_label_value = self.point_list[0].getChannelValue(label_channel_name)
if first_point_label_value is None:
print("[ERROR][ChannelPointCloud::paintByLabel]")
print("\t label_channel not found!")
return False
color_map_size = len(color_map)
if color_map_size == 0:
print("[ERROR][ChannelPointCloud::paintByLabel]")
print("\t color_map is empty!")
return False
print("[INFO][ChannelPointCloud::paintByLabel]")
print("\t start paint by label...")
for point in tqdm(self.point_list):
label_value = point.getChannelValue(label_channel_name)
rgb = color_map[label_value % color_map_size]
point.setChannelValueList(["r", "g", "b"], rgb)
return True
def getPCDHeader(self):
channel_list = []
point_num = len(self.point_list)
if point_num > 0:
channel_list = self.point_list[0].channel_list
pcd_header = "# .PCD v0.7 - Point Cloud Data file format\n"
pcd_header += "VERSION 0.7\n"
pcd_header += "FIELDS"
for channel in channel_list:
if channel.name in self.save_ignore_channel_name_list:
continue
pcd_header += " " + channel.name
pcd_header += "\n"
pcd_header += "SIZE"
for channel in channel_list:
if channel.name in self.save_ignore_channel_name_list:
continue
pcd_header += " " + str(channel.size)
pcd_header += "\n"
pcd_header += "TYPE"
for channel in channel_list:
if channel.name in self.save_ignore_channel_name_list:
continue
pcd_header += " " + channel.type
pcd_header += "\n"
pcd_header += "COUNT"
for channel in channel_list:
if channel.name in self.save_ignore_channel_name_list:
continue
pcd_header += " " + str(channel.count)
pcd_header += "\n"
pcd_header += "WIDTH " + str(point_num) + "\n"
pcd_header += "HEIGHT 1\n"
pcd_header += "VIEWPOINT 0 0 0 1 0 0 0\n"
pcd_header += "POINTS " + str(point_num) + "\n"
pcd_header += "DATA ascii\n"
return pcd_header
def savePointCloud(self, save_pointcloud_file_path):
print("[INFO][ChannelPointCloud::savePointCloud]")
print("\t start save pointcloud to" + save_pointcloud_file_path + "...")
with open(save_pointcloud_file_path, "w") as f:
pcd_header = self.getPCDHeader()
f.write(pcd_header)
for point in tqdm(self.point_list):
last_channel_idx = len(point.channel_list) - 1
for i in range(last_channel_idx + 1):
if point.channel_list[i].name in self.save_ignore_channel_name_list:
if i == last_channel_idx:
f.write("\n")
continue
f.write(str(point.channel_list[i].value))
if i < last_channel_idx:
f.write(" ")
else:
f.write("\n")
return True
def outputInfo(self, info_level=0):
line_start = "\t" * info_level
print(line_start + "[ChannelPointCloud]")
for point in self.point_list:
point.outputInfo(info_level + 1)
return True
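# A minimal usage sketch with hypothetical file paths (defined only, never executed on import):
# load an ASCII point cloud, drop isolated points, then save it in the PCD format built by getPCDHeader().
def demo_channel_pointcloud(input_path="input.pcd", output_path="output.pcd"):
    pointcloud = ChannelPointCloud()
    if not pointcloud.loadData(input_path):
        return False
    pointcloud.removeOutlierPoints(0.05)
    return pointcloud.savePointCloud(output_path)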
```
#### File: pointcloud-manage/PointCloudClass/down_sample.py
```python
import open3d as o3d
def downSample(pointcloud_file_path, down_sample_cluster_num, save_pointcloud_file_path):
print("[INFO][downSample]")
print("\t start down sampling pointcloud :")
print("\t down_sample_cluster_num = " + str(down_sample_cluster_num) + "...")
pointcloud = o3d.io.read_point_cloud(pointcloud_file_path, print_progress=True)
down_sampled_pointcloud = o3d.geometry.PointCloud.uniform_down_sample(
pointcloud, down_sample_cluster_num)
o3d.io.write_point_cloud(
save_pointcloud_file_path,
down_sampled_pointcloud,
write_ascii=True,
print_progress=True)
print("SUCCESS!")
return True
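# Hedged example (hypothetical paths): keep every 10th point of a dense scan.
# Wrapped in a function so importing this module stays side-effect free.
def demo_down_sample():
    return downSample("dense_scan.pcd", 10, "dense_scan_down10.pcd")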
```
#### File: pointcloud-manage/PointCloudClass/renderer.py
```python
import cv2
import numpy as np
from math import cos, sin, pi
from tqdm import tqdm
import open3d as o3d
def render(pointcloud_file_path, estimate_normals_radius, estimate_normals_max_nn):
pointcloud = o3d.io.read_point_cloud(pointcloud_file_path, print_progress=True)
pointcloud.estimate_normals(
search_param=o3d.geometry.KDTreeSearchParamHybrid(
radius=estimate_normals_radius,
max_nn=estimate_normals_max_nn))
o3d.visualization.draw_geometries([pointcloud])
return True
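# Minimal sketch (hypothetical path): show a point cloud with normals estimated from a 5 cm
# neighbourhood capped at 30 neighbours; both values are illustrative, not tuned defaults.
def demo_render():
    return render("scene.pcd", estimate_normals_radius=0.05, estimate_normals_max_nn=30)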
class Renderer(object):
def __init__(self):
self.vis = o3d.visualization.Visualizer()
self.render_center = None
self.euler_angle = [0, 0, 0]
return
def getRotationMatrixFromEulerAngle(self, euler_angle):
R_x = np.array([
[1, 0, 0],
[0, cos(euler_angle[0]), -sin(euler_angle[0])],
[0, sin(euler_angle[0]), cos(euler_angle[0])]
])
R_y = np.array([
[cos(euler_angle[1]), 0, sin(euler_angle[1])],
[0, 1, 0],
[-sin(euler_angle[1]), 0, cos(euler_angle[1])]
])
R_z = np.array([
[cos(euler_angle[2]), -sin(euler_angle[2]), 0],
[sin(euler_angle[2]), cos(euler_angle[2]), 0],
[0, 0, 1]
])
rotation_matrix = np.dot(R_z, np.dot(R_y, R_x))
return rotation_matrix
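    # Worked check (comment only): with euler_angle = [0, 0, pi/2] both R_x and R_y are identity,
    # so the result reduces to R_z(90 deg), which maps the x-axis [1, 0, 0] onto the y-axis [0, 1, 0].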
def getRotateDirection(self, direction_vector, euler_angle):
np_direction_vector = np.array(direction_vector)
direction_vector_norm = np.linalg.norm(np_direction_vector)
if direction_vector_norm == 0:
print("[ERROR][Renderer::getRotateDirection]")
print("\t direction_vector_norm is 0!")
return None
np_unit_direction_vector = np_direction_vector / direction_vector_norm
rotation_matrix = self.getRotationMatrixFromEulerAngle(euler_angle)
rotate_direction = np.dot(rotation_matrix, np_unit_direction_vector)
return rotate_direction.tolist()
def rotateVis(self, delta_rotate_angle):
self.euler_angle[0] = 0
self.euler_angle[1] = -10 * pi / 180.0
self.euler_angle[2] += delta_rotate_angle * pi / 180.0
ctr = self.vis.get_view_control()
front_direction = self.getRotateDirection(
[1, 0, 0], self.euler_angle)
ctr.set_front(front_direction)
up_direction = self.getRotateDirection(
[0, 0, 1], self.euler_angle)
ctr.set_up(up_direction)
ctr.set_lookat(self.render_center)
# ctr.set_zoom(0.5)
return True
def render(self, show_labels, scene_pointcloud_file_path=None):
delta_rotate_angle = 0.5
if scene_pointcloud_file_path is not None:
print("start reading floor and wall...")
self.splitLabeledPoints(scene_pointcloud_file_path)
rendered_pointcloud = o3d.geometry.PointCloud()
render_points = []
render_colors = []
print("start create rendered pointcloud...")
for i in tqdm(range(len(self.pointcloud_list))):
points = np.asarray(self.pointcloud_list[i].points).tolist()
if len(points) == 0:
continue
for point in points:
render_points.append(point)
render_colors.append(self.d3_40_colors_rgb[i % len(self.d3_40_colors_rgb)] / 255.0)
if scene_pointcloud_file_path is not None:
print("start create rendered floor...")
for wall_point in tqdm(self.labeled_point_cluster_list[0]):
if abs(wall_point[2]) > 0.01:
continue
render_points.append(wall_point)
render_colors.append(np.array([132, 133, 135], dtype=np.uint8) / 255.0)
rendered_pointcloud.points = o3d.utility.Vector3dVector(np.array(render_points))
rendered_pointcloud.colors = o3d.utility.Vector3dVector(np.array(render_colors))
rendered_pointcloud.estimate_normals(
search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30))
self.render_center = rendered_pointcloud.get_axis_aligned_bounding_box().get_center()
self.vis.create_window(window_name="Open3D RenderObject")
render_option = self.vis.get_render_option()
render_option.background_color = np.array([1, 1, 1])
render_option.point_size = 1
self.vis.add_geometry(rendered_pointcloud)
while True:
self.rotateVis(delta_rotate_angle)
# self.vis.update_geometry()
self.vis.poll_events()
self.vis.update_renderer()
if ord('q') == cv2.waitKey(1):
break
self.vis.destroy_window()
return True
    def saveRender(self, output_video_file_path, scene_pointcloud_file_path=None):
fps = 30
video_width = 1920
video_height = 1080
delta_rotate_angle = 0.5
if scene_pointcloud_file_path is not None:
print("start reading floor and wall...")
self.splitLabeledPoints(scene_pointcloud_file_path)
rendered_pointcloud = o3d.geometry.PointCloud()
render_points = []
render_colors = []
print("start create rendered pointcloud...")
for i in tqdm(range(len(self.pointcloud_list))):
points = np.asarray(self.pointcloud_list[i].points).tolist()
if len(points) == 0:
continue
for point in points:
render_points.append(point)
render_colors.append(self.d3_40_colors_rgb[i % len(self.d3_40_colors_rgb)] / 255.0)
if scene_pointcloud_file_path is not None:
print("start create rendered floor...")
for wall_point in tqdm(self.labeled_point_cluster_list[0]):
if abs(wall_point[2]) > 0.01:
continue
render_points.append(wall_point)
render_colors.append(np.array([132, 133, 135], dtype=np.uint8) / 255.0)
rendered_pointcloud.points = o3d.utility.Vector3dVector(np.array(render_points))
rendered_pointcloud.colors = o3d.utility.Vector3dVector(np.array(render_colors))
self.render_center = rendered_pointcloud.get_axis_aligned_bounding_box().get_center()
self.vis.create_window(window_name="Open3D RenderObject")
render_option = self.vis.get_render_option()
render_option.background_color = np.array([1, 1, 1])
render_option.point_size = 1
self.vis.add_geometry(rendered_pointcloud)
fourcc = cv2.VideoWriter_fourcc(*'MP4V')
out = cv2.VideoWriter(output_video_file_path, fourcc, fps, (video_width, video_height))
for i in range(int(360 / delta_rotate_angle)):
self.rotateVis(0.5)
# self.vis.update_geometry()
self.vis.poll_events()
self.vis.update_renderer()
open3d_image = np.asarray(self.vis.capture_screen_float_buffer()) * 255.0
            cv_image = cv2.cvtColor(open3d_image.astype(np.uint8), cv2.COLOR_RGB2BGR)
out.write(cv_image)
self.vis.destroy_window()
out.release()
return True
``` |
{
"source": "565353780/poly-yolo",
"score": 2
} |
#### File: poly-yolo/poly_yolo/poly_yolo_without_polygon.py
```python
from datetime import datetime
import colorsys
import os
import sys
from functools import reduce
from functools import wraps
import math
import random as rd
import cv2 as cv
import keras.backend as K
import numpy as np
import tensorflow as tf
from PIL import Image
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping, Callback
from keras.layers import Conv2D, Add, ZeroPadding2D, UpSampling2D, Concatenate
from keras.layers import Input, GlobalAveragePooling2D, Reshape, Dense, Permute, multiply, Activation, add, Lambda, concatenate, MaxPooling2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.models import load_model
from keras.optimizers import Adadelta, Adagrad
from keras.regularizers import l2
from keras.utils import multi_gpu_model
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
np.set_printoptions(precision=3, suppress=True)
MAX_VERTICES = 1000 #that allows the labels to have 1000 vertices per polygon at max. They are reduced for training
ANGLE_STEP = 15 #that means Poly-YOLO will detect 360/15=24 vertices per polygon at max
NUM_ANGLES3 = 0
NUM_ANGLES = 0
grid_size_multiplier = 4 #that is resolution of the output scale compared with input. So it is 1/4
anchor_mask = [[0,1,2,3,4,5,6,7,8], [0,1,2,3,4,5,6,7,8], [0,1,2,3,4,5,6,7,8]] #that should be optimized
anchors_per_level = 9 #single scale and nine anchors
def compose(*funcs):
"""Compose arbitrarily many functions, evaluated left to right.
Reference: https://mathieularose.com/function-composition-in-python/
"""
# return lambda x: reduce(lambda v, f: f(v), funcs, x)
if funcs:
return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)
else:
raise ValueError('Composition of empty sequence not supported.')
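# Tiny illustration of compose(): functions are applied left to right, so the pipeline below
# doubles its input and then adds one, i.e. compose(f, g)(x) == g(f(x)); _compose_example(3) -> 7.
def _compose_example(x):
    double = lambda v: v * 2
    add_one = lambda v: v + 1
    return compose(double, add_one)(x)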
def letterbox_image(image, size):
'''resize image with unchanged aspect ratio using padding'''
iw = image.shape[1]
ih = image.shape[0]
w, h = size
scale = min(w / iw, h / ih)
nw = int(iw * scale)
nh = int(ih * scale)
cvi = cv.cvtColor(image, cv.COLOR_BGR2RGB)
cvi = cv.resize(cvi, (nw, nh), interpolation=cv.INTER_CUBIC)
dx = int((w - nw) // 2)
dy = int((h - nh) // 2)
new_image = np.zeros((h, w, 3), dtype='uint8')
new_image[...] = 128
if nw <= w and nh <= h:
new_image[dy:dy + nh, dx:dx + nw, :] = cvi
else:
new_image = cvi[-dy:-dy + h, -dx:-dx + w, :]
return new_image.astype('float32') / 255.0
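# Small sketch of letterbox_image(): a 100x200 (h x w) dummy frame padded into a 416x832 canvas
# keeps its aspect ratio and comes back as float32 normalized to [0, 1].
def _letterbox_example():
    dummy = np.zeros((100, 200, 3), dtype='uint8')
    boxed = letterbox_image(dummy, (832, 416))  # size is given as (w, h)
    return boxed.shape  # -> (416, 832, 3)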
def rand(a=0, b=1):
return np.random.rand() * (b - a) + a
def get_random_data(line, input_shape, random=True, max_boxes=80, hue_alter=20, sat_alter=30, val_alter=30, proc_img=True):
    # load data
    # the color conversion happens later, so there is no need to chain bgr->rgb->hsv->rgb up front
image = cv.imread(line[0])
iw = image.shape[1]
ih = image.shape[0]
h, w = input_shape
box = np.array([np.array(list(map(float, box.split(',')[:5]))) for box in line[1:]])
if not random:
# resize image
scale = min(w / iw, h / ih)
nw = int(iw * scale)
nh = int(ih * scale)
dx = (w - nw) // 2
dy = (h - nh) // 2
image_data = 0
if proc_img:
# image = image.resize((nw, nh), Image.BICUBIC)
image = cv.cvtColor(
cv.resize(image, (nw, nh), interpolation=cv.INTER_CUBIC), cv.COLOR_BGR2RGB)
image = Image.fromarray(image)
new_image = Image.new('RGB', (w, h), (128, 128, 128))
new_image.paste(image, (dx, dy))
image_data = np.array(new_image) / 255.
# correct boxes
box_data = np.zeros((max_boxes, 5))
if len(box) > 0:
np.random.shuffle(box)
if len(box) > max_boxes:
box = box[:max_boxes]
box[:, [0, 2]] = box[:, [0, 2]] * scale + dx
box[:, [1, 3]] = box[:, [1, 3]] * scale + dy
box_data[:len(box), 0:5] = box[:, 0:5]
return image_data, box_data
# resize image
random_scale = rd.uniform(.6, 1.4)
scale = min(w / iw, h / ih)
nw = int(iw * scale * random_scale)
nh = int(ih * scale * random_scale)
    # force nw and nh to be even
if (nw % 2) == 1:
nw = nw + 1
if (nh % 2) == 1:
nh = nh + 1
# jitter for slight distort of aspect ratio
if np.random.rand() < 0.3:
if np.random.rand() < 0.5:
nw = int(nw*rd.uniform(.8, 1.0))
else:
nh = int(nh*rd.uniform(.8, 1.0))
image = cv.resize(image, (nw, nh), interpolation=cv.INTER_CUBIC)
nwiw = nw/iw
nhih = nh/ih
# clahe. applied on resized image to save time. but before placing to avoid
# the influence of homogenous background
if np.random.rand() < 0.05:
clahe = cv.createCLAHE(clipLimit=2, tileGridSize=(8, 8))
lab = cv.cvtColor(image, cv.COLOR_BGR2LAB)
l, a, b = cv.split(lab)
cl = clahe.apply(l)
limg = cv.merge((cl, a, b))
image = cv.cvtColor(limg, cv.COLOR_LAB2BGR)
# place image
dx = rd.randint(0, max(w - nw, 0))
dy = rd.randint(0, max(h - nh, 0))
new_image = np.full((h, w, 3), 128, dtype='uint8')
new_image, crop_coords, new_img_coords = random_crop(
image, new_image)
# flip image or not
flip = rd.random() < .5
if flip:
new_image = cv.flip(new_image, 1)
# distort image
hsv = np.int32(cv.cvtColor(new_image, cv.COLOR_BGR2HSV))
# linear hsv distortion
hsv[..., 0] += rd.randint(-hue_alter, hue_alter)
hsv[..., 1] += rd.randint(-sat_alter, sat_alter)
hsv[..., 2] += rd.randint(-val_alter, val_alter)
# additional non-linear distortion of saturation and value
if np.random.rand() < 0.5:
hsv[..., 1] = hsv[..., 1]*rd.uniform(.7, 1.3)
hsv[..., 2] = hsv[..., 2]*rd.uniform(.7, 1.3)
hsv[..., 0][hsv[..., 0] > 179] = 179
hsv[..., 0][hsv[..., 0] < 0] = 0
hsv[..., 1][hsv[..., 1] > 255] = 255
hsv[..., 1][hsv[..., 1] < 0] = 0
hsv[..., 2][hsv[..., 2] > 255] = 255
hsv[..., 2][hsv[..., 2] < 0] = 0
image_data = cv.cvtColor(
np.uint8(hsv), cv.COLOR_HSV2RGB).astype('float32') / 255.0
# add noise
if np.random.rand() < 0.15:
image_data = np.clip(image_data + np.random.rand() *
image_data.std() * np.random.random(image_data.shape), 0, 1)
# correct boxes
box_data = np.zeros((max_boxes, 5))
if len(box) > 0:
np.random.shuffle(box)
# rescaling separately because 5-th element is class
box[:, [0, 2]] = box[:, [0, 2]] * nwiw
box[:, [1, 3]] = box[:, [1, 3]] * nhih
# mask out boxes that lies outside of croping window
mask = (box[:, 1] >= crop_coords[0]) & (box[:, 3] < crop_coords[1]) & (
box[:, 0] >= crop_coords[2]) & (box[:, 2] < crop_coords[3])
box = box[mask]
# transform boxes to new coordinate system w.r.t new_image
box[:, :2] = box[:, :2] - [crop_coords[2], crop_coords[0]] + [new_img_coords[2], new_img_coords[0]]
box[:, 2:4] = box[:, 2:4] - [crop_coords[2], crop_coords[0]] + [new_img_coords[2], new_img_coords[0]]
if flip:
box[:, [0, 2]] = (w-1) - box[:, [2, 0]]
box[:, 0:2][box[:, 0:2] < 0] = 0
box[:, 2][box[:, 2] >= w] = w-1
box[:, 3][box[:, 3] >= h] = h-1
box_w = box[:, 2] - box[:, 0]
box_h = box[:, 3] - box[:, 1]
box = box[np.logical_and(box_w > 1, box_h > 1)] # discard invalid box
if len(box) > max_boxes:
box = box[:max_boxes]
box_data[:len(box), 0:5] = box[:, 0:5]
return image_data, box_data
def random_crop(img, new_img):
"""Creates random crop from img and insert it into new_img
Args:
img (numpy array): Image to be cropped
new_img (numpy array): Image to which the crop will be inserted into.
Returns:
tuple: Tuple of image containing the crop, list of coordinates used to crop img and list of coordinates where the crop
has been inserted into in new_img
"""
h, w = img.shape[:2]
crop_shape = new_img.shape[:2]
crop_coords = [0, 0, 0, 0]
new_pos = [0, 0, 0, 0]
# if image height is smaller than cropping window
if h < crop_shape[0]:
# cropping whole image [0,h]
crop_coords[1] = h
# randomly position whole img along height dimension
val = rd.randint(0, crop_shape[0]-h)
new_pos[0:2] = [val, val + h]
else:
# if image height is bigger than cropping window
# randomly position cropping window on image
crop_h_shift = rd.randint(crop_shape[0], h)
crop_coords[0:2] = [crop_h_shift - crop_shape[0], crop_h_shift]
new_pos[0:2] = [0, crop_shape[0]]
# same as above for image width
if w < crop_shape[1]:
crop_coords[3] = w
val = rd.randint(0, crop_shape[1] - w)
new_pos[2:4] = [val, val + w]
else:
crop_w_shift = rd.randint(crop_shape[1], w)
crop_coords[2:4] = [crop_w_shift - crop_shape[1], crop_w_shift]
new_pos[2:4] = [0, crop_shape[1]]
# slice, insert and return image including crop and coordinates used for cropping and inserting
# coordinates are later used for boxes adjustments.
new_img[new_pos[0]:new_pos[1], new_pos[2]:new_pos[3],
:] = img[crop_coords[0]:crop_coords[1], crop_coords[2]:crop_coords[3], :]
return new_img, crop_coords, new_pos
"""Poly-YOLO Model Defined in Keras."""
@wraps(Conv2D)
def DarknetConv2D(*args, **kwargs):
"""Wrapper to set Darknet parameters for Convolution2D."""
darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}
darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides') == (2, 2) else 'same'
darknet_conv_kwargs.update(kwargs)
return Conv2D(*args, **darknet_conv_kwargs)
def DarknetConv2D_BN_Leaky(*args, **kwargs):
"""Darknet Convolution2D followed by BatchNormalization and LeakyReLU."""
no_bias_kwargs = {'use_bias': False}
no_bias_kwargs.update(kwargs)
return compose(
DarknetConv2D(*args, **no_bias_kwargs),
BatchNormalization(),
LeakyReLU(alpha=0.1))
def resblock_body(x, num_filters, num_blocks):
'''A series of resblocks starting with a downsampling Convolution2D'''
# Darknet uses left and top padding instead of 'same' mode
x = ZeroPadding2D(((1, 0), (1, 0)))(x)
x = DarknetConv2D_BN_Leaky(num_filters, (3, 3), strides=(2, 2))(x)
for i in range(num_blocks):
y = compose(
DarknetConv2D_BN_Leaky(num_filters // 2, (1, 1)),
DarknetConv2D_BN_Leaky(num_filters, (3, 3)))(x)
y = squeeze_excite_block(y)
x = Add()([x, y])
return x
# https://github.com/titu1994/keras-squeeze-excite-network/blob/master/keras_squeeze_excite_network/se_resnet.py
def squeeze_excite_block(tensor, ratio=16):
init = tensor
channel_axis = 1 if K.image_data_format() == "channels_first" else -1
filters = init._keras_shape[channel_axis]
se_shape = (1, 1, filters)
se = GlobalAveragePooling2D()(init)
se = Reshape(se_shape)(se)
se = Dense(filters // ratio, kernel_initializer='he_normal', use_bias=False)(se)
se = LeakyReLU(alpha=0.1)(se)
se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)
if K.image_data_format() == 'channels_first':
se = Permute((3, 1, 2))(se)
x = multiply([init, se])
return x
def _tensor_shape(tensor):
return getattr(tensor, '_keras_shape')
def darknet_body(x):
    '''Darknet body having 52 Convolution2D layers'''
base = 6 # orig YOLOv3 has base = 8
x = DarknetConv2D_BN_Leaky(base * 4, (3, 3))(x)
x = resblock_body(x, base * 8, 1)
x = resblock_body(x, base * 16, 2)
tiny = x
x = resblock_body(x, base * 32, 8)
small = x
x = resblock_body(x, base * 64, 8)
medium = x
x = resblock_body(x, base * 128, 8)
big = x
return tiny, small, medium, big
def make_last_layers(x, num_filters, out_filters):
x = compose(
DarknetConv2D_BN_Leaky(num_filters, (1, 1)),
DarknetConv2D_BN_Leaky(num_filters * 2, (3, 3)),
DarknetConv2D_BN_Leaky(num_filters, (1, 1)),
DarknetConv2D_BN_Leaky(num_filters * 2, (3, 3)),
DarknetConv2D_BN_Leaky(num_filters, (1, 1)))(x)
y = compose(
DarknetConv2D_BN_Leaky(num_filters * 2, (3, 3)),
DarknetConv2D(out_filters, (1, 1)))(x)
return x, y
def yolo_body(inputs, num_anchors, num_classes):
"""Create Poly-YOLO model CNN body in Keras."""
tiny, small, medium, big = darknet_body(inputs)
base = 6
tiny = DarknetConv2D_BN_Leaky(base*32, (1, 1))(tiny)
small = DarknetConv2D_BN_Leaky(base*32, (1, 1))(small)
medium = DarknetConv2D_BN_Leaky(base*32, (1, 1))(medium)
big = DarknetConv2D_BN_Leaky(base*32, (1, 1))(big)
all = Add()([medium, UpSampling2D(2,interpolation='bilinear')(big)])
all = Add()([small, UpSampling2D(2,interpolation='bilinear')(all)])
all = Add()([tiny, UpSampling2D(2,interpolation='bilinear')(all)])
num_filters = base*32
x = compose(
DarknetConv2D_BN_Leaky(num_filters, (1, 1)),
DarknetConv2D_BN_Leaky(num_filters * 2, (3, 3)),
DarknetConv2D_BN_Leaky(num_filters, (1, 1)))(all)
all = compose(
DarknetConv2D_BN_Leaky(num_filters * 2, (3, 3)),
DarknetConv2D(num_anchors * (num_classes + 5 + NUM_ANGLES3), (1, 1)))(x)
return Model(inputs, all)
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
"""Convert final layer features to bounding box parameters."""
num_anchors = anchors_per_level
# Reshape to batch, height, width, num_anchors, box_params.
anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])
grid_shape = K.shape(feats)[1:3] # height, width
grid_y = K.tile(tf.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1], name='yolo_head/tile/reshape/grid_y'),
[1, grid_shape[1], 1, 1])
grid_x = K.tile(tf.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1], name='yolo_head/tile/reshape/grid_x'),
[grid_shape[0], 1, 1, 1])
grid = tf.concat([grid_x, grid_y], axis=-1, name='yolo_head/concatenate/grid')
grid = K.cast(grid, K.dtype(feats))
feats = tf.reshape(
feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5 + NUM_ANGLES3], name='yolo_head/reshape/feats')
# Adjust predictions to each spatial grid point and anchor size.
box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[...,::-1], K.dtype(feats))
box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[...,::-1], K.dtype(feats))
box_confidence = K.sigmoid(feats[..., 4:5])
box_class_probs = K.sigmoid(feats[..., 5:5 + num_classes])
if calc_loss == True:
return grid, feats, box_xy, box_wh
return box_xy, box_wh, box_confidence, box_class_probs
def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):
'''Get corrected boxes'''
box_yx = box_xy[..., ::-1]
box_hw = box_wh[..., ::-1]
input_shape = K.cast(input_shape, K.dtype(box_yx))
image_shape = K.cast(image_shape, K.dtype(box_yx))
new_shape = K.round(image_shape * K.min(input_shape / image_shape))
offset = (input_shape - new_shape) / 2. / input_shape
scale = input_shape / new_shape
box_yx = (box_yx - offset) * scale
box_hw *= scale
box_mins = box_yx - (box_hw / 2.)
box_maxes = box_yx + (box_hw / 2.)
boxes = K.concatenate([
box_mins[..., 0:1], # y_min
box_mins[..., 1:2], # x_min
box_maxes[..., 0:1], # y_max
box_maxes[..., 1:2] # x_max
])
# Scale boxes back to original image shape.
boxes *= K.concatenate([image_shape, image_shape])
return boxes
def yolo_correct_polygons(polygons_x, polygons_y, polygons_confidence, boxes, input_shape, image_shape):
polygons = K.concatenate([polygons_x, polygons_y, polygons_confidence])
return polygons
def yolo_boxes_and_scores(feats, anchors, num_classes, input_shape, image_shape):
'''Process Conv layer output'''
box_xy, box_wh, box_confidence, box_class_probs = yolo_head(feats, anchors, num_classes, input_shape)
boxes = yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape)
boxes = K.reshape(boxes, [-1, 4])
box_scores = box_confidence * box_class_probs
box_scores = K.reshape(box_scores, [-1, num_classes])
return boxes, box_scores
def yolo_eval(yolo_outputs,
anchors,
num_classes,
image_shape,
max_boxes=80,
score_threshold=.5,
iou_threshold=.5):
"""Evaluate YOLO model on given input and return filtered boxes."""
input_shape = K.shape(yolo_outputs)[1:3] * grid_size_multiplier
boxes = []
box_scores = []
for l in range(1):
_boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs,
anchors[anchor_mask[l]], num_classes, input_shape,
image_shape)
boxes.append(_boxes)
box_scores.append(_box_scores)
boxes = K.concatenate(boxes, axis=0)
box_scores = K.concatenate(box_scores, axis=0)
mask = box_scores >= score_threshold
max_boxes_tensor = K.constant(max_boxes, dtype='int32')
boxes_ = []
scores_ = []
classes_ = []
for c in range(num_classes):
# TODO: use keras backend instead of tf.
class_boxes = tf.boolean_mask(boxes, mask[:, c])
class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])
nms_index = tf.image.non_max_suppression(
class_boxes, class_box_scores, max_boxes_tensor, iou_threshold=iou_threshold)
class_boxes = K.gather(class_boxes, nms_index)
class_box_scores = K.gather(class_box_scores, nms_index)
classes = K.ones_like(class_box_scores, 'int32') * c
boxes_.append(class_boxes)
scores_.append(class_box_scores)
classes_.append(classes)
boxes_ = K.concatenate(boxes_, axis=0)
scores_ = K.concatenate(scores_, axis=0)
classes_ = K.concatenate(classes_, axis=0)
return boxes_, scores_, classes_
def preprocess_true_boxes(true_boxes, input_shape, anchors, num_classes):
'''Preprocess true boxes to training input format
Parameters
----------
    true_boxes: array, shape=(m, T, 5)
        Absolute x_min, y_min, x_max, y_max, class_id relative to input_shape
        (the input here is our shortened label format)
input_shape: array-like, hw, multiples of 32
anchors: array, shape=(N, 2), wh
num_classes: integer
Returns
-------
    y_true: list of array, shape like yolo_outputs, xywh are relative value
'''
assert (true_boxes[..., 4] < num_classes).all(), 'class id must be less than num_classes'
true_boxes = np.array(true_boxes, dtype='float32')
input_shape = np.array(input_shape, dtype='int32')
boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2
boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]
true_boxes[:,:, 5:NUM_ANGLES3 + 5:3] /= np.clip(np.expand_dims(np.sqrt(np.power(boxes_wh[:, :, 0], 2) + np.power(boxes_wh[:, :, 1], 2)), -1), 0.0001, 9999999)
true_boxes[..., 0:2] = boxes_xy / input_shape[::-1]
true_boxes[..., 2:4] = boxes_wh / input_shape[::-1]
m = true_boxes.shape[0]
grid_shapes = [input_shape // {0: grid_size_multiplier}[l] for l in range(1)]
y_true = [np.zeros((m, grid_shapes[l][0], grid_shapes[l][1], len(anchor_mask[l]), 5 + num_classes + NUM_ANGLES3),
dtype='float32') for l in range(1)]
# Expand dim to apply broadcasting.
anchors = np.expand_dims(anchors, 0)
anchor_maxes = anchors / 2.
anchor_mins = -anchor_maxes
valid_mask = boxes_wh[..., 0] > 0
for b in range(m):
# Discard zero rows.
wh = boxes_wh[b, valid_mask[b]]
if len(wh) == 0: continue
# Expand dim to apply broadcasting.
wh = np.expand_dims(wh, -2)
box_maxes = wh / 2.
box_mins = -box_maxes
intersect_mins = np.maximum(box_mins, anchor_mins)
intersect_maxes = np.minimum(box_maxes, anchor_maxes)
intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
box_area = wh[..., 0] * wh[..., 1]
anchor_area = anchors[..., 0] * anchors[..., 1]
iou = intersect_area / (box_area + anchor_area - intersect_area)
# Find best anchor for each true box
best_anchor = np.argmax(iou, axis=-1)
for t, n in enumerate(best_anchor):
l = 0
if n in anchor_mask[l]:
i = np.floor(true_boxes[b, t, 0] * grid_shapes[l][1]).astype('int32')
j = np.floor(true_boxes[b, t, 1] * grid_shapes[l][0]).astype('int32')
k = anchor_mask[l].index(n)
c = true_boxes[b, t, 4].astype('int32')
y_true[l][b, j, i, k, 0:4] = true_boxes[b, t, 0:4]
y_true[l][b, j, i, k, 4] = 1
y_true[l][b, j, i, k, 5 + c] = 1
y_true[l][b, j, i, k, 5 + num_classes:5 + num_classes + NUM_ANGLES3] = true_boxes[b, t, 5: 5 + NUM_ANGLES3]
return y_true
def box_iou(b1, b2):
"""Return iou tensor
Parameters
----------
b1: tensor, shape=(i1,...,iN, 4), xywh
b2: tensor, shape=(j, 4), xywh
Returns
-------
iou: tensor, shape=(i1,...,iN, j)
"""
# Expand dim to apply broadcasting.
b1 = K.expand_dims(b1, -2)
b1_xy = b1[..., :2]
b1_wh = b1[..., 2:4]
b1_wh_half = b1_wh / 2.
b1_mins = b1_xy - b1_wh_half
b1_maxes = b1_xy + b1_wh_half
# Expand dim to apply broadcasting.
b2 = K.expand_dims(b2, 0)
b2_xy = b2[..., :2]
b2_wh = b2[..., 2:4]
b2_wh_half = b2_wh / 2.
b2_mins = b2_xy - b2_wh_half
b2_maxes = b2_xy + b2_wh_half
intersect_mins = K.maximum(b1_mins, b2_mins)
intersect_maxes = K.minimum(b1_maxes, b2_maxes)
intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
b1_area = b1_wh[..., 0] * b1_wh[..., 1]
b2_area = b2_wh[..., 0] * b2_wh[..., 1]
iou = intersect_area / (b1_area + b2_area - intersect_area)
return iou
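# Worked example for box_iou (comment only): with b1 = (0, 0, 2, 2) and b2 = (1, 1, 2, 2) in xywh
# (centre x, centre y, width, height) the boxes cover [-1, 1]^2 and [0, 2]^2, the intersection is a
# unit square of area 1, the union is 4 + 4 - 1 = 7, so the IoU evaluates to 1/7 (about 0.143).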
def yolo_loss(args, anchors, num_classes, ignore_thresh=.5):
"""Return yolo_loss tensor
Parameters
----------
yolo_outputs: list of tensor, the output of yolo_body or tiny_yolo_body
y_true: list of array, the output of preprocess_true_boxes
anchors: array, shape=(N, 2), wh
num_classes: integer
ignore_thresh: float, the iou threshold whether to ignore object confidence loss
Returns
-------
loss: tensor, shape=(1,)
"""
num_layers = 1
yolo_outputs = args[:num_layers]
y_true = args[num_layers:]
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3] * grid_size_multiplier, K.dtype(y_true[0]))
grid_shapes = [K.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[0])) for l in range(num_layers)]
loss = 0
m = K.shape(yolo_outputs[0])[0] # batch size, tensor
mf = K.cast(m, K.dtype(yolo_outputs[0]))
for layer in range(num_layers):
object_mask = y_true[layer][..., 4:5]
true_class_probs = y_true[layer][..., 5:5 + num_classes]
grid, raw_pred, pred_xy, pred_wh= yolo_head(yolo_outputs[layer], anchors[anchor_mask[layer]], num_classes, input_shape, calc_loss=True)
pred_box = K.concatenate([pred_xy, pred_wh])
raw_true_xy = y_true[layer][..., :2] * grid_shapes[layer][::-1] - grid
raw_true_wh = K.log(y_true[layer][..., 2:4] / anchors[anchor_mask[layer]] * input_shape[::-1])
raw_true_wh = K.switch(object_mask, raw_true_wh, K.zeros_like(raw_true_wh)) # avoid log(0)=-inf
box_loss_scale = 2 - y_true[layer][..., 2:3] * y_true[layer][..., 3:4]
# Find ignore mask, iterate over each of batch.
ignore_mask = tf.TensorArray(K.dtype(y_true[0]), size=1, dynamic_size=True)
object_mask_bool = K.cast(object_mask, 'bool')
def loop_body(b, ignore_mask):
true_box = tf.boolean_mask(y_true[layer][b, ..., 0:4], object_mask_bool[b, ..., 0])
iou = box_iou(pred_box[b], true_box)
best_iou = K.max(iou, axis=-1)
ignore_mask = ignore_mask.write(b, K.cast(best_iou < ignore_thresh, K.dtype(true_box)))
return b + 1, ignore_mask
_, ignore_mask = tf.while_loop(lambda b, *args: b < m, loop_body, [0, ignore_mask])
ignore_mask = ignore_mask.stack()
ignore_mask = K.expand_dims(ignore_mask, -1)
# K.binary_crossentropy is helpful to avoid exp overflow.
xy_loss = object_mask * box_loss_scale * K.binary_crossentropy(raw_true_xy, raw_pred[..., 0:2], from_logits=True)
wh_loss = object_mask * box_loss_scale * 0.5 * K.square(raw_true_wh - raw_pred[..., 2:4])
confidence_loss = object_mask * K.binary_crossentropy(object_mask, raw_pred[..., 4:5], from_logits=True) + (1 - object_mask) * K.binary_crossentropy(object_mask, raw_pred[..., 4:5],
from_logits=True) * ignore_mask
class_loss = object_mask * K.binary_crossentropy(true_class_probs, raw_pred[..., 5:5 + num_classes], from_logits=True)
xy_loss = K.sum(xy_loss) / mf
wh_loss = K.sum(wh_loss) / mf
class_loss = K.sum(class_loss) / mf
confidence_loss = K.sum(confidence_loss) / mf
loss += (xy_loss + wh_loss + confidence_loss + class_loss)/ (K.sum(object_mask) + 1)*mf
return loss
class YOLO(object):
_defaults = {
"model_path": 'model_data/yolo.h5',
"anchors_path": 'yolo_anchors.txt',
"classes_path": 'yolo_classes.txt',
"score": 0.2,
"iou": 0.5,
"model_image_size": (416,832),
"gpu_num": 1,
}
@classmethod
def get_defaults(cls, n):
if n in cls._defaults:
return cls._defaults[n]
else:
return "Unrecognized attribute name '" + n + "'"
def __init__(self, **kwargs):
self.__dict__.update(self._defaults) # set up default values
self.__dict__.update(kwargs) # and update with user overrides
self.class_names = self._get_class()
self.anchors = self._get_anchors()
self.sess = K.get_session()
self.boxes, self.scores, self.classes = self.generate()
def _get_class(self):
classes_path = os.path.expanduser(self.classes_path)
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def _get_anchors(self):
anchors_path = os.path.expanduser(self.anchors_path)
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
def generate(self):
model_path = os.path.expanduser(self.model_path)
assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
# Load model, or construct model and load weights.
num_anchors = len(self.anchors)
num_classes = len(self.class_names)
try:
self.yolo_model = load_model(model_path, compile=False)
except:
self.yolo_model = yolo_body(Input(shape=(None, None, 3)), anchors_per_level, num_classes)
self.yolo_model.load_weights(self.model_path) # make sure model, anchors and classes match
else:
assert self.yolo_model.layers[-1].output_shape[-1] == \
num_anchors / len(self.yolo_model.output) * (num_classes + 5 + NUM_ANGLES3), \
'Mismatch between model and given anchor and class sizes'
print('{} model, anchors, and classes loaded.'.format(model_path))
# Generate output tensor targets for filtered bounding boxes.
self.input_image_shape = K.placeholder(shape=(2,))
if self.gpu_num >= 2:
self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)
boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
len(self.class_names), self.input_image_shape,
score_threshold=self.score, iou_threshold=self.iou)
return boxes, scores, classes
def detect_image(self, image):
# start = timer()
if self.model_image_size != (None, None):
assert self.model_image_size[0] % 32 == 0, 'Multiples of 32 required'
assert self.model_image_size[1] % 32 == 0, 'Multiples of 32 required'
boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
else:
            print('This functionality is not implemented!')
image_data = np.expand_dims(boxed_image, 0) # Add batch dimension.
out_boxes, out_scores, out_classes = self.sess.run(
[self.boxes, self.scores, self.classes],
feed_dict={
self.yolo_model.input: image_data,
self.input_image_shape: [image.shape[0], image.shape[1]],
K.learning_phase(): 0
})
return out_boxes, out_scores, out_classes
def close_session(self):
self.sess.close()
if __name__ == "__main__":
"""
Retrain the YOLO model for your own dataset.
"""
def _main():
phase = 1
annotation_path = 'citytrain.txt'
validation_path = 'val.txt'
log_dir = 'models/'
classes_path = 'yolo_classes.txt'
anchors_path = 'yolo_anchors.txt'
class_names = get_classes(classes_path)
num_classes = len(class_names)
anchors = get_anchors(anchors_path)
input_shape = (416,832) # multiple of 32, hw
if phase == 1:
model = create_model(input_shape, anchors, num_classes, load_pretrained=False)
else:
model = create_model(input_shape, anchors, num_classes, load_pretrained=True, weights_path=log_dir+'model.h5')
print(model.summary())
checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
monitor='val_loss', save_weights_only=True, save_best_only=True, period=1, verbose=1)
        reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=3, verbose=1, min_delta=0.03)
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=15, verbose=1)
with open(annotation_path) as f:
lines = f.readlines()
with open(validation_path) as f:
lines_val = f.readlines()
for i in range (0, len(lines)):
lines[i] = lines[i].split()
for i in range(0, len(lines_val)):
lines_val[i] = lines_val[i].split()
np.random.seed(1)
np.random.shuffle(lines)
np.random.seed(None)
num_val = int(len(lines_val))
num_train = len(lines)
batch_size = 3 # decrease/increase batch size according to your memory of your GPU
print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
from numpy.random import seed
from tensorflow import set_random_seed
np.random.seed(1)
seed(1)
set_random_seed(1)
model.compile(optimizer=Adadelta(), loss={'yolo_loss': lambda y_true, y_pred: y_pred})
epochs = 100
history = model.fit_generator(data_generator_wrapper(lines, batch_size, input_shape, anchors, num_classes, True),
steps_per_epoch=max(1, num_train // batch_size),
validation_data=data_generator_wrapper(lines_val, batch_size, input_shape, anchors, num_classes, False),
validation_steps=max(1, num_val // batch_size),
epochs=epochs,
initial_epoch=0,
callbacks=[reduce_lr, early_stopping, checkpoint])
def get_classes(classes_path):
"""loads the classes"""
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def get_anchors(anchors_path):
"""loads the anchors from a file"""
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
weights_path='model_data/yolo_weights.h5'):
"""create the training model"""
K.clear_session() # get a new session
image_input = Input(shape=(None, None, 3))
h, w = input_shape
num_anchors = len(anchors)
y_true = Input(shape=(h // grid_size_multiplier, w // grid_size_multiplier, anchors_per_level, num_classes + 5 + NUM_ANGLES3))
model_body = yolo_body(image_input, anchors_per_level, num_classes)
print('Create Poly-YOLO model with {} anchors and {} classes.'.format(num_anchors, num_classes))
if load_pretrained:
model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
print('Load weights {}.'.format(weights_path))
model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})(
[model_body.output, y_true])
model = Model([model_body.input, y_true], model_loss)
# print(model.summary())
return model
def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes, is_random):
"""data generator for fit_generator"""
n = len(annotation_lines)
i = 0
while True:
image_data = []
box_data = []
for b in range(batch_size):
if i == 0:
np.random.shuffle(annotation_lines)
image, box = get_random_data(annotation_lines[i], input_shape, random=is_random)
image_data.append(image)
box_data.append(box)
i = (i + 1) % n
image_data = np.array(image_data)
box_data = np.array(box_data)
y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)
yield [image_data, *y_true], np.zeros(batch_size)
def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes, random):
n = len(annotation_lines)
if n == 0 or batch_size <= 0: return None
return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes, random)
if __name__ == '__main__':
_main()
``` |
{
"source": "565353780/py-curling-reconstruct",
"score": 3
} |
#### File: py-curling-reconstruct/track_Reconstruct/trackReconstruct.py
```python
import cv2
import numpy as np
from matplotlib import pyplot as plt
import os
track_width = 4.75
track_height = 44.5
back_height = 1.22
circle_height = 4.88
hog_height = 11.28
track_num = 6
def project(point, H):
# print("H",H)
    actual_p = np.insert(np.array(point, float), len(point), 1).reshape(3, 1)
# print("actual_p:",actual_p)
projected_p = ((np.mat(H) * actual_p).flatten()).tolist()[0]
if (projected_p[2] != 0):
for i in range(3):
projected_p[i] = projected_p[i] / projected_p[2]
return projected_p[:-1]
def findPerspectiveMatrix(origin_quad, target_quad):
src_pts = np.array(origin_quad, np.float32).reshape(-1, 1, 2)
dst_pts = np.array(target_quad, np.float32).reshape(-1, 1, 2)
H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
return np.mat(H)
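# Quick sanity-check sketch: with the identity homography a point maps to itself, and mapping a
# unit square onto itself should recover (approximately) the identity matrix.
def _homography_example():
    identity = np.eye(3)
    assert project([2.0, 3.0], identity) == [2.0, 3.0]
    quad = [[0, 0], [1, 0], [1, 1], [0, 1]]
    return findPerspectiveMatrix(quad, quad)  # roughly the 3x3 identity (up to scale)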
# generate 2*num+1 rect
def generateTracks(center, width, height, interval, num, track_polygon_list):
for i in range(-num, num):
polygon = []
x = center[0] + i * (width + interval)
y = center[1]
addRect(x, y, width, height, polygon)
track_polygon_list.append(polygon)
pass
def addRect(x, y, w, h, out):
out.append([x - w / 2, y - h / 2])
out.append([x + w / 2, y - h / 2])
out.append([x + w / 2, y + h / 2])
out.append([x - w / 2, y + h / 2])
def projectTracks(H, track_polygon_list):
projected_polygon_list = []
for polygon in track_polygon_list:
tmp_list = []
for point in polygon:
projected_point = project(point, H)
tmp_list.append(projected_point)
projected_polygon_list.append(tmp_list)
return projected_polygon_list
def drawProjectedPolygon():
global projected_polygon_list
global image
for polygon in projected_polygon_list:
print("polygon", polygon)
        cv2.polylines(image, [np.array(polygon, np.int32)], True, (0, 0, 255))
plt.imshow(image[:, :, ::-1])
plt.draw()
def load_points(filename):
global image
global target_quad
target_quad = []
in_filename = filename + ".txt"
with open(in_filename, "r") as f:
lines = f.readlines()
for line in lines:
line = line.strip()
if not len(line): #
continue #
data_list = line.split(",")
print(data_list[0] + "," + data_list[1])
point = [float(data_list[0]), float(data_list[1])]
target_quad.append(point)
cv2.circle(image, (int(point[0]), int(point[1])), 5, (0, 255, 255), -1)
# cv2.polylines(image,[np.array(target_quad,np.int)],True,(0,255,255))
plt.imshow(image[:, :, ::-1])
plt.draw()
f.close()
def save_points(filename):
global target_quad
out_filename = filename + ".txt"
with open(out_filename, "w") as f:
for point in target_quad:
print(str(point[0]) + "," + str(point[1]), file=f)
f.close()
def on_key_press(event):
global target_quad
global track_polygon_list
global image
global tempfilename
global H
if event.key == "enter":
plt.draw()
elif event.key == "c":
print(track_polygon_list[0])
        H = findPerspectiveMatrix(track_polygon_list[3], target_quad)
        global projected_polygon_list
        projected_polygon_list = projectTracks(H, track_polygon_list)
        drawProjectedPolygon()
elif event.key == "i":
load_points(tempfilename)
elif event.key == "o":
save_points(tempfilename)
def on_press(event):
print("pressed:")
global target_quad
global image
global origin_img
if event.button == 1: #
if event.xdata == None:
return
target_quad.append([event.xdata, event.ydata])
cv2.circle(image, (int(event.xdata), int(event.ydata)), 10, (0, 255, 255), -1)
plt.imshow(image[:, :, ::-1])
plt.draw()
print("add position:", event.button, event.xdata, event.ydata)
elif event.button == 3: #
target_quad = []
image = origin_img
        plt.imshow(image[:, :, ::-1])
plt.draw()
print("clear positions")
def drawTrack(image_container, h, rock_point_list):
blue = (171, 130, 57)
white = (255, 255, 255)
red = (60, 20, 220)
yellow = (14, 183, 235)
scale = h / track_height
actual_h, actual_w = int(scale * track_height), int(track_num * scale * track_width)
tmp_image = np.zeros((actual_h, actual_w, 3), np.uint8)
tmp_image[:, :, 0] = 255
tmp_image[:, :, 1] = 255
tmp_image[:, :, 2] = 255
# draw vertical
for i in range(1, track_num):
x = int(scale * track_width * i)
cv2.line(tmp_image, (x, 0), (x, actual_h), blue, 1)
    # draw horizontal lines (hog lines in red, back lines in blue)
    height_list = [int(scale * hog_height), int(scale * (track_height - hog_height)), int(scale * back_height),
                   int(scale * (track_height - back_height))]
    color_list = [red, red, blue, blue]
    for i in range(len(height_list)):
cv2.line(tmp_image, (0, height_list[i]), (actual_w, height_list[i]), color_list[i], 1)
pass
circle_height_list = [int(scale * circle_height), int(scale * (track_height - circle_height))]
for i in range(track_num):
x = int(scale * track_width * (i + 0.5))
for height_j in circle_height_list:
drawTrackCircles(tmp_image, x, height_j, scale, blue, red, white)
for point in rock_point_list:
cv2.circle(tmp_image, (int(point[0] * actual_w), actual_h - int(point[1] * actual_h)), 3, red, 2, cv2.LINE_AA)
image_container.append(tmp_image)
def drawTrackCircles(image, x, y, scale, blue, red, white):
circle_r_list = [1.83, 1.22, 0.61, 0.15]
circle_color_list = [blue, white, red, white]
for i in range(4):
cv2.circle(image, (int(x), int(y)), int(circle_r_list[i] * scale), circle_color_list[i], -1, cv2.LINE_AA)
pass
## test sample
# H=np.mat(np.zeros((3,3))) #project matrix
# #polygon point order are conter clock wise
# track_polygon_list=[]
# projected_polygon_list=[]
# #4 point to be align
# target_quad=[]
# center=[0.,0.]
# generateTracks(center,4.75,44.5,0,3,track_polygon_list)
# filename="curling_4.jpg"
# image=cv2.imread(filename)
# (tempfilename,extension) = os.path.splitext(filename)
# (h,w,_)=image.shape
# scale=min(512/w,512/h)
# w1=int(w*scale)
# h1=int(h*scale)
# image=cv2.resize(image,(w1,h1))
# origin_img=image
# tmp_image=[]
# drawTrack(tmp_image,h1,[])
# fig = plt.figure()
# fig.canvas.mpl_connect('button_press_event', on_press)
# fig.canvas.mpl_connect('key_press_event', on_key_press)
# plt.imshow(tmp_image[0][:,:,::-1])
# plt.ioff()
# plt.show()
``` |
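A self-contained sketch of the calibration flow these helpers implement: one track rectangle in rink coordinates is matched against four clicked image points to estimate a homography, which then maps any rink-plane point into the image. The pixel coordinates below are invented for illustration; in the tool they come from the mouse callback.

```python
import numpy as np
import cv2

# One track rectangle in rink coordinates (metres), corners ordered as in addRect().
track_w, track_h = 4.75, 44.5
rink_quad = [[-track_w / 2, -track_h / 2], [track_w / 2, -track_h / 2],
             [track_w / 2, track_h / 2], [-track_w / 2, track_h / 2]]

# Four image points that would normally be clicked by the user (made up here).
image_quad = [[120, 560], [430, 555], [520, 40], [60, 45]]

src = np.array(rink_quad, np.float32).reshape(-1, 1, 2)
dst = np.array(image_quad, np.float32).reshape(-1, 1, 2)
H, _ = cv2.findHomography(src, dst, cv2.RANSAC, 5.0)

# Project an arbitrary rink point (e.g. a rock on the centre line, 10 m up the sheet).
rock = np.array([0.0, 10.0, 1.0])
u, v, w = H @ rock
print("pixel position:", u / w, v / w)
```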
{
"source": "565353780/pytorch-ssd",
"score": 3
} |
#### File: 565353780/pytorch-ssd/detect.py
```python
import os
import torch
from torchvision import transforms
from utils import *
from PIL import Image, ImageDraw, ImageFont
import time
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Load model checkpoint
checkpoint = 'checkpoint_ssd300.pth.tar'
checkpoint = torch.load(checkpoint)
start_epoch = checkpoint['epoch'] + 1
print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
model = checkpoint['model']
model = model.to(device)
model.eval()
# Transforms
resize = transforms.Resize((300, 300))
to_tensor = transforms.ToTensor()
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
def detect(original_image, min_score, max_overlap, top_k, suppress=None):
"""
Detect objects in an image with a trained SSD300, and visualize the results.
:param original_image: image, a PIL Image
:param min_score: minimum threshold for a detected box to be considered a match for a certain class
:param max_overlap: maximum overlap two boxes can have so that the one with the lower score is not suppressed via Non-Maximum Suppression (NMS)
:param top_k: if there are a lot of resulting detection across all classes, keep only the top 'k'
:param suppress: classes that you know for sure cannot be in the image or you do not want in the image, a list
:return: annotated image, a PIL Image
"""
# Transform
image = normalize(to_tensor(resize(original_image)))
# Move to default device
image = image.to(device)
# Forward prop.
predicted_locs, predicted_scores = model(image.unsqueeze(0))
# Detect objects in SSD output
det_boxes, det_labels, det_scores = model.detect_objects(predicted_locs, predicted_scores, min_score=min_score,
max_overlap=max_overlap, top_k=top_k)
# Move detections to the CPU
det_boxes = det_boxes[0].to('cpu')
# Transform to original image dimensions
original_dims = torch.FloatTensor(
[original_image.width, original_image.height, original_image.width, original_image.height]).unsqueeze(0)
det_boxes = det_boxes * original_dims
# Decode class integer labels
det_labels = [rev_label_map[l] for l in det_labels[0].to('cpu').tolist()]
# If no objects found, the detected labels will be set to ['0.'], i.e. ['background'] in SSD300.detect_objects() in model.py
if det_labels == ['background']:
# Just return original image
return original_image
# Annotate
annotated_image = original_image
draw = ImageDraw.Draw(annotated_image)
# font = ImageFont.truetype("./calibril.ttf", 15)
font = ImageFont.load_default()
# Suppress specific classes, if needed
for i in range(det_boxes.size(0)):
if suppress is not None:
if det_labels[i] in suppress:
continue
# Boxes
box_location = det_boxes[i].tolist()
draw.rectangle(xy=box_location, outline=label_color_map[det_labels[i]])
draw.rectangle(xy=[l + 1. for l in box_location], outline=label_color_map[
det_labels[i]]) # a second rectangle at an offset of 1 pixel to increase line thickness
# draw.rectangle(xy=[l + 2. for l in box_location], outline=label_color_map[
# det_labels[i]]) # a third rectangle at an offset of 1 pixel to increase line thickness
# draw.rectangle(xy=[l + 3. for l in box_location], outline=label_color_map[
# det_labels[i]]) # a fourth rectangle at an offset of 1 pixel to increase line thickness
# Text
text_size = font.getsize(det_labels[i].upper())
text_location = [box_location[0] + 2., box_location[1] - text_size[1]]
textbox_location = [box_location[0], box_location[1] - text_size[1], box_location[0] + text_size[0] + 4.,
box_location[1]]
draw.rectangle(xy=textbox_location, fill=label_color_map[det_labels[i]])
draw.text(xy=text_location, text=det_labels[i].upper(), fill='white',
font=font)
del draw
return annotated_image
if __name__ == '__main__':
img_path_list = os.listdir("sample_images")
try:
os.makedirs('result/')
except:
print('Destination dir exists')
pass
time_list = []
for i in range(10):
for img_path in img_path_list:
original_image = Image.open('sample_images/' + img_path, mode='r')
original_image = original_image.convert('RGB')
sta = time.time()
ret = detect(original_image, min_score=0.6, max_overlap=0.3, top_k=20)#.show()
time_spend = time.time() - sta
print("Once detect cost time:", time_spend)
time_list.append(time_spend)
ret.save(os.path.join('result', img_path))
    # Average over all timed detections (sum() avoids shadowing the time module).
    avg_time = sum(time_list) / len(time_list)
print("average time spend = ", avg_time)
``` |
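The `suppress` argument documented in `detect()` is never exercised by the `__main__` loop above; a minimal, hypothetical call that hides a couple of classes might look like this (the file name is a placeholder, and 'chair'/'diningtable' assume the VOC label names used by this repository's `label_map`).

```python
# Assumes detect() and the globals from detect.py are already defined in this session.
from PIL import Image

img = Image.open('sample_images/000001.jpg', mode='r').convert('RGB')  # placeholder path
annotated = detect(img,
                   min_score=0.4,
                   max_overlap=0.45,
                   top_k=200,
                   suppress=['chair', 'diningtable'])  # assumed VOC class names
annotated.show()
```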
{
"source": "565353780/pytorch-voxblox-plus-plus",
"score": 2
} |
#### File: catkin_boost_python_buildtool_test/test/test.py
```python
import unittest
import catkin_boost_python_test as cbpt
from nose import SkipTest
class TestTest(unittest.TestCase):
def test_test(self):
self.assertEqual(cbpt.test(), 42)
if __name__ == '__main__':
unittest.main()
```
#### File: mask_rcnn_ros/scripts/Detectron2Detector.py
```python
import cv2
from detectron2.config import get_cfg
from detectron2.engine.defaults import DefaultPredictor
class Detectron2Detector(object):
def __init__(self):
self.model_path = None
self.config_file = None
self.confidence_threshold = 0.5
self.cfg = None
self.predictor = None
return
def loadModel(self,
model_path,
config_file):
self.model_path = model_path
self.config_file = config_file
print("start loading model...", end="")
self.cfg = get_cfg()
self.cfg.merge_from_file(config_file)
self.cfg.MODEL.WEIGHTS = model_path
self.cfg.MODEL.RETINANET.SCORE_THRESH_TEST = self.confidence_threshold
self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = self.confidence_threshold
self.cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = self.confidence_threshold
self.cfg.freeze()
self.predictor = DefaultPredictor(self.cfg)
print("SUCCESS!")
return True
def detect_image(self, image):
"""
Args:
image (np.ndarray): an image of shape (H, W, C) (in BGR order).
This is the format used by OpenCV.
Returns:
predictions (dict): the output of the model.
"""
result = self.predictor(image)
pred_boxes = result["instances"].pred_boxes.tensor.cpu().numpy()
scores = result["instances"].scores.cpu().numpy()
pred_classes = result["instances"].pred_classes.cpu().numpy()
pred_masks = result["instances"].pred_masks.cpu().numpy()
result_dict = {}
result_dict["pred_boxes"] = pred_boxes
result_dict["scores"] = scores
result_dict["pred_classes"] = pred_classes
result_dict["pred_masks"] = pred_masks
return result_dict
if __name__ == "__main__":
model_path = "/home/chli/.ros/model_final_a3ec72.pkl"
config_file = "/home/chli/.ros/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml"
detectron2_detector = Detectron2Detector()
detectron2_detector.loadModel(model_path, config_file)
image_path = "/home/chli/vpp_ws/test.jpg"
image = cv2.imread(image_path)
result_dict = detectron2_detector.detect_image(image)
print(result_dict)
for box in result_dict["pred_boxes"].astype(int):
cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]), (0, 0, 255), 3)
cv2.imshow("result", image)
cv2.waitKey(5000)
```
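The `__main__` block above only draws `pred_boxes`; a small follow-on sketch that also blends the boolean instance masks from `detect_image()` into the frame, using nothing beyond the returned dictionary (colour and alpha are arbitrary choices).

```python
import numpy as np
import cv2

def overlay_masks(image, result_dict, color=(0, 255, 0), alpha=0.5):
    """Blend each boolean HxW mask from detect_image() into a copy of the image."""
    out = image.copy()
    for mask in result_dict["pred_masks"]:
        mask = mask.astype(bool)
        out[mask] = ((1 - alpha) * out[mask] + alpha * np.array(color)).astype(np.uint8)
    return out

# Usage, continuing from the __main__ block above:
# vis = overlay_masks(image, result_dict)
# cv2.imwrite("result_with_masks.jpg", vis)
```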
#### File: pointcloud_diff/scripts/PointCloudDiff.py
```python
import os
import numpy as np
import open3d as o3d
from time import time, sleep
import rospy
from sensor_msgs.point_cloud2 import read_points
from vpp_msgs.srv import GetMap
from tensorboard_logger_ros.msg import Scalar
from tensorboard_logger_ros.srv import ScalarToBool
point_move_dict = {
"01": [-9.2, 0.1, 0],
"02": [0.9, -1.6, 0],
"03": [-8.3, -0.5, 0],
"04": [7.35, -4.2, 0],
"05": [0, -0.4, 0],
"06": [-4.1, -3.32, 0]
}
DEBUG = False
class PointCloudDiff(object):
def __init__(self):
self.scene_pointcloud_folder_path = None
# dataset data
self.scene_pointcloud = None
self.scene_point_num = None
self.object_pointcloud_list = None
self.merge_object_pointcloud = None
self.valid_object_pointcloud_list = None
        self.merge_valid_object_pointcloud = None
# recon data
self.object_pointcloud_save_path = None
self.object_last_create_time = None
self.object_last_modify_time = None
self.log_start_time = None
self.last_log_time = None
if not DEBUG:
sleep(10)
self.get_map_proxy = rospy.ServiceProxy("/gsm_node/get_map", GetMap)
self.tf_logger_proxy = rospy.ServiceProxy('/tensorboard_logger/log_scalar', ScalarToBool)
return
def loadScenePointCloud(self, scene_pointcloud_folder_path):
self.scene_pointcloud_folder_path = scene_pointcloud_folder_path
if self.scene_pointcloud_folder_path[-1] != "/":
self.scene_pointcloud_folder_path += "/"
scene_idx = self.scene_pointcloud_folder_path.split("/")[-2]
scene_point_move_list = point_move_dict[scene_idx]
if not os.path.exists(self.scene_pointcloud_folder_path):
print("[ERROR][PointCloudDiff::loadScenePointCloud]")
print("\t scene_pointcloud_folder not exist!")
return False
scene_pointcloud_folder_filename_list = \
os.listdir(self.scene_pointcloud_folder_path)
scene_pointcloud_filename = None
for scene_pointcloud_folder_filename in scene_pointcloud_folder_filename_list:
if ".ply" not in scene_pointcloud_folder_filename:
continue
scene_pointcloud_filename = scene_pointcloud_folder_filename
break
scene_pointcloud_file_path = \
self.scene_pointcloud_folder_path + scene_pointcloud_filename
pointcloud_file_path_split_list = scene_pointcloud_file_path.split(".")
if pointcloud_file_path_split_list[-1] == "obj":
mesh = o3d.io.read_triangle_mesh(scene_pointcloud_file_path)
self.scene_pointcloud = o3d.geometry.PointCloud()
scene_pointcloud_points = np.array(mesh.vertices)
scene_pointcloud_points[:, :] += scene_point_move_list
self.scene_pointcloud.points = \
o3d.utility.Vector3dVector(scene_pointcloud_points)
self.scene_point_num = scene_pointcloud_points.shape[0]
return True
self.scene_pointcloud = o3d.io.read_point_cloud(scene_pointcloud_file_path)
scene_pointcloud_points = np.array(self.scene_pointcloud.points)
scene_pointcloud_points[:, :] += scene_point_move_list
self.scene_pointcloud.points = \
o3d.utility.Vector3dVector(scene_pointcloud_points)
self.scene_point_num = scene_pointcloud_points.shape[0]
self.scene_pointcloud.estimate_normals(
search_param=o3d.geometry.KDTreeSearchParamHybrid(
radius=0.1, max_nn=30))
return True
def updateObjectPointCloudSavePath(self):
object_pointcloud_save_basepath = os.path.expanduser('~') + \
"/.ros/RUN_LOG/PointCloud2ToObjectVecConverterServer/"
pointcloud_save_folder_list = os.listdir(object_pointcloud_save_basepath)
if len(pointcloud_save_folder_list) == 0:
print("[ERROR][PointCloudDiff::getObjectPointCloudSavePath]")
print("\t pointcloud_save_folder not exist!")
return False
max_idx_list = None
max_idx_folder_name = None
for pointcloud_save_folder in pointcloud_save_folder_list:
date_split_list = pointcloud_save_folder.split("_")
if len(date_split_list) < 4:
continue
if "-" not in date_split_list[3]:
continue
time_split_list = date_split_list[3].split("-")
if(len(time_split_list) != 3):
continue
current_idx_list = [
int(date_split_list[0]),
int(date_split_list[1]),
int(date_split_list[2]),
int(time_split_list[0]),
int(time_split_list[1]),
int(time_split_list[2])
]
if max_idx_list is None:
max_idx_list = current_idx_list
max_idx_folder_name = pointcloud_save_folder
continue
for i in range(len(max_idx_list)):
if current_idx_list[i] > max_idx_list[i]:
max_idx_list = current_idx_list
max_idx_folder_name = pointcloud_save_folder
break
if current_idx_list[i] < max_idx_list[i]:
break
if max_idx_folder_name is None:
print("[ERROR][PointCloudDiff::getObjectPointCloudSavePath]")
print("\t find latest folder failed!")
return False
self.object_pointcloud_save_path = object_pointcloud_save_basepath + \
max_idx_folder_name + "/"
return True
def getMergePointCloud(self, pointcloud_list):
merge_pointcloud = o3d.geometry.PointCloud()
points_list = []
colors_list = []
for pointcloud in pointcloud_list:
points_list.append(np.array(pointcloud.points))
colors_list.append(np.array(pointcloud.colors))
merge_points = np.concatenate(points_list, axis=0)
merge_colors = np.concatenate(colors_list, axis=0)
merge_pointcloud.points = o3d.utility.Vector3dVector(merge_points)
merge_pointcloud.colors = o3d.utility.Vector3dVector(merge_colors)
return merge_pointcloud
def loadObjectPointCloud(self):
self.object_pointcloud_list = []
scene_idx = self.scene_pointcloud_folder_path.split("/")[-2]
object_point_move_list = point_move_dict[scene_idx]
object_pointcloud_folder_path = self.scene_pointcloud_folder_path + \
"region_objects/"
object_pointcloud_filename_list = os.listdir(object_pointcloud_folder_path)
for object_pointcloud_filename in object_pointcloud_filename_list:
object_pointcloud_filepath = object_pointcloud_folder_path + \
object_pointcloud_filename
object_pointcloud = o3d.io.read_point_cloud(object_pointcloud_filepath)
object_points = np.array(object_pointcloud.points)
object_points[:, :] += object_point_move_list
object_pointcloud.points = \
o3d.utility.Vector3dVector(object_points)
self.object_pointcloud_list.append(object_pointcloud)
self.merge_object_pointcloud = \
self.getMergePointCloud(self.object_pointcloud_list)
self.merge_object_pointcloud.estimate_normals(
search_param=o3d.geometry.KDTreeSearchParamHybrid(
radius=0.1, max_nn=30))
return True
def loadValidObjectPointCloud(self):
self.valid_object_pointcloud_list = []
scene_idx = self.scene_pointcloud_folder_path.split("/")[-2]
object_point_move_list = point_move_dict[scene_idx]
object_pointcloud_folder_path = \
self.scene_pointcloud_folder_path + "valid_region_objects/"
object_pointcloud_filename_list = \
os.listdir(object_pointcloud_folder_path)
for object_pointcloud_filename in object_pointcloud_filename_list:
object_pointcloud_filepath = object_pointcloud_folder_path + \
object_pointcloud_filename
object_pointcloud = o3d.io.read_point_cloud(object_pointcloud_filepath)
object_points = np.array(object_pointcloud.points)
object_points[:, :] += object_point_move_list
object_pointcloud.points = \
o3d.utility.Vector3dVector(object_points)
self.valid_object_pointcloud_list.append(object_pointcloud)
self.merge_valid_object_pointcloud = \
self.getMergePointCloud(self.valid_object_pointcloud_list)
self.merge_valid_object_pointcloud.estimate_normals(
search_param=o3d.geometry.KDTreeSearchParamHybrid(
radius=0.1, max_nn=30))
return True
def loadAllPointCloud(self, scene_pointcloud_folder_path):
if not self.updateObjectPointCloudSavePath():
print("[ERROR][PointCloudDiff::loadAllPointCloud]")
print("\t updateObjectPointCloudSavePath failed!")
return False
if not self.loadScenePointCloud(scene_pointcloud_folder_path):
print("[ERROR][PointCloudDiff::loadAllPointCloud]")
print("\t loadScenePointCloud failed!")
return False
if not self.loadObjectPointCloud():
print("[ERROR][PointCloudDiff::loadAllPointCloud]")
print("\t loadObjectPointCloud failed!")
return False
if not self.loadValidObjectPointCloud():
print("[ERROR][PointCloudDiff::loadAllPointCloud]")
print("\t loadValidObjectPointCloud failed!")
return False
return True
def logScalar(self, name, step, value):
scalar = Scalar()
scalar.name = str(name)
scalar.step = int(step)
scalar.value = float(value)
log_success = self.tf_logger_proxy(scalar)
return log_success
def loadPointCloud2Msg(self, pointcloud2_msg):
point_list = \
read_points(pointcloud2_msg,
skip_nans=True,
field_names=("x", "y", "z"))
point_array = []
for point in point_list:
point_array.append(point[0:3])
pointcloud = o3d.geometry.PointCloud()
pointcloud.points = o3d.utility.Vector3dVector(np.array(point_array))
return pointcloud
def logSceneData(self):
pointcloud2_msg = self.get_map_proxy()
current_pcd = self.loadPointCloud2Msg(pointcloud2_msg.map_cloud)
dist_to_scene = current_pcd.compute_point_cloud_distance(self.scene_pointcloud)
dist_to_scene = np.asarray(dist_to_scene)
avg_dist2_error = 0
for dist in dist_to_scene:
avg_dist2_error += dist * dist
avg_dist2_error /= dist_to_scene.shape[0]
dist_to_recon = \
self.scene_pointcloud.compute_point_cloud_distance(current_pcd)
dist_to_recon = np.asarray(dist_to_recon)
recon_point_num = len(np.where(dist_to_recon < 0.2)[0])
recon_percent = 1.0 * recon_point_num / self.scene_point_num
if not self.logScalar("PointCloudDiff/scene_error",
self.last_log_time - self.log_start_time,
avg_dist2_error):
print("[ERROR][PointCloudDiff::startComparePointCloud]")
print("\t logScalar for point_distance_mean failed!")
return False
if not self.logScalar("PointCloudDiff/scene_completeness",
self.last_log_time - self.log_start_time,
recon_percent):
print("[ERROR][PointCloudDiff::startComparePointCloud]")
print("\t logScalar for recon_percent failed!")
return False
return True
def logObjectData(self):
pointcloud_save_filename_list = \
os.listdir(self.object_pointcloud_save_path)
if len(pointcloud_save_filename_list) == 0:
return True
object_filename_list = []
for pointcloud_save_filename in pointcloud_save_filename_list:
if pointcloud_save_filename[:7] != "object_":
continue
object_filename_list.append(pointcloud_save_filename)
if len(object_filename_list) == 0:
return True
object_current_create_time = os.path.getctime(
self.object_pointcloud_save_path + "object_0.pcd")
object_current_modify_time = os.path.getmtime(
self.object_pointcloud_save_path + "object_0.pcd")
if object_current_create_time == self.object_last_create_time and \
object_current_modify_time == self.object_last_modify_time:
return True
self.object_last_create_time = object_current_create_time
self.object_last_modify_time = object_current_modify_time
recon_object_pointcloud_list = []
for object_filename in object_filename_list:
recon_object_pointcloud = o3d.io.read_point_cloud(
self.object_pointcloud_save_path + object_filename)
if np.array(recon_object_pointcloud.points).shape[0] == 0:
self.object_last_create_time = None
self.object_last_modify_time = None
print("[WARN][PointCloudDiff::logObjectData]")
print("\t object pointcloud files are updating, skip this logging!")
return True
recon_object_pointcloud_list.append(recon_object_pointcloud)
recon_merge_object_pointcloud = self.getMergePointCloud(
recon_object_pointcloud_list)
recon_merge_object_pointcloud.estimate_normals(
search_param=o3d.geometry.KDTreeSearchParamHybrid(
radius=0.1, max_nn=30))
if DEBUG:
o3d.visualization.draw_geometries([
self.merge_object_pointcloud, recon_merge_object_pointcloud])
exit()
dist_to_scene = \
recon_merge_object_pointcloud.compute_point_cloud_distance(
self.merge_object_pointcloud)
dist_to_scene = np.asarray(dist_to_scene)
avg_dist2_error = 0
for dist in dist_to_scene:
avg_dist2_error += dist * dist
avg_dist2_error /= dist_to_scene.shape[0]
dist_to_recon = \
self.merge_object_pointcloud.compute_point_cloud_distance(
recon_merge_object_pointcloud)
dist_to_recon = np.asarray(dist_to_recon)
recon_point_num = len(np.where(dist_to_recon < 0.2)[0])
recon_percent = 1.0 * recon_point_num / self.scene_point_num
if not self.logScalar("PointCloudDiff/object_error",
self.last_log_time - self.log_start_time,
avg_dist2_error):
print("[ERROR][PointCloudDiff::startComparePointCloud]")
print("\t logScalar for point_distance_mean failed!")
return False
if not self.logScalar("PointCloudDiff/object_completeness",
self.last_log_time - self.log_start_time,
recon_percent):
print("[ERROR][PointCloudDiff::startComparePointCloud]")
print("\t logScalar for recon_percent failed!")
return False
dist_to_scene = \
recon_merge_object_pointcloud.compute_point_cloud_distance(
self.merge_valid_object_pointcloud)
dist_to_scene = np.asarray(dist_to_scene)
avg_dist2_error = 0
for dist in dist_to_scene:
avg_dist2_error += dist * dist
avg_dist2_error /= dist_to_scene.shape[0]
dist_to_recon = \
self.merge_valid_object_pointcloud.compute_point_cloud_distance(
recon_merge_object_pointcloud)
dist_to_recon = np.asarray(dist_to_recon)
recon_point_num = len(np.where(dist_to_recon < 0.2)[0])
recon_percent = 1.0 * recon_point_num / self.scene_point_num
if not self.logScalar("PointCloudDiff/valid_object_error",
self.last_log_time - self.log_start_time,
avg_dist2_error):
print("[ERROR][PointCloudDiff::startComparePointCloud]")
print("\t logScalar for point_distance_mean failed!")
return False
if not self.logScalar("PointCloudDiff/valid_object_completeness",
self.last_log_time - self.log_start_time,
recon_percent):
print("[ERROR][PointCloudDiff::startComparePointCloud]")
print("\t logScalar for recon_percent failed!")
return False
return True
def startComparePointCloud(self):
self.log_start_time = time()
self.last_log_time = self.log_start_time
while True:
if not DEBUG:
sleep(10)
new_log_time = time()
if new_log_time == self.last_log_time:
return True
self.last_log_time = new_log_time
if not DEBUG:
if not self.logSceneData():
print("[ERROR][PointCloudDiff::startComparePointCloud]")
print("\t logSceneData failed!")
break
if not self.logObjectData():
print("[ERROR][PointCloudDiff::startComparePointCloud]")
print("\t logObjectData failed!")
break
return True
if __name__ == "__main__":
rospy.init_node("PointCloudDiff")
scene_pointcloud_folder_path = \
os.path.expanduser('~') + "/" + \
rospy.get_param("/scene_pointcloud_folder_path")
pointcloud_diff = PointCloudDiff()
pointcloud_diff.loadAllPointCloud(scene_pointcloud_folder_path)
pointcloud_diff.startComparePointCloud()
``` |
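The scene-level numbers logged above boil down to a mean squared nearest-neighbour distance (the `*_error` curves) and the fraction of ground-truth points that have a reconstructed point within 0.2 m (the `*_completeness` curves). A compact, standalone restatement with vectorised NumPy instead of the explicit loops; the random clouds below merely stand in for the reconstructed and dataset point clouds.

```python
import numpy as np
import open3d as o3d

def pointcloud_metrics(recon_pcd, gt_pcd, dist_thresh=0.2):
    """Return (mean squared recon->gt distance, completeness of gt w.r.t. recon)."""
    dist_to_gt = np.asarray(recon_pcd.compute_point_cloud_distance(gt_pcd))
    error = float(np.mean(dist_to_gt ** 2)) if dist_to_gt.size else 0.0
    dist_to_recon = np.asarray(gt_pcd.compute_point_cloud_distance(recon_pcd))
    completeness = float(np.mean(dist_to_recon < dist_thresh)) if dist_to_recon.size else 0.0
    return error, completeness

# Example with two random stand-in clouds:
a = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(np.random.rand(1000, 3)))
b = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(np.random.rand(1000, 3)))
print(pointcloud_metrics(a, b))
```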
{
"source": "565353780/railway-catenary-detect",
"score": 2
} |
#### File: Python/LapNet/test.py
```python
import torch
import numpy as np
import time
import os
# import argparse
import cv2
from LapNet import LAPNet
from loss import DiscriminativeLoss
from create_dataset import createDataset
from logger import Logger
from torch.nn import DataParallel
from collections import OrderedDict
from torch.nn.parameter import Parameter
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
class LapNet_Test:
def __init__(self):
# torch.cuda.set_device(args.gpu_idx)
torch.cuda.set_device(0)
# self.INPUT_CHANNELS = 3
# self.OUTPUT_CHANNELS = 2
# self.LEARNING_RATE = args.lr #1e-5
# self.BATCH_SIZE = args.batch_size #20
# self.NUM_EPOCHS = args.epoch #100
# self.LOG_INTERVAL = 20
# self.INS_CH = 32
# self.SIZE = [args.img_size[0], args.img_size[1]] #[224, 224]
# self.NUM_WORKERS = args.num_workers #20
self.INPUT_CHANNELS = 3
self.OUTPUT_CHANNELS = 2
self.LEARNING_RATE = 3e-4
self.BATCH_SIZE = 32
self.NUM_EPOCHS = 10000000000000
self.LOG_INTERVAL = 20
self.INS_CH = 32
self.SIZE = [1024,512]
self.NUM_WORKERS = 32
self.root_path = os.getcwd() + '/../src/Python/LapNet'
# dataset_path = args.dataset_path
self.model = LAPNet(input_ch=self.INPUT_CHANNELS, output_ch=self.OUTPUT_CHANNELS,internal_ch = 8).cuda()
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.LEARNING_RATE, betas=(0.9, 0.99), amsgrad=True)
self.start_epoch = -1
# chkpt_filename = "LapNet_chkpt_default"+"_GPU"+str(args.gpu_idx)+".pth"
current_file_list = os.listdir(self.root_path + '/trained_model')
current_epoch_num = -1
for file_name in current_file_list:
print("filename:",file_name)
self.output(["filename:",file_name])
if 'LapNet_chkpt_better_epoch' in file_name:
temp_epoch_num = int(file_name.split('_')[3].split('h')[1])
if temp_epoch_num > current_epoch_num:
current_epoch_num = temp_epoch_num
# chkpt_filename = self.root_path + '/trained_model/'+"LapNet_chkpt_better_epoch" + str(current_epoch_num) + "_GPU" + str(args.gpu_idx) + ".pth"
chkpt_filename = self.root_path + '/trained_model/'+"LapNet_chkpt_better_epoch" + str(current_epoch_num) + "_GPU" + str(0) + ".pth"
if not os.path.exists(self.root_path + '/trained_model'):
os.mkdir(self.root_path + '/trained_model')
if os.path.isfile(chkpt_filename):
checkpoint = torch.load(chkpt_filename)
self.start_epoch = checkpoint['epoch']
print("Found Checkpoint file",chkpt_filename,".")
self.output(["Found Checkpoint file",chkpt_filename,"."])
print("The checkpoint was saved at epoch",checkpoint['epoch'],".")
self.output(["The checkpoint was saved at epoch",str(checkpoint['epoch']),"."])
print("Training stats is reset form epoch",self.start_epoch)
self.output(["Training stats is reset form epoch",str(self.start_epoch)])
# if(args.optimizer_reset != 1):
if(100 != 1):
self.optimizer.load_state_dict(checkpoint['optimizer'])
else:
print("Optimizer State Reset.")
self.output(["Optimizer State Reset."])
self.model.load_state_dict(checkpoint['net'])
self.load_state_dict(self.model, self.state_dict(self.model))
# scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[20,30,400,1000,6000,7000,8000], gamma=0.9)
def output(self, string):
with open(self.root_path + '/temp_output.txt', 'a+') as f:
for str_ in string:
f.write(str_)
f.write('\n')
return
def state_dict(self, model, destination=None, prefix='', keep_vars=False):
own_state = model.module if isinstance(model, torch.nn.DataParallel) \
else model
if destination is None:
destination = OrderedDict()
for name, param in own_state._parameters.items():
if param is not None:
destination[prefix + name] = param if keep_vars else param.data
for name, buf in own_state._buffers.items():
if buf is not None:
destination[prefix + name] = buf
for name, module in own_state._modules.items():
if module is not None:
self.state_dict(module, destination, prefix + name + '.', keep_vars=keep_vars)
return destination
def load_state_dict(self, model, state_dict, strict=True):
own_state = model.module.state_dict() if isinstance(model, torch.nn.DataParallel) \
else model.state_dict()
for name, param in state_dict.items():
if name in own_state:
if isinstance(param, Parameter):
# backwards compatibility for serialized parameters
param = param.data
try:
own_state[name].copy_(param)
except Exception:
raise RuntimeError('While copying the parameter named {}, '
'whose dimensions in the model are {} and '
'whose dimensions in the checkpoint are {}.'
.format(name, own_state[name].size(), param.size()))
elif strict:
raise KeyError('unexpected key "{}" in state_dict'
.format(name))
if strict:
missing = set(own_state.keys()) - set(state_dict.keys())
if len(missing) > 0:
raise KeyError('missing keys in state_dict: "{}"'.format(missing))
def test(self):
self.model.eval()
img_in_path = os.getcwd() + "/../Server_DataBase/message_set/lapnet_eval.jpg"
img_out_path = os.getcwd() + "/../Server_DataBase/message_set/lapnet_eval_out.jpg"
img_send_finished = os.getcwd() + "/../Server_DataBase/message_set/lapnet_eval_finish.txt"
test_stopped = os.getcwd() + "/../Server_DataBase/message_set/lapnet_test_stopped.txt"
while True:
if os.path.exists(test_stopped):
os.remove(test_stopped)
return
if not os.path.exists(img_send_finished):
continue
os.remove(img_send_finished)
train_dataset = createDataset(img_in_path, size=self.SIZE)
train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=24, pin_memory=True,
shuffle=False, num_workers=0)
img = list(enumerate(train_dataloader))[0][1]
img_tensor = torch.tensor(img).cuda()
# Predictions
sem_pred = self.model(img_tensor)
# sem_pred=torch.floor(sem_pred)
seg_map = torch.squeeze(sem_pred, 0).cpu().detach().numpy()
seg_show = seg_map[1]
# (h,w)=seg_show.shape
# result_img = np.zeros((h, w))
# for i in range(result_img.shape[0]):
# for j in range(result_img.shape[1]):
# if seg_show[i][j] > -2:
# result_img[i][j] = 255
_, seg_show2 = cv2.threshold(seg_show + 1, 0, 0, cv2.THRESH_TOZERO)
seg_show2 = cv2.normalize(seg_show2, seg_show2, 0, 1, cv2.NORM_MINMAX)
seg_show2 = cv2.convertScaleAbs(seg_show2, seg_show2, 255)
result_img = cv2.applyColorMap(seg_show2, cv2.COLORMAP_MAGMA)
cv2.imwrite(img_out_path, result_img)
os.remove(img_in_path)
def Main_LapNet_Test():
# logger = Logger('./logslite'+str(args.gpu_idx))
# parser = argparse.ArgumentParser(description="Train model")
# parser.add_argument('--dataset-path', default='Dataset/train_dataset')
# parser.add_argument('--lr', type=float, default=3e-4, help='learning rate')
# parser.add_argument('--batch-size', type=int, default=32, help='batch size')
# parser.add_argument('--img-size', type=int, nargs='+', default=[1024,512], help='image resolution: [width height]')
# parser.add_argument('--epoch', type=int, default=10000000000000)
# parser.add_argument('--gpu-idx',type = int,default= 0, help='using gpu(idx)')
# parser.add_argument('--optimizer-reset', type=int, default=100)
# parser.add_argument('--num-workers', type=int, default=1)
# args = parser.parse_args()
# if os.getcwd()[-6:] != "LapNet":
# os.chdir(os.getcwd() + '/../src/Python/LapNet')
with open(os.getcwd() + '/../src/Python/LapNet/temp_train_mode.txt', 'w') as f:
f.write('True')
print('in ok')
lapnet_test = LapNet_Test()
print('init ok')
lapnet_test.test()
print('test ok')
return
if __name__ == "__main__":
Main_LapNet_Test()
``` |
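The tail of `test()` turns the raw segmentation map into a heat-map image via threshold, normalise and colour-map; the same three OpenCV calls on a dummy array, for reference (the array values are arbitrary).

```python
import numpy as np
import cv2

# Stand-in for seg_map[1]: a float map of per-pixel scores.
seg_show = np.random.randn(512, 1024).astype(np.float32)

# Keep only values above -1 (shifted to be non-negative), as in test().
_, clipped = cv2.threshold(seg_show + 1, 0, 0, cv2.THRESH_TOZERO)
# Stretch to [0, 1], convert to 8-bit, then apply a colour map for visualisation.
normalized = cv2.normalize(clipped, None, 0, 1, cv2.NORM_MINMAX)
as_uint8 = cv2.convertScaleAbs(normalized, alpha=255)
heatmap = cv2.applyColorMap(as_uint8, cv2.COLORMAP_MAGMA)
cv2.imwrite("lapnet_eval_heatmap.jpg", heatmap)
```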
{
"source": "565353780/robot-manage-ros",
"score": 2
} |
#### File: robot_move_state_manager/scripts/RobotMoveStateManager.py
```python
from math import cos, sin, sqrt
import numpy as np
from time import time, sleep
import rospy
from tf import transformations
from gazebo_msgs.srv import GetModelState
from tensorboard_logger_ros.msg import Scalar
from tensorboard_logger_ros.srv import ScalarToBool
class RobotMoveStateManager(object):
def __init__(self):
self.robot_name = None
self.robot_num = None
self.robot_move_dist_list = None
self.robot_wait_time_list = None
sleep(10)
self.get_model_state_proxy = \
rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)
self.tf_logger_proxy = \
rospy.ServiceProxy('/tensorboard_logger/log_scalar', ScalarToBool)
return
def loadRobot(self, robot_name, robot_num):
self.robot_name = robot_name
self.robot_num = robot_num
self.robot_move_dist_list = [0 for _ in range(self.robot_num)]
self.robot_wait_time_list = [0 for _ in range(self.robot_num)]
return True
def logScalar(self, name, step, value):
scalar = Scalar()
scalar.name = str(name)
scalar.step = int(step)
scalar.value = float(value)
log_success = self.tf_logger_proxy(scalar)
return log_success
def getRobotState(self, robot_name):
robot_state = self.get_model_state_proxy(robot_name, "")
return robot_state
def getEulerAngleFromQuaternion(self, quaternion):
(roll, pitch, yaw) = transformations.euler_from_quaternion([
quaternion[0], quaternion[1], quaternion[2], quaternion[3]])
return np.array([roll, pitch, yaw])
def getQuaternionFromEulerAngle(self, euler_angle):
quaternion = transformations.quaternion_from_euler(
euler_angle[0], euler_angle[1], euler_angle[2])
return np.array([quaternion[0], quaternion[1], quaternion[2], quaternion[3]])
def getRotationMatrixFromEulerAngle(self, euler_angle):
R_x = np.array([
[1, 0, 0],
[0, cos(euler_angle[0]), -sin(euler_angle[0])],
[0, sin(euler_angle[0]), cos(euler_angle[0])]
])
R_y = np.array([
[cos(euler_angle[1]), 0, sin(euler_angle[1])],
[0, 1, 0],
[-sin(euler_angle[1]), 0, cos(euler_angle[1])]
])
R_z = np.array([
[cos(euler_angle[2]), -sin(euler_angle[2]), 0],
[sin(euler_angle[2]), cos(euler_angle[2]), 0],
[0, 0, 1]
])
rotation_matrix = np.dot(R_z, np.dot( R_y, R_x ))
return rotation_matrix
def getForwardDirection(self, robot_state):
x_axis_direction = np.array([1, 0, 0])
robot_orientation = robot_state.pose.orientation
robot_quaternion = [
robot_orientation.x,
robot_orientation.y,
robot_orientation.z,
robot_orientation.w]
euler_angle = self.getEulerAngleFromQuaternion(robot_quaternion)
rotation_matrix = self.getRotationMatrixFromEulerAngle(euler_angle)
forward_direction = np.dot(rotation_matrix, x_axis_direction)
forward_direction = np.array([forward_direction[0], forward_direction[1], 0])
forward_direction_norm = np.linalg.norm(forward_direction)
if forward_direction_norm == 0:
print("[ERROR][RobotMoveStateManager::getForwardDirection]")
print("\t forward_direction_norm is 0!")
return None
forward_direction /= forward_direction_norm
return forward_direction
def getAllRobotState(self):
robot_state_list = []
if self.robot_name is None:
print("[ERROR][RobotMoveStateManager::getAllRobotState]")
print("\t robot_name is None!")
return None
if self.robot_num is None:
print("[ERROR][RobotMoveStateManager::getAllRobotState]")
print("\t robot_num is None!")
return None
if self.robot_num < 1:
print("[ERROR][RobotMoveStateManager::getAllRobotState]")
print("\t robot_num not valid!")
return None
for robot_idx in range(self.robot_num):
current_robot_full_name = self.robot_name + str(robot_idx)
current_robot_state = self.getRobotState(current_robot_full_name)
if current_robot_state is None:
print("[ERROR][RobotMoveStateManager::getAllRobotState]")
print("\t getRobotState for " + current_robot_full_name + " failed!")
return None
robot_state_list.append(current_robot_state)
return robot_state_list
def getPosisionDiff2(self, state_1, state_2):
position_1 = state_1.pose.position
position_2 = state_2.pose.position
position_x_diff = position_1.x - position_2.x
position_y_diff = position_1.y - position_2.y
position_z_diff = position_1.z - position_2.z
position_diff2 = \
position_x_diff * position_x_diff + \
position_y_diff * position_y_diff + \
position_z_diff * position_z_diff
return position_diff2
def getOrientationDiff2(self, state_1, state_2):
orientation_1 = state_1.pose.orientation
orientation_2 = state_2.pose.orientation
orientation_x_diff = orientation_1.x - orientation_2.x
orientation_y_diff = orientation_1.y - orientation_2.y
orientation_z_diff = orientation_1.z - orientation_2.z
orientation_w_diff = orientation_1.w - orientation_2.w
orientation_diff2 = \
orientation_x_diff * orientation_x_diff + \
orientation_y_diff * orientation_y_diff + \
orientation_z_diff * orientation_z_diff + \
orientation_w_diff * orientation_w_diff
return orientation_diff2
def getPosisionDiff(self, state_1, state_2):
position_diff2 = self.getPosisionDiff2(state_1, state_2)
return sqrt(position_diff2)
def getOrientationDiff(self, state_1, state_2):
orientation_diff2 = self.getOrientationDiff2(state_1, state_2)
return sqrt(orientation_diff2)
def isSameState(self, state_1, state_2):
position_diff2_max = 0.0001
orientation_diff2_max = 0.0001
position_diff2 = self.getPosisionDiff2(state_1, state_2)
if position_diff2 > position_diff2_max:
return False
orientation_diff2 = self.getOrientationDiff2(state_1, state_2)
if orientation_diff2 > orientation_diff2_max:
return False
return True
def startListenRobotState(self):
robot_wait_count_min_time = 10
log_start_time = time()
last_log_time = 0
last_robot_state_list = []
robot_wait_count_list = [0 for _ in range(self.robot_num)]
while True:
last_start_time = time()
sleep(0.1)
new_robot_state_list = self.getAllRobotState()
if new_robot_state_list is None:
if len(last_robot_state_list) == 0:
continue
print("[ERROR][RobotMoveStateManager::startListenRobotState]")
print("\t getAllRobotState failed!")
break
if len(new_robot_state_list) != self.robot_num:
print("[ERROR][RobotMoveStateManager::startListenRobotState]")
print("\t new_robot_state_list.size and robot_num not matched!")
break
if len(last_robot_state_list) == 0:
last_robot_state_list = new_robot_state_list
continue
for i in range(self.robot_num):
if self.isSameState(
last_robot_state_list[i],
new_robot_state_list[i]):
robot_wait_count_list[i] += 1
else:
robot_wait_count_list[i] = 0
self.robot_move_dist_list[i] += self.getPosisionDiff(
last_robot_state_list[i],
new_robot_state_list[i])
last_robot_state_list = new_robot_state_list
for i in range(len(robot_wait_count_list)):
if robot_wait_count_list[i] < robot_wait_count_min_time:
continue
new_wait_time = time() - last_start_time
self.robot_wait_time_list[i] += new_wait_time
new_log_time = time()
if new_log_time == last_log_time:
continue
last_log_time = new_log_time
for i in range(self.robot_num):
if not self.logScalar(
"RobotMoveStateManager/robot" + str(i) + "_move_dist",
new_log_time - log_start_time,
self.robot_move_dist_list[i]):
print("[ERROR][RobotMoveStateManager::startListenRobotState]")
print("\t logScalar for robot_" + str(i) + "_move_dist failed!")
break
if not self.logScalar(
"RobotMoveStateManager/robot" + str(i) + "_wait_time",
new_log_time - log_start_time,
self.robot_wait_time_list[i]):
print("[ERROR][RobotMoveStateManager::startListenRobotState]")
print("\t logScalar for robot_" + str(i) + "_wait_time failed!")
break
robot_move_dist_array = np.array(self.robot_move_dist_list)
robot_move_dist_mean = np.mean(robot_move_dist_array)
robot_move_dist_std = np.std(robot_move_dist_array)
robot_move_dist_load_balance = 0
if robot_move_dist_mean > 0:
robot_move_dist_load_balance = \
robot_move_dist_std / robot_move_dist_mean
if not self.logScalar(
"RobotMoveStateManager/robot_move_dist_load_balance",
new_log_time - log_start_time,
robot_move_dist_load_balance):
print("[ERROR][RobotMoveStateManager::startListenRobotState]")
print("\t logScalar for robot_move_dist_load_balance failed!")
break
robot_wait_time_array = np.array(self.robot_wait_time_list)
robot_wait_time_mean = np.mean(robot_wait_time_array)
robot_wait_time_percentage = 0
if robot_wait_time_mean > 0:
robot_wait_time_percentage = \
robot_wait_time_mean / (new_log_time - log_start_time)
if not self.logScalar(
"RobotMoveStateManager/robot_wait_time_percentage",
new_log_time - log_start_time,
robot_wait_time_percentage):
print("[ERROR][RobotMoveStateManager::startListenRobotState]")
print("\t logScalar for robot_wait_time_percentage failed!")
break
return True
if __name__ == "__main__":
rospy.init_node("RobotMoveStateManager")
robot_name = rospy.get_param("/robot_name")
robot_num = int(rospy.get_param("/robot_num"))
robot_move_state_manager = RobotMoveStateManager()
robot_move_state_manager.loadRobot(robot_name, robot_num)
robot_move_state_manager.startListenRobotState()
``` |
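The two fleet-level curves logged above reduce to simple statistics over the per-robot accumulators: load balance is the coefficient of variation of travelled distance, and wait percentage is mean wait time over elapsed time. A tiny standalone restatement; the sample numbers are invented.

```python
import numpy as np

def fleet_metrics(move_dist_list, wait_time_list, elapsed_time):
    dist = np.array(move_dist_list, dtype=float)
    wait = np.array(wait_time_list, dtype=float)
    load_balance = float(np.std(dist) / np.mean(dist)) if np.mean(dist) > 0 else 0.0
    wait_percentage = float(np.mean(wait) / elapsed_time) if np.mean(wait) > 0 else 0.0
    return load_balance, wait_percentage

# e.g. three robots after 120 s of exploration:
print(fleet_metrics([14.2, 9.8, 11.5], [6.0, 21.0, 3.5], 120.0))
```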
{
"source": "565353780/stable-baselines-explore-environment",
"score": 2
} |
#### File: stable-baselines-explore-environment/scripts/RobotScanning2DEnvironment.py
```python
from PIL import Image, ImageTk
import numpy as np
import matplotlib.pyplot as plt
from math import sin, cos, pi, ceil
from cv2 import namedWindow, imshow, WINDOW_AUTOSIZE, waitKey
import random
from random import randint
import gym
from gym import spaces
import os
from stable_baselines.common.env_checker import check_env
from stable_baselines.common.policies import MlpPolicy, CnnPolicy, CnnLstmPolicy, CnnLnLstmPolicy
from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv, VecNormalize, VecFrameStack
from stable_baselines.common import make_vec_env
from stable_baselines import PPO2
from stable_baselines import results_plotter
from stable_baselines.bench import Monitor
from stable_baselines.results_plotter import load_results, ts2xy
from stable_baselines.common.noise import AdaptiveParamNoiseSpec
from stable_baselines.common.callbacks import BaseCallback
from RobotScanning2DRewardStrategy import RobotScanning2DRewardStrategy as RewardStrategy
from RobotScanning2DDoneStrategy import RobotScanning2DDoneStrategy as DoneStrategy
class SaveOnBestTrainingRewardCallback(BaseCallback):
"""
Callback for saving a model (the check is done every ``check_freq`` steps)
based on the training reward (in practice, we recommend using ``EvalCallback``).
:param check_freq: (int)
:param log_dir: (str) Path to the folder where the model will be saved.
It must contains the file created by the ``Monitor`` wrapper.
:param verbose: (int)
"""
def __init__(self, check_freq: int, log_dir: str, verbose=1):
super(SaveOnBestTrainingRewardCallback, self).__init__(verbose)
self.check_freq = check_freq
self.log_dir = log_dir
self.save_path = os.path.join(log_dir, 'best_model')
self.best_mean_reward = -np.inf
def _init_callback(self) -> None:
# Create folder if needed
if self.save_path is not None:
os.makedirs(self.save_path, exist_ok=True)
def _on_step(self) -> bool:
if self.n_calls % self.check_freq == 0:
# Retrieve training reward
x, y = ts2xy(load_results(self.log_dir), 'timesteps')
if len(x) > 0:
# Mean training reward over the last 100 episodes
mean_reward = np.mean(y[-100:])
if self.verbose > 0:
print("Num timesteps: {}".format(self.num_timesteps))
print("Best mean reward: {:.2f} - Last mean reward per episode: {:.2f}".format(self.best_mean_reward, mean_reward))
# New best model, you could save the agent here
if mean_reward > self.best_mean_reward:
self.best_mean_reward = mean_reward
# Example for saving best model
if self.verbose > 0:
print("Saving new best model to {}".format(self.save_path))
self.model.save(self.save_path)
return True
# label of environment[i][j]:
# [0]:
# 0:can not go to
# 1:can go to
# [1]:
# 0:do not know the message
# 1:got the messages
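# A standalone sketch (not called by the class below) of the 4-channel encoding
# that update_observation_output() further down builds from the RGB observation:
# channels 0-2 one-hot the free / unknown / obstacle colours and channel 3
# records how recently each cell was visited. Colour defaults mirror the values
# set in __init__; numpy is already imported as np at the top of this file.
def encode_observation_sketch(rgb_obs, path,
                              free_color=(128, 128, 128),
                              unknown_color=(0, 0, 0),
                              obstacle_color=(255, 255, 255)):
    h, w, _ = rgb_obs.shape
    out = np.zeros((h, w, 4), dtype=np.uint8)
    out[np.all(rgb_obs == free_color, axis=-1), 0] = 255
    out[np.all(rgb_obs == unknown_color, axis=-1), 1] = 255
    out[np.all(rgb_obs == obstacle_color, axis=-1), 2] = 255
    # Most recent poses get the largest values, as in update_observation_output().
    for age, pose in enumerate(reversed(path[-255:])):
        x, y = int(pose[0]), int(pose[1])
        if out[x, y, 3] == 0:
            out[x, y, 3] = 255 - age
    return out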
class RobotScanning2DEnvironment(gym.Env):
"""Custom Environment that follows gym interface"""
metadata = {'render.modes': ['human']}
def __init__(self, map_file, seed=None, angle=60.0, distance=5, delta_angle=0.01, SELF_RENDER=True, TRAIN_MODE=False, init_max_move_num=100):
super(RobotScanning2DEnvironment, self).__init__()
# Define action and observation space
# They must be gym.spaces objects
# Example when using discrete actions:
# self.action_space = spaces.Discrete(8)
# Example for using image as input:
# self.observation_space = spaces.Box(low=0, high=255,
# shape=(128, 128, 3), dtype=np.uint8)
self.max_w = 200
self.max_h = 200
self.init_max_move_num = init_max_move_num
self.add_move_time = 1
self.add_max_move_num_per_episode_num = 50
self.max_hit_obstacle_time = -1
self.unknown_color = [0, 0, 0]
self.free_color = [128, 128, 128]
self.obstacle_color = [255, 255, 255]
self.pose_color = [255, 0, 0]
self.init_pose_fixed = False
self.SHOW_IMAGE = False
self.seed(seed)
self.observation_space = None
self.SELF_RENDER = SELF_RENDER
self.TRAIN_MODE = TRAIN_MODE
self.total_reward = 0
self.raw_map_file = map_file
self.observation = None
self.global_map = None
self.output_image = None
self.sight_angle = angle
self.sight_distance = distance
self.delta_ray_angle = delta_angle
self.init_pose = np.array([1.0, 1.0, 0.0], dtype=np.float64)
self.current_pose = self.init_pose.copy()
self.action_space = None
self.action_spaces = None
self.load_map()
self.init_action_space()
self.reward_strategy = RewardStrategy(self.global_map)
self.done_strategy = DoneStrategy(self.global_map)
self.path = [self.current_pose.copy()]
self.namedWindow = None
self.hit_obstacle_time = 0
self.total_move_num = 0
self.finish_episode = 0
self.test_idx = 0
# 0:free 1:unknown 2:obstacle 3:position
self.observation_output = np.zeros(self.observation_space.shape)
self.free_label = [255, 0, 0, 0]
self.unknown_label = [0, 255, 0, 0]
self.obstacle_label = [0, 0, 255, 0]
self.pose_label = [0, 0, 0, 255]
# self.reward_range = [-1000, 1000]
def init_action_space(self):
self.action_space = spaces.Discrete(8)
self.action_spaces = np.empty((8, 3), dtype=np.float64)
self.action_spaces[0] = [1.0, 0.0, 90.0]
self.action_spaces[1] = [-1.0, 0.0, -90.0]
self.action_spaces[2] = [0.0, -1.0, 180.0]
self.action_spaces[3] = [0.0, 1.0, 0.0]
self.action_spaces[4] = [1.0, -1.0, 135.0]
self.action_spaces[5] = [1.0, 1.0, 45.0]
self.action_spaces[6] = [-1.0, -1.0, -135.0]
self.action_spaces[7] = [-1.0, 1.0, -45.0]
# self.action_spaces[8] = [2.0, 0.0, 90.0]
# self.action_spaces[9] = [-2.0, 0.0, -90.0]
# self.action_spaces[10] = [0.0, -2.0, 180.0]
# self.action_spaces[11] = [0.0, 2.0, 0.0]
def get_num_actions(self):
return len(self.action_spaces)
# return self.action_space.n
def update_observation_output(self):
for i in range(self.observation_output.shape[0]):
for j in range(self.observation_output.shape[1]):
if all(self.observation[i][j] == self.free_color):
self.observation_output[i][j] = self.free_label
elif all(self.observation[i][j] == self.unknown_color):
self.observation_output[i][j] = self.unknown_label
elif all(self.observation[i][j] == self.obstacle_color):
self.observation_output[i][j] = self.obstacle_label
# elif all(self.observation[i][j] == self.pose_color):
# self.observation_output[i][j] = self.pose_label
        # zero the path-recency channel before repainting the most recent poses
        self.observation_output[:, :, 3] = 0
path_num = len(self.path)
for i in range(min(path_num, 255)):
[x, y] = self.path[path_num - i - 1][:2]
if self.observation_output[int(x)][int(y)][3] == 0:
self.observation_output[int(x)][int(y)][3] = 255 - i
def step(self, action):
if action not in range(len(self.action_spaces)):
# if action not in range(self.action_space.n):
print("Error action!")
return (None, None, None, dict())
else:
self.total_move_num += 1
target_pose = self.current_pose.copy()
target_pose[:2] += self.action_spaces[action][:2]
if not 0 <= int(target_pose[0]) < self.global_map.shape[0] or not 0 <= int(target_pose[1]) < self.global_map.shape[1]:
self.hit_obstacle_time = 0
self.total_reward -= 100.0
self.debug()
self.output(False)
self.total_reward = 0
self.total_move_num = 0
self.finish_episode += 1
self.update_observation_output()
# return (self.observation, -100, True, dict())
return (self.observation_output, -100, True, dict())
if self.global_map[int(target_pose[0])][int(target_pose[1])][0] == 0 and (self.hit_obstacle_time < self.max_hit_obstacle_time or self.max_hit_obstacle_time == -1):
self.hit_obstacle_time += 1
self.total_reward -= 10.0
self.debug()
done = False
if self.total_move_num >= self.max_move_num:
done = True
if done:
self.hit_obstacle_time = 0
self.output(True)
self.total_reward = 0
self.total_move_num = 0
self.finish_episode += 1
self.update_observation_output()
# return (self.observation, -10.0, done, dict())
return (self.observation_output, -10.0, done, dict())
target_pose[2] = self.action_spaces[action][2]
self.update_observation(target_pose)
self.current_pose = target_pose.copy()
reward = self.reward_strategy.compute_reward(self.current_pose, self.observation)
done = self.done_strategy.is_done(self.current_pose, self.observation)
last_path = self.path[len(self.path) - 1]
self.path.append(self.current_pose.copy())
self.observation[int(last_path[0])][int(last_path[1])] = self.free_color
self.observation[int(self.current_pose[0])][int(self.current_pose[1])] = self.pose_color
if self.total_move_num >= self.max_move_num:
done = True
self.total_reward += reward
self.debug()
if done:
self.hit_obstacle_time = 0
self.output(True)
self.total_reward = 0
self.total_move_num = 0
self.finish_episode += 1
self.update_observation_output()
# return (self.observation, reward, done, dict())
return (self.observation_output, reward, done, dict())
def debug(self):
if False:
print('total reward : ', self.total_reward)
print('total_move_num : ', self.total_move_num)
def output(self, done):
if self.TRAIN_MODE:
with open('reward.txt', 'a+') as f:
f.write(str(int(self.total_reward)))
f.write('\n')
if not done:
print('')
print("Total Reward : ", self.total_reward)
print("Total Step : ", self.total_move_num)
print("======================================")
def reset(self):
self.observation = np.zeros(self.global_map.shape, dtype=np.int32)
# self.update_observation(self.init_pose)
if self.init_pose_fixed:
            test_pose = [np.array([int(0.45 * self.max_h), int(0.1 * self.max_w), 0], dtype=np.float64),
                         np.array([int(0.55 * self.max_h), int(0.1 * self.max_w), 0], dtype=np.float64)]
random_pose = test_pose[self.test_idx]
self.test_idx = (self.test_idx + 1) % len(test_pose)
else:
rand_w_l = int(0.1 * self.max_w)
rand_w_r = int(0.9 * self.max_w)
rand_h_l = int(0.1 * self.max_h)
rand_h_r = int(0.9 * self.max_h)
random_pose = np.array([randint(rand_h_l, rand_h_r), randint(rand_w_l, rand_w_r), 0], dtype=np.float64)
while self.global_map[int(random_pose[0])][int(random_pose[1])][0] == 0:
random_pose[0] = randint(rand_h_l, rand_h_r)
random_pose[1] = randint(rand_w_l, rand_w_r)
self.init_pose = random_pose.copy()
self.current_pose = self.init_pose.copy()
self.path = []
self.path.append(self.current_pose.copy())
if self.finish_episode % self.add_max_move_num_per_episode_num == 0:
self.max_move_num = self.init_max_move_num + self.add_move_time * int(self.finish_episode / self.add_max_move_num_per_episode_num)
if self.max_move_num > self.global_map.shape[0] * self.global_map.shape[1]:
self.max_move_num = self.global_map.shape[0] * self.global_map.shape[1]
print('max move num : ', self.max_move_num)
self.update_observation_output()
# return self.observation
return self.observation_output
def seed(self, seed=None):
if seed is not None:
random.seed(seed)
def load_map(self):
img = Image.open(self.raw_map_file)
img = img.convert("RGB")
max_scale = self.max_w / img.size[0]
if self.max_h / img.size[1] < max_scale:
max_scale = self.max_h / img.size[1]
if max_scale < 1:
img = img.resize((int(img.size[0]*max_scale), int(img.size[1]*max_scale)), resample=Image.LANCZOS)
self.max_w = img.size[0]
self.max_h = img.size[1]
self.global_map = self.transform_between_image_coordinate_and_map_coordinate(np.array(img))
for i in range(self.global_map.shape[0]):
for j in range(self.global_map.shape[1]):
if self.global_map[i][j][0] < 200 or self.global_map[i][j][1] < 200 or self.global_map[i][j][2] < 200:
self.global_map[i][j] = [0, 0, 0]
else:
self.global_map[i][j] = [255, 255, 255]
self.add_boundary()
# self.observation_space = spaces.Box(low=0, high=255, shape=(img.size[0], img.size[1], 3), dtype=np.uint8)
self.observation_space = spaces.Box(low=0, high=255, shape=(img.size[0], img.size[1], 4), dtype=np.uint8)
self.max_move_num = self.global_map.shape[0] * self.global_map.shape[1]
# imshow('test', self.global_map)
# waitKey()
def add_boundary(self):
self.global_map[0] = 0
self.global_map[-1] = 0
self.global_map[:, 0] = 0
self.global_map[:, -1] = 0
def transform_between_image_coordinate_and_map_coordinate(self, input_array):
output_array = np.empty(input_array.shape, dtype=np.uint8)
height = input_array.shape[0]
width = input_array.shape[1]
for i in range(height):
for j in range(width):
output_array[i][j] = np.uint8(input_array[height - 1 - i][j])
return output_array
def add_new_end_point(self, end_points, center_position, ray_angle):
new_end_point_x = int(center_position[0] + self.sight_distance * sin(ray_angle * pi / 180.0))
new_end_point_y = int(center_position[1] + self.sight_distance * cos(ray_angle * pi / 180.0))
if end_points is None or len(end_points) == 0:
return np.array([[new_end_point_x, new_end_point_y]], dtype=np.int32)
else:
if new_end_point_x != end_points[-1, 0] or new_end_point_y != end_points[-1, 1]:
return np.append(end_points, [[new_end_point_x, new_end_point_y]], axis=0)
else:
return end_points
def compute_ray_end_points(self, target_pose):
ray_angle_right = target_pose[2] - self.sight_angle / 2.0
ray_angle_left = target_pose[2] + self.sight_angle / 2.0
# ray_num = ceil((ray_angle_left - ray_angle_right) / self.delta_ray_angle) + 1
# end_points = self.add_new_end_point(None, target_pose[:2], ray_angle_right)
end_points = None
#for i in range(1, ray_num - 1):
for ray_angle in np.arange(ray_angle_right, ray_angle_left, self.delta_ray_angle):
end_points = self.add_new_end_point(end_points, target_pose[:2], ray_angle)
end_points = self.add_new_end_point(end_points, target_pose[:2], ray_angle_left)
return end_points
def end_points_based_ray_cast(self, target_pose):
end_points = self.compute_ray_end_points(target_pose)
for i in range(len(end_points)):
ray = end_points[i] - target_pose[:2]
self.single_ray_cast(target_pose[:2], ray)
def single_ray_cast(self, start_point, ray):
long_axis_length = np.max(np.abs(ray))
moving_unit = ray / long_axis_length
for j in range(int(long_axis_length) + 1):
if self.global_map[int(start_point[0] + moving_unit[0] * j)][int(start_point[1] + moving_unit[1] * j)][0] == 0:
self.observation[int(start_point[0] + moving_unit[0] * j)][int(start_point[1] + moving_unit[1] * j)] = self.obstacle_color
break
else:
self.observation[int(start_point[0] + moving_unit[0] * j)][int(start_point[1] + moving_unit[1] * j)] = self.free_color
def uniform_angle_based_ray_cast(self, target_pose):
ray_angle_right = target_pose[2] - self.sight_angle / 2.0
ray_angle_left = target_pose[2] + self.sight_angle / 2.0
for ray_angle in np.arange(ray_angle_right, ray_angle_left, self.delta_ray_angle):
ray = [self.sight_distance * sin(ray_angle * pi / 180.0), self.sight_distance * cos(ray_angle * pi / 180.0)]
self.single_ray_cast(target_pose[:2], ray)
ray = [self.sight_distance * sin(ray_angle_left * pi / 180.0), self.sight_distance * cos(ray_angle_left * pi / 180.0)]
self.single_ray_cast(target_pose[:2], ray)
def update_observation(self, target_pose):
if not 0 <= target_pose[0] < self.global_map.shape[0] or not 0 <= target_pose[1] < self.global_map.shape[1]:
# print("Target pose out of range!")
return
self.end_points_based_ray_cast(target_pose)
# self.uniform_angle_based_ray_cast(target_pose)
self.current_pose = target_pose.copy()
def paint_color(self):
self.output_image = self.global_map.copy()
# self.output_image[np.where(self.observation == 1)] = 255
for i in range(self.output_image.shape[0]):
for j in range(self.output_image.shape[1]):
if all(self.observation[i][j] == self.free_color):
self.output_image[i][j] = [0, 0, 255]
for pose in self.path:
self.output_image[int(pose[0])][int(pose[1])] = [0, 255, 0]
self.output_image = self.transform_between_image_coordinate_and_map_coordinate(self.output_image)
def render(self, mode='human'):
self.paint_color()
if self.SELF_RENDER:
if not self.SHOW_IMAGE:
self.namedWindow = namedWindow('test_environment', WINDOW_AUTOSIZE)
self.SHOW_IMAGE = True
imshow('test_environment', self.output_image)
waitKey(1)
return self.output_image
# plt.imshow(self.output_image)
# plt.axis('off')
# plt.show()
def close (self):
pass
def test(self, position, forward_direction, show_image=False):
if self.global_map is None:
self.reset()
target_pose = [position[0], position[1], forward_direction]
self.update_observation(target_pose)
if show_image:
# self.show_output_image()
self.render()
if __name__ == "__main__":
game_name = "My2DEnv"
# game_name = "CartPole-v1"
# game_name = "Breakout-v0"
train_mode = True
policy = CnnPolicy
init_max_move_num = 654
log_dir = "./tmp/"
os.makedirs(log_dir, exist_ok=True)
start_episode = 0
    total_time_step = 2500000
num_cpu = 16
global_seeds = 0
    def set_global_seeds(seed=0):
        global global_seeds
        global_seeds = seed
def make_env(rank, seed=0, SELF_RENDER=False):
def _init():
env = RobotScanning2DEnvironment(map_file="./32x32.png", seed=seed+rank, angle=60.0, distance=5, delta_angle=0.01, SELF_RENDER=SELF_RENDER, TRAIN_MODE=train_mode, init_max_move_num=init_max_move_num)
return env
set_global_seeds(seed)
return _init
def make_framestack_env(rank, seed=0, SELF_RENDER=False):
def _init():
env = RobotScanning2DEnvironment(map_file="./32x32.png", seed=seed+rank, angle=60.0, distance=5, delta_angle=0.01, SELF_RENDER=SELF_RENDER, TRAIN_MODE=train_mode, init_max_move_num=init_max_move_num)
env = DummyVecEnv([lambda : env])
env = VecFrameStack(env, n_stack=4)
return env
set_global_seeds(seed)
return _init
env = None
if game_name == "My2DEnv":
if train_mode:
if policy == CnnLnLstmPolicy or policy == CnnLstmPolicy:
env = SubprocVecEnv([make_env(10, i) for i in range(num_cpu)])
elif policy == CnnPolicy:
# env = DummyVecEnv([lambda : RobotScanning2DEnvironment(map_file="./32x32.png", seed=10, angle=60.0, distance=5, delta_angle=0.01, SELF_RENDER=False, TRAIN_MODE=train_mode)])
env = SubprocVecEnv([make_env(10, i) for i in range(num_cpu)])
# env = VecFrameStack(env, n_stack=4)
else:
if policy == CnnLnLstmPolicy or policy == CnnLstmPolicy:
env = SubprocVecEnv([make_env(10, i) for i in range(num_cpu)])
elif policy == CnnPolicy:
env = RobotScanning2DEnvironment(map_file="./32x32.png", seed=10, angle=60.0, distance=5, delta_angle=0.01, SELF_RENDER=True, TRAIN_MODE=train_mode, init_max_move_num=init_max_move_num)
# env = Monitor(env, log_dir, info_keywords=('as', 'ac', 'rc'))
env = DummyVecEnv([lambda : env])
# env = VecFrameStack(env, n_stack=4)
else:
if train_mode:
env = make_vec_env(game_name, n_envs=4)
else:
env = make_vec_env(game_name, n_envs=1)
# check_env(env)
model = None
start_step_num = 0
if start_episode > 0:
model = PPO2.load("PPO2_" + game_name + "_" + str(start_episode), env)
else:
file_list = os.listdir(os.getcwd())
for file_name in file_list:
if "PPO2_" + game_name + "_" in file_name:
current_step_num = int(file_name.split(".")[0].split("_")[2])
if current_step_num > start_step_num:
start_step_num = current_step_num
if start_step_num > 0:
model = PPO2.load("PPO2_" + game_name + "_" + str(start_step_num), env)
else:
if game_name == "My2DEnv":
model = PPO2(policy, env, verbose=1)
else:
model = PPO2(MlpPolicy, env, verbose=1)
# callback = SaveOnBestTrainingRewardCallback(check_freq=1000, log_dir=log_dir)
if train_mode:
round = 0
model.learning_rate = 1e-4
while True:
# model.learn(total_timesteps=total_time_step, callback=callback)
model.learn(total_timesteps=total_time_step)
try:
os.remove("PPO2_" + game_name + "_" + str(start_step_num + round * total_time_step) + ".zip")
except:
pass
round += 1
model.save("PPO2_" + game_name + "_" + str(start_step_num + round * total_time_step))
del model
else:
# results_plotter.plot_results([log_dir], total_time_step, results_plotter.X_TIMESTEPS, "PPO2 My2DEnv")
# plt.show()
while True:
obs = env.reset()
if policy == CnnLnLstmPolicy or policy == CnnLstmPolicy:
done = [False, False]
while not all(done):
action, _states = model.predict(obs)
obs, rewards, dones, info = env.step(action)
env.render('human')
done = dones
else:
done = False
while not done:
action, _states = model.predict(obs)
obs, rewards, dones, info = env.step(action)
env.render()
done = dones
# env.reset()
# env.test(position=[70, 10], forward_direction=0, show_image=True)
# env.reset()
# env.test(position=[7, 7], forward_direction=0, show_image=True)
# env.test(position=[40, 7], forward_direction=30, show_image=True)
'''
At 3.4 GHz each thread trains at about 30k steps / h.
With the current 16 threads, the overall training speed is about 480k steps / h.
A full episode grows from about 100 steps early in training to 1024 steps later on, i.e.
early in training: about 4800 episodes / h
later in training: about 168 episodes / h
On average one test run requires training about 200k episodes, which takes roughly 200 h, i.e. 8.3 days.
'''
``` |
{
"source": "565353780/tensorflow2-din",
"score": 2
} |
#### File: tensorflow2-din/DIEN/afm.py
```python
import tensorflow as tf
import itertools
class AFM(tf.keras.layers.Layer):
def __init__(self, mode, att_vector=8, activation='relu', dropout=0.5, embed_reg=1e-6):
super(AFM, self).__init__()
self.mode = mode
if self.mode == 'att':
self.attention_W = tf.keras.layers.Dense(att_vector, activation=activation, use_bias=True)
self.attention_dense = tf.keras.layers.Dense(1, activation=None)
self.dropout = tf.keras.layers.Dropout(dropout)
self.dense = tf.keras.layers.Dense(1, activation=None)
def call(self, inputs):
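        # Build every pairwise (row, col) combination of the input feature fields
        # and form their element-wise products as bi-interaction terms; pool them
        # by max, average, or attention depending on the mode.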
row = []
col = []
for r, c in itertools.combinations(range(inputs.shape[1]), 2):
row.append(r)
col.append(c)
p = tf.gather(inputs, row, axis=1)
q = tf.gather(inputs, col, axis=1)
bi_interaction = p * q
if self.mode == "max":
x = tf.reduce_max(bi_interaction, axis=1)
elif self.mode == 'avg':
x = tf.reduce_mean(bi_interaction, axis=1)
else:
x = self.attention(bi_interaction)
outputs = tf.nn.sigmoid(self.dense(x))
#outputs = self.dense(x)
return outputs
def attention(self, bi_interaction):
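        # Score each bi-interaction term with a small dense network, normalize the
        # scores with softmax, and return the attention-weighted sum.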
a = self.attention_W(bi_interaction)
a = self.attention_dense(a)
a_score = tf.nn.softmax(a, 1)
outputs = tf.reduce_sum(bi_interaction * a_score, 1)
return outputs
``` |
{
"source": "565353780/voc_image_cut",
"score": 2
} |
#### File: 565353780/voc_image_cut/auto_cut_and_merge.py
```python
import os
from Method.image_format_switcher import switchImageFormat
from Method.label_cutter import LabelCutter
from Method.label_merger import LabelMerger
from Method.yolo_builder import YOLOBuilder
def demo():
# Param
source_folder_path = "/home/chli/chLi/Download/DeepLearning/Dataset/WaterDrop/20220419_cap/rgb_data/1_1/"
target_folder_path = "/home/chli/waterdrop_data/1_1/"
source_format = ".png"
target_format = ".png"
cut_by_label_list = ["container"]
cut_save_label_list = ["drop"]
merge_save_label_list = ["drop"]
merge_row_image_num = 4
merge_col_image_num = 4
is_row_merge_first = True
merge_image_num = 16
merge_image_time = 300 * 16
merge_image_size = [416 * 4, 416 * 4]
classes = ["container", "drop", "zbar"]
# Algorithm
target_image_folder_path = \
target_folder_path + target_format.split(".")[1] + "/"
if source_format != target_format:
switchImageFormat(source_folder_path,
source_format,
target_image_folder_path,
target_format)
else:
target_image_folder_path = source_folder_path
label_cutter = LabelCutter()
label_cutter.setCutInfo(target_image_folder_path,
target_folder_path + "cut/",
cut_by_label_list,
cut_save_label_list)
label_cutter.cutAllImage(target_format)
label_merger = LabelMerger()
label_merger.setMergeInfo(target_folder_path + "cut/",
target_folder_path + "merge/",
merge_save_label_list,
merge_row_image_num,
merge_col_image_num,
is_row_merge_first)
label_merger.mergeAllImage(merge_image_num,
merge_image_time,
merge_image_size,
target_format)
yolo_builder = YOLOBuilder()
yolo_builder.setVOCInfo(classes,
target_folder_path + "merge/",
target_folder_path + "yolo/")
yolo_builder.transLabel(target_format)
return True
def demo_multi():
# Param
source_folder_root = "/home/chli/chLi/Download/DeepLearning/Dataset/WaterDrop/20220419_cap/rgb_data/"
source_folder_name_list = os.listdir(source_folder_root)
target_folder_root = "/home/chli/waterdrop_data/"
source_format = ".png"
target_format = ".png"
cut_by_label_list = ["container"]
cut_save_label_list = ["drop"]
merge_save_label_list = ["drop"]
merge_row_image_num = 4
merge_col_image_num = 4
is_row_merge_first = True
merge_image_num = 16
merge_image_time = 300 * 16
merge_image_size = [416 * 4, 416 * 4]
classes = ["container", "drop", "zbar"]
# Algorithm
all_folder_exist = True
for source_folder_name in source_folder_name_list:
source_folder_path = source_folder_root + source_folder_name + "/"
if not os.path.exists(source_folder_path):
all_folder_exist = False
print("[ERROR][auto_cut_and_merge::demo_multi]")
print("\t folder [" + source_folder_name + "] not exist!")
if not all_folder_exist:
return False
for source_folder_name in source_folder_name_list:
source_folder_path = source_folder_root + source_folder_name + "/"
target_folder_path = target_folder_root + source_folder_name + "/"
print("[INFO][auto_cut_and_merge::demo_multi]")
print("\t start trans: " + source_folder_name + " ...")
target_image_folder_path = \
target_folder_path + target_format.split(".")[1] + "/"
if source_format != target_format:
switchImageFormat(source_folder_path,
source_format,
target_image_folder_path,
target_format)
else:
target_image_folder_path = source_folder_path
label_cutter = LabelCutter()
label_cutter.setCutInfo(target_image_folder_path,
target_folder_path + "cut/",
cut_by_label_list,
cut_save_label_list)
label_cutter.cutAllImage(target_format)
label_merger = LabelMerger()
label_merger.setMergeInfo(target_folder_path + "cut/",
target_folder_path + "merge/",
merge_save_label_list,
merge_row_image_num,
merge_col_image_num,
is_row_merge_first)
label_merger.mergeAllImage(merge_image_num,
merge_image_time,
merge_image_size,
target_format)
yolo_builder = YOLOBuilder()
yolo_builder.setVOCInfo(classes,
target_folder_path + "merge/",
target_folder_path + "yolo/")
yolo_builder.transLabel(target_format)
merge_train_txt_path = target_folder_root + "train.txt"
with open(merge_train_txt_path, "w") as f:
for source_folder_name in source_folder_name_list:
target_folder_path = target_folder_root + source_folder_name + "/"
with open(target_folder_path + "yolo/train.txt", "r") as fr:
for line in fr.readlines():
f.write(line)
return True
def demo_multi_merge_and_cut_merge():
# Param
source_folder_root = "/home/chli/chLi/Download/DeepLearning/Dataset/WaterDrop/20220419_cap/rgb_data/"
source_folder_name_list = os.listdir(source_folder_root)
target_folder_root = "/home/chli/waterdrop_data/"
source_format = ".png"
target_format = ".png"
first_merge_save_label_list = ["container", "zbar"]
first_merge_row_image_num = 4
first_merge_col_image_num = 4
first_is_row_merge_first = True
first_merge_image_num = 16
first_merge_image_time = 300
first_merge_image_size = [416 * 4, 416 * 4]
cut_by_label_list = ["container"]
cut_save_label_list = ["drop"]
merge_save_label_list = ["drop"]
merge_row_image_num = 4
merge_col_image_num = 4
is_row_merge_first = True
merge_image_num = 16
merge_image_time = 300 * 16
merge_image_size = [416 * 4, 416 * 4]
classes = ["container", "drop", "zbar"]
# Algorithm
all_folder_exist = True
for source_folder_name in source_folder_name_list:
source_folder_path = source_folder_root + source_folder_name + "/"
if not os.path.exists(source_folder_path):
all_folder_exist = False
print("[ERROR][auto_cut_and_merge::demo_multi]")
print("\t folder [" + source_folder_name + "] not exist!")
if not all_folder_exist:
return False
for source_folder_name in source_folder_name_list:
source_folder_path = source_folder_root + source_folder_name + "/"
target_folder_path = target_folder_root + source_folder_name + "/"
print("[INFO][auto_cut_and_merge::demo_multi]")
print("\t start trans: " + source_folder_name + " ...")
target_image_folder_path = \
target_folder_path + target_format.split(".")[1] + "/"
if source_format != target_format:
switchImageFormat(source_folder_path,
source_format,
target_image_folder_path,
target_format)
else:
target_image_folder_path = source_folder_path
first_label_merger = LabelMerger()
first_label_merger.setMergeInfo(target_image_folder_path,
target_folder_path + "first_merge/",
first_merge_save_label_list,
first_merge_row_image_num,
first_merge_col_image_num,
first_is_row_merge_first)
first_label_merger.mergeAllImage(first_merge_image_num,
first_merge_image_time,
first_merge_image_size,
target_format)
first_yolo_builder = YOLOBuilder()
first_yolo_builder.setVOCInfo(classes,
target_folder_path + "first_merge/",
target_folder_path + "first_yolo/")
first_yolo_builder.transLabel(target_format)
label_cutter = LabelCutter()
label_cutter.setCutInfo(target_image_folder_path,
target_folder_path + "cut/",
cut_by_label_list,
cut_save_label_list)
label_cutter.cutAllImage(target_format)
label_merger = LabelMerger()
label_merger.setMergeInfo(target_folder_path + "cut/",
target_folder_path + "merge/",
merge_save_label_list,
merge_row_image_num,
merge_col_image_num,
is_row_merge_first)
label_merger.mergeAllImage(merge_image_num,
merge_image_time,
merge_image_size,
target_format)
yolo_builder = YOLOBuilder()
yolo_builder.setVOCInfo(classes,
target_folder_path + "merge/",
target_folder_path + "yolo/")
yolo_builder.transLabel(target_format)
merge_train_txt_path = target_folder_root + "train.txt"
with open(merge_train_txt_path, "w") as f:
for source_folder_name in source_folder_name_list:
target_folder_path = target_folder_root + source_folder_name + "/"
with open(target_folder_path + "first_yolo/train.txt", "r") as fr:
for line in fr.readlines():
f.write(line)
with open(target_folder_path + "yolo/train.txt", "r") as fr:
for line in fr.readlines():
f.write(line)
return True
if __name__ == "__main__":
# demo()
# demo_multi()
demo_multi_merge_and_cut_merge()
```
#### File: voc_image_cut/Method/yolo_builder.py
```python
import os
import xml.etree.ElementTree as ET
from shutil import copyfile
from tqdm import tqdm
class YOLOBuilder(object):
def __init__(self):
self.classes = []
self.xml_folder_path = None
self.save_folder_path = None
return
def setVOCInfo(self, classes, xml_folder_path, save_folder_path):
self.classes = classes
self.xml_folder_path = xml_folder_path
self.save_folder_path = save_folder_path
return True
def convert(self, size, box):
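        # Convert a VOC box given as (xmin, xmax, ymin, ymax) in pixels into the
        # YOLO format (x_center, y_center, width, height) normalized by image size.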
dw = 1./size[0]
dh = 1./size[1]
x = (box[0] + box[1])/2.0
y = (box[2] + box[3])/2.0
w = box[1] - box[0]
h = box[3] - box[2]
x = x*dw
w = w*dw
y = y*dh
h = h*dh
return (x,y,w,h)
def convertAnnotation(self, xml_file_basename):
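        # Convert one VOC XML annotation into a YOLO .txt label file (an empty file
        # is written when the XML is missing); objects with unknown classes or
        # difficult == 1 are skipped.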
tree = None
xml_file_path = self.xml_folder_path + xml_file_basename + ".xml"
if not os.path.exists(xml_file_path):
with open(self.xml_folder_path + xml_file_basename + ".txt", 'w') as out_file:
pass
return True
with open(xml_file_path, "r") as in_file:
tree = ET.parse(in_file)
root = tree.getroot()
size = root.find('size')
w = int(size.find('width').text)
h = int(size.find('height').text)
with open(self.xml_folder_path + xml_file_basename + ".txt", 'w') as out_file:
for obj in root.iter('object'):
difficult = obj.find('difficult').text
cls = obj.find('name').text
if cls not in self.classes or int(difficult) == 1:
continue
cls_id = self.classes.index(cls)
xmlbox = obj.find('bndbox')
b = (float(xmlbox.find('xmin').text),
float(xmlbox.find('xmax').text),
float(xmlbox.find('ymin').text),
float(xmlbox.find('ymax').text))
bb = self.convert((w,h), b)
out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')
return True
def transLabel(self, image_format):
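        # Collect the basenames of all images in the XML folder, list their paths
        # in train.txt under the save folder, and convert each annotation to YOLO format.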
if not os.path.exists(self.xml_folder_path):
print("[ERROR][YOLOBuilder::transLabel]")
print("\t xml_folder_path not exist!")
return False
if os.path.exists(self.save_folder_path):
save_folder_filename_list = os.listdir(self.save_folder_path)
if len(save_folder_filename_list) > 0:
print("[ERROR][YOLOBuilder::transLabel]")
print("\t save_folder_path already exist and not empty!")
return False
else:
os.makedirs(self.save_folder_path)
xml_folder_filename_list = os.listdir(self.xml_folder_path)
xml_file_basename_list = []
for xml_folder_filename in xml_folder_filename_list:
xml_folder_filename_split_list = xml_folder_filename.split(".")
if "." + xml_folder_filename_split_list[1] != image_format:
continue
xml_file_basename_list.append(xml_folder_filename_split_list[0])
# print("[INFO][YOLOBuilder::transLabel]")
# print("\t start convert annotations...")
with open(self.save_folder_path + "train.txt", "w") as list_file:
# for image_file_basename in tqdm(xml_file_basename_list):
for image_file_basename in xml_file_basename_list:
list_file.write(self.xml_folder_path + image_file_basename + image_format + "\n")
self.convertAnnotation(image_file_basename)
return True
def demo():
classes = ["container", "drop", "zbar"]
xml_folder_path = "/home/chli/yolo/test/1_output/merge/"
save_folder_path = "/home/chli/yolo/test/1_output/yolo/"
image_format = ".png"
yolo_builder = YOLOBuilder()
yolo_builder.setVOCInfo(classes, xml_folder_path, save_folder_path)
yolo_builder.transLabel(image_format)
return True
if __name__ == "__main__":
demo()
``` |
{
"source": "5665750/owllook",
"score": 2
} |
#### File: owllook/fetcher/cache.py
```python
import re
import aiohttp
import async_timeout
from bs4 import BeautifulSoup
from aiocache.serializers import PickleSerializer, JsonSerializer
from urllib.parse import urlparse, parse_qs, urljoin
from owllook.database.mongodb import MotorBase
from owllook.fetcher.decorators import cached
from owllook.fetcher.function import target_fetch, get_time, get_html_by_requests, get_random_user_agent
from owllook.fetcher.extract_novels import extract_pre_next_chapter
from owllook.config import RULES, LATEST_RULES, LOGGER
@cached(ttl=300, key_from_attr='url', serializer=PickleSerializer(), namespace="main")
async def cache_owllook_novels_content(url, netloc):
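    # Fetch a chapter page, locate the content node with the per-site selector
    # rules, extract a clean chapter title and the adjacent-chapter navigation
    # links, and cache the result for 300 seconds.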
headers = {
'user-agent': await get_random_user_agent()
}
html = await target_fetch(headers=headers, url=url)
if html:
soup = BeautifulSoup(html, 'html5lib')
selector = RULES[netloc].content_selector
if selector.get('id', None):
content = soup.find_all(id=selector['id'])
elif selector.get('class', None):
content = soup.find_all(class_=selector['class'])
else:
content = soup.find_all(selector.get('tag'))
if content:
            # Extract the actual chapter title
title_reg = r'(第?\s*[一二两三四五六七八九十○零百千万亿0-91234567890]{1,6}\s*[章回卷节折篇幕集]\s*.*?)[_,-]'
title = soup.title.string
extract_title = re.findall(title_reg, title, re.I)
if extract_title:
title = extract_title[0]
else:
title = soup.select('h1')[0].get_text()
if not title:
title = soup.title.string
# if "_" in title:
# title = title.split('_')[0]
# elif "-" in title:
# title = title.split('-')[0]
next_chapter = extract_pre_next_chapter(chapter_url=url, html=str(soup))
content = [str(i) for i in content]
data = {
'content': str(''.join(content)),
'next_chapter': next_chapter,
'title': title
}
else:
data = None
return data
return None
# @cached(ttl=300, key_from_attr='url', serializer=PickleSerializer(), namespace="main")
async def cache_owllook_novels_chapter(url, netloc):
headers = {
'user-agent': await get_random_user_agent()
}
html = await target_fetch(headers=headers, url=url)
if html:
soup = BeautifulSoup(html, 'html5lib')
selector = RULES[netloc].chapter_selector
if selector.get('id', None):
content = soup.find_all(id=selector['id'])
elif selector.get('class', None):
content = soup.find_all(class_=selector['class'])
else:
content = soup.find_all(selector.get('tag'))
        # Prevent the chapter content from being hidden via display:none
return str(content).replace('style', '') if content else None
return None
@cached(ttl=10800, key_from_attr='search_ranking', serializer=JsonSerializer(), namespace="ranking")
async def cache_owllook_search_ranking():
motor_db = MotorBase().get_db()
keyword_cursor = motor_db.search_records.find(
{'count': {'$gte': 50}},
{'keyword': 1, 'count': 1, '_id': 0}
).sort('count', -1).limit(35)
result = []
index = 1
async for document in keyword_cursor:
result.append({'keyword': document['keyword'], 'count': document['count'], 'index': index})
index += 1
return result
@cached(ttl=3600, key_from_attr='search_ranking', serializer=JsonSerializer(), namespace="ranking")
async def cache_others_search_ranking(spider='qidian', novel_type='全部类别'):
motor_db = MotorBase().get_db()
item_data = await motor_db.novels_ranking.find_one({'spider': spider, 'type': novel_type}, {'data': 1, '_id': 0})
return item_data
async def get_the_latest_chapter(chapter_url, timeout=15):
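    # Resolve the original chapter URL and novel name from the owllook query
    # string, scrape the latest chapter name/URL using the per-site LATEST_RULES,
    # and upsert the result into the latest_chapter collection.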
try:
with async_timeout.timeout(timeout):
url = parse_qs(urlparse(chapter_url).query).get('url', '')
novels_name = parse_qs(urlparse(chapter_url).query).get('novels_name', '')
data = None
if url and novels_name:
url = url[0]
novels_name = novels_name[0]
netloc = urlparse(url).netloc
if netloc in LATEST_RULES.keys():
headers = {
'user-agent': await get_random_user_agent()
}
try:
html = await target_fetch(url=url, headers=headers, timeout=timeout)
if html is None:
html = get_html_by_requests(url=url, headers=headers, timeout=timeout)
except TypeError:
html = get_html_by_requests(url=url, headers=headers, timeout=timeout)
except Exception as e:
LOGGER.exception(e)
return None
try:
soup = BeautifulSoup(html, 'html5lib')
except Exception as e:
LOGGER.exception(e)
return None
latest_chapter_name, latest_chapter_url = None, None
if LATEST_RULES[netloc].plan:
meta_value = LATEST_RULES[netloc].meta_value
latest_chapter_name = soup.select(
'meta[property="{0}"]'.format(meta_value["latest_chapter_name"])) or soup.select(
'meta[name="{0}"]'.format(meta_value["latest_chapter_name"]))
latest_chapter_name = latest_chapter_name[0].get('content',
None) if latest_chapter_name else None
latest_chapter_url = soup.select(
'meta[property="{0}"]'.format(meta_value["latest_chapter_url"])) or soup.select(
'meta[name="{0}"]'.format(meta_value["latest_chapter_url"]))
latest_chapter_url = urljoin(chapter_url, latest_chapter_url[0].get('content',
None)) if latest_chapter_url else None
else:
selector = LATEST_RULES[netloc].selector
content_url = selector.get('content_url')
if selector.get('id', None):
latest_chapter_soup = soup.find_all(id=selector['id'])
elif selector.get('class', None):
latest_chapter_soup = soup.find_all(class_=selector['class'])
else:
latest_chapter_soup = soup.select(selector.get('tag'))
if latest_chapter_soup:
if content_url == '1':
# TODO
pass
elif content_url == '0':
# TODO
pass
else:
latest_chapter_url = content_url + latest_chapter_soup[0].get('href', None)
latest_chapter_name = latest_chapter_soup[0].get('title', None)
if latest_chapter_name and latest_chapter_url:
time_current = get_time()
# print(latest_chapter_url)
data = {
"latest_chapter_name": latest_chapter_name,
"latest_chapter_url": latest_chapter_url,
"owllook_chapter_url": chapter_url,
"owllook_content_url": "/owllook_content?url={latest_chapter_url}&name={name}&chapter_url={chapter_url}&novels_name={novels_name}".format(
latest_chapter_url=latest_chapter_url,
name=latest_chapter_name,
chapter_url=url,
novels_name=novels_name,
),
}
                        # Store the latest chapter
motor_db = MotorBase().get_db()
await motor_db.latest_chapter.update_one(
{"novels_name": novels_name, 'owllook_chapter_url': chapter_url},
{'$set': {'data': data, "finished_at": time_current}}, upsert=True)
return data
except Exception as e:
LOGGER.exception(e)
return None
async def update_all_books(loop, timeout=15):
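    # Walk every user's bookshelf and refresh the latest chapter for each unique
    # chapter URL, skipping URLs that were already handled in this pass.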
try:
motor_db = MotorBase().get_db()
        # Get a cursor over all bookshelf links
books_url_cursor = motor_db.user_message.find({}, {'books_url.book_url': 1, '_id': 0})
book_urls = []
already_urls = set()
async for document in books_url_cursor:
if document:
books_url = document['books_url']
for book_url in books_url:
chapter_url = book_url['book_url']
if chapter_url not in already_urls:
try:
await get_the_latest_chapter(chapter_url, timeout)
except Exception as e:
LOGGER.exception(e)
already_urls.add(chapter_url)
                # A batch of bookshelf link list data
# book_urls += [book_url['book_url'] for book_url in books_url]
# url_tasks = [get_the_latest_chapter(each_url, loop) for each_url in set(book_urls)]
# tasks = [asyncio.ensure_future(i) for i in url_tasks]
# try:
# await asyncio.gather(*tasks)
# except asyncio.TimeoutError as e:
# pass
except Exception as e:
LOGGER.exception(e)
return False
``` |
{
"source": "56aaaaa/Physics-informed-neural-networks",
"score": 2
} |
#### File: main/continuous_time_identification (Navier-Stokes)/NavierStokes.py
```python
import sys
sys.path.insert(0, '../../Utilities/')
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
import scipy.optimize
import latex
from scipy.interpolate import griddata
import time
import meshio
from itertools import product, combinations
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from plotting import newfig, savefig
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.gridspec as gridspec
import tensorflow_docs as tfdocs
import tensorflow_docs.plots
import tensorflow_docs.modeling
import tensorflow_probability as tfp
np.random.seed(1234)
tf.random.set_seed(1234)
class PhysicsInformedNN:
# Initialize the class
def __init__(self, x, y, t, u, v, layers):
X = np.concatenate([x, y, t], 1)
Y = np.concatenate([u, v], 1)
self.lb = X.min(0)
self.ub = X.max(0)
self.X = X
self.x = X[:,0:1]
self.y = X[:,1:2]
self.t = X[:,2:3]
self.u = u
self.v = v
self.layers = layers
self.weights, self.biases = self.initialize_NN(layers)
self.dtype = "float32"
# Descriptive Keras model
#print("weights",self.weights)
# tf.keras.backend.set_floatx(self.dtype)
# self.model = tf.keras.Sequential()
# self.model.add(tf.keras.layers.InputLayer(input_shape=(layers[0],)))
# self.model.add(tf.keras.layers.Lambda(
# lambda X: 2.0*(X - self.lb)/(self.ub - self.lb) - 1.0))
# # Initialize NN
# for width in layers[1:-1]:
# self.model.add(tf.keras.layers.Dense(
# width, activation=tf.nn.tanh,
# kernel_initializer="glorot_normal"))
# self.model.add(tf.keras.layers.Dense(
# layers[-1], activation=None,
# kernel_initializer="glorot_normal"))
# Initialize parameters
self.lambda_1 = tf.Variable([0.0], dtype=tf.float32)
self.lambda_2 = tf.Variable([0.0], dtype=tf.float32)
# tf placeholders and graph
self.x = tf.Variable(X[:,0:1],dtype=tf.float32)
self.y = tf.Variable(X[:,1:2],dtype=tf.float32)
self.t = tf.Variable(X[:,2:3],dtype=tf.float32)
self.u_pred, self.v_pred, self.p_pred, self.f_u_pred, self.f_v_pred = self.net_NS(self.x, self.y, self.t)
self.u = tf.Variable(u,dtype=tf.float32)
self.v = tf.Variable(v,dtype=tf.float32)
self.Y = Y
# self.optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.loss,
# method = 'L-BFGS-B',
# options = {'maxiter': 50000,
# 'maxfun': 50000,
# 'maxcor': 50,
# 'maxls': 50,
# 'ftol' : 1.0 * np.finfo(float).eps})
def initialize_NN(self, layers):
weights = []
biases = []
num_layers = len(layers)
for l in range(0,num_layers-1):
W = self.xavier_init(size=[layers[l], layers[l+1]])
b = tf.Variable(tf.zeros([1,layers[l+1]], dtype=tf.float32), dtype=tf.float32)
weights.append(W)
biases.append(b)
return weights, biases
def xavier_init(self, size):
in_dim = size[0]
out_dim = size[1]
xavier_stddev = np.sqrt(2/(in_dim + out_dim))
return tf.Variable(tf.random.truncated_normal([in_dim, out_dim], stddev=xavier_stddev), dtype=tf.float32)
def neural_net(self, X, weights, biases):
num_layers = len(weights) + 1
H = 2.0*(X - self.lb)/(self.ub - self.lb) - 1.0
for l in range(0,num_layers-2):
W = weights[l]
b = biases[l]
H = tf.tanh(tf.add(tf.matmul(H, W), b))
W = weights[-1]
b = biases[-1]
Y = tf.add(tf.matmul(H, W), b)
return Y
def net_NS(self, x, y, t):
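        # Predict the stream function psi and pressure p with the network, then use
        # automatic differentiation to obtain u = dpsi/dy, v = -dpsi/dx and the
        # Navier-Stokes residuals f_u, f_v parameterized by lambda_1 and lambda_2.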
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
with tf.GradientTape(persistent=True) as tape:
tape.watch(x)
tape.watch(y)
tape.watch(t)
Xtmp=tf.concat([x,y,t], 1)
psi_and_p = self.neural_net(Xtmp,self.weights,self.biases)
psi = psi_and_p[:,0:1]
p = psi_and_p[:,1:2]
u = tape.gradient(psi, y)
#print("u :",np.shape(u))
v = -tape.gradient(psi, x)
#print("v :",np.shape(v))
u_t = tape.gradient(u, t)
#print("u_t :",np.shape(u_t))
u_x = tape.gradient(u, x)
#print("u_x :",np.shape(u_x))
u_y = tape.gradient(u, y)
#print("u_y :",np.shape(u_y))
u_xx = tape.gradient(u_x, x)
#print("u_xx :",np.shape(u_xx))
u_yy = tape.gradient(u_y, y)
#print("u_yy :",np.shape(u_yy))
v_t = tape.gradient(v, t)
#print("v_t :",np.shape(v_t))
v_x = tape.gradient(v, x)
#print("v_x :",np.shape(v_x))
v_y = tape.gradient(v, y)
#print("v_y :",np.shape(v_y))
v_xx = tape.gradient(v_x, x)
#print("v_xx :",np.shape(v_xx))
v_yy = tape.gradient(v_y, y)
#print("v_yy :",np.shape(v_yy))
p_x = tape.gradient(p, x)
#print("p_x :",np.shape(p_x))
p_y = tape.gradient(p, y)
#print("p_y :",np.shape(p_y))
f_u = u_t + lambda_1*(u*u_x + v*u_y) + p_x - lambda_2*(u_xx + u_yy)
f_v = v_t + lambda_1*(u*v_x + v*v_y) + p_y - lambda_2*(v_xx + v_yy)
del tape
return u, v, p, f_u, f_v
def lambda12(self, x, y, t):
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
with tf.GradientTape(persistent=True) as tape:
tape.watch(x)
tape.watch(y)
tape.watch(t)
Xtmp=tf.concat([x,y,t], 1)
psi_and_p = self.neural_net(Xtmp,self.weights,self.biases)
psi = psi_and_p[:,0:1]
p = psi_and_p[:,1:2]
u = tape.gradient(psi, y)
#print("u :",np.shape(u))
v = -tape.gradient(psi, x)
#print("v :",np.shape(v))
u_t = tape.gradient(u, t)
#print("u_t :",np.shape(u_t))
u_x = tape.gradient(u, x)
#print("u_x :",np.shape(u_x))
u_y = tape.gradient(u, y)
#print("u_y :",np.shape(u_y))
u_xx = tape.gradient(u_x, x)
#print("u_xx :",np.shape(u_xx))
u_yy = tape.gradient(u_y, y)
#print("u_yy :",np.shape(u_yy))
v_t = tape.gradient(v, t)
#print("v_t :",np.shape(v_t))
v_x = tape.gradient(v, x)
#print("v_x :",np.shape(v_x))
v_y = tape.gradient(v, y)
#print("v_y :",np.shape(v_y))
v_xx = tape.gradient(v_x, x)
#print("v_xx :",np.shape(v_xx))
v_yy = tape.gradient(v_y, y)
#print("v_yy :",np.shape(v_yy))
p_x = tape.gradient(p, x)
#print("p_x :",np.shape(p_x))
p_y = tape.gradient(p, y)
#print("p_y :",np.shape(p_y))
fu1 = (u*u_x + v*u_y)
fu2 = (u_xx + u_yy)
fu3 = u_t + p_x
fv1 = (u*v_x + v*v_y)
fv2 = (v_xx + v_yy)
fv3 = v_t + p_y
# f_u = self.lambda_1*fu1 - lambda_2*fu2+fu3
# f_v = self.lambda_1*fu1 - lambda_2*fu2+fu3
del tape
return fu1,fu2,fu3,fv1,fv2,fv3
def callback(self, loss, lambda_1, lambda_2):
print('Loss: %.3e, l1: %.3f, l2: %.5f' % (loss, lambda_1, lambda_2))
def grad(self, X, Y):
with tf.GradientTape() as tape:
loss_value = self.lossval
grads = tape.gradient(loss_value, self.wrap_training_variables())
return loss_value, grads
def train(self, nIter):
start_time = time.time()
trainable=[self.x, self.y, self.t,self.u, self.v, self.lambda_1, self.lambda_2]
for it in range(nIter):
with tf.GradientTape(persistent=True) as tape:
[fu1,fu2,fu3,fv1,fv2,fv3] = self.lambda12(self.x,self.y,self.t)
loss = lambda:tf.reduce_sum(tf.square(self.u - self.u_pred)) + \
tf.reduce_sum(tf.square(self.v - self.v_pred)) + \
tf.reduce_sum(tf.square(fu1*self.lambda_1+fu2*self.lambda_2+fu3)) + \
tf.reduce_sum(tf.square(fv1*self.lambda_1+fv2*self.lambda_2+fv3))
lossval = tf.reduce_sum(tf.square(self.u - self.u_pred)) + \
tf.reduce_sum(tf.square(self.v - self.v_pred)) + \
tf.reduce_sum(tf.square(self.f_u_pred)) + \
tf.reduce_sum(tf.square(self.f_v_pred))
grads = tape.gradient(lossval,trainable)
optimizer_Adam = tf.keras.optimizers.Adam()
optimizer_Adam.apply_gradients(zip(grads, trainable))
optimizer_Adam.minimize(loss,trainable)
del tape
# Print
if it % 10 == 0:
elapsed = time.time() - start_time
loss_value = lossval
lambda_1_value = self.lambda_1
lambda_2_value = self.lambda_2
print('It: %d, Loss: %.3e, l1: %.3f, l2: %.5f, Time: %.2f' %
(it, loss_value, lambda_1_value, lambda_2_value, elapsed))
start_time = time.time()
# optimizer_results=tfp.optimizer.lbfgs_minimize(
# self.loss,
# initial_position=np.random.randn(dim),f_relative_tolerance=1.0 * np.finfo(float).eps,
# max_iterations=50000,tolerance=1e-08
# )
# print(optimizer_results)
# print("TTTTTTTT",[lambda_1_value,lambda_2_value,self.x_tf,self.y_tf, self.t_tf])
# scipy.optimize.minimize(fun=self.loss,x0=[self.sess.run(self.lambda_1),self.sess.run(self.lambda_2),self.sess.run(self.x_tf), self.sess.run(self.y_tf), self.sess.run(self.t_tf)],
# method='l-bfgs-b',options = {'maxiter': 50000,
# 'maxfun': 50000,
# 'maxcor': 50,
# 'maxls': 50,
# 'ftol' : 1.0 * np.finfo(float).eps})
    def predict(self, x_star, y_star, t_star):
        # Evaluate the trained network directly in eager mode (no TF1 session / feed dict).
        x = tf.convert_to_tensor(x_star, dtype=tf.float32)
        y = tf.convert_to_tensor(y_star, dtype=tf.float32)
        t = tf.convert_to_tensor(t_star, dtype=tf.float32)
        u_star, v_star, p_star, _, _ = self.net_NS(x, y, t)
        return u_star.numpy(), v_star.numpy(), p_star.numpy()
def plot_solution(X_star, u_star, index):
lb = X_star.min(0)
ub = X_star.max(0)
nn = 200
x = np.linspace(lb[0], ub[0], nn)
y = np.linspace(lb[1], ub[1], nn)
X, Y = np.meshgrid(x,y)
U_star = griddata(X_star, u_star.flatten(), (X, Y), method='cubic')
plt.figure(index)
plt.pcolor(X,Y,U_star, cmap = 'jet')
plt.colorbar()
def axisEqual3D(ax):
extents = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
sz = extents[:,1] - extents[:,0]
centers = np.mean(extents, axis=1)
maxsize = max(abs(sz))
r = maxsize/4
for ctr, dim in zip(centers, 'xyz'):
getattr(ax, 'set_{}lim'.format(dim))(ctr - r, ctr + r)
if __name__ == "__main__":
N_train = 500
layers = [3, 100, 100, 100, 100, 100, 100, 100, 100, 100, 2]
# Load Data
# ——————————————————————————————————————————————————————————————————
mesh = meshio.read(
filename='/Users/howardxu/work/neuro-net/Physics-informed-neural-networks/main/Data/rec000068.vtu',
# string, os.PathLike, or a buffer/open file
# optional if filename is a path; inferred from extension
)
    x = mesh.points[:, 0]
    y = mesh.points[:, 1]
    t = np.arange(0, 68 * 1.467451833203625E-004, 1.467451833203625E-004)
    u = mesh.point_data['flds1']  # x velocity component
    v = mesh.point_data['flds2']  # y velocity component
    p = mesh.point_data['flds3']  # pressure
N = x.shape[0]
T = t.shape[0]
# ——————————————————————————————————————————————————————————————————
x=x.flatten()[:,None]
y=y.flatten()[:,None]
t=t.flatten()[:,None]
XX = np.tile(x, (1,T)) # N x T
YY = np.tile(y, (1,T)) # N x T
TT = np.tile(t, (N,1)) # N x T
UU = np.tile(u, (1,T))
VV = np.tile(v, (1,T))
PP = np.tile(p, (1,T))
x = XX.flatten()[:,None] # NT x 1
y = YY.flatten()[:,None] # NT x 1
t = TT.flatten()[:,None] # NT x 1
u = UU.flatten()[:,None] # NT x 1
v = VV.flatten()[:,None] # NT x 1
p = PP.flatten()[:,None] # NT
######################################################################
######################## Noiseles Data ###############################
######################################################################
# Training Data
idx = np.random.choice(N*T, N_train, replace=False)
x_train = x[idx,:]
y_train = y[idx,:]
t_train = t[idx,:]
u_train = u[idx,:]
v_train = v[idx,:]
# Training
model = PhysicsInformedNN(x_train, y_train, t_train, u_train, v_train, layers)
model.train(50)
    t = np.arange(0, 68 * 1.467451833203625E-004, 1.467451833203625E-004)
TT = np.tile(t, (N,1))
# Test Data
snap = np.array([10])
x_star = XX[:,snap]
y_star = YY[:,snap]
t_star = TT[:,snap]
u_star = UU[:,snap]
v_star = VV[:,snap]
p_star = PP[:,snap]
# Prediction
u_pred, v_pred, p_pred = model.predict(x_star, y_star, t_star)
    lambda_1_value = model.lambda_1.numpy()[0]
    lambda_2_value = model.lambda_2.numpy()[0]
# Error
error_u = np.linalg.norm(u_star-u_pred,2)/np.linalg.norm(u_star,2)
error_v = np.linalg.norm(v_star-v_pred,2)/np.linalg.norm(v_star,2)
error_p = np.linalg.norm(p_star-p_pred,2)/np.linalg.norm(p_star,2)
error_lambda_1 = np.abs(lambda_1_value - 1.0)*100
error_lambda_2 = np.abs(lambda_2_value - 0.01)/0.01 * 100
print('Error u: %e' % (error_u))
print('Error v: %e' % (error_v))
print('Error p: %e' % (error_p))
print('Error l1: %.5f%%' % (error_lambda_1))
print('Error l2: %.5f%%' % (error_lambda_2))
# Plot Results
# plot_solution(X_star, u_pred, 1)
# plot_solution(X_star, v_pred, 2)
# plot_solution(X_star, p_pred, 3)
# plot_solution(X_star, p_star, 4)
# plot_solution(X_star, p_star - p_pred, 5)
#Import again in case of override
x1=mesh.points[:,0]
y1=mesh.points[:,1]
# Predict for plotting
X_star=np.concatenate([x1.flatten()[:,None],y1.flatten()[:,None]],axis=1)
lb = X_star.min(0)
ub = X_star.max(0)
nn = 200
x = np.linspace(lb[0], ub[0], nn)
y = np.linspace(lb[1], ub[1], nn)
X, Y = np.meshgrid(x,y)
UU_star = griddata(X_star, u_pred.flatten(), (X, Y), method='cubic')
VV_star = griddata(X_star, v_pred.flatten(), (X, Y), method='cubic')
PP_star = griddata(X_star, p_pred.flatten(), (X, Y), method='cubic')
P_exact = griddata(X_star, p_star.flatten(), (X, Y), method='cubic')
######################################################################
########################### Noisy Data ###############################
######################################################################
# noise = 0.01
# u_train = u_train + noise*np.std(u_train)*np.random.randn(u_train.shape[0], u_train.shape[1])
# v_train = v_train + noise*np.std(v_train)*np.random.randn(v_train.shape[0], v_train.shape[1])
# # Training
# model = PhysicsInformedNN(x_train, y_train, t_train, u_train, v_train, layers)
# model.train(1000)
# lambda_1_value_noisy = model.sess.run(model.lambda_1)
# lambda_2_value_noisy = model.sess.run(model.lambda_2)
# error_lambda_1_noisy = np.abs(lambda_1_value_noisy - 1.0)*100
# error_lambda_2_noisy = np.abs(lambda_2_value_noisy - 0.01)/0.01 * 100
# print('Error l1: %.5f%%' % (error_lambda_1_noisy))
# print('Error l2: %.5f%%' % (error_lambda_2_noisy))
######################################################################
############################# Plotting ###############################
######################################################################
# Load Data
data_vort = scipy.io.loadmat('../Data/cylinder_nektar_t0_vorticity.mat')
x_vort = data_vort['x']
y_vort = data_vort['y']
w_vort = data_vort['w']
    modes = int(data_vort['modes'])  # np.asscalar was removed in newer NumPy versions
    nel = int(data_vort['nel'])
xx_vort = np.reshape(x_vort, (modes+1,modes+1,nel), order = 'F')
yy_vort = np.reshape(y_vort, (modes+1,modes+1,nel), order = 'F')
ww_vort = np.reshape(w_vort, (modes+1,modes+1,nel), order = 'F')
box_lb = np.array([1.0, -2.0])
box_ub = np.array([8.0, 2.0])
fig, ax = newfig(1.0, 1.2)
ax.axis('off')
####### Row 0: Vorticity ##################
gs0 = gridspec.GridSpec(1, 2)
gs0.update(top=1-0.06, bottom=1-2/4 + 0.12, left=0.0, right=1.0, wspace=0)
ax = plt.subplot(gs0[:, :])
for i in range(0, nel):
h = ax.pcolormesh(xx_vort[:,:,i], yy_vort[:,:,i], ww_vort[:,:,i], cmap='seismic',shading='gouraud', vmin=-3, vmax=3)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
fig.colorbar(h, cax=cax)
ax.plot([box_lb[0],box_lb[0]],[box_lb[1],box_ub[1]],'k',linewidth = 1)
ax.plot([box_ub[0],box_ub[0]],[box_lb[1],box_ub[1]],'k',linewidth = 1)
ax.plot([box_lb[0],box_ub[0]],[box_lb[1],box_lb[1]],'k',linewidth = 1)
ax.plot([box_lb[0],box_ub[0]],[box_ub[1],box_ub[1]],'k',linewidth = 1)
ax.set_aspect('equal', 'box')
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
ax.set_title('Vorticity', fontsize = 10)
####### Row 1: Training data ##################
######## u(t,x,y) ###################
gs1 = gridspec.GridSpec(1, 2)
gs1.update(top=1-2/4, bottom=0.0, left=0.01, right=0.99, wspace=0)
ax = plt.subplot(gs1[:, 0], projection='3d')
ax.axis('off')
r1 = [x_star.min(), x_star.max()]
r2 = [data['t'].min(), data['t'].max()]
r3 = [y_star.min(), y_star.max()]
for s, e in combinations(np.array(list(product(r1,r2,r3))), 2):
if np.sum(np.abs(s-e)) == r1[1]-r1[0] or np.sum(np.abs(s-e)) == r2[1]-r2[0] or np.sum(np.abs(s-e)) == r3[1]-r3[0]:
ax.plot3D(*zip(s,e), color="k", linewidth = 0.5)
ax.scatter(x_train, t_train, y_train, s = 0.1)
ax.contourf(X,UU_star,Y, zdir = 'y', offset = t_star.mean(), cmap='rainbow', alpha = 0.8)
ax.text(x_star.mean(), data['t'].min() - 1, y_star.min() - 1, '$x$')
ax.text(x_star.max()+1, data['t'].mean(), y_star.min() - 1, '$t$')
ax.text(x_star.min()-1, data['t'].min() - 0.5, y_star.mean(), '$y$')
ax.text(x_star.min()-3, data['t'].mean(), y_star.max() + 1, '$u(t,x,y)$')
ax.set_xlim3d(r1)
ax.set_ylim3d(r2)
ax.set_zlim3d(r3)
axisEqual3D(ax)
######## v(t,x,y) ###################
ax = plt.subplot(gs1[:, 1], projection='3d')
ax.axis('off')
r1 = [x_star.min(), x_star.max()]
r2 = [data['t'].min(), data['t'].max()]
r3 = [y_star.min(), y_star.max()]
for s, e in combinations(np.array(list(product(r1,r2,r3))), 2):
if np.sum(np.abs(s-e)) == r1[1]-r1[0] or np.sum(np.abs(s-e)) == r2[1]-r2[0] or np.sum(np.abs(s-e)) == r3[1]-r3[0]:
ax.plot3D(*zip(s,e), color="k", linewidth = 0.5)
ax.scatter(x_train, t_train, y_train, s = 0.1)
ax.contourf(X,VV_star,Y, zdir = 'y', offset = t_star.mean(), cmap='rainbow', alpha = 0.8)
ax.text(x_star.mean(), data['t'].min() - 1, y_star.min() - 1, '$x$')
ax.text(x_star.max()+1, data['t'].mean(), y_star.min() - 1, '$t$')
ax.text(x_star.min()-1, data['t'].min() - 0.5, y_star.mean(), '$y$')
ax.text(x_star.min()-3, data['t'].mean(), y_star.max() + 1, '$v(t,x,y)$')
ax.set_xlim3d(r1)
ax.set_ylim3d(r2)
ax.set_zlim3d(r3)
axisEqual3D(ax)
# savefig('./figures/NavierStokes_data')
fig, ax = newfig(1.015, 0.8)
ax.axis('off')
######## Row 2: Pressure #######################
######## Predicted p(t,x,y) ###########
gs2 = gridspec.GridSpec(1, 2)
gs2.update(top=1, bottom=1-1/2, left=0.1, right=0.9, wspace=0.5)
ax = plt.subplot(gs2[:, 0])
h = ax.imshow(PP_star, interpolation='nearest', cmap='rainbow',
extent=[x_star.min(), x_star.max(), y_star.min(), y_star.max()],
origin='lower', aspect='auto')
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
fig.colorbar(h, cax=cax)
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
ax.set_aspect('equal', 'box')
ax.set_title('Predicted pressure', fontsize = 10)
######## Exact p(t,x,y) ###########
ax = plt.subplot(gs2[:, 1])
h = ax.imshow(P_exact, interpolation='nearest', cmap='rainbow',
extent=[x_star.min(), x_star.max(), y_star.min(), y_star.max()],
origin='lower', aspect='auto')
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
fig.colorbar(h, cax=cax)
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
ax.set_aspect('equal', 'box')
ax.set_title('Exact pressure', fontsize = 10)
######## Row 3: Table #######################
gs3 = gridspec.GridSpec(1, 2)
gs3.update(top=1-1/2, bottom=0.0, left=0.0, right=1.0, wspace=0)
ax = plt.subplot(gs3[:, :])
ax.axis('off')
s = r'$\begin{tabular}{|c|c|}';
s = s + r' \hline'
s = s + r' Correct PDE & $\begin{array}{c}'
s = s + r' u_t + (u u_x + v u_y) = -p_x + 0.01 (u_{xx} + u_{yy})\\'
s = s + r' v_t + (u v_x + v v_y) = -p_y + 0.01 (v_{xx} + v_{yy})'
s = s + r' \end{array}$ \\ '
s = s + r' \hline'
s = s + r' Identified PDE (clean data) & $\begin{array}{c}'
s = s + r' u_t + %.3f (u u_x + v u_y) = -p_x + %.5f (u_{xx} + u_{yy})' % (lambda_1_value, lambda_2_value)
s = s + r' \\'
s = s + r' v_t + %.3f (u v_x + v v_y) = -p_y + %.5f (v_{xx} + v_{yy})' % (lambda_1_value, lambda_2_value)
s = s + r' \end{array}$ \\ '
s = s + r' \hline'
s = s + r' Identified PDE (1\% noise) & $\begin{array}{c}'
s = s + r' u_t + %.3f (u u_x + v u_y) = -p_x + %.5f (u_{xx} + u_{yy})' % (lambda_1_value_noisy, lambda_2_value_noisy)
s = s + r' \\'
s = s + r' v_t + %.3f (u v_x + v v_y) = -p_y + %.5f (v_{xx} + v_{yy})' % (lambda_1_value_noisy, lambda_2_value_noisy)
s = s + r' \end{array}$ \\ '
s = s + r' \hline'
s = s + r' \end{tabular}$'
ax.text(0.015,0.0,s)
# savefig('./figures/NavierStokes_prediction')
``` |
{
"source": "56kyle/bloons_auto",
"score": 3
} |
#### File: bloons_auto/api_creation/get_data_cleanly.py
```python
from collections.abc import Iterable
import re
def with_brackets(type_str: str) -> str:
return type_str.replace('<', '[').replace('>', ']')
def get_data(*lines):
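    # Parse the tab-indented Cheat Engine dump: the number of leading tabs encodes
    # the level (game -> dll -> class -> section -> entry) and each entry line has
    # the form "address : name".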
game = {}
main_offset = 0
section = ''
dll_name = ''
class_name = ''
section_name = ''
for line in lines:
tabs = line.count('\t')
stripped = line.replace('\t', '').replace('\n', '')
address = None
if ' : ' in stripped:
address = stripped.split(' : ')[0]
if tabs == 0:
# Main exe offset
game = {'address': address, 'dlls': {}}
elif tabs == 1:
# Dll
dll_name = '.'.join(stripped.split(' : ')[-1].split('.')[:-1])
print(dll_name)
game['dlls'][dll_name] = {
'address': address,
'classes': {}
}
elif tabs == 2:
# Class
class_name = stripped.split(' : ')[-1]
game['dlls'][dll_name]['classes'][class_name] = {
'address': address,
'fields': {},
'static_fields': {},
'methods': {},
'base_class': {},
}
elif tabs == 3:
# Field labels
section_name = stripped
game['dlls'][dll_name]['classes'][class_name][section_name] = {}
elif tabs == 4:
# Field data
if section_name == 'static fields':
static_field_name = stripped.split(' : ')[-1].split(' (')[0]
static_field_type = stripped.split(' (type: ')[-1].replace(')', '')
static_field_type = with_brackets(static_field_type)
game['dlls'][dll_name]['classes'][class_name][section_name][static_field_name] = {
'address': address,
'type': static_field_type,
}
elif section_name == 'fields':
field_name = stripped.split(' : ')[-1].split(' (')[0]
field_type = stripped.split(' (type: ')[-1].replace(')', '')
field_type = with_brackets(field_type)
game['dlls'][dll_name]['classes'][class_name][section_name][field_name] = {
'address': address,
'type': field_type,
}
elif section_name == 'methods':
method_name = stripped.split(' : ')[-1].split(' (')[0]
method_parameters_whole: str = stripped.split(' -> ')[0].split(' (')[-1].replace(')', '')
if method_parameters_whole:
if ', ' in method_parameters_whole:
method_parameters = method_parameters_whole.split(', ')
else:
method_parameters = [method_parameters_whole]
else:
method_parameters = []
method_parameters_data = {}
for method_param in method_parameters:
try:
method_param_name, method_param_type = method_param.split(': ')
method_param_type = with_brackets(method_param_type)
method_parameters_data[method_param_name] = {
'type': method_param_type,
}
except Exception as e:
pass
method_return_type = stripped.split(' -> ')[-1]
method_return_type = with_brackets(method_return_type)
game['dlls'][dll_name]['classes'][class_name][section_name][method_name] = {
'address': address,
'params': method_parameters_data,
'return_type': method_return_type,
}
elif section_name == 'base_class':
base_class_name = stripped.split(' : ')[-1]
game['dlls'][dll_name]['classes'][class_name][section_name][base_class_name] = {
'address': address,
}
else:
print(f'no section provided - {section}')
return game
if __name__ == '__main__':
with open('./cheat_engine_output.TXT', 'r') as file:
data = get_data(*file.readlines())
with open('./cheat_engine_direct_2.txt', 'w') as new_file:
new_file.write(str(data))
```
#### File: 56kyle/bloons_auto/hitbox.py
```python
from maps import *
from map import Map
from towers import *
from tower import Tower
import pyscreeze
import os
from functools import cache
from PIL import Image
import numpy as np
class Hitbox:
def __init__(self, tower: Tower):
self.tower = tower
self.size = tower.size
self.width = tower.width
self.height = tower.height
self.file_path = os.path.join('.', 'towers', 'hitboxes', f'{self.size}')
self.img = Image.open(self.file_path + '.png')
@cache
def data(self):
return np.array(self.img)
def save(self):
np.save(self.file_path + '.npy', self.data())
if __name__ == '__main__':
for size in SIZES:
try:
hitbox = Hitbox(size[0])
new_img = hitbox.img.convert('1')
hitbox.img = new_img
new_data = []
for val in hitbox.img.getdata():
new_data.append(not val)
hitbox.img.putdata(new_data)
hitbox.img.save(hitbox.file_path + '_test.png')
np.save(hitbox.file_path, hitbox.data())
except FileNotFoundError:
pass
```
#### File: maps/infernal/infernal.py
```python
from map import Map
class Infernal(Map):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.name = 'infernal'
self.difficulty = 'expert'
self.page = 1
self.placement = [0, 0]
```
#### File: maps/peninsula/peninsula.py
```python
from map import Map
class Peninsula(Map):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.name = 'peninsula'
self.difficulty = 'advanced'
```
#### File: maps/workshop/workshop.py
```python
import keyboard
import mouse
import time
from map import Map
from towers import *
from tower import Point
from config import keybinds
class Workshop(Map):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.name = 'workshop'
self.difficulty = 'expert'
self.page = 1
self.placement = [2, 0]
def play_collection(self):
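        # Scripted opening for the collection event: place a dart monkey, start the
        # round on fast-forward, then time hero/tower purchases and upgrades with
        # fixed sleeps.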
dart = DartMonkey(location=Point(1016, 501))
self.start()
self.toggle_fast_forward()
# Wait for round one to end
time.sleep(10)
# Place Ben
keyboard.press_and_release(keybinds['heroes'], .2)
time.sleep(.3)
mouse.move(1518, 987)
time.sleep(.2)
mouse.click()
# Wait to buy wiz
time.sleep(15)
wiz = WizardMonkey(location=Point(1016, 437))
# Upgrade wiz
time.sleep(15)
wiz.assign_upgrades([0, 1, 0])
time.sleep(15)
wiz.assign_upgrades([0, 2, 0])
# Wait to buy spike factory
time.sleep(20)
spike = SpikeFactory(location=Point(1580, 685))
# Upgrade spike factory.
time.sleep(20)
spike.assign_upgrades([1, 0, 0])
time.sleep(15)
spike.assign_upgrades([1, 1, 0])
time.sleep(15)
spike.assign_upgrades([1, 2, 0])
time.sleep(15)
spike.assign_upgrades([2, 2, 0])
# Waut to buy a
# Wait for round to finish out
time.sleep(144)
```
#### File: bloons_auto/towers/banana_farm.py
```python
from tower import Tower
from config import keybinds
class BananaFarm(Tower):
name = 'banana_farm'
range = 215
width = 162
height = 141
size = 'rectangle'
keybind = keybinds[name]
aquatic = False
def __init__(self, **kwargs):
super().__init__(**kwargs)
```
#### File: bloons_auto/towers/heli_pilot.py
```python
from tower import Tower
from config import keybinds
class HeliPilot(Tower):
name = 'heli_pilot'
range = 118
width = 145
height = 127
size = 'rectangle'
keybind = keybinds[name]
aquatic = False
def __init__(self, **kwargs):
super().__init__(**kwargs)
```
#### File: bloons_auto/towers/monkey_ace.py
```python
from tower import Tower
from config import keybinds
class MonkeyAce(Tower):
name = 'monkey_ace'
range = 118
width = 152
height = 85
size = 'rectangle'
keybind = keybinds[name]
aquatic = False
def __init__(self, **kwargs):
super().__init__(**kwargs)
```
#### File: bloons_auto/towers/monkey_buccaneer.py
```python
from tower import Tower
from config import keybinds
class MonkeyBuccaneer(Tower):
name = 'monkey_buccaneer'
range = 323
width = 87
height = 75
size = 'large'
keybind = keybinds[name]
aquatic = True
def __init__(self, **kwargs):
super().__init__(**kwargs)
```
#### File: bloons_auto/towers/monkey_village.py
```python
from tower import Tower
from config import keybinds
class MonkeyVillage(Tower):
name = 'monkey_village'
range = 215
width = 119
height = 103
size = 'xl'
keybind = keybinds[name]
aquatic = False
def __init__(self, **kwargs):
super().__init__(**kwargs)
```
#### File: bloons_auto/towers/sniper_monkey.py
```python
from tower import Tower
from config import keybinds
class SniperMonkey(Tower):
name = 'sniper_monkey'
range = 107
width = 65
height = 57
size = 'small'
keybind = keybinds[name]
aquatic = False
def __init__(self, **kwargs):
super().__init__(**kwargs)
```
#### File: bloons_auto/util/collection.py
```python
import time
import os
import psutil
from ReadWriteMemory import ReadWriteMemory
import mouse
import pyscreeze
import maps
from map import Map
from maps import *
from tower import Tower, Point
from towers import *
class Collector:
play_button = (820, 940)
expert_map_button = (1340, 980)
easy_button = (615, 420)
standard_button = (640, 585)
back_to_main_button = (60, 70)
def __init__(self):
self.current_map = None
def click_expert(self):
mouse.move(*self.expert_map_button)
time.sleep(1)
mouse.click()
def is_map(self, map):
os.path.join('./maps', map.name,)
def click_ideal_map(self):
self.click_expert()
page = 1
time.sleep(1)
idol = pyscreeze.locateCenterOnScreen('./collection_idol.png', confidence=.6)
print(f'Moving to idol at - {idol}')
if not idol:
self.click_expert()
page = 2
time.sleep(1)
idol = pyscreeze.locateCenterOnScreen('./collection_idol.png', confidence=.6)
print(idol)
time.sleep(.1)
mouse.move(*idol)
time.sleep(.1)
mouse.click()
time.sleep(.1)
return page, idol
def click_play(self):
mouse.move(*self.play_button)
time.sleep(.4)
mouse.click()
time.sleep(1)
def click_easy(self):
mouse.move(*self.easy_button)
time.sleep(.4)
mouse.click()
time.sleep(1)
def click_standard(self):
mouse.move(*self.standard_button)
time.sleep(.4)
mouse.click()
time.sleep(1)
if __name__ == '__main__':
time.sleep(5)
collector = Collector()
while True:
collector.click_play()
time.sleep(1)
collector.click_expert()
time.sleep(1)
#page, point_chosen = collector.click_ideal_map()
page = 1
point_chosen = Point(1400, 570)
mouse.move(*point_chosen)
time.sleep(.2)
mouse.click()
time.sleep(1)
collector.click_easy()
time.sleep(1)
collector.click_standard()
time.sleep(5)
'''
x, y = Map.point_to_map(point_chosen)
for map_class in maps.ALL:
if map_class.difficulty == 'Expert':
if map_class.page == page and map_class.placement == [x, y]:
map = map_class()
break
'''
collector.current_map = Workshop()
collector.current_map.play_collection()
```
#### File: bloons_auto/util/convert_cheat_engine.py
```python
import sys
import inflection
import os
import re
from typing import Union
from pymem import Pymem
from btd6_memory_info.cheat_engine_output_offsets import offsets
# Returns address, class path
class_re = re.compile(r"\t*([\w.\-<>]+) : ([\w.<>,=|{}-]+)\n")
# Returns offset from class base address, name, type
static_field_re = re.compile(r"\t*([\w\-<>]+) : ([^\s]+) \(type: ([\w.,<>{}-]+)\)\n")
# Returns offset from class base address, name, type
field_re = re.compile(r"\t*([\w\-<>]+) : ([^\s]+) \(type: ([\w.,<>{}-]+)\)\n")
# Returns address, function name, function parameters, return type
method_re = re.compile(r"\t*([\w<>-]+) : ([\w.,]+) \(([^)]*)\):([\w.,<>{}-]+)\n")
def convert_all():
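    # Walk the Cheat Engine dump line by line, using tab depth to distinguish the
    # process base address, modules, and classes, then write the collected dict to
    # cheat_engine_data.py.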
data = {}
current_module_name = None
sys.setrecursionlimit(2000)
with open('../btd6_memory_info/cheat_engine_output.TXT', 'r') as old:
lines = old.readlines()
for i, line in enumerate(lines):
if line.startswith('\t'):
if line.startswith('\t\t'):
if line.startswith('\t\t\t'):
# Loops through in get_class_data anyway
pass
else:
# Class
class_data = get_class_data(current_module_base, lines, i-1)
if not class_data['name'].startswith('<'):
data['process']['modules'][current_module_name]['classes'][class_data['name']] = class_data
else:
# Module
match = re.fullmatch(class_re, line)
if match:
address, name = match.groups()
current_module_name = name
current_module_base = int(address, 16)
data['process']['modules'][current_module_name] = {'offset': base_address - int(address, 16), 'classes': {}}
else:
print("ONO")
print(line)
else:
# Base
base_address = int(line.replace('\n', ''), 16)
data['process'] = {'name': 'BloonsTD6.exe', 'base_address': base_address, 'modules': {}}
with open('../btd6_memory_info/cheat_engine_data.py', 'w') as new_file:
new_file.write('data = ' + str(data))
tab_re = re.compile('\t')
def get_class_data(module_base: int, lines: [str], base_class_tag_index: int) -> dict:
static_field_tag_index = None
field_tag_index = None
method_tag_index = None
base_class_tag_index_found = None
current_line: int = base_class_tag_index
data = {
'name': None,
'offset': None,
'static_fields': None,
'fields': None,
'methods': None,
'base_class': None,
}
def get_class_metadata(i: int) -> Union[dict, None]:
# same as has_base_class
if len(lines) == i + 1:
return None
match = re.fullmatch(class_re, lines[i+1])
if match:
address, name = match.groups()
if '<' in name:
print(name)
return {'offset_from_module': int(address, 16) - module_base, 'name': name}
return None
def get_static_field_data(i: int) -> Union[dict, None]:
# lines[i] should end with 'static fields'
match = re.fullmatch(static_field_re, lines[i+1])
if match:
offset_from_class, name, type = match.groups()
return {'offset_from_class': int(offset_from_class, 16), 'name': name, 'type': type}
return None
def get_field_data(i: int) -> Union[dict, None]:
# lines[i] should end with 'fields'
match = re.fullmatch(field_re, lines[i+1])
if match:
offset_from_class, name, field_type = match.groups()
return {'offset_from_class': int(offset_from_class, 16), 'name': name, 'type': field_type}
return None
def get_method_data(i: int) -> Union[dict, None]:
# lines[i] should end with 'methods'
match = re.fullmatch(method_re, lines[i+1])
def get_params(p):
new_params = {}
if p:
for pair in p.split('; '):
try:
arg, value = pair.split(': ')
new_params[arg] = value
except ValueError as e:
pass
return new_params
if match:
address, name, params, return_type = match.groups()
return {'offset_from_module': int(address, 16) - module_base, 'name': name, 'params': get_params(params), 'return_type': return_type}
return None
while not (static_field_tag_index and field_tag_index and method_tag_index and base_class_tag_index_found):
if not data['name'] or not data['offset']:
metadata = get_class_metadata(current_line)
data['offset'] = metadata['offset_from_module']
data['name'] = metadata['name']
if lines[current_line].endswith('static fields\n') or (static_field_tag_index and not field_tag_index):
new_field = get_static_field_data(current_line)
if new_field:
data['static_fields'] = data['static_fields'] if data['static_fields'] else {}
data['static_fields'][new_field['name']] = new_field
#print(new_field)
static_field_tag_index = current_line
if lines[current_line].endswith('fields\n') or (field_tag_index and not method_tag_index):
new_field = get_field_data(current_line)
if new_field:
data['fields'] = data['fields'] if data['fields'] else {}
data['fields'][new_field['name']] = new_field
#print(new_field)
field_tag_index = current_line
if lines[current_line].endswith('methods\n') or (method_tag_index and not base_class_tag_index):
new_method = get_method_data(current_line)
if new_method:
data['methods'] = data['methods'] if data['methods'] else {}
data['methods'][new_method['name']] = new_method
#print(new_method)
method_tag_index = current_line
if 'base class' in lines[current_line] and current_line != base_class_tag_index:
data['base_class'] = get_class_metadata(current_line)
base_class_tag_index_found = current_line
current_line += 1
return data
def handle_class(line):
klass = line.strip('\t').strip('\n')
klass_address, klass_path = klass.split(' : ')
path_sections = klass_path.split('.')
root = os.path.join('..', 'btd6_memory_info', 'generated')
name = path_sections[-1]
for folder in path_sections[:-1]:
root = os.path.join(root, folder)
try:
if not os.path.isdir(root):
os.mkdir(root)
new_file = os.path.join(root, inflection.underscore(folder)) + '.py'
if not os.path.isfile(new_file):
print(new_file)
with open(new_file, 'w') as new_klass:
new_klass.write(f'''class {folder}:\tpass''')
except OSError:
pass
return
def re_test():
test = re.compile(r"\t+(\w+)\s:\s(.+)\n")
data = {}
with open('../btd6_memory_info/cheat_engine_output.TXT', 'r') as old:
lines = old.readlines()
for line in lines:
match = re.fullmatch(test, line)
if match:
pass
if __name__ == '__main__':
convert_all()
```
#### File: bloons_auto/util/convert_data_to_api.py
```python
import inflection
import os
from btd6_memory_info.cheat_engine_data import data
def api_up():
for module_name, module_data in data['process']['modules'].items():
import_lines = []
import_names = []
classes = []
class_names = []
def get_class_lines(path, data):
name = path.split('.')[-1]
base_class = data.get('base_class')
parent = ''
class_imports = []
if base_class:
base_path = base_class['name'].replace('.dll', '')
base_name = base_path.split('.')[-1].replace('.dll', '')
if base_name != name:
import_names.append(base_path)
class_imports.append(base_path)
parent = f'({base_path})' if base_path else ''
print(parent)
methods = []
if data.get('methods'):
for method_name, method_data in data['methods'].items():
#print('method_name - ', method_name)
#print('method_data - ', method_data)
pass
try:
params = ', '.join([f'{field["name"]}: {field["type"]}' for field_name, field in data["fields"].items()])
except AttributeError:
params = ''
offsets = {}
try:
class_variables = '\n'.join([f'\t{static_name}: {static["type"]}' for static_name, static in data['static_fields'].items()])
except AttributeError:
class_variables = ''
try:
instance_variables = '\n'.join([f'\t\tself.{field["name"]} = {field["name"]}' for field_name, field in data["fields"].items()])
except AttributeError:
instance_variables = ''
if data['static_fields']:
for cls_var in data['static_fields'].values():
offsets[cls_var['name']] = cls_var['offset_from_class']
if cls_var['type'] not in class_names and cls_var['type'] not in import_names:
import_names.append(cls_var['type'])
class_imports.append(cls_var['type'])
if data['fields']:
for inst_var in data['fields'].values():
offsets[inst_var['name']] = inst_var['offset_from_class']
if inst_var['type'] not in class_names and inst_var['type'] not in import_names:
import_names.append(inst_var['type'])
class_imports.append(inst_var['type'])
class_lines = [
f'',
f'class {name}{parent}:',
f'{class_variables}',
f' offsets = {offsets}',
f' ',
f' def __init__(self, {params + ", " if params else ""}**kwargs):',
f' super().__init__(self, **kwargs)',
f'{instance_variables}',
f'',
]
classes.append('\n'.join(class_lines))
module_data['classes'][class_data['name']]['lines'] = class_lines
module_data['classes'][class_data['name']]['imports'] = class_imports
for class_path, class_data in module_data['classes'].items():
get_class_lines(class_path.replace('.dll', ''), class_data)
with open(os.path.join('.', 'gen', module_name + '.py'), 'w') as file:
file.write('\n'.join(import_lines) + '\n\n' + '\n\n'.join(classes))
make_folder_api(module_name.replace('.dll', ''), module_data['classes'])
def make_folder_api(module_name, classes_data):
gen_test = os.path.join('.', 'gen_test')
try_to_make(gen_test)
module_path_base: list = module_name.split('.')[:-2]
module_path_last: str = '.'.join(module_name.split('.')[-2:])
module_path = os.path.join(gen_test, *module_path_base, module_path_last)
module_init = os.path.join(module_path, '__init__.py')
try_to_make(module_path)
with open(module_init, 'w') as file:
pass
for class_name, class_data in classes_data.items():
class_path = os.path.join(module_path, *class_name.split('.'))
class_path = class_path.replace('<', '').replace('>', '').replace(',', '_').replace('|', '').replace('=', '_')
try_to_make(class_path)
class_init = os.path.join(class_path, '__init__.py')
try:
with open(class_init, 'w') as file:
import_lines = []
for imp in class_data['imports']:
import_lines.append(f'import {imp}')
imports = '\n'.join(import_lines)
file.write(imports + '\n'.join(class_data['lines']))
except Exception as e:
print(e)
def try_to_make(path):
try:
os.makedirs(path.replace('<', '').replace('>', '').replace(',', '_').replace('|', ''))
path = path.replace('<', '').replace('>', '').replace(',', '_').replace('|', '')
print(path)
for section in path.split('/'):
pass
except:
pass
def api_down():
os.removedirs('./gen')
if __name__ == '__main__':
api_up()
```
#### File: bloons_auto/util/find_range_area.py
```python
from maps import *
from map import Map
from towers import *
from tower import Tower
import exceptions
from PIL import Image
import numpy as np
import mouse
import keyboard
import pyscreeze
import random
import time
def find_range_area():
location_i = mouse.get_position()
mouse.move(1900, 1060)
time.sleep(.3)
img_i = pyscreeze.screenshot()
mouse.move(*location_i)
time.sleep(.3)
keyboard.press_and_release(DartMonkey.keybind, .3)
time.sleep(.6)
img_f = pyscreeze.screenshot()
np_i = np.array(img_i)
np_f = np.array(img_f)
diff = np_f - np_i
new_data = []
diff_img = Image.fromarray(diff)
for px in diff_img.getdata():
if px == (0, 0, 0):
new_data.append((0, 0, 0))
else:
new_data.append((255, 255, 255))
diff_img.putdata(new_data)
diff_img.putpixel(location_i, (0, 0, 0))
diff_img.convert('1').save(f'./data/{random.randint(1, 100000)}.png')
if __name__ == '__main__':
time.sleep(3)
find_range_area()
```
#### File: bloons_auto/util/gradually_map.py
```python
import os
import sys
from maps import *
from map import Map
import pyscreeze
import win32gui
import towers
from tower import Tower, Point
from hitbox import Hitbox
from dynamic_info.placeable import PlaceableHook
from dynamic_info.money import MoneyHook
from dynamic_info.session import Btd6Session
import collections
import keyboard
import numpy as np
import mouse
import random
import time
from config import keybinds
from PIL import Image
from window_input import Window, Key
Region = collections.namedtuple('Region', 'xi yi xf yf')
class MapScanner:
def __init__(self, a_map, tower, regions):
self.a_map = a_map
self.tower = tower
self.regions = regions
self.checked = np.full((1080, 1920), False)
self.placeable = np.full((1080, 1920), False)
def get_placement_map(self):
im = pyscreeze.screenshot()
img_file_name = f'../maps/{self.a_map.name}/placement/{"land" if not self.tower.aquatic else "sea"}/{self.tower.size if self.tower.size != "rectangle" else self.tower.name}.png'
im.save(f'../maps/{self.a_map.name}/{self.a_map.name}.png')
try:
img = Image.open(img_file_name)
except:
img = Image.new('RGB', (1920, 1080))
keyboard.press_and_release(self.tower.keybind)
for region in self.regions:
win = win32gui.GetActiveWindow()
xi, yi, xf, yf = region
for x in range(xi, xf, 16):
for y in range(yi, yf, 12):
if mouse.get_position() == (0, 0):
print('fuck')
return
if self.tower.can_place(Point(x, y), im):
a = False
b = False
if img.getpixel((x - 16, y)) == (255, 255, 255):
a = True
for dx in range(1, 17):
img.putpixel((x - dx, y), (255, 255, 255))
if img.getpixel((x, y - 12)) == (255, 255, 255):
b = True
for dy in range(1, 17):
img.putpixel((x, y - dy), (255, 255, 255))
if img.getpixel((x - 16, y - 12)) == (255, 255, 255) and a and b:
for dy in range(1, 17):
for dx in range(1, 17):
img.putpixel((x - dx, y - dy), (255, 255, 255))
img.putpixel((x, y), (255, 255, 255))
img.save(img_file_name)
def get_changing(self):
location_i = mouse.get_position()
mouse.move(1900, 1060)
time.sleep(.3)
img_i = pyscreeze.screenshot()
time.sleep(.3)
img_f = pyscreeze.screenshot()
np_i = np.array(img_i)
np_f = np.array(img_f)
diff = np_f - np_i
new_data = []
diff_img = Image.fromarray(diff)
for px in diff_img.getdata():
if px == (0, 0, 0):
new_data.append((0, 0, 0))
else:
new_data.append((255, 255, 255))
diff_img.putdata(new_data)
diff_img.putpixel(location_i, (0, 0, 0))
return np.array(diff_img.convert('1'))
def get_np_placement_map(self):
btd6_session = Btd6Session(hooks=[PlaceableHook])
img_file_name = f'./maps/{self.a_map.name}/placement/{"land" if not self.tower.aquatic else "sea"}/{self.tower.size if self.tower.size != "rectangle" else self.tower.name}'
keyboard.press_and_release(self.tower.keybind)
for region in self.regions:
xi, yi, xf, yf = region
#gx = int(16 / 2)
#gy = int(12 / 2)
gx = 16
gy = 12
while not (gx == 1 and gy == 1):
for x in range(xi, xf, gx):
for y in range(yi, yf, gy):
#print('=====')
if mouse.get_position() == (0, 0):
print('fuck')
return
while not btd6_session.hooks['PlaceableHook'].np_checked[y, x]:
mouse.move(x, y)
else:
quadrants = [
[gx, gy], # Top Left
[-gx, gy], # Top Right
[gx, -gy], # Bot Left
[-gx, -gy], # Bot Right
]
for pair in quadrants:
print(f'pair - {pair}')
if abs(gx) > 2 and abs(gy) > 2 and not btd6_session.hooks['PlaceableHook'].np_checked[y - int(abs(pair[1] / pair[1])), x - int(abs(pair[0] / pair[0]))]:
print('greater than 2')
if self.should_fill_quad(btd6_session.hooks['PlaceableHook'].np_checked, btd6_session.hooks['PlaceableHook'].np_placeable, x, y, pair[0], pair[1]):
if gx < 0:
xi = x
xf = x - gx
else:
xi = x - gx
xf = x
if gy < 0:
yi = y
yf = y - gy
else:
yi = y - gy
yf = y
btd6_session.hooks['PlaceableHook'].np_placeable[yi:yf, xi:xf] = btd6_session.hooks['PlaceableHook'].np_placeable[y, x]
btd6_session.hooks['PlaceableHook'].np_checked[yi:yf, xi:xf] = True
'''
for xx in range(xi, xf):
for yy in range(yi, yf):
print(f'fillled {(yy, xx)}')
btd6_session.hooks['PlaceableHook'].np_placeable[yy, xx] = btd6_session.hooks['PlaceableHook'].np_placeable[y, x]
btd6_session.hooks['PlaceableHook'].np_checked[yy, xx] = True
'''
else:
print('---------------------')
print('else')
print('---------------------')
np.save(img_file_name, btd6_session.hooks['PlaceableHook'].np_placeable)
Image.fromarray(btd6_session.hooks['PlaceableHook'].np_placeable).save(img_file_name + '.png')
gx = int(gx / 2) if gx != 1 else gx
gy = int(gy / 2) if gy != 1 else gy
def should_fill_quad(self, checked, placeable, x, y, gx, gy):
"""Checks if a given rectangle can be filled in and does so if possible"""
print('should_fill_quad')
point_one = checked[y - gy, x - gx] and placeable[y - gy, x - gx]
point_two = checked[y, x - gx] and placeable[y, x - gx]
point_three = checked[y - gy, x] and placeable[y - gy, x]
desired_result = placeable[y, x]
print('returning')
return point_one == desired_result and point_two == desired_result and point_three == desired_result
def get_np_placement_map_one(self):
manual_delay_locations = self.get_changing()
im = pyscreeze.screenshot()
img_file_name = f'../maps/{self.a_map.name}/placement/{"land" if not self.tower.aquatic else "sea"}/{self.tower.size if self.tower.size != "rectangle" else self.tower.name}'
im.save(f'../maps/{self.a_map.name}/{self.a_map.name}.png')
np_im = np.array(im)
if os.path.isfile(img_file_name + '.npy'):
placeable = np.load(img_file_name + '.npy')
else:
placeable = np.full((1080, 1920), False)
keyboard.press_and_release(self.tower.keybind)
for region in self.regions:
xi, yi, xf, yf = region
#gx = int(16 / 2)
#gy = int(12 / 2)
for x in range(xi, xf):
for y in range(yi, yf):
#print('=====')
if mouse.get_position() == (0, 0):
print('fuck')
return
placeable[y, x] = self.tower.can_place_quick(Point(x, y), np_im, manual_delay_locations)
np.save(img_file_name, placeable)
Image.fromarray(placeable).save(img_file_name + '.png')
def get_map_img(self):
mouse.move(1910, 1070)
time.sleep(.3)
im = pyscreeze.screenshot()
im.save(f'../maps/{self.a_map.name}/{self.a_map.name}.png')
def get_map_v2(self):
btd6_session = Btd6Session(hooks=[PlaceableHook])
print('post session')
img_file_name = f'../maps/{self.a_map.name}/placement/{"land" if not self.tower.aquatic else "sea"}/{self.tower.size if self.tower.size != "rectangle" else self.tower.name}'
if os.path.isfile(img_file_name + '.npy'):
placeable = np.load(img_file_name + '.npy')
else:
placeable = np.full((1080, 1920), False)
checked = np.full((1080, 1920), False)
keyboard.press_and_release(self.tower.keybind)
gx = 16
gy = 12
for region in self.regions:
xi, yi, xf, yf = region
for x in range(xi, xf, gx):
for y in range(yi, yf, gy):
while not checked[y, x]:
try:
while btd6_session.location[0] != x or btd6_session.location[1] != y:
mouse.move(x, y)
else:
placeable[y, x] = btd6_session.placeable
checked[y, x] = True
except:
mouse.move(x, y)
else:
desired = placeable[y, x]
self.should_fill_quad(checked, placeable, x, y, gx, gy)
np.save(img_file_name, placeable)
img = Image.fromarray(placeable)
img.save(img_file_name + '.png')
def get_map_v3(self):
btd6_session = Btd6Session(hooks=[PlaceableHook])
print('post session')
img_file_name = f'./maps/{self.a_map.name}/placement/{"land" if not self.tower.aquatic else "sea"}/{self.tower.size if self.tower.size != "rectangle" else self.tower.name}'
if os.path.isfile(img_file_name + '.npy'):
placeable = np.load(img_file_name + '.npy')
else:
placeable = np.full((1080, 1920), False)
btd6_session.hooks['PlaceableHook'].np_out = placeable
keyboard.press_and_release(self.tower.keybind)
for region in self.regions:
xi, yi, xf, yf = region
for x in range(xi, xf):
for y in range(yi, yf):
mouse.move(x, y)
time.sleep(.005)
np.save(img_file_name, btd6_session.hooks['PlaceableHook'].np_placeable)
img = Image.fromarray(btd6_session.hooks['PlaceableHook'].np_placeable)
img.save(img_file_name + '.png')
if __name__ == '__main__':
time.sleep(4)
infernal_regions = [
Region(56, 424, 164, 714),
Region(394, 154, 548, 300),
Region(760, 262, 912, 410),
Region(1106, 136, 1286, 308),
Region(388, 752, 562, 932),
Region(752, 664, 920, 818),
Region(1126, 766, 1278, 926),
Region(1502, 426, 1638, 710),
]
dark_castle_regions = [
#Region(30, 30, 1048, 1050),
Region(1264, 30, 1640, 1050)
]
bloody_puddles_regions = [
Region(58, 28, 1890, 1050)
]
ravine_regions = [
Region(32, 36, 1600, 1050)
]
quad_regions = [
Region(32, 36, 1600, 1050)
]
ouch_regions = [
Region(62, 36, 1600, 1050),
#Region(32, 36, 360, 305),
]
scanner = MapScanner(FloodedValley(), towers.SMALL[0], ouch_regions)
scanner.get_map_v3()
# get_placement_map(towers.MEDIUM[0])
# get_placement_map(towers.SpikeFactory)
# get_placement_map(towers.XL[0])
# for tower in towers.RECTANGLE:
# get_placement_map(tower)
```
#### File: bloons_auto/util/manual_place_data.py
```python
from maps import *
from tower import Tower, Point
from towers import *
from map import Map
import mouse
import keyboard
import pyscreeze
import time
import win32gui
def give_info(*args):
x, y = mouse.get_position()
img = pyscreeze.screenshot()
win = win32gui.GetActiveWindow()
keyboard.press_and_release(DartMonkey.keybind, .2)
print('===============================')
print('= Can Place Setup =')
print('===============================')
can_place_setup = DartMonkey.can_place(Point(x, y), img)
print(f'= Result - {can_place_setup}')
print('===============================')
DartMonkey.deselect()
time.sleep(1)
print('===============================')
print('= Can Place Not Setup =')
print('===============================')
can_place_new = DartMonkey.can_place(Point(x, y))
print(f'= Result - {can_place_new}')
print('===============================')
if __name__ == '__main__':
keyboard.on_press_key('control', give_info)
while True:
pass
```
#### File: bloons_auto/util/manual_scan.py
```python
from maps import *
from map import Map
import towers
from tower import Tower, Point
from PIL import Image
import mouse
import keyboard
from window_input import Window, Key
import collections
import time
from config import keybinds
Region = collections.namedtuple('Region', 'xi yi xf yf')
def can_place(cls, location, img, win):
win = Window()
while not mouse.get_position() == location:
mouse.move(location.x, location.y)
print(f'mouse moved to {location}')
if location.x > 1100:
measuring_point = Point(location.x - (cls.range - 2), location.y)
else:
measuring_point = Point(location.x + (cls.range - 2), location.y)
print(f'measuring point is at {measuring_point}')
before = img.getpixel(measuring_point)
print(f'before = {before}')
after = win.pixel(measuring_point.x, measuring_point.y)
print(f'after = {after}')
if (after[0] - before[0]) >= 8:
return False
else:
return True
if __name__ == '__main__':
time.sleep(5)
win = Window()
tower = towers.SMALL[0]
a_map = DarkCastle()
im = win.capture(((0, 0), (1920, 1080)))
im.save(f'../maps/{a_map.name}/{a_map.name}.png')
img = Image.new('RGB', (1920, 1080))
keyboard.press_and_release(keybinds.get(tower.name))
while True:
if mouse.get_position() == (0, 0):
break
x, y = mouse.get_position()
if can_place(tower, Point(x, y), im, win):
img.putpixel((x, y), (255, 255, 255))
img.save(f'../maps/{a_map.name}/placement/{"land" if not tower.aquatic else "sea"}/{tower.size}.png')
``` |
{
"source": "56kyle/cheat_engine_py",
"score": 3
} |
#### File: cheat_engine_py/src/frida_plz_help.py
```python
import sys
import frida
import keyboard
main_script = """
var ranges = Process.enumerateRangesSync({protection: 'r--', coalesce: true});
var range;
function processNext(){
range = ranges.pop();
if(!range){
// we are done
return;
}
// due to the lack of blacklisting in Frida, there will be
// always an extra match of the given pattern (if found) because
// the search is done also in the memory owned by Frida.
Memory.scan(range.base, range.size, '%s', {
onMatch: function(address, size){
console.log('[+] Pattern found at: ' + address.toString());
},
onError: function(reason){
console.log('[!] There was an error scanning memory');
},
onComplete: function(){
processNext();
}
});
}
processNext();
"""
class Injector:
def __init__(self):
self.addresses = []
def on_message(self, message, data):
self.addresses.append(data)
print("[%s] -> %s" % (message, data))
print(self.addresses)
def load(self, target_process, pattern):
session = frida.attach(target_process)
script = session.create_script("""
var ranges = Process.enumerateRangesSync({protection: 'r--', coalesce: true});
var range;
function processNext(){
range = ranges.pop();
if(!range){
// we are done
return;
}
// due to the lack of blacklisting in Frida, there will be
// always an extra match of the given pattern (if found) because
// the search is done also in the memory owned by Frida.
Memory.scan(range.base, range.size, '%s', {
onMatch: function(address, size){
console.log(address.toString());
},
onError: function(reason){
console.log('[!] There was an error scanning memory');
},
onComplete: function(){
processNext();
}
});
}
processNext();
""" % pattern)
script.on('message', self.on_message)
script.load()
while not keyboard.is_pressed('`'):
pass
session.detach()
if __name__ == '__main__':
try:
target_process = int(sys.argv[1])
except ValueError:
target_process = sys.argv[1]
except IndexError:
target_process = 'BloonsTD6.exe'
try:
pattern = sys.argv[2]
except IndexError:
pattern = '48 8B 43 28 F2 0F11 73'
inj = Injector()
inj.load(target_process, pattern)
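# Hedged usage note (not taken from the repo docs): frida's Memory.scan expects a
# space-separated hex byte pattern, so a typical invocation would be
#   python frida_plz_help.py BloonsTD6.exe "48 8B 43 28"
# The script keeps printing match addresses until the backtick key is pressed.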
```
#### File: cheat_engine_py/src/process.py
```python
import ctypes
import os
import psutil
import pymem
import re
import struct
import time
import win32api
import win32con
import frida
from pymem import Pymem
from ReadWriteMemory import ReadWriteMemory
rwm = ReadWriteMemory()
def test():
base = 0x7FFB697E33F0
static_offset = 0x020F2858
dumb_base = 0x1D753805980
pointer_static_address = base + static_offset
offsets = [0x190, 0x1E8, 0x10, 0x548]
btd6 = rwm.get_process_by_id(Pymem('BloonsTD6.exe').process_id)
btd6.open()
pointer = btd6.get_pointer(dumb_base, offsets=offsets)
pointer_value = btd6.read(pointer)
print(pointer_value)
def main():
btd6 = Pymem('BloonsTD6.exe')
find(btd6)
#bytes = b'\x40\x53\x48\x83\xEC\x30\x00\xF29\x74\x24\x20\x45\x33C0\x00\xF28\xF1\x48\x08\x0B\xD9\xE8\xC7FDFFFF\x48\x08\x0B\x43\x28\xF2\x00\xF11\x73\x28\x00\xF28\x74\x24\x20\x48\x89\x43\x30\x48\x83\xC4\x30\x05\x0B\xC3'
#string_form = '40 53 48 83 EC 30 00 F29 74 24 20 45 33 C0 00 F28 F1 48 08 0B D9 E8 C7FDFFFF 48 08 0B 43 28 F2 00 F11 73 28 00 F28 74 24 20 48 89 43 30 48 83 C4 30 05 0B C3'
#print(btd6.read_double(0x23a7455c3ed))
def find(btd6: Pymem):
hwnd = ctypes.c_void_p(btd6.process_handle)
game_assembly = pymem.process.module_from_name(btd6.process_handle, 'GameAssembly.dll')
print(hex(0x7FFF36A59760 - game_assembly.lpBaseOfDll))
def scan(btd6):
hwnd = ctypes.c_void_p(btd6.process_handle)
print(pymem.memory.read_bytes(hwnd, 0x1D72381E028, pymem.struct.calcsize('d')))
for i in range(btd6.base_address, btd6.base_address + btd6.process_base.SizeOfImage, 1024):
chunk = pymem.memory.read_bytes(hwnd, i, 1024)
match = re.match(b'\x90\x8a@', chunk)
if match:
print(match)
print(chunk)
for module in btd6.list_modules():
for i in range(module.lpBaseOfDll, module.lpBaseOfDll + module.SizeOfImage, 1024):
chunk = pymem.memory.read_bytes(hwnd, i, 1024)
match = re.match(rb'\x90\x8a', chunk)
if match:
print(i)
print(match)
def immediately_find(btd6: Pymem):
btd6.read_bytes
if __name__ == '__main__':
main()
``` |
{
"source": "56kyle/SQRL",
"score": 3
} |
#### File: SQRL/web_app/web_app.py
```python
from flask import Flask, render_template, url_for
app = Flask(__name__)
@app.route('/')
def home():
return render_template('home.html')  # template filename assumed; url_for returns a URL, not a template name
@app.route('/connections')
def connections():
return render_template('connections.html')  # template filename assumed
@app.route('/connections/<conn>', methods=['GET', 'POST'])
def a_conn(conn):
return render_template(conn + '.html')  # per-connection template naming is assumed
@app.route('/admin')
def admin():
return render_template('home.html')  # original pointed admin at the home template; filename assumed
if __name__ == '__main__':
app.run(debug=True)
``` |
{
"source": "5703863336/electriity",
"score": 2
} |
#### File: apps/users/utils.py
```python
import re
from django.contrib.auth.backends import ModelBackend
from itsdangerous import BadData
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from apps.users.models import User
from meiduo_mall import settings
class UsernameMobileModelBackend(ModelBackend):
def authenticate(self, request, username=None, password=None, **kwargs):
if re.match(r'1[3-9]\d{9}',username):
user = User.objects.get(mobile=username)
else:
user = User.objects.get(username=username)
if user and user.check_password(password):
return user
return None
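# A minimal sketch of how this backend could be enabled in the Django settings
# module (the dotted path below is an assumption, not taken from this project):
#
# AUTHENTICATION_BACKENDS = ['apps.users.utils.UsernameMobileModelBackend']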
def generic_active_email_url(id,email):
s = Serializer(secret_key=settings.SECRET_KEY,expires_in=3600)
data = {
'id':id,
'email':email,
}
serect_data = s.dumps(data)
return 'http://www.meiduo.site:8000/emailsactive/?token=%s'%serect_data.decode()
def check_active_token(token):
s = Serializer(secret_key=settings.SECRET_KEY,expires_in=3600)
try:
data = s.loads(token)
except BadData:
return None
else:
return data
def generic_access_token_url(username,mobile):
s = Serializer(secret_key=settings.SECRET_KEY,expires_in=3600)
data = {
'username':username,
'mobile':mobile,
}
serect_data = s.dumps(data)
return serect_data.decode()
def check_access_token_token(token):
s = Serializer(secret_key=settings.SECRET_KEY,expires_in=3600)
try:
data = s.loads(token)
except BadData:
return None
else:
return data
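# Hedged round-trip sketch (values are made up): both token helpers wrap
# itsdangerous' TimedJSONWebSignatureSerializer with a one-hour expiry, so a
# freshly issued access token should decode back to the original payload.
#
# token = generic_access_token_url('alice', '13800000000')
# assert check_access_token_token(token) == {'username': 'alice', 'mobile': '13800000000'}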
```
#### File: meiduo_mall/utils/storage.py
```python
from django.core.files.storage import Storage
class MyStorage(Storage):
def _open(self,name,mode='rb'):
pass
def _save(self,name,content,max_length=None):
pass
def url(self,name):
return "http://192.168.203.130:8888/" + name
``` |
{
"source": "572314705/meiduo_24",
"score": 2
} |
#### File: meiduo_admin/views/groups.py
```python
from django.contrib.auth.models import Group, Permission
from rest_framework.permissions import IsAdminUser
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from meiduo_admin.serializer.groups import GroupSerializer
from meiduo_admin.serializer.permission import PermissionSerializer
from meiduo_admin.utils import PageNum
class GroupView(ModelViewSet):
# specify the serializer
serializer_class = GroupSerializer
# specify the queryset
queryset = Group.objects.all()
# specify the pagination class
pagination_class = PageNum
# specify permissions
permission_classes = [IsAdminUser]
# return the permission table data
def simple(self, request):
# fetch all permissions
permission = Permission.objects.all()
# serialize and return
ser = PermissionSerializer(permission, many=True)
return Response(ser.data)
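# Hedged routing sketch (URL paths are assumptions, not from this project): the CRUD
# actions come from ModelViewSet; `simple` is not a router-mapped action, so it needs
# an explicit mapping, e.g.
#
# from django.conf.urls import url
# from rest_framework.routers import DefaultRouter
# router = DefaultRouter()
# router.register('permission/groups', GroupView, basename='groups')
# urlpatterns = [
#     url(r'^permission/simple/$', GroupView.as_view({'get': 'simple'})),
# ] + router.urls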
```
#### File: meiduo_admin/views/statistical.py
```python
from datetime import date, timedelta
from rest_framework import serializers
from rest_framework.permissions import IsAdminUser
from rest_framework.response import Response
from goods.models import GoodsVisitCount
from users.models import User
from rest_framework.views import APIView
class UserTotalCountView(APIView):
# total user count
def get(self, request):
# get the current date
now_date = date.today()
# count all non-staff users
count = User.objects.filter(is_staff=False).count()
# return the result
return Response({
'count': count,
'date': now_date,
})
class UserDayCountView(APIView):
# daily new-user count
def get(self, request):
# get the current date
now_date = date.today()
# count users who joined today
count = User.objects.filter(is_staff=False, date_joined__gte=now_date).count()
# return the result
return Response({
'count': count,
'date': now_date,
})
class UserActiveCountView(APIView):
# daily active user count
def get(self, request):
# get the current date
now_date = date.today()
# count users active today; last_login stores the last login time
count = User.objects.filter(is_staff=False, last_login__gte=now_date).count()
# return the result
return Response({
'count': count,
'date': now_date,
})
class UserOrderCountView(APIView):
# users who placed an order today
# specify permissions
permission_classes = [IsAdminUser]
def get(self, request):
# get the current date
now_date = date.today()
# users with an order today: related filter using the order table as the condition on the user table
# count = User.objects.filter(is_staff=False,orders__create_time__gte=now_date).count()
users = User.objects.filter(is_staff=False, orders__create_time__gte=now_date)
# number of distinct ordering users
user = set(users)
count = len(user)
# return the result
return Response({
'count': count,
'date': now_date,
})
class UserMonthCountView(APIView):
permission_classes = [IsAdminUser]
# new users per day over the past month
def get(self, request):
# today's date
now_date = date.today()
# the date one month ago
old_date = now_date - timedelta(30)
# daily new-user counts for the month
user_date = []
for i in range(31):
# the i-th day of the window
index_date = old_date + timedelta(i)
# the following day
next_date = old_date + timedelta(i + 1)
count = User.objects.filter(is_staff=False, date_joined__gte=index_date, date_joined__lt=next_date).count()
user_date.append({
'count': count,
'date': next_date,
})
return Response(user_date)
# class GoodsDayView(APIView):
# # daily visits per goods category
# def get(self,request):
# # today's date
# now_date = date.today()
# # get the queryset of visit records
# goods = GoodsVisitCount.objects.filter(date=now_date)
# date_list = []
# for good in goods:
# date_list.append({
# 'count':good.count,
# 'category':good.category.name,
# })
# return Response(date_list)
class GoodsSerializer(serializers.ModelSerializer):
# return the category name as a string
category=serializers.StringRelatedField(read_only=True)
class Meta:
model=GoodsVisitCount
fields=('count','category')
class GoodsDayView(APIView):
def get(self,request):
# get today's date
now_date=date.today()
# get today's per-category visit counts
data=GoodsVisitCount.objects.filter(date=now_date)
# serialize and return the counts
ser=GoodsSerializer(data,many=True)
return Response(ser.data)
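# Hedged URL wiring sketch for the views above (paths are assumptions, not taken
# from this project):
#
# from django.conf.urls import url
# urlpatterns = [
#     url(r'^statistical/total_count/$', UserTotalCountView.as_view()),
#     url(r'^statistical/day_increment/$', UserDayCountView.as_view()),
#     url(r'^statistical/day_active/$', UserActiveCountView.as_view()),
#     url(r'^statistical/day_orders/$', UserOrderCountView.as_view()),
#     url(r'^statistical/month_increment/$', UserMonthCountView.as_view()),
#     url(r'^statistical/goods_day_views/$', GoodsDayView.as_view()),
# ]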
``` |
{
"source": "573009114/configmanager",
"score": 2
} |
#### File: configmanager/home_application/query_model.py
```python
from home_application.models import *
import HTMLParser
class QuerySet:
"""
Insert (write) helper class
"""
def __init__(self,**kwargs):
self.ip=kwargs.get('ip')
self.server_room=kwargs.get('serverRoom')
self.service_type=kwargs.get('serviceType')
self.serverip=kwargs.get('serverip')
self.group_name=kwargs.get('groupName')
self.group_id=kwargs.get('groupId')
self.project_name=kwargs.get('projectName')
self.sid=kwargs.get('sid')
self.group_token=kwargs.get('groupToken')
self.domain=kwargs.get('domain')
def creat_group(self):
"""
Create a group
"""
result=Groups.objects.create(name=self.group_name,typed=self.service_type,
token=self.group_token,nginx_conf='')
result.save()
return result.id
def creat_ipaddr(self):
"""
Create IPs and associate them with a group
"""
list=[]
for i in self.serverip.split(','):
if i not in list:
list.append(i)
for ip in list:
getExist=IpAddr.objects.filter(ip=ip).values('id')
if getExist:
IpAddr.objects.filter(ip=ip).delete()
result=IpAddr.objects.create(ip=ip,gid_id=self.sid)
result.save()
return result.id
def creat_project(self):
result=Project.objects.create(name=self.project_name,domain=self.domain,
gid_id=self.group_id,
vhost_conf='',rewrite_conf='')
result.save()
return result.id
class QueryUpdate:
"""
Update data
"""
def __init__(self,**kwargs):
self.id=kwargs.get('id')
self.vhost=kwargs.get('vhost')
self.rewrite=kwargs.get('rewrite')
self.ngx=kwargs.get('nginx')
self.groupname=kwargs.get('group_name')
self.gid=kwargs.get('gid')
self.pid=kwargs.get('pid')
def update_project(self):
"""
Update the vhost configuration and the rewrite rules
"""
html_parser = HTMLParser.HTMLParser()
vhost=html_parser.unescape(self.vhost)
rewrite=html_parser.unescape(self.rewrite)
result=Project.objects.filter(id=self.id).update(vhost_conf=vhost,rewrite_conf=rewrite)
return result
def update_group(self):
"""
Update the main nginx configuration
"""
result=Groups.objects.filter(id=self.id).update(name=self.groupname,nginx_conf=self.ngx)
return result
def update_project_group_bond(self):
"""
Update the binding between a domain (project) and a group
"""
result=Project.objects.filter(id=self.pid).update(gid_id=self.gid)
return result
class QueryGet:
"""
Query helper class
"""
def select_group_all(self):
"""
Query group information
"""
result=Groups.objects.all().values('id','name','typed','token')
return result
def select_project_all_list(self,gid):
"""
Query the list of all projects in a group
"""
result=Project.objects.filter(gid_id=gid).values('id','name','vhost_conf','rewrite_conf',
'gid_id__name','gid_id__typed','gid_id__token',
'gid_id__nginx_conf','domain')
return result
def select_project_all(self):
result=Project.objects.all().values('id','domain')
return result
def select_project_conf(self,id):
"""
Query a project's configuration
"""
result=Project.objects.filter(id=id).values('vhost_conf','rewrite_conf','gid_id__token','domain')
return result
def select_ipaddr_group(self,gid):
"""
Query IP/group information by group id
"""
result=IpAddr.objects.filter(gid_id=gid).values('id','ip','gid_id__name','gid_id__nginx_conf','gid_id__token')
return result
def select_ipaddr_nginx(self,gid):
"""
Query the main nginx config by the group id the IP belongs to
"""
result=Groups.objects.filter(id=gid).values('nginx_conf','name')
return result
def select_group_nginx(self,gid):
"""
Look up the nginx config and token by group id
"""
result=Groups.objects.filter(id=gid).values('nginx_conf','token')
return result
def select_group_domain(self,domain):
"""
Look up the service group type by domain
"""
result=Project.objects.filter(domain=domain).values('gid_id__typed')
return result
class QueryDel:
def __init__(self,**kwargs):
self.id=kwargs.get('id')
def delete_project(self):
"""
Delete a project
"""
result=Project.objects.filter(id=self.id).delete()
return result
def delete_ip(self):
"""
Delete an IP
"""
result=IpAddr.objects.filter(id=self.id).delete()
return result
def delete_group(self):
"""
Delete a project group
"""
result=Groups.objects.filter(id=self.id).delete()
return result
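# Hedged usage sketch (all values are made up): a view would typically chain the
# helpers above, e.g. create a group, attach its server IPs, then read them back.
#
# sid = QuerySet(groupName='web', serviceType='nginx', groupToken='token123').creat_group()
# QuerySet(serverip='10.0.0.1,10.0.0.2', sid=sid).creat_ipaddr()
# servers = QueryGet().select_ipaddr_group(sid)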
``` |
{
"source": "573009114/webvirtmgr",
"score": 2
} |
#### File: webvirtmgr/create/remote.py
```python
import uuid
import time
from fabric import Connection
class ServerConn:
def __init__(self,**kwargs):
self.ip=kwargs.get('ip')
self.port=kwargs.get('port')
self.user=kwargs.get('user')
self.memorysize=kwargs.get('memorysize')
self.cpusize=kwargs.get('cpusize')
self.maxvcpus=kwargs.get('maxvcpus')
self.disksize=kwargs.get('disksize')
self.version=kwargs.get('version')
self.uuid=str(uuid.uuid4())
self.os=kwargs.get('os')
self.vmip=kwargs.get('vmip')
self.vmnetmask=kwargs.get('vmnetmask')
self.vmgateway=kwargs.get('vmgateway')
arch=kwargs.get('arch')
if arch =='64':
self.arch='x86_64'
else:
self.arch='i386'
self.vmname=self.os+self.version+'-'+self.arch+'-'+self.vmip
self.c = Connection(
host='%s' % self.ip,
user='%s' % self.user,
port = self.port,
connect_timeout=30,
connect_kwargs={"password": ")<PASSWORD>"})
def install_vm(self):
'''
Create the VM and its disk(s)
'''
self.c.run('virt-install --connect qemu:///system --name=%s \
--uuid=%s --ram=%s --vcpus %s,maxvcpus=%s\
--disk path=/export/kvm_images/volume-%s.qcow2,bus=virtio,size=30,format=qcow2 \
--accelerate --location=http://10.52.8.251/cobbler/ks_mirror/%s-%s-%s/ \
--extra-args "ks=http://10.52.8.251/cblr/svc/op/ks/profile/%s-%s-%s" \
--graphics vnc --network bridge=br0,model=virtio --force \
--autostart --noautoconsole \
' %(self.vmname,self.uuid,self.memorysize,self.cpusize,self.maxvcpus,self.uuid,self.os,self.version,self.arch,self.os,self.version,self.arch))
if (len(self.disksize) >= 2):
self.c.run('qemu-img create /export/kvm_images/volume-%s-1.qcow2 -f qcow2 %sG' % (self.uuid,self.disksize))
self.c.run('sed -i "/<\/disk>/a\ <disk type=\'file\' device=\'disk\'><driver name=\'qemu\' type=\'qcow2\' cache=\'none\'\/><source file=\'\/export\/kvm_images\/volume-%s-1.qcow2\'\/><target dev=\'vdb\' bus=\'virtio\'\/><\/disk>" /etc/libvirt/qemu/%s.xml' % (self.uuid,self.vmname))
msg='VM creation started'
return msg
def init_vm(self):
''' Initialise the VM configuration '''
xes_status=self.c.run("virsh list --all |grep %s |awk '{print $NF}'" %(self.vmname)).stdout.strip()
if xes_status != 'running':
self.c.run('sed -i "/<boot dev=\'hd\'\/>/a\ <smbios mode=\'sysinfo\'\/>" /etc/libvirt/qemu/%s.xml' % (self.vmname))
self.c.run('sed -i "/<\/vcpu>/a\ <sysinfo type=\'smbios\'><bios><entry name=\'vendor\'>Fenghua</entry></bios><system><entry name=\'manufacturer\'>XCloud</entry><entry name=\'product\'>XCloud ECS</entry><entry name=\'version\'>pc-i440fx-2.1</entry><entry name=\'serial\'>Not Specified</entry><entry name=\'family\'>Not Specified</entry></system></sysinfo>" /etc/libvirt/qemu/%s.xml' % (self.vmname))
self.c.run('sed -i "s/clock offset=\'utc\'/clock offset=\'localtime\'/" /etc/libvirt/qemu/%s.xml' % (self.vmname))
self.c.run('sed -i "/<\/features>/a\ <cpu mode=\'host-passthrough\'>" /etc/libvirt/qemu/%s.xml' %(self.vmname))
self.c.run('sed -i "/<cpu mode=\'host-passthrough\'>/a\ </cpu>" /etc/libvirt/qemu/%s.xml' %(self.vmname))
else:
msg = 'Initialisation failed, please try again later'
return msg
def template_vm(self):
'''
Create the template config files
'''
ipconfig='''
DEVICE="eth0"
BOOTPROTO="static"
ONBOOT="yes"
TYPE="Ethernet"
IPADDR={ip}
NETMASK={netmask}
GATEWAY={gateway}
'''.format(ip=self.vmip,netmask=self.vmnetmask,gateway=self.vmgateway)
self.c.run('mkdir -p /opt/kvm_install/config/%s/' % self.vmip)
self.c.run('echo "%s" >/opt/kvm_install/config/%s/ifcfg-eth0' %(ipconfig.replace(' ','').lstrip(),self.vmip))
rc_local='''\
#!/bin/sh
#
# This script will be executed *after* all the other init scripts.
# You can put your own initialization stuff in here if you don't
# want to do the full Sys V style init stuff.
touch /var/lock/subsys/local
curl http://10.52.8.252/chushihua.sh |bash
'''
self.c.run('echo "%s" >/opt/kvm_install/config/rc.local'%(rc_local.strip()))
def setting_vm(self):
''' Set the VM parameters '''
#virt_check= self.c.run('which virt-copy-in')
#print virt_check
#if virt_check != 0:
self.c.run('yum install -y libguestfs-tools-c')
self.c.run('virt-copy-in -d %s /opt/kvm_install/config/%s/ifcfg-eth0 /etc/sysconfig/network-scripts/' %(self.vmname,self.vmip))
self.c.run('chmod 755 /opt/kvm_install/config/rc.local && virt-copy-in -d %s /opt/kvm_install/config/rc.local /etc/rc.d/' % (self.vmname))
self.c.run('cp /etc/libvirt/qemu/%s.xml /opt/kvm_install/%s.xml' % (self.vmname,self.vmname))
self.c.run('virsh undefine %s' %(self.vmname))
self.c.run('virsh define /opt/kvm_install/%s.xml' %(self.vmname))
self.c.run('mv /opt/kvm_install/%s.xml /opt/kvm_install/config/%s/%s.xml' %(self.vmname,self.vmip,self.vmname))
def start_vm(self):
''' Start the VM '''
status=self.c.run("virsh list --all |grep %s |awk '{print $NF}'" %(self.vmname)).stdout.strip()
if status != 'running':
self.c.run('virsh start %s' % (self.vmname))
else:
msg='VM is already running'
return msg
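# Hedged usage sketch (host, credentials and VM parameters are made up; the
# kwargs mirror the keys read in __init__):
#
# conn = ServerConn(ip='10.52.8.10', port=22, user='root',
#                   memorysize='4096', cpusize='2', maxvcpus='4',
#                   disksize='100', os='centos', version='6.9', arch='64',
#                   vmip='10.52.8.100', vmnetmask='255.255.255.0',
#                   vmgateway='10.52.8.1')
# for step in (conn.install_vm, conn.init_vm, conn.template_vm,
#              conn.setting_vm, conn.start_vm):
#     step()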
``` |
{
"source": "5730289021-NN/ros_pkg_gen",
"score": 3
} |
#### File: src/package_generator/enhanced_object.py
```python
import inspect
from termcolor import colored
class EnhancedObject(object):
"""Enhanced Object with advanced log tool
Attributes:
name_ (str): name of the object
"""
def __init__(self, name="EnhancedObject"):
"""class constructor
Args:
name (str, optional): name of the object
"""
self.name_ = name
def log(self, text):
"""display log message with the class name in parameter
text the string to display
Args:
text (str): message to print
"""
print "[{}::{}] ".format(self.name_, inspect.stack()[1][3]) + text
def log_warn(self, text):
"""display warn message with the class name in parameter
text the string to display
Args:
text (str): warn message
"""
msg = "[{}::{}] ".format(self.name_, inspect.stack()[1][3]) + text
print colored(msg, 'yellow')
def log_error(self, text):
"""display warn message with the class name in parameter
text the string to display
Args:
text (str): error message
"""
msg = "[{}::{}] ".format(self.name_, inspect.stack()[1][3]) + text
print colored(msg, 'red')
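# Hedged usage sketch: other components in this package subclass EnhancedObject
# to get the "[Name::method]" prefixed log helpers, e.g.
#
# class MyTool(EnhancedObject):
#     def __init__(self):
#         super(MyTool, self).__init__("MyTool")
#     def run(self):
#         self.log("starting")          # prints "[MyTool::run] starting"
#         self.log_warn("heads up")     # same prefix, in yellow
#         self.log_error("it failed")   # same prefix, in red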
```
#### File: src/package_generator/generate_package.py
```python
import os
import datetime
import shutil
import sys
import rospkg
from package_generator.code_generator import CodeGenerator
from package_generator.jinja_generator import JinjaGenerator
from package_generator.package_xml_parser import PackageXMLParser
from package_generator.file_update_management import GeneratedFileAnalysis
from package_generator.enhanced_object import EnhancedObject
from package_generator.template_spec import TemplateSpec
from termcolor import colored
class PackageGenerator(EnhancedObject):
"""Handle the genration of a whole package
Attributes:
file_generator_ (CodeGenerator): custom generator
jinja_generator_ (JinjaGenerator): generator based on jinja
package_path_ (str): base location of the package to create
path_pkg_backup_ (str): if the package already existed, location of the package backup
spec_ (TemplateSpec): configuration of the template model
template_path_ (str): path to the template to use
xml_parser_ (PackageXMLParser): parser of the package description
"""
def __init__(self, name="PackageGenerator"):
"""Intialisation of the object
Args:
name (str, optional): Name of the component, for printing aspect
"""
# call super class constructor
super(PackageGenerator, self).__init__(name)
# path to the template to use
self.template_path_ = None
# base location of the package to create
self.package_path_ = None
# parser of the package description
self.xml_parser_ = None
# config parameter provide with the template
self.spec_ = None
# generic file generator
self.file_generator_ = None
# jinja-based generator
self.jinja_generator_ = None
# if the package already existed, location of the package backup
self.path_pkg_backup_ = None
def check_template_structure(self, template_path):
"""Check a provided path refers to a valid template structure
Args:
template_path (str): path to the package template
Returns:
Bool: True if basic sanity checks are successful
"""
if not os.path.exists(template_path):
msg = "Template path ({}) is incorrect ".format(template_path)
self.log_error(msg)
return False
if not os.path.isdir(template_path):
msg = "Template path ({}) is not a directory ".format(template_path)
self.log_error(msg)
return False
# check if minimum information is present.
details = """A template should contain:
* config/dictionary.yaml : the dictionary to be used
* config/functions.py [optional] : additional functions used in the generation
* config/generator.py [optional] : generator list (custom, jinja) default is custom
* template/* set of elements to be generated
Revise the template, and compare to examples
"""
is_ok = True
# check for directories
required_folders = ["config", "template"]
for item in required_folders:
req_folder = template_path + "/" + item
if not os.path.isdir(req_folder):
msg_err = "Error \n Expecting to have folder " + item
msg_err += " in " + template_path
self.log_error(msg_err)
is_ok = False
# check for files
required_files = ["config/dictionary.yaml"]
for item in required_files:
req_file = template_path + "/" + item
if not os.path.isfile(req_file):
msg_err = "Error.\n Expecting to have file " + item
msg_err += " in " + template_path
self.log_error(msg_err)
is_ok = False
if not is_ok:
self.log_error("\n{}".format(details))
return False
return True
def get_template_info(self):
"""Get information about the available package templates
Returns:
list: tuple with [absolute package path, list of package names]
"""
rospack = rospkg.RosPack()
path_template = rospack.get_path('package_generator_templates')
path_template += "/templates/"
template_names = os.listdir(path_template)
return [path_template, template_names]
def generate_package(self, package_desc, output_path):
"""launches the package generation
Args:
package_desc (str): xml file containing the package description
output_path (str): directory into which the package is created
Returns:
Bool: True if the operation succeeded
"""
if not os.path.exists(output_path):
msg_err = "Incorrect desired package path ({})".format(output_path)
self.log_error(msg_err)
return False
if not os.path.isdir(output_path):
msg_err = "Desired package path ({}) not a directory ".format(output_path)
self.log_error(msg_err)
return False
# Initialising needed components
# todo bring it to the constructor?
self.spec_ = TemplateSpec()
self.xml_parser_ = PackageXMLParser()
self.file_generator_ = CodeGenerator()
self.jinja_generator_ = JinjaGenerator()
# Start finding the template
template = self.xml_parser_.get_template(package_desc)
if template is None:
return False
# Locate template location
try:
[all_template_path, template_names] = self.get_template_info()
except rospkg.common.ResourceNotFound as error:
msg = "Package package_generator_templates not found in rospack"
self.log_error(msg)
self.log_error(error)
return False
except OSError as error:
msg = "No template dounf in package_generator_templates"
self.log_error(msg)
self.log_error(error)
return False
if template not in template_names:
msg = "Template requested: {} unknown".format(template)
self.log_error(msg)
msg = "Available templates: {}".format(template_names)
self.log_error(msg)
return False
template_path = all_template_path + "/" + template
# confirm this is a template...
if not self.check_template_structure(template_path):
msg = "Please revise template structure"
self.log_error(msg)
return False
# template localized, ready to work!
self.template_path_ = template_path
self.path_pkg_backup_ = None
dir_template_spec = self.template_path_ + "/config/"
if not self.spec_.load_spec(dir_template_spec):
self.log_error("Could not load the template spec")
return False
if not self.xml_parser_.set_template_spec(self.spec_):
msg_err = "Package spec not compatible with xml parser expectations"
self.log_error(msg_err)
return False
if not self.xml_parser_.load(package_desc):
msg_err = "Prb while parsing xml file {}".format(package_desc)
self.log_error(msg_err)
return False
# todo why only the custom generator is configured?
if not self.file_generator_.configure(self.xml_parser_, self.spec_):
return False
package_name = self.xml_parser_.get_package_spec()["name"]
self.package_path_ = output_path + "/" + package_name
if os.path.exists(self.package_path_):
self.log_warn("Package {} already exists".format(self.package_path_))
# moving preexisting code.
# generating dir name using date
now = datetime.datetime.now()
str_date = now.strftime("%Y_%m_%d_%H_%M_%S")
self.path_pkg_backup_ = "/tmp/{}_{}".format(os.path.basename(self.package_path_), str_date)
self.log_warn("Original package temporally stored in {}".format(self.path_pkg_backup_))
# TODO check if the move succeeded
shutil.move(self.package_path_, self.path_pkg_backup_)
else:
self.log("Package to be created in {}".format(self.package_path_))
os.makedirs(self.package_path_)
nb_comp = self.xml_parser_.get_number_comps()
self.log("Number of components defined: {}".format(nb_comp))
if not self.generate_content():
return False
# we store the model into the directory model
path = self.package_path_ + "/model"
if not os.path.exists(path):
os.makedirs(path)
path += "/" + package_name + ".ros_package"
if self.xml_parser_.is_dependency_complete_:
try:
if os.path.abspath(package_desc) == os.path.abspath(path):
# self.log_warn("Using generated model...")
stored_desc = self.path_pkg_backup_ + "/model/" + package_name + ".ros_package"
# self.log("check {} is absolute: {}".format(package_desc, os.path.abspath(package_desc)))
shutil.copyfile(stored_desc, path)
else:
shutil.copyfile(package_desc, path)
self.log("Package model saved in: {}".format(path))
except IOError as error:
self.log_error("Could not store model file: {}".format(error))
else:
# some dependencies were automatically added
# model needs to be rewritten
try:
self.xml_parser_.write_xml(path)
self.log("Package model updated & saved in: {}".format(path))
except IOError as error:
self.log_error("Could not store model file: {}".format(error))
is_ok = self.handle_maintained_files()
return is_ok
def generate_one_file(self, template_file, result_file, force_write):
"""Generate a template file, depending on the generators to be used
Args:
template_file (str): template filename
result_file (str): filename to store the result (unless is None)
force_write (bool): force the writing of empty files (if not, the file is not written)
Returns:
Bool: True on success
"""
generator = dict()
generator["custom"] = self.file_generator_
generator["jinja"] = self.jinja_generator_
if len(self.spec_.generators_) == 1:
return generator[self.spec_.generators_[0]].generate_disk_file(template_file,
result_file,
force_write)
# two generators are to be used
gen_one = generator[self.spec_.generators_[0]]
gen_two = generator[self.spec_.generators_[1]]
is_ok = gen_one.generate_disk_file(template_file)
if not is_ok:
return False
return gen_two.generate_open_file(gen_one.rendered_,
result_file,
force_write)
def check_template_file(self, template_file):
"""Generate a template file, depending on the generators to be used
Args:
template_file (str): template filename
result_file (str): filename to store the result (unless is None)
force_write (str): force the writting of empty files (if not, files is not written)
Returns:
Bool: True on success
"""
generator = dict()
generator["custom"] = self.file_generator_
generator["jinja"] = self.jinja_generator_
if len(self.spec_.generators_) == 1:
# self.log("Check with Generator {}".format(self.spec_.generators_[0]))
return generator[self.spec_.generators_[0]].check_template_file(template_file)
# two generators are to be used
gen_one = generator[self.spec_.generators_[0]]
gen_two = generator[self.spec_.generators_[1]]
# self.log("Check with Generator {}".format(self.spec_.generators_[0]))
is_ok = gen_one.check_template_file(template_file)
if not is_ok:
return False
# self.log("Check with Generator {}".format(self.spec_.generators_[1]))
if self.spec_.generators_[1] == "jinja":
return gen_two.check_template_file(gen_one.rendered_, is_filename=False)
return gen_two.check_template_file(template_file)
def write_generated_file(self, result_file):
"""Write a generated file
Args:
result_file (str): filename to store the file.
Returns:
Bool: True on success
"""
generator = dict()
generator["custom"] = self.file_generator_
generator["jinja"] = self.jinja_generator_
return generator[self.spec_.generators_[-1]].write_rendered_file(result_file)
def get_generated_file(self):
"""Get the generated files
Returns:
list: list of of each line of the generated file
"""
generator = dict()
generator["custom"] = self.file_generator_
generator["jinja"] = self.jinja_generator_
return generator[self.spec_.generators_[-1]].rendered_
def set_generated_file(self, l_file):
"""set the generated file
Args:
l_file (list): list of of each line of the generated file
"""
generator = dict()
generator["custom"] = self.file_generator_
generator["jinja"] = self.jinja_generator_
generator[self.spec_.generators_[-1]].rendered_ = l_file
def handle_maintained_files(self):
"""Restore file Developer requests to maintain
Assuming these patterns are defined in file .gen_maintain
Returns:
Bool: True on sucess
"""
# check for files to be maintained
if self.path_pkg_backup_ is None:
# package just created, no maintained file
return True
filename_rel = ".gen_maintain"
filename_abs = self.path_pkg_backup_ + "/" + filename_rel
if os.path.exists(filename_abs) and os.path.isfile(filename_abs):
self.log("Checking content to maintain after update")
else:
self.log("no maintained file defined in previous package version")
return True
with open(filename_abs) as open_file:
for line in open_file:
line = line.rstrip('\n')
if not line:
continue
path_abs = self.path_pkg_backup_ + "/" + line
if not os.path.exists(path_abs):
msg = "Content {} not found. Revise {} content"
self.log_error(msg.format(line, filename_abs))
continue
new_path = self.package_path_ + "/" + line
if os.path.isfile(path_abs):
try:
self.log("Restoring file {}".format(line))
# check if directories needs to be created
dirname = os.path.dirname(line)
# self.log("dirname is : {}".format(dirname))
if dirname:
path_abs_dir = self.package_path_ + "/" + dirname
if not os.path.isdir(path_abs_dir):
os.makedirs(path_abs_dir)
shutil.copyfile(path_abs, new_path)
except IOError as error:
msg = "Could not restore a file: {}"
self.log_error(msg.format(error))
continue
if os.path.isdir(path_abs):
try:
self.log("Restoring folder {}".format(line))
shutil.copytree(path_abs, new_path)
except IOError as error:
msg = "Could not restore folder: {}"
self.log_error(msg.format(error))
continue
self.log_error("Unkown statement {}".format(line))
# restoring the maintained content file
try:
self.log("Restoring file {}".format(filename_rel))
new_path = self.package_path_ + "/" + filename_rel
shutil.copyfile(filename_abs, new_path)
except IOError as error:
msg = "Could not restore file: {}"
self.log_error(msg.format(error))
return True
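# Illustrative .gen_maintain content (entries are made up): the file lives at the
# package root and lists one relative path per line; each entry names a file or a
# folder from the previous package version that should be restored after an update.
#
#   README.md
#   src/extra_script.py
#   config/extra_params.yaml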
def handle_status_and_advise(self, input_file, output_file, gen_flag):
"""Depending on the file generation process outcome,
Adjust file status and inform user
Args:
input_file (str): path of the template file used
output_file (str): path of the generated file
gen_flag (Bool): Success of the generation process
Returns:
Bool: True on success of the file generation
"""
if not gen_flag:
msg = "Prb while generating file {}".format(output_file)
self.log_error(msg)
return False
# so the file generation went well
if self.file_generator_.get_len_gen_file() == 0:
# Only file __init__.py is kept empty
if os.path.basename(output_file) != '__init__.py':
msg = "File {} not written since empty".format(output_file)
self.log_warn(msg)
self.log_warn("Check: {}".format(os.path.basename(output_file)))
return True
# file has content
file_status = os.stat(input_file)
os.chmod(output_file, file_status.st_mode)
# self.log("File {} handled".format(input_file))
self.log("File handled")
self.log("*********************************")
return True
def generate_content(self):
"""Generation and storage of all content
Returns:
Bool -- True on success
"""
# Extracting all components from the template
file_list = list()
dir_list = list()
path_root_template = self.template_path_ + "/template"
for (root, dirs, files) in os.walk(path_root_template):
# print "check {}: dir {}, files: {}".format(root, dirs, files)
if os.path.samefile(root, path_root_template):
for item in files:
file_list.append(item)
for item in dirs:
dir_list.append(item)
else:
rel_path = os.path.relpath(root, path_root_template)
for item in files:
file_list.append(rel_path + "/" + item)
for item in dirs:
dir_list.append(rel_path + "/" + item)
# Looking at final directory and filenames
package_name = self.xml_parser_.get_package_spec()["name"]
nb_comp = self.xml_parser_.get_number_comps()
comps_name = [self.xml_parser_.data_comp_[id_comp]["attributes"]["name"] for id_comp in range(nb_comp)]
self.log("Generating all folders")
tmp = list()
for item in dir_list:
item = item.replace('package_name', package_name)
if 'component' in item:
for one_name in comps_name:
tmp.append(item.replace('component', one_name))
else:
tmp.append(item)
dir_list = tmp
for item in dir_list:
path_folder = self.package_path_ + "/" + item
if not os.path.exists(path_folder):
os.makedirs(path_folder)
generation_list = list()
# File preparation: storing [template filename, new filename, comp id]
for item in file_list:
new_item = item.replace('package_name', package_name)
if 'component' in item:
for num, one_name in enumerate(comps_name):
generation_list.append([item,
new_item.replace('component',
one_name),
num])
else:
# todo if no component active I should not set one
generation_list.append([item, new_item, 0])
is_ok = True
# self.log("\nFiles generation plan: ")
for item in generation_list:
[template_file, result_file, comp_id] = item
self.log("{} --> {}".format(template_file, result_file))
if not self.xml_parser_.set_active_comp(comp_id):
return False
# reconfiguring the generator to adjust to the new active component
# todo configure already called in generate_package function. Check why
if not self.file_generator_.configure(self.xml_parser_, self.spec_):
return False
if not self.jinja_generator_.configure(self.xml_parser_, self.spec_):
return False
# Normally an empty file should not be written
# The exception is currently only for the special python file __init__.py
is_write_forced = (os.path.basename(result_file) == '__init__.py')
result_file = self.package_path_ + "/" + result_file
template_file = self.template_path_ + '/template/' + template_file
if self.path_pkg_backup_ is None:
self.log("Generating file {}".format(result_file))
is_ok = self.generate_one_file(template_file,
result_file,
is_write_forced)
if self.handle_status_and_advise(template_file,
result_file,
is_ok):
continue
else:
return False
# A previous version of the package exists
# Checking if an update is necessary
rel_path = os.path.relpath(result_file, package_name)
previous_filename = os.path.join(self.path_pkg_backup_, rel_path)
# Check 1: does this file exist?
if not os.path.isfile(previous_filename):
msg = "File {} not previously existing. Just write it"
self.log_warn(msg.format(rel_path))
is_ok = self.generate_one_file(template_file,
result_file,
is_write_forced)
if self.handle_status_and_advise(template_file,
result_file,
is_ok):
continue
else:
return False
# File already existing. Processing previous version
is_update_needed = False
file_analyzor = GeneratedFileAnalysis()
is_ok = file_analyzor.extract_protected_region(previous_filename)
if is_ok:
# Check if Developer inserted any contribution
if file_analyzor.extracted_areas_:
# contribution found, merge needed
is_update_needed = True
else:
self.log("No Developer contribution found")
else:
msg = "prb while extracting protected area in {}"
self.log_error(msg.format(previous_filename))
self.log_error("Previous file to be manually merged, sorry")
# now we know if an update is needed
if is_ok and is_update_needed:
# self.log("Updating file {} in {}".format(rel_path, output_item))
self.log("Updating file {}".format(rel_path))
is_ok = self.generate_one_file(template_file, None, None)
if not is_ok:
return False
# todo handle this in case jinja is involved.
l_gen = self.get_generated_file()
if not l_gen:
msg = "New generated file empty. No code maintained from previous version"
self.log_warn(msg)
# we write it if forced
if is_write_forced:
is_ok = self.write_generated_file(result_file)
else:
self.log("Merging with previous version")
l_gen = file_analyzor.update_file(l_gen)
self.set_generated_file(l_gen)
is_ok = self.write_generated_file(result_file)
if self.handle_status_and_advise(template_file,
result_file,
is_ok):
continue
else:
return False
# Although the file existed before, we do not have to maintain it
is_ok = self.generate_one_file(template_file, result_file, is_write_forced)
if self.handle_status_and_advise(template_file, result_file, is_ok):
continue
else:
return False
return True
def template_sanity_check(self, template):
"""Perform the package sanity check
Returns:
Bool: True on success
"""
# Locate template location
try:
[all_template_path, template_names] = self.get_template_info()
except rospkg.common.ResourceNotFound as error:
msg = "Package package_generator_templates not found in rospack"
self.log_error(msg)
self.log_error(error)
return False
except OSError as error:
msg = "No template found in package_generator_templates"
self.log_error(msg)
self.log_error(error)
return False
is_template_found = False
template_path = None
if template in template_names:
is_template_found = True
template_path = all_template_path + "/" + template
else:
self.log("Could not find template {} in {}".format(template, all_template_path))
# check if the template provided is a relative path, and not a package in the repo
if os.path.isabs(template):
self.log("Loading template from absolute path {}".format(template))
is_template_found = True
template_path = template
else:
# relative path ?
template_path = os.getcwd() + "/" + template
if os.path.isdir(template_path):
self.log("Loading template from path {}".format(template_path))
is_template_found = True
if not is_template_found:
msg = "Template requested: {} unknown".format(template)
self.log_error(msg)
msg = "Available templates: {}".format(template_names)
self.log_error(msg)
return False
# confirm this is a template...
if not self.check_template_structure(template_path):
msg = "Please revise template structure"
self.log_error(msg)
return False
# TODO list number of files in template
# Extracting all components from the template
file_list = list()
dir_list = list()
path_root_template = template_path + "/template"
for (root, dirs, files) in os.walk(path_root_template):
# print "check {}: dir {}, files: {}".format(root, dirs, files)
if os.path.samefile(root, path_root_template):
for item in files:
file_list.append(item)
for item in dirs:
dir_list.append(item)
else:
rel_path = os.path.relpath(root, path_root_template)
for item in files:
file_list.append(rel_path + "/" + item)
for item in dirs:
dir_list.append(rel_path + "/" + item)
# print ("Dirs: ")
# print("\n".join(dir_list))
# print("Files: ")
# print("\n".join(file_list))
# setting the needed component.
self.spec_ = TemplateSpec()
self.xml_parser_ = PackageXMLParser()
self.file_generator_ = CodeGenerator()
self.jinja_generator_ = JinjaGenerator()
dir_template_spec = template_path + "/config/"
if not self.spec_.load_spec(dir_template_spec):
self.log_error("Could not load the template spec")
return False
if not self.xml_parser_.set_template_spec(self.spec_):
msg_err = "Package spec not compatible with xml parser expectations"
self.log_error(msg_err)
return False
if not self.xml_parser_.set_empty_spec():
msg_err = "Failed generating empty spec"
self.log_error(msg_err)
return False
if not self.file_generator_.configure(self.xml_parser_, self.spec_):
return False
if not self.jinja_generator_.configure(self.xml_parser_, self.spec_):
return False
is_ok = True
for item in file_list:
self.log("Checking file: {}".format(item))
item_abs = path_root_template + "/" + item
is_ok = self.check_template_file(item_abs)
if not is_ok:
break
if is_ok:
self.log("No error detected")
else:
self.log_error("Revise the template")
return is_ok
# todo complete the usage description with available templates
# and with existing commands
USAGE_GEN = """ usage: generate_package [package_spec]
package_spec: xml description of the component(s) interface
"""
def main():
"""
@brief Entry point of the package.
Generates a package, given a specified structure
@return nothing
Returns:
int: negative value on error
"""
gen = PackageGenerator()
if len(sys.argv) != 2:
print colored("Wrong input parameters !", "red")
print colored(USAGE_GEN, "yellow")
try:
[_, template_names] = gen.get_template_info()
except rospkg.common.ResourceNotFound as error:
msg = "Package package_generator_templates not found in rospack"
print colored(msg, 'red')
print colored(error, 'red')
return -1
except OSError as error:
msg = "No template found in package_generator_templates"
print colored(msg, 'red')
print colored(error, 'red')
return -1
msg = "Available templates are: {}"
print colored(msg.format(template_names), 'yellow')
print "Bye bye"
return -1
package_spec = sys.argv[1]
path_current = os.getcwd()
if not gen.generate_package(package_spec, path_current):
print colored("Prb while generating the package", "red")
return -1
else:
print colored("Package generated", "green")
print "Bye bye"
return 0
USAGE_CHECK = """ usage: check_template package_template
package_template: name of the template to check
package_template: either a template defined in the package `package_generator_templates`,
or a path to a local one.
"""
def main_check():
"""
@brief Entry point of the package.
Check a template structure, as provided
Returns:
int: negative value on error
"""
gen = PackageGenerator()
if len(sys.argv) != 2:
print colored("Wrong input parameters !", "red")
print colored(USAGE_CHECK, "yellow")
try:
[_, template_names] = gen.get_template_info()
except rospkg.common.ResourceNotFound as error:
msg = "Package package_generator_templates not found in rospack"
print colored(msg, 'red')
print colored(error, 'red')
return -1
except OSError as error:
msg = "No template found in package_generator_templates"
print colored(msg, 'red')
print colored(error, 'red')
return -1
msg = "Available templates are: {}"
print colored(msg.format(template_names), 'yellow')
print "Bye bye"
return -1
template_name = sys.argv[1]
if not gen.template_sanity_check(template_name):
print colored("Issue detected in template", "red")
return -1
else:
print colored("No issue detected", "green")
print "Bye bye"
return 0
```
#### File: package_generator/tests/test_template_config.py
```python
import unittest
import os
import rospkg
from package_generator.template_spec import TemplateSpec
class GenerateDictTest(unittest.TestCase):
def setUp(self):
rospack = rospkg.RosPack()
node_path = rospack.get_path('package_generator_templates')
self.dir_template_spec = node_path + "/templates/cpp_node_update/config/"
self.spec = TemplateSpec()
# creating a temporary repo for trials
self.dir_name = "/tmp/test_template_spec"
if not os.path.exists(self.dir_name):
print "Creating the repo {}".format(self.dir_name)
os.makedirs(self.dir_name)
def test_undef_files(self):
self.assertFalse(self.spec.load_yaml_desc("unknown_file.yaml"))
self.assertFalse(self.spec.load_functions("unknown_file.py"))
def test_yaml_error(self):
bad_file = """bad_content: ["name", "author","""
filename = self.dir_name + "/bad_dico.yaml"
with open(filename, 'w') as open_file:
open_file.write(bad_file)
self.assertFalse(self.spec.load_yaml_desc(filename))
def test_correct_folder(self):
self.assertTrue(self.spec.load_spec(self.dir_template_spec))
if __name__ == '__main__':
print "test_dic_generator -- begin"
unittest.main()
print "test_dic_generator -- end"
``` |
{
"source": "573dev/pakdump",
"score": 3
} |
#### File: pakdump/pakdump/pakdump_base.py
```python
import argparse
import logging
from pathlib import Path
from typing import Optional, Sequence
from pakdump.dumper import PakDumper
from pakdump.filegen import DEFAULT_FILELIST_PATH, load_filelist
from pakdump.utils.ap import FullDirPath, FullPath
logger = logging.getLogger(__name__)
"""pakdump.pakdump_base log object"""
def main(args: Optional[Sequence[str]] = None) -> None:
"""
Dump data from GFDM V8 '.pak' files
Args:
args (Optional[Sequence[str]]) = None: Arguments List
"""
p_args = parse_args(args)
logging.basicConfig(
level=p_args.log_level,
format="[ %(asctime)s | %(levelname)-8s | %(name)s ] %(message)s",
)
# Create a dumper object, and dump the data
dumper = PakDumper(p_args.input, p_args.output, p_args.force)
if p_args.test_filepath != []:
for filepath in p_args.test_filepath:
exists = dumper.file_exists(filepath)
if exists:
print(f"Filepath exists: {filepath}")
else:
print(f"Filepath does not exist: {filepath}")
elif p_args.extract_filepath != []:
for filepath in p_args.extract_filepath:
_ = dumper.file_exists(filepath)
dumper.dump()
else:
# Gen all the files and dump
load_filelist(dumper, filepath=p_args.filelist_path)
# Dump only if this isn't a dry run
if not p_args.dryrun:
dumper.dump()
else:
found = len(
[1 for k in dumper.entries if dumper.entries[k].filename is not None]
)
print(f"Total files: {len(dumper.entries)}")
print(f"Files found: {found}")
print(f" Missing: {len(dumper.entries) - found}")
def parse_args(args: Optional[Sequence[str]] = None) -> argparse.Namespace:
"""
Parse the arguments
Args:
args (Optional[Sequence[str]]) = None: Arguments List
Returns:
:class:`argparse.Namespace`: Namespace object of all parsed arguments
Raises:
:class:`argparse.ArgumentTypeError`: If input path doesn't point to the `data`
dir
"""
parser = argparse.ArgumentParser(
description="Dump data from GFDM V8 '.pak' files",
)
parser.add_argument(
"-i",
"--input",
action=FullDirPath,
type=Path,
required=True,
help="Path to GFDM Data directory",
)
parser.add_argument(
"-o",
"--output",
action=FullDirPath,
type=Path,
default=Path.cwd(),
help="Path to output directory. Defaults to your current working directory",
)
parser.add_argument(
"-t",
"--test-filepath",
type=Path,
default=[],
nargs="+",
help="Test one or more file paths to see if they exist in the pack data",
)
parser.add_argument(
"-e",
"--extract-filepath",
type=Path,
default=[],
nargs="+",
help="Extract one or more file paths if they exist",
)
parser.add_argument(
"-f",
"--force",
action="store_true",
help="write out all extracted files even if they already exist",
)
parser.add_argument(
"-p",
"--filelist-path",
action=FullPath,
type=Path,
default=DEFAULT_FILELIST_PATH,
help="Path to list of files to extract",
)
parser.add_argument(
"-r",
"--dryrun",
action="store_true",
help="Perform a dry run. Don't actually extract any files",
)
logger_group_parent = parser.add_argument_group(
title="logging arguments",
description="Control what log level the log outputs (default: ERROR)",
)
logger_group = logger_group_parent.add_mutually_exclusive_group()
default_log_level = logging.ERROR
logger_group.add_argument(
"-d",
"--debug",
dest="log_level",
action="store_const",
const=logging.DEBUG,
default=default_log_level,
help="Set log level to DEBUG",
)
logger_group.add_argument(
"-v",
"--verbose",
dest="log_level",
action="store_const",
const=logging.INFO,
default=default_log_level,
help="Set log level to INFO",
)
parsed_args = parser.parse_args(args)
if parsed_args.input.parts[-1] != "data":
raise argparse.ArgumentTypeError("input must be in the GFDM `data` directory")
return parsed_args
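# Illustrative invocation (all paths here are hypothetical):
#   python -m pakdump.pakdump_base -i /games/gfdm/data -o /tmp/dump -v --dryrun
# This loads the default file list, reports how many entries resolve to a
# known filename, and skips extraction because of --dryrun. Note that the
# -i path must end in a directory literally named "data", otherwise
# parse_args raises an ArgumentTypeError.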
if __name__ == "__main__":
main()
``` |
{
"source": "5743rhodesm/PySabermetrics",
"score": 4
} |
#### File: 5743rhodesm/PySabermetrics/pullData.py
```python
__author__ = 'mrhodes'
# Import libraries
import requests
import json
import bs4 as bs
# Function to pull data
def get_games_summary_data(s_month, s_day, s_year, e_month = None, e_day = None, e_year = None):
"""
Returns a JSON file containing the summary of the games played on a specified day. Currently working on
allowing the user to specify a date range.
:param s_month: Start month
:param s_day: Start day
:param s_year: Start year
:param e_month: End month
:param e_day: End day
:param e_year: End year
:return:
"""
# Specify the URL to pull data from. Months and days must have leading zeroes if single digit
mlbam_url = 'http://gd2.mlb.com/components/game/mlb/year_' + str(s_year) + \
'/month_' + str("%02d" % s_month) +\
'/day_' + str("%02d" % s_day) + '/miniscoreboard.json'
# Get the request
mlbam_response = requests.get(mlbam_url)
return json.loads(mlbam_response.content)
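# Illustrative usage (the date is arbitrary and a network connection to
# gd2.mlb.com is required):
#   summary = get_games_summary_data(7, 4, 2015)
#   print(sorted(summary.keys()))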
``` |
{
"source": "574411705/FlowMonitoringAndAnalysis",
"score": 3
} |
#### File: src/models/unsupervised.py
```python
import pytorch_lightning as pl
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, random_split
class EncoderLSTM(pl.LightningModule):
def __init__(self, input_dim, hidden_dim, num_layers):
super(EncoderLSTM, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.lstm = nn.LSTM(input_dim,
hidden_dim,
num_layers,
batch_first=True)
self.activation = nn.ReLU()
# weight initialization
nn.init.xavier_uniform_(self.lstm.weight_ih_l0, gain=np.sqrt(2))
nn.init.xavier_uniform_(self.lstm.weight_hh_l0, gain=np.sqrt(2))
def forward(self, x):
pass
def cross_entropy_loss(self, y_hat, y):
return nn.CrossEntropyLoss()(y_hat, y)
def training_step(self, batch, batch_idx):
x, y = batch
y_hat = self(x)
return {"train_loss": self.cross_entropy_loss(y_hat, y)}
def validation_step(self, batch, batch_idx):
x, y = batch
y_hat = self(x)
return {"val_loss": self.cross_entropy_loss(y_hat, y)}
def validation_epoch_end(self, outputs):
avg_loss = torch.stack([x["val_loss"] for x in outputs]).mean()
tensorboard_logs = {"val_loss": avg_loss}
return {"val_loss": avg_loss, "log": tensorboard_logs}
def prepare_data(self):
# get_data
# create train_dataset
# create val_dataset
# possibly store them
# return train_DS, test_DS
pass
def train_dataloader(self):
# return Dataloader(get_dataset)
pass
def val_dataloader(self):
pass
def configure_optimizers(self):
optimizer = optim.Adam(self.parameters(), lr=1.0e-3)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3)
return [optimizer], [scheduler]
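# Sketch of how this module could be trained once forward() and the
# dataloaders are implemented (the sizes below are arbitrary placeholders):
#   model = EncoderLSTM(input_dim=64, hidden_dim=128, num_layers=2)
#   trainer = pl.Trainer(max_epochs=10)
#   trainer.fit(model)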
``` |
{
"source": "575-genetic-opt/MarbleOptimizer",
"score": 3
} |
#### File: 575-genetic-opt/MarbleOptimizer/marble_coaster.py
```python
import random
import math
import sys
import csv
import matplotlib.pyplot as plt
sys.setrecursionlimit(200)
gene_per_section = 2
maximum_length = 0
corresponding_cost = 0
num_div_x = 10 # NUMBER OF COLUMNS
num_div_y = 10 # NUMBER OF ROWS
num_div_z = 10 # NUMBER OF FLOORS
dz = .1 # HEIGHT OF PIECES (METERS)
dx = .1 # X LENGTH OF PIECES (METERS)
dy = .1 # Y LENGTH OF PIECES (METERS)
v_start = .1 # STARTING VELOCITY OF MARBLE (M/S)
mass = 0.00127 # MASS OF A MARBLE (KG)
lpl = .01 # percent of energy lost due to normal track use
g = 9.81 # GRAVITY (M/S^2)
parts = [{'cost': 1., 'length': dz, 'loss': lpl*dz, 'cool': 90, 'e1': 'top', 'e2': 'bottom'},
{'cost': 3., 'length': (dz/2 + dy/2)*.8, 'loss': lpl*(dz/2 + dy/2)*.8, 'cool': 50, 'e1': 'top', 'e2': 1},
{'cost': 1., 'length': dy, 'loss': lpl*dy, 'cool': 70, 'e1': 1, 'e2': 3},
{'cost': 3., 'length': (dy/2 + dx/2)*.8, 'loss': lpl*(dy/2 + dx/2)*.8, 'cool': 50, 'e1': 1, 'e2': 4},
{'cost': 3., 'length': (dy/2 + dz/2)*.8, 'loss': lpl*(dy/2 + dz/2)*.8, 'cool': 50, 'e1': 1, 'e2': 'bottom'}]
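# A design is a flat list holding gene_per_section (= 2) genes per grid cell:
# the first gene is a 1-based index into `parts`, the second is a rotation
# value (1..4 in good_design() below). The two genes of cell k therefore live
# at design[(k - 1) * gene_per_section] and design[(k - 1) * gene_per_section + 1],
# which is how calc_length() and traverse_length() read them back out.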
def calc_length(design):
# RECORD MAX POSSIBLE PATH
max_path = 0
# LIST OF GLOBAL PIECE NUMBERS IN BEST DESIGN
max_loc_list = []
max_part_list = []
max_rot_list = []
max_en_his = []
# LOOP OVER PIECES ON TOP
for i in range(0, num_div_x*num_div_y*gene_per_section, gene_per_section):
for in_dir in ['e1', 'e2']:
# SET STARTING ENERGIES
up = num_div_y * dy * mass * g # POTENTIAL ENERGY
uk = .5 * mass * math.pow(v_start, 2) # KINETIC ENERGY
# SET STARTING DESIGN VALUES
loc_his = []
part_his = []
rot_his = []
en_his = []
# GET LOCATION ID OF PIECE
piece_number = int(i / gene_per_section) + 1 + int(num_div_x*num_div_y*(num_div_z-1))
length = traverse_length(design, piece_number, loc_his, part_his, rot_his, en_his, in_dir, uk, up)
if length > max_path:
max_path = length
max_loc_list = loc_his
max_part_list = part_his
max_rot_list = rot_his
max_en_his = en_his
return max_path, max_loc_list, max_part_list, max_rot_list, max_en_his
def locate_piece(piece_number):
floor = int(math.ceil(float(piece_number)/float(num_div_x*num_div_y))) % num_div_z
if floor == 0:
floor = num_div_z
local_num = piece_number % (num_div_x * num_div_y) # THIS IS THE PIECE NUMBER LOCAL TO IT'S OWN FLOOR
row = int(math.ceil(float(local_num) / float(num_div_x))) % num_div_y
if row == 0:
row = num_div_y
col = piece_number % num_div_x
if col == 0:
col = num_div_x
return row, col, floor
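# Illustrative example for the 10x10x10 grid configured above:
#   locate_piece(905) -> (row, col, floor) = (1, 5, 10)
#   locate_piece(1000) -> (10, 10, 10)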
def inlet_outlet(design, g_piece_id, in_direction):
# GET PIECE INFORMATION
piece_gene_index = (g_piece_id - 1) * gene_per_section
piece_rot = design[piece_gene_index + 1]
piece_num = design[piece_gene_index] - 1
piece_type = parts[piece_num]
if in_direction == 'e1':
# GET OUTLET FACE ID
outlet = piece_type['e2']
if type(piece_type['e2']) == int:
outlet = (piece_type['e2'] + piece_rot) % 4
if outlet == 0:
outlet = 4
# GET INLET FACE ID
inlet = piece_type['e1']
if type(piece_type['e1']) == int:
inlet = (piece_type['e1'] + piece_rot) % 4
if inlet == 0:
inlet = 4
else:
# GET OUTLET FACE ID
outlet = piece_type['e1']
if type(piece_type['e1']) == int:
outlet = (piece_type['e1'] + piece_rot) % 4
if outlet == 0:
outlet = 4
# GET INLET FACE ID
inlet = piece_type['e2']
if type(piece_type['e2']) == int:
inlet = (piece_type['e2'] + piece_rot) % 4
if inlet == 0:
inlet = 4
# GET ROW AND COLUMN ID OF PIECE
row, col, floor = locate_piece(g_piece_id)
location = (row, col, floor)
out_neighbor = 0
in_neighbor = 0
if outlet == 'bottom':
if floor > 1:
out_neighbor = g_piece_id - num_div_x * num_div_y
elif outlet == 'top':
if floor < num_div_z:
out_neighbor = g_piece_id + num_div_x * num_div_y
if inlet == 'top':
if floor < num_div_z:
in_neighbor = g_piece_id + num_div_x * num_div_y
elif inlet == 'bottom':
if floor > 1:
in_neighbor = g_piece_id - num_div_x * num_div_y
if row == 1: # ON BOTTOM FACE
if col == 1: # ON LEFT FACE
if outlet == 1: # INTERIOR FACE
out_neighbor = g_piece_id + num_div_x
elif outlet == 2:
out_neighbor = g_piece_id + 1 # INTERIOR FACE
if inlet == 1: # INTERIOR FACE
in_neighbor = g_piece_id + num_div_x
elif inlet == 2:
in_neighbor = g_piece_id + 1 # INTERIOR FACE
elif col == num_div_x: # ON RIGHT FACE
if outlet == 1: # INTERIOR FACE
out_neighbor = g_piece_id + num_div_x
elif outlet == 4: # INTERIOR FACE
out_neighbor = g_piece_id - 1
if inlet == 1: # INTERIOR FACE
in_neighbor = g_piece_id + num_div_x
elif inlet == 4: # INTERIOR FACE
in_neighbor = g_piece_id - 1
else: # MIDDLE COLUMN
if outlet == 1: # INTERIOR FACE
out_neighbor = g_piece_id + num_div_x
elif outlet == 2: # INTERIOR FACE
out_neighbor = g_piece_id + 1
elif outlet == 4: # INTERIOR FACE
out_neighbor = g_piece_id - 1
if inlet == 1: # INTERIOR FACE
in_neighbor = g_piece_id + num_div_x
elif inlet == 2: # INTERIOR FACE
in_neighbor = g_piece_id + 1
elif inlet == 4: # INTERIOR FACE
in_neighbor = g_piece_id - 1
elif row == num_div_y: # ON TOP FACE
if col == 1: # ON LEFT FACE
if outlet == 3: # INTERIOR FACE
out_neighbor = g_piece_id - num_div_x
elif outlet == 2: # INTERIOR FACE
out_neighbor = g_piece_id + 1
if inlet == 3: # INTERIOR FACE
in_neighbor = g_piece_id - num_div_x
elif inlet == 2: # INTERIOR FACE
in_neighbor = g_piece_id + 1
elif col == num_div_x: # ON RIGHT FACE
if outlet == 3: # FACING INTERIOR
out_neighbor = g_piece_id - num_div_x
elif outlet == 4: # INTERIOR FACE
out_neighbor = g_piece_id - 1
if inlet == 3: # FACING INTERIOR
in_neighbor = g_piece_id - num_div_x
elif inlet == 4: # INTERIOR FACE
in_neighbor = g_piece_id - 1
else: # MIDDLE COLUMN
if outlet == 3: # FACING INTERIOR
out_neighbor = g_piece_id - num_div_x
elif outlet == 4: # INTERIOR FACE
out_neighbor = g_piece_id - 1
elif outlet == 2: # INTERIOR FACE
out_neighbor = g_piece_id + 1
if inlet == 3: # FACING INTERIOR
in_neighbor = g_piece_id - num_div_x
elif inlet == 4: # INTERIOR FACE
in_neighbor = g_piece_id - 1
elif inlet == 2: # INTERIOR FACE
in_neighbor = g_piece_id + 1
else: # IN MIDDLE ROW
if col == 1: # ON LEFT FACE
if outlet == 1: # FACING INTERIOR
out_neighbor = g_piece_id + num_div_x
elif outlet == 3: # INTERIOR FACE
out_neighbor = g_piece_id - num_div_x
elif outlet == 2:
out_neighbor = g_piece_id + 1
if inlet == 1: # FACING INTERIOR
in_neighbor = g_piece_id + num_div_x
elif inlet == 3: # INTERIOR FACE
in_neighbor = g_piece_id - num_div_x
elif inlet == 2:
in_neighbor = g_piece_id + 1
elif col == num_div_x: # ON RIGHT FACE
if outlet == 1: # FACING INTERIOR
out_neighbor = g_piece_id + num_div_x # FACING INTERIOR
elif outlet == 3:
out_neighbor = g_piece_id - num_div_x # FACING INTERIOR
elif outlet == 4: # FACING INTERIOR
out_neighbor = g_piece_id - 1
if inlet == 1: # FACING INTERIOR
in_neighbor = g_piece_id + num_div_x # FACING INTERIOR
elif inlet == 3:
in_neighbor = g_piece_id - num_div_x # FACING INTERIOR
elif inlet == 4: # FACING INTERIOR
in_neighbor = g_piece_id - 1
else: # INTERIOR PIECE
if outlet == 1: # FACING EXTERIOR
out_neighbor = g_piece_id + num_div_x
elif outlet == 3: # FACING INTERIOR
out_neighbor = g_piece_id - num_div_x
elif outlet == 2: # FACING INTERIOR
out_neighbor = g_piece_id + 1
elif outlet == 4: # FACING INTERIOR
out_neighbor = g_piece_id - 1
if inlet == 1: # FACING EXTERIOR
in_neighbor = g_piece_id + num_div_x
elif inlet == 3: # FACING INTERIOR
in_neighbor = g_piece_id - num_div_x
elif inlet == 2: # FACING INTERIOR
in_neighbor = g_piece_id + 1
elif inlet == 4: # FACING INTERIOR
in_neighbor = g_piece_id - 1
# CHECK IF NEIGHBORS HAVE ALIGNING FACES
if in_neighbor > 0:
in_gene_index = (in_neighbor - 1) * gene_per_section
in_piece_rot = design[in_gene_index + 1]
in_piece_num = design[in_gene_index] - 1
in_piece_type = parts[in_piece_num]
in_n_e1 = in_piece_type['e1']
if type(in_piece_type['e1']) == int:
in_n_e1 = (in_piece_type['e1'] + in_piece_rot) % 4
if in_n_e1 == 0:
in_n_e1 = 4
in_n_e2 = in_piece_type['e2']
if type(in_piece_type['e2']) == int:
in_n_e2 = (in_piece_type['e2'] + in_piece_rot) % 4
if in_n_e2 == 0:
in_n_e2 = 4
in_neighbor_1 = in_neighbor
in_neighbor_2 = in_neighbor
if inlet == 'top':
if in_n_e1 != 'bottom':
in_neighbor_1 = 0
if in_n_e2 != 'bottom':
in_neighbor_2 = 0
elif inlet == 'bottom':
if in_n_e1 != 'top':
in_neighbor_1 = 0
if in_n_e2 != 'top':
in_neighbor_2 = 0
else:
if type(in_n_e1) is int:
if math.fabs(inlet - in_n_e1) != 2:
in_neighbor_1 = 0
else:
in_neighbor_1 = -1
if type(in_n_e2) is int:
if math.fabs(inlet - in_n_e2) != 2:
in_neighbor_2 = 0
else:
in_neighbor_2 = 0
if in_neighbor_2 == 0:
in_neighbor = in_neighbor_1
in_direction = 'e1'
if out_neighbor:
out_gene_index = (out_neighbor - 1) * gene_per_section
out_piece_rot = design[out_gene_index + 1]
out_piece_num = design[out_gene_index] - 1
out_piece_type = parts[out_piece_num]
out_n_e1 = out_piece_type['e1']
if type(out_piece_type['e1']) == int:
out_n_e1 = (out_piece_type['e1'] + out_piece_rot) % 4
if out_n_e1 == 0:
out_n_e1 = 4
out_n_e2 = out_piece_type['e2']
if type(out_piece_type['e2']) == int:
out_n_e2 = (out_piece_type['e2'] + out_piece_rot) % 4
if out_n_e2 == 0:
out_n_e2 = 4
out_neighbor_1 = out_neighbor
out_neighbor_2 = out_neighbor
if outlet == 'bottom':
if out_n_e1 != 'top':
out_neighbor_1 = 0
if out_n_e2 != 'top':
out_neighbor_2 = 0
elif outlet == 'top':
if out_n_e1 != 'bottom':
out_neighbor_1 = 0
if out_n_e2 != 'bottom':
out_neighbor_2 = 0
else:
if type(out_n_e1) is int:
if math.fabs(outlet - out_n_e1) != 2:
out_neighbor_1 = 0
else:
out_neighbor_1 = 0
if type(out_n_e2) is int:
if math.fabs(outlet - out_n_e2) != 2:
out_neighbor_2 = 0
else:
out_neighbor_2 = 0
if out_neighbor_2 > 0:
in_direction = 'e2'
else:
out_neighbor = out_neighbor_1
return in_neighbor, out_neighbor, location, in_direction
def traverse_length(design, g_piece_id, path_his, part_his, rot_his, en_his, in_dir, uk, up):
piece_gene_index = (g_piece_id - 1) * gene_per_section
piece_rot = design[piece_gene_index + 1]
piece_num = design[piece_gene_index] - 1
piece_type = parts[piece_num]
length = piece_type['length']
friction_loss = piece_type['loss']*uk
in_neighbor, out_neighbor, location, in_dir = inlet_outlet(design, g_piece_id, in_dir)
# Subtract friction loss from kinetic energy
uk -= friction_loss
# Subtract potential energy losses
if len(path_his) > 0:
uk -= (location[2] - path_his[-1][2]) * dz * g * mass
up += (location[2] - path_his[-1][2]) * dz * g * mass
if out_neighbor > 0 and location not in path_his and uk > 0:
path_his.append(location)
part_his.append(piece_num)
rot_his.append(piece_rot)
en_his.append(uk)
length += traverse_length(design, out_neighbor, path_his, part_his, rot_his, en_his, in_dir, uk, up)
else:
path_his.append(location)
part_his.append(piece_num)
rot_his.append(piece_rot)
en_his.append(uk)
return length
def calc_cost(design):
cost_sum = 0
for part_num in design:
cost_sum += parts[part_num]['cost']
return cost_sum
def solve_track(design):
max_path, max_loc_list, max_part_list, max_rot_list, en_his = calc_length(design)
cost = calc_cost(max_part_list)
return max_path, cost, max_part_list, max_loc_list, max_rot_list, en_his
def good_design():
design = [1]*(num_div_x*num_div_y*num_div_z*gene_per_section)
p_gene = num_div_x*num_div_y*(num_div_z-1)*gene_per_section
design[p_gene] = 1
design[p_gene + 1] = 1
p_gene -= num_div_x*num_div_y*gene_per_section
design[p_gene] = 2
design[p_gene + 1] = 1
p_gene += 1*gene_per_section
design[p_gene] = 3
design[p_gene + 1] = 1
p_gene += 1*gene_per_section
design[p_gene] = 3
design[p_gene + 1] = 3
p_gene += 1*gene_per_section
design[p_gene] = 3
design[p_gene + 1] = 1
p_gene += 1*gene_per_section
design[p_gene] = 4
design[p_gene + 1] = 4
p_gene += num_div_x*gene_per_section
design[p_gene] = 3
design[p_gene + 1] = 4
p_gene += num_div_x*gene_per_section
design[p_gene] = 3
design[p_gene + 1] = 2
p_gene += num_div_x*gene_per_section
design[p_gene] = 5
design[p_gene + 1] = 2
p_gene -= num_div_x*num_div_y*gene_per_section
design[p_gene] = 1
design[p_gene + 1] = 2
p_gene -= num_div_x * num_div_y*gene_per_section
design[p_gene] = 1
design[p_gene + 1] = 3
p_gene -= num_div_x * num_div_y*gene_per_section
design[p_gene] = 1
design[p_gene + 1] = 4
p_gene -= num_div_x * num_div_y*gene_per_section
design[p_gene] = 2
design[p_gene + 1] = 3
p_gene -= 1*gene_per_section
design[p_gene] = 2
design[p_gene + 1] = 1
p_gene += num_div_x*num_div_y*gene_per_section
design[p_gene] = 5
design[p_gene + 1] = 3
p_gene -= 1*gene_per_section
design[p_gene] = 5
design[p_gene + 1] = 1
p_gene -= num_div_x * num_div_y*gene_per_section
design[p_gene] = 1
design[p_gene + 1] = 1
return design
if __name__ == '__main__':
gen_design = good_design()
t_length, t_cost, p_list, p_loc, r_list, e_list = solve_track(gen_design)
speeds = []
for e in e_list:
speeds.append(math.sqrt(e/(.5*mass)))
print(t_length)
print(t_cost)
print(p_list)
print(p_loc)
print(r_list)
print(len(p_list))
print(e_list)
print(speeds)
with open('good_design.csv','w') as csvfile:
spamwriter = csv.writer(csvfile)
spamwriter.writerow(p_list)
location_list = []
for location in p_loc:
for item in location:
location_list.append(item)
spamwriter.writerow(location_list)
spamwriter.writerow(r_list)
```
#### File: 575-genetic-opt/MarbleOptimizer/MultiObjectiveOptimizer.py
```python
import numpy as np
import copy
class MultiObjectiveOptimizer:
def __init__(self, x, fitness, n_generations=5, population_size=5, n_objectives=2, constraint=None,
generation_func=None, constraint_func_input="design", crossover_type="uniform",
mutation_type="unifom", use_genocide=False):
"""Initialize the optimizer
x: list of dictionaries defining the design variables for the optimizer, where
type is either 'integer' or 'continuous' and min and max are the bounds of the
variable. It should follow the form [{'type': ('integer' or 'continuous'),
'bounds': (min, max)}], with a new dict for each x value
fitness: function that accepts x array in the same order as given in the x input, and returns the fitness of
each design.
n_generations: the number of generations to carry out
population_size: the number of members in the population at each generation
n_objectives: the number of objectives that are returned by the fitness function
constraint: function that accepts x array in the same order as given in x and returns a vector that contains
the constraint violation state of each constraint where >0 suggests that the constraint is violated and <=0
suggests that the constraint is satisfied.
generation_func: function that is called with each passing generation. calls the generation function with
the current population of the algorithm. This is useful for things like plotting the generation at each
iteration of the algorithm, but could be used for other fun things
constraint_func_input: string, either "design" or "full" if design the constraint function is called with just
the design, if full the constraint function is called with a 1d array where the 0th value is the maximin fitness
of the design, the next n values are the n objectives returned by the fitness function, and the last n values
are the n values that define the design. It is useful to define full if the fitness and constraints are based
on the same values and they are somewhat expensive to obtain.
crossover_type: string, either "transitional" or "uniform". If transitional, a transitional
crossover is performed; if uniform, a uniform crossover is performed, meaning the values are simply swapped
mutation_type: string, either "uniform" or "dynamic" this defines the type of mutation that will occur.
dynamic mutation changes more at the beginning of the optimization and less toward the end. Uniform mutation
randomly selects a value in the bounds.
"""
self.num_generations = n_generations
self.num_population = np.trunc(population_size)
if self.num_population % 2 != 0:
self.num_population += 1
self.num_population = int(self.num_population)
self.num_x = len(x)
self.fitness_func = fitness
self.constraint_func = constraint
self.x_def = x
self.num_objectives = n_objectives
self.generation_call = generation_func
self.constraint_func_input = constraint_func_input.lower()
self.crossover_type = crossover_type.lower()
self.mutation_type = mutation_type.lower()
self.use_genocide = use_genocide
self.tournament_size = 2
self.crossover_prob = 0.5
self.mutation_prob = 0.13
self.cross_eta = 0.5
self.mutation_beta = 0.13
self.no_diversity_counter = 0
self.population = self.generate_population()
return
def generate_population(self):
# initialize population array
# stored in the form ([fitness, objective values, x1, x2, x3, ..., xn])
population = np.zeros((self.num_population, self.num_x + self.num_objectives + 1))
for i in range(self.num_population):
x_new = np.zeros(self.num_x)
for j, val in enumerate(self.x_def):
if val['type'] == 'integer':
x_new[j] = np.random.randint(val['bounds'][0], val['bounds'][1] + 1)
elif val['type'] == 'continuous':
x_new[j] = np.random.uniform(val['bounds'][0], val['bounds'][1])
# else:
# print("error unknown variable type")
population[i, 1:self.num_objectives + 1] = self.fitness_func(x_new)
population[i, self.num_objectives + 1:] = x_new
population = sort_array_by_col(population, 0)
population = self.calc_fitness(population)
population = self.apply_constraints(population)
if self.generation_call is not None:
self.generation_call(population)
return population
def select_parents(self):
"""select the parents from the current population of the optimization"""
# randomize the order of the population
np.random.shuffle(self.population)
# preallocate the array to hold the parents
parents = np.zeros_like(self.population)
# self.population = self.calc_maximin(self.population)
# select random people from the population for tournament selection
for row in range(parents.shape[0]):
rand_indicies = np.random.randint(parents.shape[0], size=self.tournament_size)
competitors = self.population[rand_indicies]
sorted_competitors = sort_array_by_col(competitors, 0)
parents[row, :] = sorted_competitors[0]
return parents
def calc_fitness(self, population):
"""calculates the maximin values for each point of the supplied population. Uses a bunch of information stored
in the class, so probably not a good idea to pass in random populations, unless you know what you're doing."""
if self.num_objectives > 1:
for idx in range(population.shape[0]):
# get function values
fVals = copy.deepcopy(population[:, 1:self.num_objectives+1])
for col_idx in range(self.num_objectives):
test_val = fVals[idx, col_idx]
fVals[:, col_idx] = -(fVals[:, col_idx] - test_val)
fVals = np.delete(fVals, idx, 0)
population[idx, 0] = np.nanmax(np.nanmin(fVals, 1))
else:
population[:, 0] = population[:, 1]
return population
def check_diversity(self, population):
"""
Checks that a population is diverse. If it is not the best members of the population are kept and the rest
of the population is randomly regenerated.
:param population: the population whose diversity is to be checked.
:return: the new population
"""
fitness_vals = population[:, 0]
num_unique = len(np.unique(fitness_vals))
if num_unique == 1:
self.no_diversity_counter += 1
if self.no_diversity_counter > 20:
self.no_diversity_counter = 0
num_to_save = int(self.num_population * 0.2)
# sort the designs
population = sort_array_by_col(population, 0)
# save the best 10% of designs
best_designs = copy.deepcopy(population[0:num_to_save, :])
# regenerate the population
new_population = self.generate_population()
# replace the best designs
new_population[0:num_to_save, :] = best_designs
np.random.shuffle(new_population)
return new_population
else:
self.no_diversity_counter = 0
return population
def apply_constraints(self, population):
""" applies appropriate penalties for designs that are outside of the permissible bounds. Requires that a
constraint function be defined that returns the constraints in a row vector"""
if self.constraint_func is None:
return population
max_fitness = np.nanmax(population[:, 0])
for row in population:
design = row[self.num_objectives+1:]
if self.constraint_func_input == "design":
cons = self.constraint_func(design)
elif self.constraint_func_input == "full":
cons = self.constraint_func(row)
# else:
# print("unrecognized constraint input term check constraint_func_input argument at initialization")
# quit()
if np.max(cons) > 0:
row[0] = max_fitness + np.max(cons)
return population
def find_min(self):
"""
Runs the optimizer.
:return: the population at the end of the optimization routine.
"""
generations = []
for generation in range(self.num_generations):
# select reproducing parents
parents = self.select_parents()
children = np.zeros_like(parents)
# for each set of parents
for idx in range(0, parents.shape[0], 2):
child1 = copy.deepcopy(parents[idx, self.num_objectives+1:])
child2 = copy.deepcopy(parents[idx+1, self.num_objectives+1:])
for x_idx in range(len(child1)):
crossover = np.random.random()
mutate1 = np.random.random()
mutate2 = np.random.random()
if crossover < self.crossover_prob:
# perform the crossover
if self.crossover_type == "transitional":
self.crossover_transitional(child1, child2, x_idx)
else:
self.crossover_uniform(child1, child2, x_idx)
if mutate1 < self.mutation_prob:
if self.mutation_type == "dynamic":
child1 = self.mutate_dynamic(child1, x_idx, self.x_def[x_idx]['bounds'],
self.x_def[x_idx]['type'], generation)
else:
child1 = self.mutate_uniform(child1, x_idx, self.x_def[x_idx]['bounds'],
self.x_def[x_idx]['type'])
if mutate2 < self.mutation_prob:
if self.mutation_type == "dynamic":
child2 = self.mutate_dynamic(child2, x_idx, self.x_def[x_idx]['bounds'],
self.x_def[x_idx]['type'], generation)
else:
child2 = self.mutate_uniform(child2, x_idx, self.x_def[x_idx]['bounds'],
self.x_def[x_idx]['type'])
# put the children into the children array
child1_fitness = self.fitness_func(child1)
child2_fitness = self.fitness_func(child2)
children[idx, 1:self.num_objectives+1] = child1_fitness
children[idx, self.num_objectives+1:] = child1
children[idx + 1, 1:self.num_objectives+1] = child2_fitness
children[idx + 1, self.num_objectives+1:] = child2
# perform elitism
population_pool = np.append(parents, children, axis=0)
population_pool = self.calc_fitness(population_pool)
population_pool = self.apply_constraints(population_pool)
sorted_pool = sort_array_by_col(population_pool, 0)
self.population = sorted_pool[0:self.num_population]
# generations.append(copy.deepcopy(self.population))
# print(generation)
if self.use_genocide:
self.population = self.check_diversity(self.population)
if self.generation_call is not None:
self.generation_call(self.population)
return self.population
def crossover_transitional(self, child1, child2, x_idx):
"""
Performs a transitional crossover from uniform to blend between the two children at the specifiec index.
The children must be numpy arrays or some other object that is mutable so that the changes persist
:param child1: Child 1 to be crossed a numpy array of the values
:param child2: Child 2 to be crossed a numpy array of the values
:param x_idx: Index location for the crossover.
:return: none
"""
r = np.random.random()
if r <= 0.5:
a = ((2 * r) ** (1 / self.cross_eta)) / 2
else:
a = 1 - ((2 - 2 * r) ** (1 / self.cross_eta)) / 2
child1_val = child1[x_idx]
child2_val = child2[x_idx]
y1 = a * child1_val + (1 - a) * child2_val
y2 = (1 - a) * child1_val + a * child2_val
child1[x_idx] = y1
child2[x_idx] = y2
# truncate the values if needed
if self.x_def[x_idx]['type'] == 'integer':
child1[x_idx] = int(np.round(child1[x_idx]))
child2[x_idx] = int(np.round(child2[x_idx]))
def crossover_uniform(self, child1, child2, x_idx):
"""
Performs a uniform crossover between the children at the specified x index
:param child1: design 1 to be crossed
:param child2: design 2 to be crossed
:param x_idx: Index location for the crossover
:return: none
"""
r = np.random.random()
if r <= 0.5:
temp = child1[x_idx]
child1[x_idx] = child2[x_idx]
child2[x_idx] = temp
def mutate_dynamic(self, child, idx, bounds, type, generation):
"""
Perform a dynamic mutation on the child at the specified location
meaning that the mutation amount decreases as the generation number increases
:param child: array of values that represent the child to be mutated
:param idx: the index where the mutation should occur
:param bounds: tuple of the bounds of the value that is being mutated
:param type: Type of the variable
:param generation: generation number for the mutation.
:return: the mutated child
"""
min = bounds[0]
max = bounds[1]
r = np.random.uniform(min, max)
alpha = (1 - (generation) / self.num_generations) ** self.mutation_beta
if r <= child[idx]:
child[idx] = min + (r - min) ** alpha * (child[idx] - min) ** (1 - alpha)
else:
child[idx] = max - (max - r) ** alpha * (max - child[idx]) ** (1 - alpha)
if type == 'integer':
child[idx] = np.round(child[idx])
return child
def mutate_uniform(self, child, idx, bounds, type):
"""
Perform a mutation of the child at the specified index. The mutation is uniform meaning that it will
be randomly assigned in the bounds of the value.
:param child: the design that is to be mutated
:param idx: the index of the value where the mutation should occur
:param bounds: tuple of the bounds on the variable that we are mutating
:param type: the type of the variable
:return: the mutated child
"""
min = bounds[0]
max = bounds[1]
if type == 'integer':
child[idx] = np.random.randint(min, max+1)
else:
child[idx] = np.random.uniform(min, max+0.000001)
return child
def sort_array_by_col(array, sort_col=0):
"""take an array and sort it by the specified column"""
new_array = array[np.argsort(array[:, sort_col])]
return new_array
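# Illustrative usage (the two objectives and the bounds below are made up):
#   x_def = [{'type': 'continuous', 'bounds': (0.0, 1.0)},
#            {'type': 'integer', 'bounds': (1, 10)}]
#   def fitness(x):
#       return [x[0] ** 2, (x[1] - 5) ** 2]
#   opt = MultiObjectiveOptimizer(x_def, fitness, n_generations=50,
#                                 population_size=20, n_objectives=2)
#   final_population = opt.find_min()
# Each row of final_population is [maximin fitness, objective values..., x values...].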
``` |
{
"source": "577145213/bolg",
"score": 2
} |
#### File: blog/home/views.py
```python
from django.core.paginator import EmptyPage
from django.shortcuts import render, redirect
from django.urls import reverse
from django.views import View
from home.models import ArticleCategory, Article
from django.http.response import HttpResponseNotFound
# Create your views here.
# Home page category view
class IndexView(View):
def get(self, request):
# Get all categories
categories = ArticleCategory.objects.all()
# Get the category id the user clicked on
cat_id = request.GET.get('cat_id', 1)
# Look up the category by id
try:
category = ArticleCategory.objects.get(id=cat_id)
except ArticleCategory.DoesNotExist:
return HttpResponseNotFound('No such category')
# Get the pagination parameters
page_num = request.GET.get('page_num', 1)
page_size = request.GET.get('page_size', 10)
# Query the articles of this category
articles = Article.objects.filter(category=category)
# Create the paginator
from django.core.paginator import Paginator
paginator = Paginator(articles, per_page=page_size)
# Paginate
try:
page_articles = paginator.page(page_num)
except EmptyPage:
return HttpResponseNotFound('empty page')
# Total number of pages
total_page = paginator.num_pages
# Assemble the context for the template
context = {
'categories': categories,
'category': category,
'articles': page_articles,
'page_size': page_size,
'total_page': total_page,
'page_num': page_num,
}
return render(request, 'index.html', context=context)
from home.models import Comment
# Section 8.1: article detail view
class DetailView(View):
def get(self,request):
# Get the article id
id = request.GET.get('id')
# Query the article by its id
try:
article = Article.objects.get(id=id)
except Article.DoesNotExist:
return render(request,'404.html')
else:
# Increment the view count
article.total_view += 1
article.save()
# Query all categories
categories = ArticleCategory.objects.all()
# Query the ten most-viewed articles
hot_articles = Article.objects.order_by('-total_view')[:10]
page_size=request.GET.get('page_size',10)
page_num=request.GET.get('page_num',1)
comments=Comment.objects.filter(article=article).order_by('-created')
total_count=comments.count()
from django.core.paginator import Paginator,EmptyPage
paginator=Paginator(comments,page_size)
try:
page_comments=paginator.page(page_num)
except EmptyPage:
return HttpResponseNotFound('empty page')
total_page=paginator.num_pages
# Assemble the template context
context = {
'categories': categories,
'category': article.category,
'article': article,
'hot_articles':hot_articles,
'total_count':total_count,
'comments':page_comments,
'page_size':page_size,
'total_page':total_page,
'page_num':page_num
}
return render(request, 'detail.html',context=context)
def post(self,request):
# Get the current user
user = request.user
# Check whether the user is logged in
if user and user.is_authenticated:
# A logged-in user may submit the comment form
# Get the comment data
id = request.POST.get('id')
content = request.POST.get('content')
try:  # Verify that the article exists
article = Article.objects.get(id=id)
except Article.DoesNotExist:
return HttpResponseNotFound('No such article')
# Save the comment
Comment.objects.create(
content=content,
article=article,
user=user
)
article.comments_count += 1  # Update the comment count
article.save()
path=reverse('home:detail')+'?id={}'.format(article.id)
return redirect(path)
else:
return render(reverse('users:login'))
``` |
{
"source": "5783354/awokado",
"score": 2
} |
#### File: awokado/awokado/auth.py
```python
from typing import Dict
from sqlalchemy.sql import Selectable
from awokado.exceptions import (
CreateResourceForbidden,
ReadResourceForbidden,
UpdateResourceForbidden,
DeleteResourceForbidden,
)
class BaseAuth:
"""
Declarative access control for a resource.
Each of the CREATE, READ, UPDATE and DELETE dicts maps a role name to a
boolean saying whether that role may perform the operation, for example:
CREATE = {
'ADMIN': True,
'GUEST': False,
}
"""
CREATE: Dict[str, bool] = {
# 'ROLE NAME': Boolean value,
# Example:
# 'ADMIN': True,
# 'GUEST': False,
}
READ: Dict[str, bool] = {}
UPDATE: Dict[str, bool] = {}
DELETE: Dict[str, bool] = {}
@classmethod
def can_create(cls, session, payload, user_id: int, skip_exc=False):
if skip_exc:
return False
raise CreateResourceForbidden()
@classmethod
def can_read(cls, ctx, query: Selectable, skip_exc=False):
if skip_exc:
return False
raise ReadResourceForbidden()
@classmethod
def can_update(cls, session, user_id: int, obj_ids: list, skip_exc=False):
if skip_exc:
return False
raise UpdateResourceForbidden()
@classmethod
def can_delete(cls, session, user_id: int, obj_ids: list, skip_exc=False):
if skip_exc:
return False
raise DeleteResourceForbidden()
@classmethod
def _get_read_query(cls, ctx, query: Selectable):
return query
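# Illustrative subclass (role names are made up): the class-level dicts
# declare per-role permissions, while the can_* hooks implement the actual
# checks and raise the matching *Forbidden exception when access is denied.
#   class ProjectAuth(BaseAuth):
#       CREATE = {"ADMIN": True, "GUEST": False}
#       READ = {"ADMIN": True, "GUEST": True}
#       UPDATE = {"ADMIN": True, "GUEST": False}
#       DELETE = {"ADMIN": True, "GUEST": False}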
```
#### File: awokado/documentation/generate.py
```python
from typing import Dict, List, Union, FrozenSet, Type, Sequence
from awokado.consts import BULK_UPDATE, BULK_CREATE
from awokado.documentation.routes import Route
from awokado.documentation.utils import parse_doc_string
from awokado.resource import BaseResource
from awokado.utils import log
INNER_METHOD_MAP = {
"patch": "update",
"post": "create",
"get": "on_get", # not taking here
"delete": "delete",
}
class APIDocs:
def __init__(
self,
routes: list,
models: dict,
servers: Union[str, Sequence[str]] = "/",
public_resources: FrozenSet[Type[BaseResource]] = frozenset(),
):
self.doc = models
self.routes = routes
self.public_resources = public_resources
self.servers = [servers] if isinstance(servers, str) else servers
def collect_path_info(self, r: Route):
resource_ext = r.uri.split("/")[2]
resource_name = resource_ext.replace("-", "_")
route_method = r.method_name.lower()
is_patch = route_method == "patch"
is_post = route_method == "post"
is_bulk = False
route_w_id = "{resource_id}" in r.uri
if is_patch:
is_bulk = BULK_UPDATE in r.resource.Meta.methods and not route_w_id
if not route_w_id and is_patch and not is_bulk:
log.info(
f"skip bulk patch with resource id "
f"{r.uri}:{resource_name}:{route_method}"
)
return None, None
if route_w_id and is_post:
log.info(
f"skip post with resource id "
f"{r.uri}:{resource_name}:{route_method}"
)
return None, None
path_info = self.get_common_info(r, resource_name)
path_info[r.method_name]["parameters"] = self.get_parameters(
route_method, route_w_id
)
if is_patch or is_post:
rb = {
"required": True,
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/" + resource_name
}
}
},
}
path_info[r.method_name]["requestBody"] = rb
if type(r.resource) not in self.public_resources:
security: List[dict] = []
path_info[r.method_name]["security"] = security
security.append({"bearerAuth": []})
return r.uri, path_info
@staticmethod
def get_common_info(r: Route, name: str):
inner_method = INNER_METHOD_MAP[r.method_name.lower()]
method_doc_str = getattr(r.resource, inner_method).__doc__
if not method_doc_str:
method_doc_str = r.method.__doc__
summary, description = parse_doc_string(method_doc_str)
if r.method_name == "post" and BULK_CREATE in r.resource.Meta.methods:
summary += ". Supports bulk create"
if description:
description = description.replace("---", "")
path_info = {
r.method_name: {
"tags": [r.uri.replace("{resource_id}", "")],
"summary": summary,
"description": description,
"responses": {
200: {
"description": "OK",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/" + name
}
}
},
}
},
}
}
return path_info
@staticmethod
def get_parameters(route_method, route_with_id):
parameters = []
if route_with_id:
parameters.append(
{
"name": "resource_id",
"in": "path",
"description": "ID of resource",
"required": True,
"schema": {"type": "integer", "format": "int64"},
}
)
else:
if route_method == "get":
params_description = (
("limit", "The numbers of items to return", "integer"),
(
"offset",
"The number of items to skip before "
"starting to collect the result set",
"integer",
),
(
"sort",
"Sorting fields. Use '-' before field name "
"to use DESC sorting example: full_name,-birth_date",
"string",
),
)
for name, description, q_type in params_description:
parameters.append(
{
"name": f"{name}",
"in": "query",
"description": f"{description}",
"schema": {"type": f"{q_type}"},
}
)
return parameters
def set_components(self):
security_schemes = {
"bearerAuth": {
"type": "http",
"scheme": "bearer",
"bearerFormat": "JWT",
}
}
if "components" in self.doc:
self.doc["components"]["securitySchemes"] = security_schemes
else:
self.doc["components"] = {"securitySchemes": security_schemes}
def run(self):
for r in self.routes:
route_key, route_result = self.collect_path_info(r)
if not route_result:
continue
if route_key not in self.doc["paths"]:
self.doc["paths"][route_key] = route_result
else:
self.doc["paths"][route_key].update(route_result)
self.doc["servers"] = [{"url": server} for server in self.servers]
self.set_components()
return self.doc
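# Illustrative usage (the base spec dict is a stub; in practice it would come
# from the schema-generation step that pre-fills "paths" and "components"):
#   routes = collect_routes(falcon_app)  # from awokado.documentation.routes
#   api_docs = APIDocs(routes, models={"paths": {}}, servers="/api/")
#   openapi_dict = api_docs.run()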
```
#### File: awokado/documentation/routes.py
```python
import functools
from dataclasses import dataclass
from typing import TYPE_CHECKING, Callable, List
from falcon import API
from falcon.routing.compiled import CompiledRouterNode
if TYPE_CHECKING:  # imported only for type checking to avoid a circular import
from awokado.resource import BaseResource
METHOD_MAPPING = {
"POST": {"create", "bulk_create"},
"GET": {"read"},
"PATCH": {"update", "bulk_update"},
"DELETE": {"delete"},
}
@dataclass
class Route:
uri: str
var_name: str
method_name: str
method: Callable
resource: "BaseResource"
@dataclass
class RawRoute:
uri: str
var_name: str
method_map: dict
resource: "BaseResource"
def __init__(self, route: CompiledRouterNode):
self.uri = route.uri_template
self.var_name = route.var_name
self.method_map = route.method_map
self.resource = route.resource
def get_routes(raw_routes: List[RawRoute]) -> List[Route]:
routes = []
for raw_route in raw_routes:
if not raw_route.method_map:
continue
for method_name, method in raw_route.method_map.items():
if raw_route.resource.Meta.skip_doc:
continue
valid_methods = METHOD_MAPPING.get(method_name, ())
if not any(
m in raw_route.resource.Meta.methods for m in valid_methods
):
continue
if not isinstance(method, functools.partial):
routes.append(
Route(
uri=raw_route.uri,
var_name=raw_route.var_name,
method_name=method_name.lower(),
method=method,
resource=raw_route.resource,
)
)
return routes
def collect_routes(falcon_app: API) -> List[Route]:
raw_routes = []
for root_r in falcon_app._router._roots:
for children_r in root_r.children:
raw_routes.append(RawRoute(children_r))
for a in children_r.children:
for a2 in a.children or ():
raw_routes.append(RawRoute(a2))
raw_routes.append(RawRoute(a))
return get_routes(raw_routes)
```
#### File: awokado/awokado/meta.py
```python
from dataclasses import dataclass, field
from typing import Union, Tuple, Any, Type, Optional
from sqlalchemy.sql import Join
from awokado.auth import BaseAuth
@dataclass
class ResourceMeta:
"""
:param name: Resource name. Used for two resources connection by relation
:param model: represents sqlalchemy model or cte
:param methods: tuple of methods you want to allow
:param auth: awokado `BaseAuth <#awokado.auth.BaseAuth>`_ class for embedding authentication logic
:param skip_doc: set true if you don't need to add the resource to documentation
:param disable_total: set true if you don't need to know the total number of returned objects in read-requests
:param id_field: you can specify your own primary key if it's different from the 'id' field. Used in reading requests (GET)
:param select_from: provide a data source here if your resource uses another model's fields (for example sa.outerjoin(FirstModel, SecondModel, FirstModel.id == SecondModel.first_model_id))
"""
name: str = "base_resource"
methods: Tuple[str, ...] = ()
model: Any = None # type: ignore
auth: Optional[Type[BaseAuth]] = None
skip_doc: bool = False
disable_total: bool = False
id_field: str = "id"
select_from: Optional[Join] = None
def __post_init__(self):
if not self.methods and self.name not in ("base_resource", "_resource"):
raise Exception(
f"ResourceMeta[{self.name}] object must have methods"
)
@classmethod
def from_class(cls, t: Type):
return cls(
**{k: v for k, v in t.__dict__.items() if not k.startswith("_")}
)
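# Illustrative usage (the Book model is hypothetical; CREATE and UPDATE are
# awokado.consts method constants):
#   class BookResource(BaseResource):
#       Meta = ResourceMeta(name="book", model=Book, methods=(CREATE, UPDATE))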
```
#### File: awokado/awokado/resource.py
```python
import json
import sys
from typing import Dict, List, Optional, Tuple, Union, Type
import bulky
import falcon
import sqlalchemy as sa
from cached_property import cached_property
from clavis import Transaction
from marshmallow import utils, Schema, ValidationError
from sqlalchemy.orm import Session
from awokado.consts import (
AUDIT_DEBUG,
BULK_CREATE,
BULK_UPDATE,
CREATE,
DELETE,
OP_IN,
UPDATE,
)
from awokado.custom_fields import ToMany, ToOne
from awokado.db import DATABASE_URL, persistent_engine
from awokado.exceptions import BadRequest, MethodNotAllowed
from awokado.filter_parser import FilterItem
from awokado.meta import ResourceMeta
from awokado.request import ReadContext
from awokado.response import Response
from awokado.utils import (
get_ids_from_payload,
get_read_params,
get_id_field,
M2MMapping,
AuthBundle,
)
class BaseResource(Schema):
RESOURCES: Dict[str, Type["BaseResource"]] = {}
Response = Response
Meta: ResourceMeta
def __new__(cls: Type["BaseResource"]):
if cls.Meta.name not in ("base_resource", "_resource"):
cls.RESOURCES[cls.Meta.name] = cls
return super().__new__(cls)
def __init__(self):
super().__init__()
cls_name = self.__class__.__name__
class_meta = getattr(self, "Meta", None)
if isinstance(class_meta, type):
print(
"resourse.Meta as class will be deprecated soon",
file=sys.stderr,
)
self.Meta = ResourceMeta.from_class(class_meta)
if not isinstance(self.Meta, ResourceMeta):
raise Exception(
f"{cls_name}.Meta must inherit from ResourceMeta class"
)
if not self.Meta.name or self.Meta.name in (
"base_resource",
"_resource",
):
raise Exception(f"{cls_name} must have Meta.name")
resource_id_name = get_id_field(self, name_only=True, skip_exc=True)
if resource_id_name:
resource_id_field = self.fields.get(resource_id_name)
resource_id_field = resource_id_field.metadata.get("model_field")
if not resource_id_field:
raise Exception(
f"Resource's {cls_name} id field {resource_id_name}"
f" must have model_field."
)
###########################################################################
# Marshmallow validation methods
###########################################################################
def validate_create_request(self, req: falcon.Request, is_bulk=False):
methods = self.Meta.methods
payload = json.load(req.bounded_stream)
if isinstance(payload.get(self.Meta.name), list):
request_method = BULK_CREATE
is_bulk = True
else:
request_method = CREATE
if request_method not in methods:
raise MethodNotAllowed()
data = payload.get(self.Meta.name)
if not data:
raise BadRequest(
f"Invalid schema, resource name is missing at the top level. "
f"Your POST request has to look like: "
f'{{"{self.Meta.name}": [{{"field_name": "field_value"}}] '
f'or {{"field_name": "field_value"}} }}'
)
try:
deserialized = self.load(data, many=is_bulk)
except ValidationError as exc:
raise BadRequest(exc.messages)
req.stream = {self.Meta.name: deserialized}
def validate_update_request(self, req: falcon.Request):
methods = self.Meta.methods
if UPDATE not in methods and BULK_UPDATE not in methods:
raise MethodNotAllowed()
payload = json.load(req.bounded_stream)
data = payload.get(self.Meta.name)
try:
deserialized = self.load(data, partial=True, many=True)
except ValidationError as exc:
raise BadRequest(exc.messages)
req.stream = {self.Meta.name: deserialized}
###########################################################################
# Falcon methods
###########################################################################
def on_patch(
self, req: falcon.Request, resp: falcon.Response, *args, **kwargs
):
"""
Falcon method. PATCH-request entry point.
Opens a database transaction and authenticates the request
(if an auth class is set in the `resource <#awokado.meta.ResourceMeta>`_ Meta),
then runs the update method.
"""
with Transaction(DATABASE_URL, engine=persistent_engine) as t:
session = t.session
user_id, _ = self.auth(session, req, resp)
self.validate_update_request(req)
payload = req.stream
data = payload[self.Meta.name]
ids = get_ids_from_payload(self.Meta.model, data)
if self.Meta.auth:
self.Meta.auth.can_update(session, user_id, ids)
self.audit_log(
f"Update: {self.Meta.name}", payload, user_id, AUDIT_DEBUG
)
result = self.update(session, payload, user_id)
resp.body = json.dumps(result, default=str)
def on_post(self, req: falcon.Request, resp: falcon.Response):
"""
Falcon method. POST-request entry point.
Opens a database transaction and authenticates the request
(if an auth class is set in the `resource <#awokado.meta.ResourceMeta>`_ Meta),
then runs the create method.
"""
with Transaction(DATABASE_URL, engine=persistent_engine) as t:
session = t.session
user_id, token = self.auth(session, req, resp)
self.validate_create_request(req)
payload = req.stream
if self.Meta.auth:
self.Meta.auth.can_create(
session, payload, user_id, skip_exc=False
)
self.audit_log(
f"Create: {self.Meta.name}", payload, user_id, AUDIT_DEBUG
)
result = self.create(session, payload, user_id)
resp.body = json.dumps(result, default=str)
def on_get(
self,
req: falcon.Request,
resp: falcon.Response,
resource_id: int = None,
):
"""
Falcon method. GET-request entry point.
Opens a database transaction and authenticates the request
(if an auth class is set in the `resource <#awokado.meta.ResourceMeta>`_ Meta),
then runs the read_handler method, which is responsible for
the whole read workflow.
"""
with Transaction(DATABASE_URL, engine=persistent_engine) as t:
session = t.session
user_id, token = self.auth(session, req, resp)
params = get_read_params(req, self.__class__)
params["resource_id"] = resource_id
result = self.read_handler(session, user_id, **params)
resp.body = json.dumps(result, default=str)
def on_delete(
self,
req: falcon.Request,
resp: falcon.Response,
resource_id: int = None,
):
"""
Falcon method. DELETE-request entry point.
Opens a database transaction and authenticates the request
(if an auth class is set in the `resource <#awokado.meta.ResourceMeta>`_ Meta),
then runs the delete method.
"""
with Transaction(DATABASE_URL, engine=persistent_engine) as t:
session = t.session
user_id, token = self.auth(session, req, resp)
if DELETE not in self.Meta.methods:
raise MethodNotAllowed()
ids_to_delete = req.get_param_as_list("ids")
data = [ids_to_delete, resource_id]
if not any(data) or all(data):
raise BadRequest(
details=(
"It should be a bulk delete (?ids=1,2,3) or delete"
" of a single resource (v1/resource/1)"
)
)
if not ids_to_delete:
ids_to_delete = [resource_id]
if self.Meta.auth:
self.Meta.auth.can_delete(session, user_id, ids_to_delete)
result = self.delete(session, user_id, ids_to_delete)
resp.body = json.dumps(result, default=str)
def auth(self, *args, **kwargs) -> AuthBundle:
"""This method should return (user_id, token) tuple"""
return AuthBundle(0, "")
def audit_log(self, *args, **kwargs):
return
def _check_model_exists(self):
if not self.Meta.model:
raise Exception(
f"{self.__class__.__name__}.Meta.model field not set"
)
###########################################################################
# Resource methods
###########################################################################
def update(
self, session: Session, payload: dict, user_id: int, *args, **kwargs
) -> dict:
"""
        First, the incoming data is prepared for updating: it is deserialized
        with the Marshmallow load method and reshaped for a SQLAlchemy update query.
        The rows are updated with SQLAlchemy's bulk_update_mappings method,
        and many-to-many relationships are saved.
        Returns the updated resources with the help of the read_handler method.
"""
self._check_model_exists()
data = payload[self.Meta.name]
data_to_update = self._to_update(data)
ids = get_ids_from_payload(self.Meta.model, data_to_update)
session.bulk_update_mappings(self.Meta.model, data_to_update)
self._save_m2m(session, data, update=True)
result = self.read_handler(
session=session,
user_id=user_id,
filters=[FilterItem.create("id", OP_IN, ids)],
)
return result
def create(self, session: Session, payload: dict, user_id: int) -> dict:
"""
        Create method.
        You can override it to add your own logic.
        First, the incoming data is prepared for creation: it is deserialized
        with the Marshmallow load method and reshaped for a SQLAlchemy insert query.
        The data is then inserted into the database
        (the bulky library is used when there is more than one entity to create),
        and many-to-many relationships are saved.
        Returns the created resources with the help of the read_handler method.
"""
self._check_model_exists()
# prepare data to insert
data = payload[self.Meta.name]
if isinstance(data, list):
return self.bulk_create(session, user_id, data)
data_to_insert = self._to_create(data)
# insert to DB
resource_id = session.execute(
sa.insert(self.Meta.model)
.values(data_to_insert)
.returning(self.Meta.model.id)
).scalar()
data["id"] = resource_id
self._save_m2m(session, data)
return self.read_handler(
session=session, user_id=user_id, resource_id=resource_id
)
def bulk_create(self, session: Session, user_id: int, data: list) -> dict:
self._check_model_exists()
data_to_insert = [self._to_create(i) for i in data]
# insert to DB
resource_ids = bulky.insert(
session,
self.Meta.model,
data_to_insert,
returning=[self.Meta.model.id],
)
ids = [r.id for r in resource_ids]
result = self.read_handler(
session=session,
user_id=user_id,
filters=[FilterItem.create("id", OP_IN, ids)],
)
return result
def delete(self, session: Session, user_id: int, obj_ids: list):
"""
Simply deletes objects with passed identifiers
"""
self._check_model_exists()
session.execute(
sa.delete(self.Meta.model).where(self.Meta.model.id.in_(obj_ids))
)
return {}
def _to_update(self, data: list) -> list:
"""
Prepare resource data for SQLAlchemy update query
"""
to_update_list = []
for data_line in data:
to_update = {}
for fn, v in data_line.items():
f = self.fields[fn]
if isinstance(f, ToMany):
continue
model_field = f.metadata.get("model_field")
if not model_field:
continue
to_update[model_field.key] = v
to_update_list.append(to_update)
return to_update_list
def _to_create(self, data: dict) -> dict:
"""
Prepare resource data for SQLAlchemy create query
"""
to_create = {}
for fn, v in data.items():
f = self.fields[fn]
if isinstance(f, ToMany):
continue
model_field = f.metadata["model_field"]
to_create[model_field.key] = v
return to_create
def read_handler(
self,
session: Session,
user_id: int,
include: list = None,
filters: Optional[List[FilterItem]] = None,
sort: list = None,
resource_id: int = None,
limit: int = None,
offset: int = None,
) -> dict:
ctx = ReadContext(
session,
self,
user_id,
include,
filters,
sort,
resource_id,
limit,
offset,
)
self.read__query(ctx)
self.read__filtering(ctx)
self.read__sorting(ctx)
self.read__pagination(ctx)
self.read__execute_query(ctx)
if not ctx.obj_ids:
if ctx.is_list:
response = self.Response(self, is_list=ctx.is_list)
return response.serialize()
else:
raise BadRequest("Object Not Found")
self.read__includes(ctx)
return self.read__serializing(ctx)
def read__query(self, ctx: ReadContext):
return ctx.read__query()
def read__filtering(self, ctx: ReadContext):
return ctx.read__filtering()
def read__sorting(self, ctx: ReadContext):
return ctx.read__sorting()
def read__pagination(self, ctx: ReadContext):
return ctx.read__pagination()
def read__execute_query(self, ctx: ReadContext):
return ctx.read__execute_query()
def read__includes(self, ctx: ReadContext):
return ctx.read__includes()
def read__serializing(self, ctx: ReadContext) -> dict:
return ctx.read__serializing()
def get_related_model(self, field: Union[ToOne, ToMany]):
resource_name = field.metadata["resource"]
resource = self.RESOURCES[resource_name]
return resource.Meta.model
def _process_to_many_field(self, field: ToMany) -> M2MMapping:
related_model = self.get_related_model(field)
resource_model = self.Meta.model
model_field = field.metadata["model_field"]
field_obj = M2MMapping(related_model=related_model)
if not isinstance(model_field, sa.Column):
model_field = getattr(
model_field.parent.persist_selectable.c, model_field.key
)
if related_model.__table__ == model_field.table:
for fk in model_field.table.foreign_keys:
if fk.column.table == resource_model.__table__:
field_obj.left_fk_field = fk.parent
break
else:
field_obj.secondary = model_field.table
for fk in model_field.table.foreign_keys:
if fk.column.table == related_model.__table__:
field_obj.right_fk_field = fk.parent
elif fk.column.table == resource_model.__table__:
field_obj.left_fk_field = fk.parent
return field_obj
@cached_property
def _to_many_fields(self) -> List[Tuple[str, M2MMapping]]:
return [
(field_name, self._process_to_many_field(field))
for field_name, field in self.fields.items()
if isinstance(field, ToMany)
]
@staticmethod
def check_exists(
session: Session, table: sa.Table, ids: list, field_name: str
):
result = session.execute(
sa.select([table.c.id]).where(table.c.id.in_(ids))
)
missed = set(ids) - {item.id for item in result}
if missed:
raise BadRequest(
{
field_name: f"objects with id {','.join(map(str, missed))} does not exist"
}
)
@staticmethod
def _get_m2m(field: M2MMapping, field_name: str, data) -> List[dict]:
m2m = []
for obj in data:
rel_ids = obj.get(field_name) or ()
for rel_id in rel_ids:
m2m.append(
{
field.left_fk_field: obj.get("id"),
field.right_fk_field: rel_id,
}
)
return m2m
def _save_m2m(
self, session: Session, data: Union[list, dict], update: bool = False
) -> None:
data = data if utils.is_collection(data) else [data]
for field_name, field in self._to_many_fields:
if field.secondary is not None:
if update:
session.execute(
sa.delete(field.secondary).where(
field.left_fk_field.in_(
[obj.get("id") for obj in data]
)
)
)
many_2_many = self._get_m2m(field, field_name, data)
if many_2_many:
self.check_exists(
session,
field.related_model.__table__,
[obj[field.right_fk_field] for obj in many_2_many],
field_name,
)
session.execute(
sa.insert(field.secondary).values(many_2_many)
)
```
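The handlers above index the request body with `payload[self.Meta.name]`: `create` consumes a single object (a list is routed to `bulk_create`), while `update` expects a list of objects carrying their ids. A minimal sketch of both body shapes, using an illustrative `author` resource whose field names are taken from the test suite below:
```python
# Illustrative request bodies only; the "author" resource and its fields are examples.

# POST /v1/author -- create: a single object (a list here would trigger bulk_create)
create_payload = {"author": {"first_name": "Steven", "last_name": "King"}}

# PATCH /v1/author -- update: a list of objects, each carrying its id
update_payload = {"author": [{"id": 1, "first_name": "Steven", "last_name": "King"}]}
```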
#### File: awokado/tests/base.py
```python
import unittest
from unittest import mock
import sqlalchemy as sa
from clavis import Transaction
from falcon import testing
from sqlalchemy.orm import Session as _Session
from sqlalchemy.pool import NullPool
import awokado.db
from tests.test_app import models as m
class Session(_Session):
def commit(self):
self.flush()
self.expire_all()
def rollback(self):
raise AssertionError("rollback is not supported within test run")
class DbTest(unittest.TestCase):
longMessage = True # XXX: assertXXX() message will be APPENDED to default
def __setup_engine(self):
self._engine = sa.create_engine(
awokado.db.DATABASE_URL, poolclass=NullPool
)
def setUp(self):
import warnings
warnings.filterwarnings("error")
super(DbTest, self).setUp()
self.__setup_engine()
self.__db_conn = self._engine.connect()
try:
self.__db_txn = self.__db_conn.begin()
if not hasattr(self, "__session") or not self.__session:
self.__session = Session(bind=self.__db_conn)
except:
self.__db_conn.close()
raise
def tearDown(self):
try:
try:
self.__db_txn.rollback()
finally:
self.__db_conn.close()
finally:
super(DbTest, self).tearDown()
@property
def session(self):
return self.__session
class BaseAPITest(testing.TestCase, DbTest):
def patch_session(self, session_patch):
class X:
session = self.session
mock_client = mock.MagicMock(spec=Transaction)
mock_client.__enter__.return_value = X
session_patch.return_value = mock_client
def create_author(self, name):
first_name, last_name = name.split()
author_id = self.session.execute(
sa.insert(m.Author)
.values(
{m.Author.first_name: first_name, m.Author.last_name: last_name}
)
.returning(m.Author.id)
).scalar()
return author_id
def create_tag(self, name):
tag_id = self.session.execute(
sa.insert(m.Tag).values({m.Tag.name: name}).returning(m.Tag.id)
).scalar()
return tag_id
```
#### File: awokado/tests/test_author.py
```python
from unittest.mock import patch
from tests.base import BaseAPITest
from .test_app.routes import api
class AuthorTest(BaseAPITest):
def setUp(self):
super().setUp()
self.app = api
@patch("awokado.resource.Transaction", autospec=True)
def test_create(self, session_patch):
self.patch_session(session_patch)
payload = {"author": {"first_name": "Steven", "last_name": "King"}}
api_response = self.simulate_post("/v1/author", json=payload)
self.assertEqual(api_response.status, "200 OK", api_response.text)
id_ = api_response.json["author"][0]["id"]
self.assertIsNotNone(id_)
self.assertDictEqual(
api_response.json["author"][0],
{"id": id_, "name": "<NAME>", "books_count": 0, "books": []},
)
@patch("awokado.resource.Transaction", autospec=True)
def test_update(self, session_patch):
self.patch_session(session_patch)
author_id = self.create_author("<NAME>")
payload = {
"author": [
{"id": author_id, "first_name": "Steven", "last_name": "King"}
]
}
api_response = self.simulate_patch("/v1/author/", json=payload)
self.assertEqual(api_response.status, "200 OK", api_response.json)
self.assertDictEqual(
api_response.json["payload"]["author"][0],
{
"id": author_id,
"name": "<NAME>",
"books_count": 0,
"books": [],
},
)
@patch("awokado.resource.Transaction", autospec=True)
def test_delete(self, session_patch):
self.patch_session(session_patch)
author_id = self.create_author("<NAME>")
api_response = self.simulate_delete(f"/v1/author/{author_id}")
self.assertEqual(api_response.status, "200 OK", api_response.json)
self.assertDictEqual(api_response.json, dict())
@patch("awokado.resource.Transaction", autospec=True)
def test_read(self, session_patch):
self.patch_session(session_patch)
author_id = self.create_author("<NAME>")
api_response = self.simulate_get("/v1/author/")
self.assertEqual(api_response.status, "200 OK", api_response.json)
self.assertDictEqual(
api_response.json["payload"]["author"][0],
{
"id": author_id,
"name": "<NAME>",
"books_count": 0,
"books": [],
},
)
```
#### File: awokado/tests/test_forbidden_book.py
```python
import sqlalchemy as sa
from unittest.mock import patch
from tests.base import BaseAPITest
from .test_app import models as m
from .test_app.resources.forbidden_book import ForbiddenBookResource
from .test_app.routes import api
class ForbiddenBookTest(BaseAPITest):
def setUp(self):
super().setUp()
self.app = api
@patch("awokado.resource.Transaction", autospec=True)
def test_create(self, session_patch):
self.patch_session(session_patch)
payload = {"forbidden_book": {"title": "None"}}
api_response = self.simulate_post("/v1/forbidden_book", json=payload)
self.assertEqual(
api_response.status, "403 Forbidden", api_response.text
)
self.assertEqual(api_response.status_code, 403, api_response.text)
self.assertEqual(
api_response.json,
{
"status": "403 Forbidden",
"title": "403 Forbidden",
"code": "create-forbidden",
"detail": "The creation of a resource forbidden",
},
)
@patch("awokado.resource.Transaction", autospec=True)
def test_update(self, session_patch):
self.patch_session(session_patch)
author_id = self.create_author("<NAME>")
book_id = self.session.execute(
sa.insert(m.Book)
.values(
{m.Book.title: "The Dead Zone", m.Book.author_id: author_id}
)
.returning(m.Book.id)
).scalar()
payload = {
"forbidden_book": [
{
"id": book_id,
"description": (
"Waking up from a five-year coma after "
"a car accident, former schoolteacher"
),
}
]
}
api_response = self.simulate_patch("/v1/forbidden_book/", json=payload)
self.assertEqual(
api_response.status, "403 Forbidden", api_response.text
)
self.assertEqual(api_response.status_code, 403, api_response.text)
self.assertEqual(
api_response.json,
{
"status": "403 Forbidden",
"title": "403 Forbidden",
"code": "update-forbidden",
"detail": "Change the resource is forbidden",
},
)
@patch("awokado.resource.Transaction", autospec=True)
def test_delete(self, session_patch):
self.patch_session(session_patch)
author_id = self.create_author("<NAME>")
book_id = self.session.execute(
sa.insert(m.Book)
.values(
{m.Book.title: "The Dead Zone", m.Book.author_id: author_id}
)
.returning(m.Book.id)
).scalar()
api_response = self.simulate_delete(f"/v1/forbidden_book/{book_id}")
self.assertEqual(
api_response.status, "403 Forbidden", api_response.text
)
self.assertEqual(api_response.status_code, 403, api_response.text)
self.assertEqual(
api_response.json,
{
"status": "403 Forbidden",
"title": "403 Forbidden",
"code": "delete-forbidden",
"detail": "Delete the resource is forbidden",
},
)
@patch("awokado.resource.Transaction", autospec=True)
def test_read(self, session_patch):
self.patch_session(session_patch)
author_id = self.create_author("<NAME>")
book_id = self.session.execute(
sa.insert(m.Book)
.values(
{m.Book.title: "The Dead Zone", m.Book.author_id: author_id}
)
.returning(m.Book.id)
).scalar()
api_response = self.simulate_get("/v1/forbidden_book/")
self.assertEqual(
api_response.status, "403 Forbidden", api_response.text
)
self.assertEqual(api_response.status_code, 403, api_response.text)
self.assertEqual(
api_response.json,
{
"status": "403 Forbidden",
"title": "403 Forbidden",
"code": "read-forbidden",
"detail": "Read the resource is forbidden",
},
)
@patch("awokado.resource.Transaction", autospec=True)
def test_forbidden_auth_skip_exception(self, session_patch):
self.assertFalse(
ForbiddenBookResource.Meta.auth.can_create(
None, None, None, skip_exc=True
)
)
self.assertFalse(
ForbiddenBookResource.Meta.auth.can_read(None, None, skip_exc=True)
)
self.assertFalse(
ForbiddenBookResource.Meta.auth.can_update(
None, None, None, skip_exc=True
)
)
self.assertFalse(
ForbiddenBookResource.Meta.auth.can_delete(
None, None, None, skip_exc=True
)
)
```
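The resource code and the test above only ever call `Meta.auth.can_create`, `can_read`, `can_update` and `can_delete`, optionally with a `skip_exc` flag, so a deny-all policy can be sketched from those call sites alone. This is an assumption-based outline, not the project's actual auth base class:
```python
# Sketch of an auth policy shaped after the call sites above; the method names and
# the skip_exc flag come from resource.py and this test, everything else is assumed.
class DenyAllAuth:
    @classmethod
    def can_create(cls, session, payload, user_id, skip_exc=False):
        if skip_exc:
            return False  # callers may use the return value instead of an exception
        raise Exception("The creation of a resource forbidden")

    @classmethod
    def can_read(cls, session, user_id, skip_exc=False):
        if skip_exc:
            return False
        raise Exception("Read the resource is forbidden")

    @classmethod
    def can_update(cls, session, user_id, ids, skip_exc=False):
        if skip_exc:
            return False
        raise Exception("Change the resource is forbidden")

    @classmethod
    def can_delete(cls, session, user_id, ids, skip_exc=False):
        if skip_exc:
            return False
        raise Exception("Delete the resource is forbidden")
```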
#### File: awokado/tests/test_store_stats.py
```python
from unittest.mock import patch
import sqlalchemy as sa
from awokado.exceptions import IdFieldMissingError
from tests.base import BaseAPITest
from tests.test_app import models as m
from tests.test_app.routes import api
class StoreStatsTest(BaseAPITest):
def setup_dataset(self):
self.store_id = self.session.execute(
sa.insert(m.Store)
.values({m.Store.name: "bookstore"})
.returning(m.Store.id)
).scalar()
self.book_id_1 = self.session.execute(
sa.insert(m.Book)
.values({m.Book.title: "new", m.Book.store_id: self.store_id})
.returning(m.Book.id)
).scalar()
self.book_id_2 = self.session.execute(
sa.insert(m.Book)
.values({m.Book.title: "new 2", m.Book.store_id: self.store_id})
.returning(m.Book.id)
).scalar()
def setUp(self):
super().setUp()
self.app = api
self.setup_dataset()
@patch("awokado.resource.Transaction", autospec=True)
def test_read(self, session_patch):
self.patch_session(session_patch)
api_response = self.simulate_get(f"/v1/store_stats/{self.store_id}")
self.assertEqual(
api_response.status, "400 Bad Request", api_response.text
)
self.assertEqual(
api_response.json["detail"], IdFieldMissingError().details
)
api_response = self.simulate_get("/v1/store_stats/")
self.assertEqual(
api_response.status, "400 Bad Request", api_response.text
)
self.assertEqual(
api_response.json["detail"], IdFieldMissingError().details
)
``` |
{
"source": "57r4n63r/dwm-status",
"score": 3
} |
#### File: dwm-status/Core/Actions.py
```python
import sys
class Actions:
arguments = {}
core = None
    def __init__(self, core):
        self.core = core
        # Collect command line arguments given as --name=value
        for arg in sys.argv:
            if arg.find('--') != -1:
                argname = arg.replace('--', '').split('=')[0]
                self.arguments[argname] = arg.split('=')[1]
        self.setDefault()
    def handle(self):
        if self.arguments['output'] == 'print':
            self.core.print()
            return
        if self.arguments['output'] == 'xsetroot':
            self.core.xsetroot()
            return
        raise SystemExit('[error] Selected output is invalid')
    def setDefault(self):
        # Fall back to plain printing when no --output argument was given
        if not self.arguments.get('output'):
            self.arguments['output'] = 'print'
```
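A quick illustration of how the parser above behaves, assuming the package layout shown here (`Core.Actions`) and a hypothetical entry script invoked as `status.py --output=xsetroot`:
```python
import sys

from Core.Actions import Actions


class FakeCore:
    # Stand-in for the real Core object; Actions only calls print() or xsetroot() on it.
    def print(self):
        print('would print the status line to stdout')

    def xsetroot(self):
        print('would push the status line to the root window via xsetroot')


sys.argv = ['status.py', '--output=xsetroot']   # simulate the command line
actions = Actions(FakeCore())                   # parses --name=value pairs into actions.arguments
actions.handle()                                # dispatches on arguments['output'] -> xsetroot()
```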
#### File: Modules/Network/Module.py
```python
import re
from subprocess import call, check_output
from Core.Module.AbstractModule import AbstractModule
from Core.Core import Core
from Core.Icons import Icons
import time
class Network(AbstractModule):
configurations = None
getWanIpCommand = ['curl','-s']
getLanIpCommand = ['ip','addr','show']
interfaceStatePath = '/sys/class/net/{interface}/operstate'
wanIp = None
additionnal = []
interfaces = []
wifiIcon = 'wifi_good'
wiredIcon = 'wired'
wanLastChecked = False
order=40
def init(self):
core = Core.getInstance()
self.configurations = core.configurations
if 'wanProvider' in self.configurations['networking']:
self.getWanIpCommand.append(self.configurations['networking']['wanProvider'])
if("additionnal" in self.configurations['networking']):
self.additionnal = self.configurations['networking']['additionnal']
if 'wired' in self.configurations['networking']:
self.interfaces.append(self.configurations['networking']['wired'])
if 'wifi' in self.configurations['networking']:
self.interfaces.append(self.configurations['networking']['wifi'])
if 'wifi-icon' in self.configurations['networking']:
self.wifiIcon = self.configurations['networking']['wifi-icon']
if 'wired-icon' in self.configurations['networking']:
self.wiredIcon = self.configurations['networking']['wired-icon']
self.interfaces = [*self.interfaces,*self.additionnal]
def isInterfaceUp(self, interface):
path = self.interfaceStatePath.replace('{interface}',interface)
command = ['cat',path]
output = check_output(command).strip().decode('utf-8')
        return output == 'up'
def getWAN(self):
if 'wanProvider' in self.configurations['networking']:
if self.wanLastChecked:
now = time.time()
diff = now - self.wanLastChecked
if diff > 300: #5 minutes
self.wanLastChecked = time.time()
self.wanIp = check_output(self.getWanIpCommand).strip().decode('utf-8')
else:
self.wanLastChecked = time.time()
self.wanIp = check_output(self.getWanIpCommand).strip().decode('utf-8')
return self.wanIp
def getLan(self):
        ips = []
for interface in self.interfaces:
if(self.isInterfaceUp(interface)):
currentLanIpCommand = self.getLanIpCommand[:]
currentLanIpCommand.append(interface)
output = check_output(currentLanIpCommand).strip().decode('utf-8')
ip = re.findall('(inet .+/)',output)[0].replace('inet ','').replace('/','')
ips.append(ip)
return ' | '.join(ips)
def getStates(self):
icons = []
if 'wired' in self.configurations['networking']:
wired = self.configurations['networking']['wired']
else:
wired = 'off'
if 'wifi' in self.configurations['networking']:
wifi = self.configurations['networking']['wifi']
else:
wifi = 'off'
if(wifi != 'off'):
if(self.isInterfaceUp(wifi)):
icons.append(Icons.get(self.wifiIcon))
if(wired != 'off'):
if(self.isInterfaceUp(wired)):
icons.append(Icons.get(self.wiredIcon))
return ' '.join(icons)
def getString(self):
output = []
wan = self.getWAN()
lan = self.getLan()
states = self.getStates()
if wan != None:
output.append(wan)
if lan != "":
output.append(lan)
if states != "":
output.append(states)
return ' | '.join(output)
```
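The Network module above only reads a handful of keys from `configurations['networking']`; a sketch of what that section could contain, with interface names, the WAN IP provider and icon names as illustrative assumptions:
```python
# Keys mirror the `in self.configurations['networking']` checks above; all values are examples.
networking_configuration = {
    "wanProvider": "https://ifconfig.me",  # URL handed to `curl -s` to fetch the WAN IP
    "wired": "enp3s0",                     # wired interface name, or "off" to disable
    "wifi": "wlan0",                       # wifi interface name, or "off" to disable
    "additionnal": ["tun0"],               # extra interfaces whose LAN IPs are reported
    "wifi-icon": "wifi_good",              # icon keys resolved through Icons.get()
    "wired-icon": "wired",
}
```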
#### File: Modules/Power/Module.py
```python
import os
from Core.Module.AbstractModule import AbstractModule
from subprocess import call, check_output
from Core.Core import Core
from Core.Icons import Icons
class Power(AbstractModule):
capacityPath = '/sys/class/power_supply/{battery}/capacity'
statusPath = '/sys/class/power_supply/{battery}/status'
chargeNowPath = '/sys/class/power_supply/{battery}/charge_now'
chargeFullPath = '/sys/class/power_supply/{battery}/charge_full'
batteryName = None
order = 20
def init(self):
core = Core.getInstance()
if 'power' in core.configurations and 'battery' in core.configurations['power'] :
self.batteryName = core.configurations['power']['battery']
self.capacityPath = self.capacityPath.replace('{battery}',self.batteryName)
self.statusPath = self.statusPath.replace('{battery}',self.batteryName)
self.chargeNowPath = self.chargeNowPath.replace('{battery}',self.batteryName)
self.chargeFullPath = self.chargeFullPath.replace('{battery}',self.batteryName)
def getValue(self,path):
return check_output(
['cat', path]).strip().decode('utf-8')
def getString(self):
        if self.batteryName is None:
return Icons.get('battery_pluged')
capacity = self.getValue(self.capacityPath)
status = self.getValue(self.statusPath)
chargeNow = self.getValue(self.chargeNowPath)
chargeFull = self.getValue(self.chargeFullPath)
statusIcon = Icons.get( 'battery_unpluged' )
level = "100%"
if capacity == "0" or status == "Charging":
statusIcon = Icons.get( 'battery_pluged' )
floatLevel = float(chargeNow) / float(chargeFull) * 100
level = str(int(floatLevel)) + '%'
return level+' | '+statusIcon
``` |
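The Power module is driven by a single configuration key naming the battery directory under `/sys/class/power_supply/`; a minimal sketch (the battery name `BAT0` is a common but assumed value):
```python
# With this section present, capacity/status/charge_now/charge_full are read from
# /sys/class/power_supply/BAT0/; without it, only the "plugged" icon is shown.
power_configuration = {
    "battery": "BAT0",
}
```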
{
"source": "57uff3r/pearsondictionary",
"score": 3
} |
#### File: pearsondictionary/pearsondictionary/tests.py
```python
try:
from .code import PearsonDictionary
except ImportError:
from code import PearsonDictionary
import unittest
COMMON_TEST_WORD = 'dog'
COMMON_TEST_WORD_NOUN = 'noun'
class FullTest(unittest.TestCase):
def test_1(self):
pd = PearsonDictionary()
result = pd.get_definitions(COMMON_TEST_WORD)
self.assertTrue(COMMON_TEST_WORD in result, msg='Low level api request with common word 2')
len_short = len(result)
self.assertTrue(len_short > 0)
result = pd.get_definitions(COMMON_TEST_WORD, load_all_items=True)
self.assertTrue(COMMON_TEST_WORD in result, msg='Low level api with recursive requests')
len_full = len(result)
self.assertTrue(len_full > 0)
self.assertTrue(len_full > len_short, msg='Full answer vs short answer')
result = pd.get_definitions(COMMON_TEST_WORD, pos=COMMON_TEST_WORD_NOUN)
self.assertTrue(COMMON_TEST_WORD in result, msg='Low level api request with common word and pos = noun')
if __name__ == '__main__':
unittest.main()
``` |
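The test above already exercises the whole public surface of `PearsonDictionary`; a condensed usage sketch, assuming the package is importable as `pearsondictionary`:
```python
from pearsondictionary.code import PearsonDictionary

pd = PearsonDictionary()

# First page of definitions for a word
definitions = pd.get_definitions('dog')

# Restrict to a part of speech, or follow pagination recursively
noun_definitions = pd.get_definitions('dog', pos='noun')
all_definitions = pd.get_definitions('dog', load_all_items=True)
```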
{
"source": "57ur14/DAC-clustering-PoC",
"score": 3
} |
#### File: 57ur14/DAC-clustering-PoC/count_files_requiring_analysis.py
```python
import os
import pickle
from multiprocessing import Process, Manager
files = {}
clusters = {
'imphash_clusters': {},
'icon_clusters': {},
'resource_clusters': {},
'tlsh_clusters': {}
}
def load_from_pickles(folder, load_clusters=False):
"""
Load data from pickles
Folder should be the path to a folder where the
files "files.pkl" and "clusters.pkl" will be stored.
Suggested values:
pickles/extracted/
pickles/clustered/
pickles/validated/
If load_clusters is True, clusters will also be loaded
Returns False on failure to load pickles and True on success
"""
global files
global clusters
files_path = os.path.join(folder, 'files.pkl')
clusters_path = os.path.join(folder, 'clusters.pkl')
if not os.path.exists(files_path):
print("Files pickle not found. Perform feature extraction before attempting to cluster / validate.")
return False
else:
with open(files_path, 'rb') as picklefile:
files = pickle.load(picklefile)
if load_clusters and not os.path.exists(clusters_path):
print("Clusters pickle not found. Perform training before attempting to validate.")
return False
elif load_clusters:
with open(clusters_path, 'rb') as picklefile:
clusters = pickle.load(picklefile)
return True
def total_files_to_label(files):
"""
Identify the number of files that do not have a label.
Returns the number of unlabelled, incoming files.
"""
total = 0
for fileinfo in files.values():
if fileinfo['incoming'] and fileinfo['given_label'] is None:
total += 1
return total
def fill_cluster_details(cluster, files):
"""
Count the number of incoming files, packed incoming files,
unlabelled incoming and labelled incoming files.
Add these properties to the cluster.
"""
incoming = 0
unlabelled = 0
packed = 0
for sha in cluster['items']:
if files[sha]['incoming']:
incoming += 1
if files[sha]['given_label'] is None:
unlabelled += 1
if files[sha]['obfuscation'] is not None:
packed += 1
labelled = incoming - unlabelled
cluster['total_incoming'] = incoming
cluster['packed_incoming'] = packed
cluster['unlabelled_files'] = unlabelled
cluster['labelled_files'] = labelled
def is_good_quality(cluster):
"""
Evaluate the quality of a cluster.
    Returns True if the quality is evaluated to be good,
and False if the quality is evaluated to be poor.
"""
if (cluster['total_incoming'] != 0
and cluster['label'] is None
and cluster['unlabelled_files'] > cluster['labelled_files']):
return True
else:
return False
def get_unlabelled(cluster):
"""
Return the value contained in the key "unlabelled_files"
"""
return cluster['unlabelled_files']
def get_label_from_in_depth_analysis(fileinfo):
"""
Simulated in-depth analysis.
Returns the real label of the file.
"""
return fileinfo['family']
def label_clusters_of_file(fileinfo, files, clusters):
"""
Iterate over all clusters a file is part of, and which
do not have a label. If a cluster is of good quality,
use the label of the given file to label the cluster
    and the files contained in the cluster.
"""
labelled = 0
correctly = 0
incorrectly = 0
results = []
if fileinfo['imphash'] is not None:
cluster = clusters['imphash_clusters'][fileinfo['imphash']]
if is_good_quality(cluster):
results.append(label_cluster_and_files(fileinfo['given_label'], cluster, files, clusters))
if fileinfo['icon_hash'] is not None:
cluster = clusters['icon_clusters'][fileinfo['icon_hash']]
if is_good_quality(cluster):
results.append(label_cluster_and_files(fileinfo['given_label'], cluster, files, clusters))
for sha in fileinfo['contained_resources']:
cluster = clusters['resource_clusters'][sha]
if is_good_quality(cluster):
results.append(label_cluster_and_files(fileinfo['given_label'], cluster, files, clusters))
if fileinfo['tlsh_cluster'] is not None:
cluster = clusters['tlsh_clusters'][fileinfo['tlsh_cluster']]
if is_good_quality(cluster):
results.append(label_cluster_and_files(fileinfo['given_label'], cluster, files, clusters))
for l, c, i in results:
labelled += l
correctly += c
incorrectly += i
return labelled, correctly, incorrectly
def label_cluster_and_files(label, cluster, files, clusters):
"""
Use the provided label to label a given cluster.
Then iterate over files in the cluster. If a file
does not have a label, label the file and propagate
labelling to the clusters of that file.
"""
l = 0
c = 0
i = 0
cluster['label'] = label
for sha in cluster['items']:
fileinfo = files[sha]
if fileinfo['given_label'] is None:
fileinfo['given_label'] = label
if fileinfo['incoming']:
if fileinfo['given_label'] == fileinfo['family']:
c += 1
else:
i += 1
l += 1
l2, c2, i2 = label_clusters_of_file(fileinfo, files, clusters)
l += l2
c += c2
i += i2
return l, c, i
if __name__ == '__main__':
successfully_loaded = load_from_pickles('pickles/validated/', True)
if not successfully_loaded:
print("Run training (-T or -E + -C) and validation (-V) first")
raise SystemExit
num_files_to_label = total_files_to_label(files)
files_analysed_in_depth = 0
correctly_labelled = 0
mislabelled = 0
print("Number of files to label before: " + str(num_files_to_label))
still_more = True
while still_more:
cluster_list = []
for cluster_type in clusters.values():
for cluster in cluster_type.values():
fill_cluster_details(cluster, files)
if is_good_quality(cluster):
cluster_list.append(cluster)
cluster_list.sort(key=get_unlabelled)
if cluster_list:
prioritised = cluster_list.pop()
else:
break
representative = None
for sha in prioritised['items']:
fileinfo = files[sha]
if fileinfo['incoming']:
if fileinfo['obfuscation'] is None:
# Representative file should ideally
# not be obfuscated
representative = fileinfo
break
elif representative is None:
# If no non-obfuscated file was available,
# use an obfuscated file as representative file.
representative = fileinfo
        # If a representative file was identified (should always be the case)
if representative is not None:
label = get_label_from_in_depth_analysis(representative)
representative['given_label'] = label
files_analysed_in_depth += 1
num_files_to_label -= 1
labelled, correctly, incorrectly = label_clusters_of_file(representative, files, clusters)
num_files_to_label -= labelled
correctly_labelled += correctly
mislabelled += incorrectly
if not cluster_list:
still_more = False
total_in_depth_analysis = files_analysed_in_depth + num_files_to_label
print("Files sent to simulated in-depth analysis: " + str(files_analysed_in_depth))
print("Files correctly labelled through induction: " + str(correctly_labelled))
print("Files incorrectly labelled through induction: " + str(mislabelled))
print("Number of files to label after: " + str(num_files_to_label))
print("Files to send to in-depth analysis in total: " + str(total_in_depth_analysis))
```
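To make the prioritisation criterion above concrete, here is a tiny synthetic cluster run through `fill_cluster_details` and `is_good_quality` (hashes and feature values are made up, and the functions are assumed to be importable from the script above):
```python
# Two unlabelled incoming files and one non-incoming file, no cluster label yet.
toy_files = {
    'aa': {'incoming': True,  'given_label': None, 'obfuscation': None},
    'bb': {'incoming': True,  'given_label': None, 'obfuscation': 'packed'},
    'cc': {'incoming': False, 'given_label': None, 'obfuscation': None},
}
toy_cluster = {'items': ['aa', 'bb', 'cc'], 'label': None}

fill_cluster_details(toy_cluster, toy_files)
print(toy_cluster['total_incoming'])    # 2
print(toy_cluster['packed_incoming'])   # 1
print(toy_cluster['unlabelled_files'])  # 2
print(is_good_quality(toy_cluster))     # True: unlabelled incoming files outnumber labelled ones
```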
#### File: 57ur14/DAC-clustering-PoC/run.py
```python
import argparse
import configparser
import multiprocessing
import os
import pickle
import queue
import time
from multiprocessing.managers import BaseManager
import clustering
import feature_extraction
# Data structure for storing files
files = {}
clusters = {
'imphash_clusters': {},
'icon_clusters': {},
'resource_clusters': {},
'tlsh_clusters': {}
}
# Retreive configuration
config = configparser.ConfigParser()
config.read('config.ini')
PRINT_PROGRESS = config.getboolean('general', 'print_progress')
QUEUE_MANAGER_IP = config.get('queue_managers', 'ip')
JOB_MANAGER_PORT = config.getint('queue_managers', 'job_port')
DONE_MANAGER_PORT = config.getint('queue_managers', 'done_port')
QUEUE_MANAGER_KEY = config.get('queue_managers', 'key').encode('utf-8')
QUEUE_TIMEOUT = config.getint('queue_managers', 'timeout')
CLUSTER_WITH_VHASH = config.getboolean('clustering', 'cluster_with_vhash')
if CLUSTER_WITH_VHASH:
clusters['vhash_clusters'] = {}
# Define queue manager class
class QueueManager(BaseManager):
pass
QueueManager.register('get_queue')
def serve_simple_queue(ip, port, key):
"""
Start a queue on the specified port
Start as new thread/process as the function will run "serve_forever()".
"""
simple_queue = queue.Queue()
QueueManager.register('get_queue', callable=lambda:simple_queue)
manager = QueueManager(address=(ip, port), authkey=key)
server = manager.get_server()
server.serve_forever()
def feature_extraction_worker(training=False):
"""
Connect to feature extraction (job) queue and clustering (job done) queue
If training is True, the file will be marked as being part of the training data set.
"""
job_manager = QueueManager(address=(QUEUE_MANAGER_IP, JOB_MANAGER_PORT), authkey=QUEUE_MANAGER_KEY)
done_manager = QueueManager(address=(QUEUE_MANAGER_IP, DONE_MANAGER_PORT), authkey=QUEUE_MANAGER_KEY)
try:
job_manager.connect()
done_manager.connect()
except:
print("Cannot connect to queue manager. Please check the configuration.")
else:
job_queue = job_manager.get_queue()
done_queue = done_manager.get_queue()
while True:
try:
file_to_cluster = job_queue.get(timeout=QUEUE_TIMEOUT)
except EOFError:
print("Queue not available. Please check if the feature extraction queue manager is still running.")
break
except queue.Empty:
# Stop when queue is empty
break
else:
result = feature_extraction.analyse_file(file_to_cluster['path'], family=file_to_cluster['family'], incoming=True, training=training)
send_to_done_queue(result, done_queue)
def send_to_done_queue(fileinfo, done_queue):
"""
Recursively send files to the queue of
completed feature extraction jobs
"""
if fileinfo is not None:
for contained_info in fileinfo['contained_pe_fileinfo'].values():
send_to_done_queue(contained_info, done_queue)
fileinfo.pop('contained_pe_fileinfo')
done_queue.put(fileinfo)
def add_files_for_extraction(*file_list):
"""
Add files to the queue of files that should have their
features extracted and their data sent to clustering
"""
job_manager = QueueManager(address=(QUEUE_MANAGER_IP, JOB_MANAGER_PORT), authkey=QUEUE_MANAGER_KEY)
try: # Connect to feature extraction queue
job_manager.connect()
except:
print("Cannot connect to queue manager. Make sure the daemon is running and the configuration is correct.")
else:
job_queue = job_manager.get_queue()
for item in file_list:
# Send all files in the list to the feature extraction queue
job_queue.put(item)
def get_done_queue():
"""
Retrieve a queue object from a queue manager created
with the options provided in the config file.
"""
done_manager = QueueManager(address=(QUEUE_MANAGER_IP, DONE_MANAGER_PORT), authkey=QUEUE_MANAGER_KEY)
try:
done_manager.connect()
except:
print("Cannot connect to queue manager. Please try again or check the configuration.")
raise SystemExit
return done_manager.get_queue()
def get_fileinfo_from_done_queue(done_queue):
"""
Returns one fileinfo/file feature dictionary from
the provided queue object.
"""
try:
# Return file metadata the done queue
return done_queue.get(timeout=QUEUE_TIMEOUT)
except EOFError:
print("Queue not available. Please check if the queue manager is still running.")
return None
except queue.Empty:
if PRINT_PROGRESS:
print("Done-queue empty. Stopping collection.")
return None
def collect_features(files):
"""
Retrieve fileinfo/file feature dictionaries from the
feature extraction workers and store the feature
information in the global "files" data structure.
"""
incoming_files_parsed = 0
done_queue = get_done_queue()
# Attempt to retrieve a file from the done queue
fileinfo = get_fileinfo_from_done_queue(done_queue)
# Continue while it is possible to retrieve a file
while fileinfo is not None:
fileinfo['training'] = True
if fileinfo['incoming']:
incoming_files_parsed += 1
if PRINT_PROGRESS:
print("Processing incoming file number: " + str(incoming_files_parsed))
# If file was successfully retrieved from queue
if fileinfo['sha256'] in files.keys():
# If file has been received and clustered before
# Merge new data into the existing data.
if PRINT_PROGRESS:
print("Merging file with existing information " + fileinfo['sha256'])
current_file = files[fileinfo['sha256']]
if fileinfo['incoming']:
current_file['incoming'] = True
else: # If file is not incoming (was unpacked from another file)
# Update "unpacks_from" since it might be contained in multiple different binaries
current_file['unpacks_from'].update(fileinfo['unpacks_from'])
else:
# If file has not been received before, add data
if PRINT_PROGRESS:
print("Storing file " + fileinfo['sha256'])
# Add file information to global data structure
files[fileinfo['sha256']] = fileinfo
# Attempt to retrieve next file and continue loop
fileinfo = get_fileinfo_from_done_queue(done_queue)
def cluster_and_validate_incoming(files, clusters):
"""
Cluster and perform validation on files that are in the
feature extraction job done queue.
"""
done_queue = get_done_queue()
incoming_files_parsed = 0
correctly_labelled = 0
incorrectly_labelled = 0
not_labelled = 0
labelled_packed = 0
not_labelled_packed = 0
fast_clustered = 0
fast_clustered_incoming = 0
slow_clustered = 0
slow_clustered_incoming = 0
# Attempt to retrieve a file from the done queue
fileinfo = get_fileinfo_from_done_queue(done_queue)
# Continue while it is possible to retrieve a file
while fileinfo is not None:
if fileinfo['incoming']:
incoming_files_parsed += 1
if PRINT_PROGRESS:
print("Clustering incoming file number: " + str(incoming_files_parsed))
# If file was successfully retrieved from queue
if fileinfo['sha256'] in files.keys():
# If file has been received and clustered before
# Merge new data into the existing data.
if PRINT_PROGRESS:
print("Merging file with existing information " + fileinfo['sha256'])
current_file = files[fileinfo['sha256']]
if fileinfo['incoming']:
current_file['incoming'] = True
else: # If file is not incoming (was unpacked from another file)
# Update "unpacks_from" since it might be contained in multiple different binaries
current_file['unpacks_from'].update(fileinfo['unpacks_from'])
else:
# If file has not been received before, add data
if PRINT_PROGRESS:
print("Storing file " + fileinfo['sha256'])
# Add file to global data structure
files[fileinfo['sha256']] = fileinfo
# Cluster the file
if clustering.cluster_file(fileinfo, files, clusters):
fast_clustered += 1
if fileinfo['incoming']:
fast_clustered_incoming += 1
else:
slow_clustered += 1
if fileinfo['incoming']:
slow_clustered_incoming += 1
# Label the file
clustering.label_file(fileinfo, files, clusters)
if fileinfo['incoming']:
# Check if correctly labelled and store results
if fileinfo['given_label'] is not None:
if fileinfo['family'] == fileinfo['given_label']:
correctly_labelled += 1
else:
incorrectly_labelled += 1
if fileinfo['obfuscation']:
labelled_packed += 1
else:
not_labelled += 1
if fileinfo['obfuscation'] is not None:
not_labelled_packed += 1
# Attempt to retrieve next file and continue loop
fileinfo = get_fileinfo_from_done_queue(done_queue)
# Return statistics:
return {
'correctly_labelled': correctly_labelled,
'incorrectly_labelled': incorrectly_labelled,
'not_labelled': not_labelled,
'not_labelled_packed': not_labelled_packed,
'labelled_packed': labelled_packed,
'incoming_files_parsed': incoming_files_parsed,
'fast_clustered': fast_clustered,
'fast_clustered_incoming': fast_clustered_incoming,
'slow_clustered': slow_clustered,
'slow_clustered_incoming': slow_clustered_incoming
}
def save_to_pickles(folder):
"""
Save data to pickles to allow later processing.
Folder should be the path to a folder where the
files "files.pkl" and "clusters.pkl" will be stored.
Suggested values for folder:
pickles/extracted/
pickles/clustered/
pickles/validated/
"""
global files
global clusters
if not os.path.exists(folder):
os.makedirs(folder)
files_path = os.path.join(folder, 'files.pkl')
clusters_path = os.path.join(folder, 'clusters.pkl')
with open(files_path, 'wb') as picklefile:
pickle.dump(files, picklefile)
with open(clusters_path, 'wb') as picklefile:
pickle.dump(clusters, picklefile)
def load_from_pickles(folder, load_clusters=False):
"""
Load data from pickles
Folder should be the path to a folder where the
files "files.pkl" and "clusters.pkl" will be stored.
Suggested values:
pickles/extracted/
pickles/clustered/
pickles/validated/
If load_clusters is True, clusters will also be loaded
Returns False on failure to load pickles and True on success
"""
global files
global clusters
files_path = os.path.join(folder, 'files.pkl')
clusters_path = os.path.join(folder, 'clusters.pkl')
if not os.path.exists(files_path):
print("Files pickle not found. Perform feature extraction before attempting to cluster / validate.")
return False
else:
with open(files_path, 'rb') as picklefile:
files = pickle.load(picklefile)
if load_clusters and not os.path.exists(clusters_path):
print("Clusters pickle not found. Perform training before attempting to validate.")
return False
elif load_clusters:
with open(clusters_path, 'rb') as picklefile:
clusters = pickle.load(picklefile)
return True
# If main script (not another thread/process)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run feature extraction/clustering')
parser.add_argument('-N', '--number-of-workers', type=int, default=multiprocessing.cpu_count(), help='Integer specifying the number of feature extraction threads')
parser.add_argument('-E', '--extraction-list', help='Path to a text file containing filepaths to files that should have their features extracted (for clustering), where each line consists of <path> <family>. Path must not contain any spaces.')
parser.add_argument('-C', '--cluster', action='store_true', help='Do clustering on files where features have been extracted.')
parser.add_argument('-T', '--train-list', help='Equivalent of -E # -C. Path to a text file containing filepaths to files in training set, where each line consists of <path> <family>. Path must not contain any spaces.')
parser.add_argument('-V', '--validation-list', help='Path to a text file containing filepaths to files in validation (testing) set, where each line consists of <path> <family>. Path must not contain any spaces.')
args = parser.parse_args()
# Fill list with files that should be sent to analysis
files_for_analysis = []
filename = None
mark_as_training = False
do_extraction = False
do_clustering = args.cluster
do_validation = False
if args.train_list is not None:
filename = args.train_list
mark_as_training = True
do_extraction = True
do_clustering = True
if args.extraction_list is not None:
filename = args.extraction_list
mark_as_training = True
do_extraction = True
if args.validation_list is not None:
filename = args.validation_list
do_validation = True
if do_extraction or do_validation:
# Load paths and families from file and process the files
with open(filename, 'r') as infile:
lines = infile.read().splitlines()
for line in lines:
path, fam = line.split(' ')
files_for_analysis.append({'path': path, 'family': fam})
number_of_files = len(files_for_analysis)
if not number_of_files:
print("No files to analyse")
raise SystemExit
# If filepaths have been loaded
# Create queue daemon for files to perform feature extraction on
multiprocessing.Process(target=serve_simple_queue, args=(QUEUE_MANAGER_IP, JOB_MANAGER_PORT, QUEUE_MANAGER_KEY), daemon=True).start()
# Create queue daemon for files to perform clustering on
multiprocessing.Process(target=serve_simple_queue, args=(QUEUE_MANAGER_IP, DONE_MANAGER_PORT, QUEUE_MANAGER_KEY), daemon=True).start()
# Sleep for 0.2 second to ensure queues are running
time.sleep(0.2)
        # args is the list itself (not a 1-tuple): each file dict is passed as a
        # separate positional argument and collected by *file_list in the target.
        multiprocessing.Process(target=add_files_for_extraction, args=(files_for_analysis), daemon=True).start()
# Create a thread that retrieves files from feature extraction queue,
# extracts their features and adds them to the clustering queue.
for i in range(args.number_of_workers):
multiprocessing.Process(target=feature_extraction_worker, args=(mark_as_training,), daemon=True).start()
if do_extraction:
# Store files coming from feature extraction job done queue.
collect_features(files)
# Save file features to pickles
save_to_pickles('pickles/extracted/')
if do_clustering:
# Load file features from pickles
if do_extraction or load_from_pickles('pickles/extracted/'):
# Cluster the files based on extracted features
clustering.cluster_files(files, clusters)
# Label the created clusters
clustering.label_clusters(files, clusters)
clustering_statistics = clustering.analyse_clustered_files(files)
clustering_statistics.update(clustering.analyse_clusters(files, clusters))
for key, val in clustering_statistics.items():
print(str(key) + ": " + str(val))
# Save updated file information and clusters to pickles.
save_to_pickles('pickles/clustered/')
if do_validation:
# Load files and clusters from training
if load_from_pickles('pickles/clustered/', True):
# Perform feature extraction, cluster and label
# files coming from feature extraction job done queue.
validation_statistics = cluster_and_validate_incoming(files, clusters)
# Calculate number of files not parsed
validation_statistics['non_parsed_files'] = number_of_files - validation_statistics['incoming_files_parsed']
# Collect statistics on clusters after validation
validation_statistics.update(clustering.analyse_clustered_files(files))
validation_statistics.update(clustering.analyse_clusters(files, clusters))
# Print statistics when done:
for key, val in validation_statistics.items():
print(str(key) + ": " + str(val))
# Save updated file information and clusters to pickles
save_to_pickles('pickles/validated/')
```
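run.py and unpacking.py below read all of their tunables from config.ini through configparser; a sketch that writes a configuration containing only the options referenced in the files shown here, with illustrative values:
```python
import configparser

# Only options read by run.py / unpacking.py appear here; all values are examples.
config = configparser.ConfigParser()
config['general'] = {'print_progress': 'yes'}
config['queue_managers'] = {
    'ip': '127.0.0.1',
    'job_port': '48000',    # queue of files awaiting feature extraction
    'done_port': '48001',   # queue of finished feature extraction results
    'key': 'change-me',     # shared authkey for both queue managers
    'timeout': '30',        # seconds a worker waits before treating a queue as empty
}
config['clustering'] = {'cluster_with_vhash': 'no'}
config['unpacking'] = {'store_unpacked': 'no'}  # add 'directory' when storing unpacked files

with open('config.ini', 'w') as configfile:
    config.write(configfile)
```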
#### File: 57ur14/DAC-clustering-PoC/unpacking.py
```python
import configparser
import hashlib
import os
import shutil
import subprocess
import sys
import time
config = configparser.ConfigParser()
config.read('config.ini')
STORE_UNPACKED = config.getboolean('unpacking', 'store_unpacked')
if STORE_UNPACKED:
UNPACKED_DIRECTORY = config.get('unpacking', 'directory')
# Create necessary directory if it does not exist
if STORE_UNPACKED and not os.path.exists(UNPACKED_DIRECTORY):
os.makedirs(UNPACKED_DIRECTORY)
def detect_obfuscation_by_diec(filepath):
"""
Detect obfuscaton with DetectItEasy
"""
diec_output = get_diec_output(filepath)
    if 'protector' in diec_output:  # a protector takes precedence over a plain packer
return {'type': 'protected', 'protector': diec_output['protector']}
elif 'packer' in diec_output:
return {'type': 'packed', 'packer': diec_output['packer']}
return None
def get_diec_output(filepath):
"""
Run Detect It Easy Console on a file specified by a filepath
and return values in a dictionary.
Detect It Easy console version (diec) must be installed manually
and "diec" must be included in $PATH.
"""
info = {}
try:
diec_process = subprocess.run(["diec", filepath], stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)
except subprocess.CalledProcessError as err:
print(err)
except OSError as err:
# Potentially out of memory or encountered other error.
# Print error message and retry later.
print(err)
print("Sleeping 5 minutes before trying again")
time.sleep(300)
return get_diec_output(filepath)
else:
try:
diec_output = diec_process.stdout.decode('utf-8')
except UnicodeDecodeError:
diec_output = ''
for line in diec_output.splitlines():
if line[0:12] == 'PE: packer: ':
info['packer'] = line[12:]
elif line[0:15] == 'PE: protector: ':
info['protector'] = line[15:]
elif line[0:12] == 'PE: linker: ':
info['linker'] = line[12:]
elif line[0:14] == 'PE: compiler: ':
info['compiler'] = line[14:]
elif line[0:13] == 'PE: library: ':
info['library'] = line[13:]
elif line[0:15] == 'PE: installer: ':
info['installer'] = line[15:]
elif line[0:13] == 'PE: overlay: ':
info['overlay'] = line[13:]
elif line[0:9] == 'PE: sfx: ':
info['sfx'] = line[9:]
elif line[0:13] == 'PE: archive: ':
info['archive'] = line[13:]
elif line[0:12] == 'PE: joiner: ':
info['joiner'] = line[12:]
return info
def unpack_file(filepath, tmpdir):
"""
Attempt to unpack file.
filepath is the path to the file that should be attempted unpacked.
tmpdir is a path to a temporary directory unique to this thread where
the thread will attempt to unpack files to.
Returns a list of unpacked files or an empty list.
"""
# Other unpacking tools have been removed due to
# lacking reliability and usefulness of the tools.
# If multiple unpacking tools are to be used here,
# subdirectories below tmpdir should be created for each
# tool to avoid tools overwriting output of each other.
# Attempt static unpacking with ClamAV. Return unpacked files.
return clam_unpack(filepath, tmpdir)
def clam_unpack(filepath, tmpdir):
"""
Attempt to unpack the malware statically with ClamAV.
Returns a list that can either be empty or contain paths to files unpacked from the file at the specified path.
Packers supported by ClamAV (https://www.clamav.net/documents/libclamav):
* Aspack (2.12)
* UPX (all versions)
* FSG (1.3, 1.31, 1.33, 2.0)
* Petite (2.x)
* PeSpin (1.1)
* NsPack
* wwpack32 (1.20)
* MEW
* Upack
* Y0da Cryptor (1.3)
Dependencies (can be installed from apt on Ubuntu):
* clamav
* libclamunrar9
Loading and comparing all signatures typically requires 20 seconds extra runtime.
To ensure fast execution, remove all signatures and disable signature updates.:
* Disable the freshclam service: ``service clamav-freshclam stop && systemctl disable clamav-freshclam``
* Remove all signature files from /var/lib/clamav/: ``rm -r /var/lib/clamav/*``
* Add the new file /var/lib/clamav/pass.yar with the following content:
rule pass
{
condition:
false
}
"""
unpacked = []
try:
subprocess.run([
'clamscan',
'--debug',
'--leave-temps=yes',
'--tempdir='+tmpdir,
'--no-summary',
'--bytecode=no',
'--scan-mail=no',
'--phishing-sigs=no',
'--phishing-scan-urls=no',
'--heuristic-alerts=no',
'--scan-pe=yes',
'--scan-elf=no',
'--scan-ole2=no',
'--scan-pdf=no',
'--scan-swf=no',
'--scan-html=no',
'--scan-xmldocs=no',
'--scan-hwp3=no',
filepath
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True,
timeout=5
)
except subprocess.TimeoutExpired:
return unpacked # Timeout reached, return empty list
except subprocess.CalledProcessError:
return unpacked # clamscan crashed, return empty list
except OSError as err:
# Potentially out of memory or encountered other error.
# Print error message and retry later.
print(err)
print("Sleeping 5 minutes before trying again")
time.sleep(300)
return clam_unpack(filepath, tmpdir)
else:
for root, _, files in os.walk(tmpdir, topdown=False):
for filename in files:
if STORE_UNPACKED:
# Move file to permanent storage if
# unpacked files should be stored.
oldpath, newfilename = rename_to_sha256(os.path.join(root, filename))
newpath = os.path.join(UNPACKED_DIRECTORY, newfilename)
shutil.move(oldpath, newpath)
else:
newpath = os.path.join(root, filename)
unpacked.append(newpath)
return unpacked
def rename_to_sha256(filepath):
"""
Rename a file specified by a path to the sha256sum
of the files and return the new path.
Returns the new path and the sha256sum of the file
"""
with open(filepath, 'rb') as filehandle:
rawfile = filehandle.read()
directory = os.path.dirname(filepath)
sha256sum = hashlib.sha256(rawfile).hexdigest()
newpath = os.path.join(directory, sha256sum)
if filepath != newpath: # Only rename if it is not already named as the sha256sum
shutil.move(filepath, newpath) # Rename file to the sha256sum
return newpath, sha256sum # Return the new path of the file and the sha256sum (filename)
return None, None # Return None if the file could not be opened
``` |
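A short usage sketch for the helpers above, assuming `diec` and `clamscan` are installed as the docstrings describe and that the functions are imported from this module; the sample path is hypothetical:
```python
import tempfile

sample = '/tmp/sample.exe'  # hypothetical PE file to examine

# Packer/protector detection via Detect It Easy; returns None when nothing is flagged.
obfuscation = detect_obfuscation_by_diec(sample)
print(obfuscation)  # e.g. {'type': 'packed', 'packer': 'UPX(3.96)'}

# Static unpacking via ClamAV into a throwaway directory.
with tempfile.TemporaryDirectory() as tmpdir:
    for unpacked_path in unpack_file(sample, tmpdir):
        print('unpacked:', unpacked_path)
```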
{
"source": "580/mitmproxy",
"score": 2
} |
#### File: addons/browserup/browserup_addons_manager.py
```python
import _thread
import asyncio
import json
import falcon
import os
from wsgiref.simple_server import make_server
from pathlib import Path
from apispec import APISpec
from apispec.ext.marshmallow import MarshmallowPlugin
from falcon_apispec import FalconPlugin
from mitmproxy.addons.browserup.har.har_schemas import MatchCriteriaSchema, VerifyResultSchema, ErrorSchema, CounterSchema
from mitmproxy.addons.browserup.har_capture_addon import HarCaptureAddOn
from mitmproxy import ctx
class BrowserUpAddonsManagerAddOn:
initialized = False
def load(self, l):
ctx.log.info('Loading BrowserUpAddonsManagerAddOn')
l.add_option(
"addons_management_port", int, 8088, "REST api management port.",
)
def running(self):
ctx.log.info('Scanning for custom add-ons resources...')
        # Start the REST service only once, after the custom add-ons are loaded
        if not self.initialized and self.is_script_loader_initialized():
            ctx.log.info('Starting falcon REST service...')
            _thread.start_new_thread(self.start_falcon, ())
            self.initialized = True
def is_script_loader_initialized(self):
script_loader = ctx.master.addons.get("scriptloader")
for custom_addon in script_loader.addons:
if len(custom_addon.addons) == 0:
return False
return True
def basic_spec(self, app):
return APISpec(
title='BrowserUp MitmProxy',
version='1.0.0',
servers = [{"url": "http://localhost:{port}/",
"description": "The development API server",
"variables": {"port": {"enum": ["8088"], "default": '8088'}}
}],
tags = [{"name": 'The BrowserUp MitmProxy API', "description": "BrowserUp MitmProxy REST API"}],
info= {"description":
"""___
This is the REST API for controlling the BrowserUp MitmProxy.
The BrowserUp MitmProxy is a swiss army knife for automated testing that
captures HTTP traffic in HAR files. It is also useful for Selenium/Cypress tests.
___
""", "x-logo": {"url": "logo.png"}},
openapi_version='3.0.3',
plugins=[
FalconPlugin(app),
MarshmallowPlugin(),
],
)
def write_spec(self, spec):
pretty_json = json.dumps(spec.to_dict(), indent=2)
root = Path(__file__).parent.parent.parent.parent
schema_path = os.path.join(root, 'browserup-proxy.schema.json')
f = open(schema_path, 'w')
f.write(pretty_json)
f.close()
def load_resources_from_addons(self, app, spec):
# Whenever the addons manager loads, we write out our openapi spec
# There might be a better place for this, although where isn't clear to me yet
addons = ctx.master.addons
resources = []
get_resources_fun_name = "get_resources"
for custom_addon in addons.chain:
if hasattr(custom_addon, get_resources_fun_name):
addon_resources = getattr(custom_addon, get_resources_fun_name)()
for resource in addon_resources:
route = "/" + resource.addon_path()
app.add_route(route, resource)
if 'apispec' in dir(resource):
resource.apispec(spec)
resources.append(resource)
return resources
def get_app(self):
app = falcon.API()
spec = self.basic_spec(app)
spec.components.schema('MatchCriteria', schema=MatchCriteriaSchema)
spec.components.schema('VerifyResult', schema=VerifyResultSchema)
spec.components.schema('Error', schema=ErrorSchema)
spec.components.schema('Counter', schema=CounterSchema)
self.load_resources_from_addons(app, spec)
self.write_spec(spec)
return app
def get_all_routes(self, app):
routes_list = []
def get_children(node):
if len(node.children):
for child_node in node.children:
get_children(child_node)
else:
routes_list.append((node.uri_template, node.resource))
[get_children(node) for node in app._router._roots]
return routes_list
def start_falcon(self):
app = self.get_app()
print("Routes: ")
print(self.get_all_routes(app))
with make_server('', ctx.options.addons_management_port, app) as httpd:
print('Starting REST API management on port: {}'.format(ctx.options.addons_management_port))
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
httpd.serve_forever()
# https://marshmallow.readthedocs.io/en/stable/quickstart.html
addons = [
HarCaptureAddOn(),
BrowserUpAddonsManagerAddOn()
]
```
#### File: addons/browserup/har_capture_addon.py
```python
import mitmproxy.http
from mitmproxy import ctx
from mitmproxy.addons.browserup.har.har_resources import HarResource, HarPageResource, HarCaptureTypesResource, \
PresentResource, NotPresentResource, SizeResource, \
SLAResource, ErrorResource, CounterResource, \
HealthCheckResource
from mitmproxy.addons.browserup.har.har_manager import HarManagerMixin
from mitmproxy.addons.browserup.har.flow_capture import FlowCaptureMixin
from mitmproxy.addons.browserup.har import flow_har_entry_patch
flow_har_entry_patch.patch_flow() # patch flow object with a har entry method
class HarCaptureAddOn(FlowCaptureMixin, HarManagerMixin):
def load(self, l):
ctx.log.info('Loading HarCaptureAddon')
l.add_option("harcapture", str, "", "HAR capture path.")
def get_resources(self):
return [HarResource(self),
HarPageResource(self),
HarCaptureTypesResource(self),
PresentResource(self),
NotPresentResource(self),
SizeResource(self),
SLAResource(self),
ErrorResource(self),
CounterResource(self),
HealthCheckResource()
]
def websocket_message(self, flow: mitmproxy.http.HTTPFlow):
if 'blocklisted' in flow.metadata:
return
self.capture_websocket_message(flow)
def request(self, flow: mitmproxy.http.HTTPFlow):
if 'blocklisted' in flow.metadata:
return
self.capture_request(flow)
def response(self, flow: mitmproxy.http.HTTPFlow):
if 'blocklisted' in flow.metadata:
ctx.log.debug('Blocklist filtered, return nothing.')
return
self.capture_response(flow)
addons = [
HarCaptureAddOn()
]
```
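The capture hooks above skip any flow whose metadata contains a `blocklisted` key, which is expected to be set by another addon earlier in the chain. A minimal sketch of such an addon (the blocked host is an arbitrary example, not the project's actual blocklist implementation):
```python
import mitmproxy.http


class BlocklistExampleAddOn:
    """Flags flows so the HarCaptureAddOn hooks above return early for them."""

    def request(self, flow: mitmproxy.http.HTTPFlow):
        if flow.request.pretty_host.endswith("ads.example.com"):
            flow.metadata['blocklisted'] = True


addons = [BlocklistExampleAddOn()]
```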
#### File: addons/browserup/latency_addon.py
```python
from time import sleep
class LatencyResource:
def addon_path(self):
return "latency/{latency}"
def __init__(self, latency_addon):
self.latency_addon = latency_addon
def on_put(self, req, resp, latency):
"""Puts (sets) a value for latency
---
description: Sets a value for latency in milliseconds. Default is 0.
operationId: setAddedLatencyMillis
parameters:
- in: path
name: latency
type: int
required: true
tags:
- BrowserUpProxy
responses:
204:
description: Success!
"""
self.latency_addon.latency_ms = int(latency)
class LatencyAddOn:
def __init__(self):
self.num = 0
self.latency_ms = 0
def get_resources(self):
return [LatencyResource(self)]
def response(self, flow):
if self.latency_ms != 0:
sleep(self.latency_ms / 1000)
addons = [
LatencyAddOn()
]
```
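Because the addons manager mounts every resource's `addon_path()` under its management port (8088 by default), the latency knob above can be driven over HTTP; a sketch using the `requests` library, assuming the default host and port:
```python
import requests

# Add 1500 ms of latency to every proxied response, then remove it again.
requests.put("http://localhost:8088/latency/1500")
requests.put("http://localhost:8088/latency/0")
```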
#### File: browserup/har/har_manager.py
```python
from mitmproxy import ctx
from datetime import datetime
from datetime import timezone
from mitmproxy.net.http import cookies
from mitmproxy.addons.browserup.har.har_builder import HarBuilder
from mitmproxy.addons.browserup.har.har_capture_types import HarCaptureTypes
import json
import copy
import tempfile
DEFAULT_PAGE_REF = "Default"
DEFAULT_PAGE_TITLE = "Default"
REQUEST_SUBMITTED_FLAG = "_request_submitted"
class HarManagerMixin():
# Used to manage a single active har
def __init__(self):
self.num = 0
self.har = HarBuilder.har()
self.har_page_count = 0
self.har_capture_types = [
HarCaptureTypes.REQUEST_HEADERS,
HarCaptureTypes.REQUEST_COOKIES,
HarCaptureTypes.REQUEST_CONTENT,
HarCaptureTypes.REQUEST_BINARY_CONTENT,
HarCaptureTypes.RESPONSE_HEADERS,
HarCaptureTypes.RESPONSE_COOKIES,
HarCaptureTypes.RESPONSE_CONTENT,
HarCaptureTypes.RESPONSE_BINARY_CONTENT,
HarCaptureTypes.WEBSOCKET_MESSAGES,
]
self.current_har_page = None
self.http_connect_timings = {}
def create_har_entry(self, flow):
har = self.get_or_create_har(DEFAULT_PAGE_REF, DEFAULT_PAGE_TITLE, True)
entry = HarBuilder.entry()
har['log']['entries'].append(entry)
return entry
def get_har(self, clean_har):
if clean_har:
return self.new_har(DEFAULT_PAGE_REF, DEFAULT_PAGE_TITLE)
return self.har
def get_default_har_page(self):
for hp in self.har['log']['pages']:
if hp['title'] == DEFAULT_PAGE_TITLE:
return hp
return None
def get_or_create_har(self, page_ref, page_title, create_page=False):
if self.har is None:
self.new_har(page_ref, page_title, create_page)
if create_page:
self.get_or_create_default_page()
return self.har
def new_page(self, page_ref, page_title):
ctx.log.info(
'Creating new page with initial page ref: {}, title: {}'.
format(page_ref, page_title))
har = self.get_or_create_har(page_ref, page_title, False)
end_of_page_har = None
if self.current_har_page is not None:
current_page_ref = self.current_har_page['id']
self.end_page()
end_of_page_har = self.copy_har_through_page_ref(har, current_page_ref)
if page_ref is None:
self.har_page_count += 1
page_ref = "Page " + str(self.har_page_count)
if page_title is None:
page_title = page_ref
new_page = HarBuilder.page(title=page_title, id=page_ref)
har['log']['pages'].append(new_page)
self.current_har_page = new_page
return end_of_page_har
def get_current_page_ref(self):
har_page = self.current_har_page
if har_page is None:
har_page = self.get_or_create_default_page()
return har_page['id']
def get_or_create_current_page(self):
har_page = self.current_har_page
if har_page is None:
har_page = self.get_or_create_default_page()
return har_page
def get_or_create_default_page(self):
default_page = self.get_default_page()
if default_page is None:
default_page = self.add_default_page()
return default_page
def add_default_page(self):
self.get_or_create_har(DEFAULT_PAGE_REF, DEFAULT_PAGE_TITLE, False)
new_page = HarBuilder.page(title=DEFAULT_PAGE_REF,id=DEFAULT_PAGE_REF)
self.har['log']['pages'].append(new_page)
return new_page
def get_default_page(self):
for p in self.har['log']['pages']:
if p['id'] == DEFAULT_PAGE_REF:
return p
return None
def new_har(self, initial_page_ref=DEFAULT_PAGE_REF, initial_page_title=DEFAULT_PAGE_TITLE, create_page=False):
if create_page:
ctx.log.info(
'Creating new har with initial page ref: {}, title: {}'.
format(initial_page_ref, initial_page_title))
else:
ctx.log.info('Creating new har without initial page')
old_har = self.end_har()
self.har_page_count = 0
self.har = HarBuilder.har()
if create_page:
self.new_page(initial_page_ref, initial_page_title)
self.copy_entries_without_response(old_har)
return old_har
def add_verification_to_har(self, verification_name, verification_type, result):
page = self.get_or_create_current_page()
page.setdefault('_verifications', {}).setdefault(verification_name, { "type": verification_type, "passed": result })
def end_har(self):
ctx.log.info('Ending current har...')
old_har = self.har
if old_har is None: return
self.end_page()
self.har = None
return old_har
def copy_entries_without_response(self, old_har):
if old_har is not None:
for entry in old_har['log']['entries']:
if not self.har_entry_has_response(entry):
self.har['log']['entries'].append(entry)
def add_har_page(self, pageRef, pageTitle):
ctx.log.debug('Adding har page with ref: {} and title: {}'.format(pageRef, pageTitle))
har_page = HarBuilder.page(id=pageRef, title=pageTitle)
self.har['log']['pages'].append(har_page)
return har_page
def end_page(self):
ctx.log.info('Ending current page...')
previous_har_page = self.current_har_page
self.current_har_page = None
if previous_har_page is None:
return
def is_har_entry_submitted(self, har_entry):
return REQUEST_SUBMITTED_FLAG in har_entry
def har_entry_has_response(self, har_entry):
return bool(har_entry['response'])
def har_entry_clear_request(self, har_entry):
har_entry['request'] = {}
def filter_har_for_report(self, har):
if har is None:
return har
har_copy = copy.deepcopy(har)
entries_to_report = []
for entry in har_copy['log']['entries']:
if self.is_har_entry_submitted(entry):
if self.har_entry_has_response(entry):
del entry[REQUEST_SUBMITTED_FLAG]
self.har_entry_clear_request(entry)
entries_to_report.append(entry)
else:
entries_to_report.append(entry)
har_copy['log']['entries'] = entries_to_report
return har_copy
def mark_har_entries_submitted(self, har):
if har is not None:
for entry in har['log']['entries']:
entry[REQUEST_SUBMITTED_FLAG] = True
def copy_har_through_page_ref(self, har, page_ref):
if har is None:
return None
if har['log'] is None:
return HarBuilder.har()
page_refs_to_copy = []
for page in har['log']['pages']:
page_refs_to_copy.append(page['id'])
if page_ref == page['id']:
break
log_copy = HarBuilder.log()
for entry in har['log']['entries']:
if entry['pageref'] in page_refs_to_copy:
log_copy['entries'].append(entry)
for page in har['log']['pages']:
if page['id'] in page_refs_to_copy:
log_copy['pages'].append(page)
har_copy = HarBuilder.har()
har_copy['log'] = log_copy
return har_copy
def format_cookies(self, cookie_list):
rv = []
for name, value, attrs in cookie_list:
cookie_har = {
"name": name,
"value": value,
}
# HAR only needs some attributes
for key in ["path", "domain", "comment"]:
if key in attrs:
cookie_har[key] = attrs[key]
# These keys need to be boolean!
for key in ["httpOnly", "secure"]:
cookie_har[key] = bool(key in attrs)
# Expiration time needs to be formatted
expire_ts = cookies.get_expiration_ts(attrs)
if expire_ts is not None:
cookie_har["expires"] = datetime.fromtimestamp(expire_ts, timezone.utc).isoformat()
rv.append(cookie_har)
return rv
    def save_har(self, full_path):
        json_dump: str = json.dumps(self.har, indent=2)
        with open(full_path, "wb") as file:
            # the context manager flushes and closes the file on exit
            file.write(json_dump.encode())
def format_request_cookies(self, fields):
return self.format_cookies(cookies.group_cookies(fields))
def format_response_cookies(self, fields):
return self.format_cookies((c[0], c[1][0], c[1][1]) for c in fields)
def name_value(self, obj):
"""
Convert (key, value) pairs to HAR format.
"""
return [{"name": k, "value": v} for k, v in obj.items()]
```
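`HarManagerMixin` repeatedly indexes into `har['log']['pages']` and `har['log']['entries']`, which implies `HarBuilder.har()` returns a HAR 1.2-style skeleton. A rough sketch of that assumed shape (an approximation only; `HarBuilder`'s actual output is not shown in this file):
```python
import json

# Approximate structure implied by the lookups above; all values are placeholders.
har_skeleton = {
    'log': {
        'version': '1.2',
        'creator': {'name': 'BrowserUp Proxy', 'version': '0.0'},
        'pages': [
            {'id': 'Default', 'title': 'Default', 'startedDateTime': '', 'pageTimings': {}},
        ],
        'entries': [],  # create_har_entry() appends HarBuilder.entry() dicts here
    }
}
print(json.dumps(har_skeleton, indent=2))
```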
#### File: addons/browserup/perfdata_script_addon.py
```python
from netlib.http import decoded
from bs4 import BeautifulSoup
class InjectPerformanceTimingScriptAddOn:
def __init__(self):
file_name = "../"
with open(file_name) as f:
            self.script = f.read()  # raw template; '{{URL}}' is filled in per response
def response(self, ctx, flow):
url = "http://{}:{}".format(
ctx.options.listen_host or "127.0.0.1",
ctx.options.listen_port
)
url = 'http://localhost8088:/proxy/har/custom'.format( ctx.options.listen_port,
ctx.options.listen_host)
if flow.request.host in ctx.script:
return # Make sure JS isn't injected to itself
with decoded(flow.response):
            html = BeautifulSoup(flow.response.content, "html.parser")
            if html.body and ("text/html" in flow.response.headers["content-type"]):
                script = html.new_tag("script", type="application/javascript")
                script.insert(0, script_text)
html.body.insert(1, script)
flow.response.content = str(html)
print("Injected Perf Timings Script")
```
#### File: 580/mitmproxy/test.py
```python
import subprocess
import time
import urllib
import urllib.request
def main():
proc = subprocess.Popen(['python3', '-u', '-m', 'http.server', '8070'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
try:
time.sleep(0.2)
resp = urllib.request.urlopen('http://localhost:8070')
assert b'Directory listing' in resp.read()
finally:
proc.terminate()
try:
outs, _ = proc.communicate(timeout=0.2)
print('== subprocess exited with rc =', proc.returncode)
print(outs.decode('utf-8'))
except subprocess.TimeoutExpired:
print('subprocess did not terminate in time')
if __name__ == "__main__":
main()
``` |
{
"source": "582911658/learn_python",
"score": 3
} |
#### File: learn_python/alien_invasion/alien_invasion.py
```python
import sys
import pygame
from settings import Settings
from ship import Ship
def run_game():
    # Initialize the game and create a screen object
pygame.init()
ai_settings = Settings()
screen = pygame.display.set_mode((ai_settings.screen_width, ai_settings.screen_height))
pygame.display.set_caption('第一个python游戏')
    # Create the first ship
ship = Ship(screen)
    # Start the main game loop
while True:
        # Watch for keyboard and mouse events
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
        # Redraw the screen on each pass through the loop
screen.fill(ai_settings.bg_color)
ship.blitme()
        # Make the most recently drawn screen visible
pygame.display.flip()
run_game()
``` |
{
"source": "582BandOfBrothers/Final",
"score": 3
} |
#### File: Final/titanic/Titanic.py
```python
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
import pprint
def clean_data(df, drop_passenger_id):
pp = pprint.PrettyPrinter(indent=4)
# Get the unique values of Sex
sexes = sorted(df['Sex'].unique())
    # Generate a mapping of Sex from a string to a number representation
    genders_mapping = dict(zip(sexes, range(0, len(sexes) + 1)))
    # Transform Sex from a string to a number representation
    df['Sex_Val'] = df['Sex'].map(genders_mapping).astype(int)
    # Fill in missing values of Embarked.
    # Since the vast majority of passengers embarked in 'S',
    # we assign the missing values in Embarked to 'S':
    df = df.replace({'Embarked': {np.nan: 'S'}})
# Get the unique values of Embarked
embarked_locs = sorted(df['Embarked'].unique())
# Generate a mapping of Embarked from a string to a number representation
embarked_locs_mapping = dict(zip(embarked_locs,
range(0, len(embarked_locs) + 1)))
# Transform Embarked from a string to dummy variables
df = pd.concat([df, pd.get_dummies(df['Embarked'], prefix='Embarked_Val')], axis=1)
# Fill in missing values of Fare with the average Fare
    if len(df[df['Fare'].isnull()]) > 0:
        avg_fare = df['Fare'].mean()
        df['Fare'] = df['Fare'].fillna(avg_fare)
    # To keep Age intact, make a copy of it called AgeFill
# that we will use to fill in the missing ages:
df['AgeFill'] = df['Age']
# Determine the Age typical for each passenger class by Sex_Val.
# We'll use the median instead of the mean because the Age
# histogram seems to be right skewed.
df['AgeFill'] = df['AgeFill'] \
.groupby([df['Sex_Val'], df['Pclass']]) \
.apply(lambda x: x.fillna(x.median()))
# Define a new feature FamilySize that is the sum of
# Parch (number of parents or children on board) and
# SibSp (number of siblings or spouses):
df['FamilySize'] = df['SibSp'] + df['Parch']
# Drop the columns we won't use:
df = df.drop(['Name', 'Sex', 'Ticket', 'Cabin', 'Embarked'], axis=1)
# Drop the Age column since we will be using the AgeFill column instead.
# Drop the SibSp and Parch columns since we will be using FamilySize.
# Drop the PassengerId column since it won't be used as a feature.
df = df.drop(['Age', 'SibSp', 'Parch'], axis=1)
if drop_passenger_id:
df = df.drop(['PassengerId'], axis=1)
return df
df_train = pd.read_csv('./data/train.csv')
df_train = clean_data(df_train, drop_passenger_id=True)
train_data = df_train.values
# print(df_train)
clf = RandomForestClassifier(n_estimators=100)
# Training data features, skip the first column 'Survived'
train_features = train_data[:, 1:]
# 'Survived' column values
train_target = train_data[:, 0]
# Fit the model to our training data
clf = clf.fit(train_features, train_target)
score = clf.score(train_features, train_target)
"Mean accuracy of Random Forest: {0}".format(score)
df_test = pd.read_csv('./data/test.csv')
# Data wrangle the test set and convert it to a numpy array
df_test = clean_data(df_test, drop_passenger_id=False)
test_data = df_test.values
# Get the test data features, skipping the first column 'PassengerId'
test_x = test_data[:, 1:]
# Predict the Survival values for the test data
test_y = clf.predict(test_x)
df_test['Survived'] = test_y.astype(int)
df_test[['PassengerId', 'Survived']].to_csv('./data/result.csv', index=False)
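# Scoring on the training data (above) is optimistic; a quick sketch of
# 5-fold cross-validation on the same features (fold count chosen arbitrarily).
from sklearn.model_selection import cross_val_score
cv_scores = cross_val_score(RandomForestClassifier(n_estimators=100),
                            train_features, train_target, cv=5)
print("Cross-validated accuracy: {0:.3f} (+/- {1:.3f})".format(cv_scores.mean(), cv_scores.std()))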
``` |
{
"source": "584807419/FreeProxyPool",
"score": 3
} |
#### File: crawlers/free_proxy_website/1xila.py
```python
import re
from proxypool.schemas.proxy import Proxy
from proxypool.crawlers.base import BaseCrawler
class xilaCrawler(BaseCrawler):
urls = ['http://www.xiladaili.com/http/', 'http://www.xiladaili.com/http/2/', 'http://www.xiladaili.com/http/3/',
'http://www.xiladaili.com/http/4/', 'http://www.xiladaili.com/http/5/', 'http://www.xiladaili.com/http/6/']
def parse(self, html_content):
ips = re.findall(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5}", html_content)
for i in ips:
ip_temp = i.split(':')
host = ip_temp[0]
port = ip_temp[1]
yield Proxy(host=host, port=port)
if __name__ == '__main__':
crawler = xilaCrawler()
for proxy in crawler.crawl():
print(proxy)
```
#### File: crawlers/free_proxy_website/89ip.py
```python
from lxml import html
from proxypool.schemas.proxy import Proxy
from proxypool.crawlers.base import BaseCrawler
BASE_URL = 'http://www.89ip.cn/tqdl.html?api=1&num=100&port=&address=&isp='
class Ip89Crawler(BaseCrawler):
urls = [BASE_URL]
def parse(self, html_content):
doc = html.fromstring(html_content)
node = doc.xpath('//text()')
for i in node:
ii = i.strip().replace('\n', '')
if ii and len(ii) < 23:
tr_list = ii.split(':')
host = tr_list[0]
port = tr_list[1]
yield Proxy(host=host, port=port)
if __name__ == '__main__':
crawler = Ip89Crawler()
for proxy in crawler.crawl():
print(proxy)
```
#### File: crawlers/free_proxy_website/jiangxianli.py
```python
import json
from proxypool.schemas.proxy import Proxy
from proxypool.crawlers.base import BaseCrawler
BASE_URL = 'https://ip.jiangxianli.com/api/proxy_ips'
class jiangxianliCrawler(BaseCrawler):
urls = [BASE_URL]
def parse(self, html_content):
i_list = json.loads(html_content).get('data').get('data')
for i in i_list:
            if i:  # each item is a dict from the API, not an 'ip:port' string
host = i.get('ip')
port = i.get('port')
yield Proxy(host=host, port=port)
if __name__ == '__main__':
crawler = jiangxianliCrawler()
for proxy in crawler.crawl():
print(proxy)
```
#### File: crawlers/free_proxy_website/kuaidaili.py
```python
from proxypool.crawlers.base import BaseCrawler
from proxypool.schemas.proxy import Proxy
class KuaidailiCrawler(BaseCrawler):
# urls = ['http://ent.kdlapi.com/api/getproxy/?orderid=950753676261960&num=100&protocol=2&method=2&an_an=1&an_ha=1&sp1=1&quality=2&sort=2&sep=1']
urls = ['http://ent.kdlapi.com/api/getproxy/?orderid=950753676261960&num=200&sort=2&sep=1']
def parse(self, html):
proxies_list = html.split("\r\n")
for item in proxies_list:
td_ip = item.split(':')[0]
td_port = item.split(':')[1]
yield Proxy(host=td_ip, port=td_port)
if __name__ == '__main__':
crawler = KuaidailiCrawler()
for proxy in crawler.crawl():
print(proxy)
```
#### File: proxypool/processors/getter.py
```python
from loguru import logger
from proxypool.storages.redis import RedisClient
from proxypool.setting import PROXY_NUMBER_MAX
from proxypool.crawlers import __all__ as crawlers_cls
class Getter(object):
"""
getter of proxypool
"""
def __init__(self):
"""
init db and crawlers
"""
self.redis = RedisClient()
self.crawlers_cls = crawlers_cls
self.crawlers = [crawler_cls() for crawler_cls in self.crawlers_cls]
def is_full(self):
"""
        check whether the proxy pool is full
        :return: bool
"""
return self.redis.count() >= PROXY_NUMBER_MAX
@logger.catch
def run(self):
"""
run crawlers to get proxy
:return:
"""
if self.is_full():
return
for crawler in self.crawlers:
for proxy in crawler.crawl(crawler.data, crawler.headers, crawler.cookies):
self.redis.add(proxy)
if __name__ == '__main__':
getter = Getter()
getter.run()
``` |
{
"source": "58563528/coolqqbot",
"score": 2
} |
#### File: plugins/ban/__init__.py
```python
from enum import Enum
from typing import Dict, Optional
from nonebot import on_command, on_notice
from nonebot.adapters import Bot, Event
from nonebot.adapters.cqhttp import (GroupAdminNoticeEvent, GroupMessageEvent,
MessageSegment, PrivateMessageEvent)
from nonebot.typing import T_State
from src.utils.helpers import render_expression
#region 禁言
EXPR_OK = (
'好的,满足你!',
'已禁言。',
'{duration} 分钟后见~'
) # yapf: disable
EXPR_NEED_HELP = (
'{at_user} 你是管理员,我没法禁言你,不过我可以帮你 {at_owner}~',
'有人想被禁言 {duration} 分钟,群主 {at_owner},快满足 {at_user}~',
'群主群主 {at_owner},快禁言 {at_user} {duration} 分钟'
) # yapf: disable
EXPR_OWNER = (
'你是群主,你开心就好。',
'群主别闹了!',
'没人能禁言你的!请不要再找我了!'
) # yapf: disable
class BanType(Enum):
""" 禁言的种类 """
OWNER = '禁言对象是群主'
NEED_HELP = '需要群主帮忙禁言'
OK = '可以直接禁言'
def get_ban_type(bot_role: str, sender_role: str) -> BanType:
""" 计算禁言的种类 """
if sender_role == 'owner':
return BanType.OWNER
if bot_role == 'member':
return BanType.NEED_HELP
if bot_role == 'admin' and sender_role == 'admin':
return BanType.NEED_HELP
return BanType.OK
ban_cmd = on_command('ban', aliases={'禁言'}, block=True)
ban_cmd.__doc__ = """
ban 禁言
自主禁言
禁言自己,单位为分钟
/ban 30 (禁言 30 分钟)
解除禁言
/ban 0
如果私聊,则需要再提供群号
"""
@ban_cmd.args_parser
async def ban_args_parser(bot: Bot, event: Event, state: T_State):
""" 处理参数,转换成数字 """
args = str(event.get_message()).strip()
# 检查输入参数是不是数字
if args.isdigit():
state[state['_current_key']] = int(args)
else:
await ban_cmd.reject('请只输入数字,不然我没法理解呢!')
@ban_cmd.handle()
async def ban_handle_first_receive(bot: Bot, event: Event, state: T_State):
""" 获取需要的参数 """
# 如果没有获取机器人在群中的职位,则获取
if not _bot_role:
await refresh_bot_role(bot)
args = str(event.get_message()).strip()
if not args:
return
# 检查输入参数是不是数字
if args.isdigit():
state['duration'] = int(args)
else:
await ban_cmd.finish('参数必须仅为数字')
@ban_cmd.got('duration', prompt='你想被禁言多少分钟呢?')
async def ban_handle_group_message(bot: Bot, event: GroupMessageEvent,
state: T_State):
""" 如果在群里发送,则在当前群禁言/解除 """
group_id = event.group_id
user_id = event.user_id
duration = state['duration']
duration_sec = duration * 60
bot_role = _bot_role[group_id]
sender_role = event.sender.role
if not sender_role:
return
ban_type = get_ban_type(bot_role, sender_role)
if ban_type == BanType.OWNER:
await ban_cmd.finish(render_expression(EXPR_OWNER), at_sender=True)
elif ban_type == BanType.NEED_HELP:
owner_id = await get_owner_id(group_id, bot)
if not owner_id:
raise Exception('无法获取群主QQ号')
await ban_cmd.finish(
render_expression(EXPR_NEED_HELP,
duration=duration,
at_owner=MessageSegment.at(owner_id),
at_user=MessageSegment.at(user_id)))
else:
await bot.set_group_ban(group_id=group_id,
user_id=user_id,
duration=duration_sec)
await ban_cmd.finish(render_expression(EXPR_OK, duration=duration),
at_sender=True)
@ban_cmd.got('duration', prompt='你想被禁言多少分钟呢?')
@ban_cmd.got('group_id', prompt='请问你想针对哪个群?')
async def ban_handle_private_message(bot: Bot, event: PrivateMessageEvent,
state: T_State):
""" 如果私聊的话,则向用户请求群号,并仅在支持的群禁言/解除 """
group_id = state['group_id']
user_id = event.user_id
duration = state['duration']
duration_sec = duration * 60
if group_id not in _bot_role:
await ban_cmd.finish('抱歉,我不在那个群里,帮不了你 >_<')
bot_role = _bot_role[group_id]
sender_role = await get_user_role_in_group(user_id, group_id, bot)
ban_type = get_ban_type(bot_role, sender_role)
if ban_type == BanType.OWNER:
await ban_cmd.finish(render_expression(EXPR_OWNER))
elif ban_type == BanType.NEED_HELP:
owner_id = await get_owner_id(group_id, bot)
if not owner_id:
raise Exception('无法获取群主QQ号')
await bot.send_group_msg(group_id=group_id,
message=render_expression(
EXPR_NEED_HELP,
duration=duration,
at_owner=MessageSegment.at(owner_id),
at_user=MessageSegment.at(user_id)))
await ban_cmd.finish('帮你@群主了,请耐心等待。')
else:
await bot.set_group_ban(group_id=group_id,
user_id=user_id,
duration=duration_sec)
await ban_cmd.finish(render_expression(EXPR_OK, duration=duration))
async def get_owner_id(group_id: int, bot: Bot) -> Optional[int]:
""" 获取群主 QQ 号 """
group_member_list = await bot.get_group_member_list(group_id=group_id)
for member in group_member_list:
if member['role'] == 'owner':
return member['user_id']
async def get_user_role_in_group(user_id: int, group_id: int, bot: Bot) -> str:
""" 获取用户在群内的身份 """
group_member_info = await bot.get_group_member_info(user_id=user_id,
group_id=group_id)
return group_member_info['role']
#endregion
#region 机器人是否为管理员
_bot_role: Dict[int, str] = {}
async def refresh_bot_role(bot: Bot) -> None:
""" 更新机器人在群内的身份 """
group_list = await bot.get_group_list()
for group in group_list:
member_info = await bot.get_group_member_info(
group_id=group['group_id'], user_id=bot.self_id)
_bot_role[group['group_id']] = member_info['role']
admin_notice = on_notice()
@admin_notice.handle()
async def admin_handle(bot: Bot, event: GroupAdminNoticeEvent, state: T_State):
""" 群内管理员发生变化时,更新机器人在群内的身份 """
if bot.self_id == event.self_id:
if event.sub_type == 'set':
_bot_role[event.group_id] = 'admin'
elif event.sub_type == 'unset':
_bot_role[event.group_id] = 'member'
#endregion
```
#### File: plugins/ff14/config.py
```python
from typing import List
from nonebot import get_driver
from pydantic import BaseSettings, validator
from dateutil.parser import parse
from datetime import date, datetime
from src.utils.helpers import groupidtostr, strtobool, strtogroupid
from src.utils.plugin import PluginData
DATA = PluginData('ff14')
class Config(BaseSettings):
# 新闻推送相关配置
# 自动推送新闻的间隔,单位 分钟
push_news_interval: int = int(
DATA.config.get('ff14', 'push_news_interval', '30'))
# 上次推送新闻的发布时间
push_news_last_news_date: datetime = parse(
DATA.config.get('ff14', 'push_news_last_news_date', '2000-01-01'))
# 启用新闻推送的群
push_news_group_id: List[int] = strtogroupid(
DATA.config.get('ff14', 'push_news_group_id'))
@validator('push_news_last_news_date', always=True)
def push_news_last_news_date_validator(cls, v: datetime):
""" 验证并保存配置 """
DATA.config.set('ff14', 'push_news_last_news_date', v.isoformat())
return v
@validator('push_news_group_id', always=True)
def push_news_group_id_validator(cls, v: List[int]):
""" 验证并保存配置 """
DATA.config.set('ff14', 'push_news_group_id', groupidtostr(v))
return v
# FFLogs 相关配置
fflogs_token: str = DATA.config.get('fflogs', 'token')
# 默认从两周的数据中计算排名百分比
fflogs_range: int = int(DATA.config.get('fflogs', 'range', '14'))
# 是否开启定时缓存
fflogs_cache: bool = strtobool(
DATA.config.get('fflogs', 'cache_enable', '0'))
# 缓存的时间
fflogs_cache_hour: int = int(
DATA.config.get('fflogs', 'cache_hour', fallback='4'))
fflogs_cache_minute: int = int(
DATA.config.get('fflogs', 'cache_minute', fallback='30'))
fflogs_cache_second: int = int(
DATA.config.get('fflogs', 'cache_second', fallback='0'))
# 需要缓存的副本
fflogs_cache_boss: List[str] = DATA.config.get('fflogs',
'cache_boss').split()
@validator('fflogs_token', always=True)
def fflogs_token_validator(cls, v):
""" 验证并保存配置 """
DATA.config.set('fflogs', 'token', v)
return v
@validator('fflogs_cache', always=True)
def fflogs_cache_validator(cls, v):
""" 验证并保存配置 """
if v:
DATA.config.set('fflogs', 'cache_enable', '1')
else:
DATA.config.set('fflogs', 'cache_enable', '0')
return v
class Config:
extra = 'ignore'
validate_assignment = True
global_config = get_driver().config
plugin_config = Config(**global_config.dict())
```
#### File: plugins/ff14/fflogs_api.py
```python
import asyncio
import json
import math
from datetime import datetime, timedelta
from typing import List, Literal
import httpx
from nonebot import logger, require
from .config import DATA, plugin_config
from .fflogs_data import (get_boss_info_by_nickname, get_job_info_by_nickname,
get_jobs_info)
scheduler = require("nonebot_plugin_apscheduler").scheduler
class DataException(Exception):
""" 数据异常 """
pass
class ParameterException(Exception):
""" 参数异常 """
pass
class AuthException(Exception):
""" 认证异常 """
pass
class FFLogs:
def __init__(self):
self.base_url = 'https://cn.fflogs.com/v1'
# 定时缓存任务
self._cache_job = None
# 根据配置启动
if plugin_config.fflogs_cache:
self.enable_cache()
# QQ号 与 最终幻想14 角色用户名,服务器的对应关系
if DATA.exists('characters.pkl'):
self.characters = DATA.load_pkl('characters')
else:
self.characters = {}
def enable_cache(self) -> None:
""" 开启定时缓存任务 """
self._cache_job = scheduler.add_job(
self.cache_data,
'cron',
hour=plugin_config.fflogs_cache_hour,
minute=plugin_config.fflogs_cache_minute,
second=plugin_config.fflogs_cache_second,
id='fflogs_cache')
plugin_config.fflogs_cache = True
logger.info(
f'开启定时缓存,执行时间为每天 {plugin_config.fflogs_cache_hour}:{plugin_config.fflogs_cache_minute}:{plugin_config.fflogs_cache_second}'
)
def disable_cache(self) -> None:
""" 关闭定时缓存任务 """
self._cache_job.remove()
self._cache_job = None
plugin_config.fflogs_cache = False
logger.info('定时缓存已关闭')
@property
def is_cache_enabled(self) -> bool:
""" 是否启用定时缓存 """
if self._cache_job:
return True
else:
return False
async def cache_data(self) -> None:
""" 缓存数据 """
jobs = await get_jobs_info()
for boss in plugin_config.fflogs_cache_boss:
for job in jobs:
await self.dps(boss, job.name)
logger.info(f'{boss} {job.name}的数据缓存完成。')
await asyncio.sleep(30)
@staticmethod
async def _http(url):
try:
# 使用 httpx 库发送最终的请求
async with httpx.AsyncClient() as client:
resp = await client.get(url)
if resp.status_code == 401:
raise AuthException('Token 有误,无法获取数据')
if resp.status_code == 400:
raise ParameterException('参数有误,无法获取数据')
if resp.status_code != 200:
# 如果 HTTP 响应状态码不是 200,说明调用失败
return None
return json.loads(resp.text)
except (httpx.HTTPError, json.JSONDecodeError, KeyError):
# 抛出上面任何异常,说明调用失败
return None
async def _get_one_day_ranking(self, boss: int, difficulty: int, job: int,
date: datetime) -> List:
""" 获取指定 boss,指定职业,指定一天中的排名数据
"""
# 查看是否有缓存
cache_name = f'{boss}_{difficulty}_{job}_{date.strftime("%Y%m%d")}'
if DATA.exists(f'{cache_name}.pkl'):
return DATA.load_pkl(cache_name)
page = 1
hasMorePages = True
rankings = []
end_date = date + timedelta(days=1)
# 转换成 API 支持的时间戳格式
start_timestamp = int(date.timestamp()) * 1000
end_timestamp = int(end_date.timestamp()) * 1000
# API 只支持获取 50 页以内的数据
while hasMorePages and page < 51:
rankings_url = f'{self.base_url}/rankings/encounter/{boss}?metric=rdps&difficulty={difficulty}&spec={job}&page={page}&filter=date.{start_timestamp}.{end_timestamp}&api_key={plugin_config.fflogs_token}'
res = await self._http(rankings_url)
if not res:
raise DataException('服务器没有正确返回数据')
hasMorePages = res['hasMorePages']
rankings += res['rankings']
page += 1
# 如果获取数据的日期不是当天,则缓存数据
# 因为今天的数据可能还会增加,不能先缓存
if end_date < datetime.now():
DATA.save_pkl(rankings, cache_name)
return rankings
async def _get_whole_ranking(self, boss: int, difficulty: int, job: int,
dps_type: Literal['rdps', 'adps', 'pdps'],
date: datetime) -> List:
date = datetime(year=date.year, month=date.month, day=date.day)
rankings = []
for _ in range(plugin_config.fflogs_range):
rankings += await self._get_one_day_ranking(
boss, difficulty, job, date)
date -= timedelta(days=1)
# 根据 DPS 类型进行排序,并提取数据
if dps_type == 'rdps':
rankings.sort(key=lambda x: x['total'], reverse=True)
rankings = [i['total'] for i in rankings]
if dps_type == 'adps':
rankings.sort(key=lambda x: x['otherAmount'], reverse=True)
rankings = [i['otherAmount'] for i in rankings]
if dps_type == 'pdps':
rankings.sort(key=lambda x: x['rawDPS'], reverse=True)
rankings = [i['rawDPS'] for i in rankings]
if not rankings:
raise DataException('网站里没有数据')
return rankings
async def _get_character_ranking(self, characterName: str, serverName: str,
zone: int, encounter: int,
difficulty: int,
metric: Literal['rdps', 'adps', 'pdps']):
""" 查询指定角色的 DPS
返回列表
"""
url = f'https://cn.fflogs.com/v1/rankings/character/{characterName}/{serverName}/CN?zone={zone}&encounter={encounter}&metric={metric}&api_key={plugin_config.fflogs_token}'
res = await self._http(url)
if not res and isinstance(res, list):
raise DataException('网站里没有数据')
if not res:
raise DataException('获取数据失败')
if 'hidden' in res:
raise DataException('角色数据被隐藏')
# 提取所需的数据
# 零式副本的难度是 101,普通的则是 100
# 极神也是 100
if difficulty == 0:
ranking = [i for i in res if i['difficulty'] == 101]
else:
ranking = [i for i in res if i['difficulty'] == 100]
if not ranking:
raise DataException('网站里没有数据')
return ranking
async def zones(self):
""" 副本 """
url = f'{self.base_url}/zones?api_key={plugin_config.fflogs_token}'
data = await self._http(url)
return data
async def classes(self):
""" 职业 """
url = f'{self.base_url}/classes?api_key={plugin_config.fflogs_token}'
data = await self._http(url)
return data
async def dps(self,
boss_nickname: str,
job_nickname: str,
dps_type: Literal['rdps', 'adps', 'pdps'] = 'rdps') -> str:
""" 查询 DPS 百分比排名
:param boss_nickname: BOSS 的称呼
:param job_nickname: 职业的称呼
:param dps_type: DPS 的种类,支持 rdps, adps, pdps (Default value = 'rdps')
"""
boss = await get_boss_info_by_nickname(boss_nickname)
if not boss:
return f'找不到 {boss_nickname} 的数据,请换个名字试试'
job = await get_job_info_by_nickname(job_nickname)
if not job:
return f'找不到 {job_nickname} 的数据,请换个名字试试'
if dps_type not in ['adps', 'rdps', 'pdps']:
return f'找不到类型为 {dps_type} 的数据,只支持 adps rdps pdps'
# 排名从前一天开始排,因为今天的数据并不全
date = datetime.now() - timedelta(days=1)
try:
rankings = await self._get_whole_ranking(boss.encounter,
boss.difficulty, job.spec,
dps_type, date)
except DataException as e:
return f'{e},请稍后再试'
reply = f'{boss.name} {job.name} 的数据({dps_type})'
total = len(rankings)
reply += f'\n数据总数:{total} 条'
# 计算百分比的 DPS
percentage_list = [100, 99, 95, 75, 50, 25, 10]
for perc in percentage_list:
number = math.floor(total * 0.01 * (100 - perc))
dps_value = float(rankings[number])
reply += f'\n{perc}% : {dps_value:.2f}'
return reply
def set_character(self, user_id: int, character_name: str,
server_name: str) -> None:
""" 设置 QQ号 与 最终幻想14 用户名和服务器名 """
self.characters[user_id] = [character_name, server_name]
DATA.save_pkl(self.characters, 'characters')
async def character_dps(
self,
boss_nickname: str,
character_name: str,
server_name: str,
dps_type: Literal['rdps', 'adps', 'pdps'] = 'rdps') -> str:
""" 查询指定角色在某个副本的 DPS
:param boss_nickname: BOSS 的称呼
:param character_name: 角色名
:param server_name: 服务器名
:param dps_type: DPS 的种类,支持 rdps, adps (Default value = 'rdps')
"""
boss = await get_boss_info_by_nickname(boss_nickname)
if not boss:
return f'找不到 {boss_nickname} 的数据,请换个名字试试'
reply = f'{boss.name} {character_name}-{server_name} 的排名({dps_type})'
try:
ranking = await self._get_character_ranking(
character_name, server_name, boss.zone, boss.encounter,
boss.difficulty, dps_type)
except DataException as e:
return f'{e},请稍后再试'
except ParameterException:
return '角色名或者服务器名有误,无法获取数据。'
for i in ranking:
reply += f'\n{i["spec"]} {i["percentile"]:.2f}% {i["total"]:.2f}'
return reply
fflogs = FFLogs()
```
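The percentile readout in `dps()` relies on `rankings` being sorted in descending order and on the index formula `floor(total * 0.01 * (100 - perc))`. A standalone toy run of that lookup (the numbers are made up):
```python
import math

rankings = sorted([1200, 950, 870, 860, 790, 600, 580, 420, 300, 150], reverse=True)
total = len(rankings)
for perc in (100, 99, 95, 75, 50, 25, 10):
    idx = math.floor(total * 0.01 * (100 - perc))  # offset from the top of the sorted list
    print(f'{perc}% : {rankings[idx]:.2f}')
```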
#### File: plugins/music/__init__.py
```python
from nonebot import on_command
from nonebot.adapters import Bot, Event
from nonebot.adapters.cqhttp.event import MessageEvent
from nonebot.typing import T_State
from src.utils.helpers import render_expression
from .netease import call_netease_api
# 无法获取歌曲时的回答
EXPR_NOT_FOUND = (
'为什么找不到匹配的歌呢!',
'似乎哪里出错了,找不到你想点的歌 ~><~',
'没有找到,要不要换个关键字试试?'
) # yapf: disable
music_cmd = on_command('music', aliases={'点歌'}, block=True)
music_cmd.__doc__ = """
music 点歌
点歌
参数为歌曲相关信息
/music Sagitta luminis
如果仅凭歌曲名称无法获得正确歌曲时
可以尝试在后面加上歌手名称或其他信息
/music Sagitta luminis 梶浦由記
"""
@music_cmd.handle()
async def music_handle_first_receive(bot: Bot, event: MessageEvent,
state: T_State):
args = str(event.message).strip()
if args:
state['name'] = args
@music_cmd.got('name', prompt='你想听哪首歌呢?')
async def music_handle(bot: Bot, event: MessageEvent, state: T_State):
music_message = await call_netease_api(state['name'])
if music_message:
await music_cmd.finish(music_message)
else:
await music_cmd.finish(render_expression(EXPR_NOT_FOUND),
at_sender=True)
@music_cmd.args_parser
async def music_args_parser(bot: Bot, event: Event, state: T_State):
args = str(event.get_message()).strip()
if not args:
await music_cmd.reject('歌曲名不能为空呢,请重新输入!')
state[state['_current_key']] = args
```
#### File: plugins/robot/tencent.py
```python
import hashlib
import json
import random
import string
import time
from typing import Optional
from urllib import parse
import httpx
from nonebot.adapters.cqhttp import MessageEvent
from .config import plugin_config
async def call_tencent_api(event: MessageEvent, text: str) -> Optional[str]:
""" 调用腾讯机器人的 API 获取回复 """
if not plugin_config.tencent_ai_app_key:
return None
if not text:
return None
url = 'https://api.ai.qq.com/fcgi-bin/nlp/nlp_textchat'
# 构造请求数据
payload = {
'app_id':
int(plugin_config.tencent_ai_app_id),
'time_stamp':
int(time.time()),
'nonce_str':
''.join(random.sample(string.ascii_letters + string.digits, 32)),
'session':
event.get_session_id(),
'question':
text
}
# 接口鉴权 签名
payload['sign'] = gen_sign_string(payload,
plugin_config.tencent_ai_app_key)
try:
# 使用 httpx 库发送最终的请求
async with httpx.AsyncClient() as client:
resp = await client.get(url, params=payload)
if resp.status_code != 200:
# 如果 HTTP 响应状态码不是 200,说明调用失败
return None
resp_payload = json.loads(resp.text)
if resp_payload['ret'] != 0:
# 返回非 0 表示出错
return None
return resp_payload['data']['answer']
except (httpx.HTTPError, json.JSONDecodeError, KeyError):
# 抛出上面任何异常,说明调用失败
return None
def gen_sign_string(parser, app_key: str) -> str:
""" 获取请求签名,接口鉴权 https://ai.qq.com/doc/auth.shtml
1.将 <key, value> 请求参数对按 key 进行字典升序排序,得到有序的参数对列表 N
2.将列表 N 中的参数对按 URL 键值对的格式拼接成字符串,得到字符串 T(如:key1=value1&key2=value2),
URL 键值拼接过程 value 部分需要 URL 编码,URL 编码算法用大写字母,例如 %E8,而不是小写 %e8
3.将应用密钥以 app_key 为键名,组成 URL 键值拼接到字符串 T 末尾,得到字符串 S(如:key1=value1&key2=value2&app_key = 密钥)
4.对字符串 S 进行 MD5 运算,将得到的 MD5 值所有字符转换成大写,得到接口请求签名
:param parser: dect
:param app_key: str
:return: str,签名
"""
params = sorted(parser.items())
uri_str = parse.urlencode(params, encoding='UTF-8')
sign_str = '{}&app_key={}'.format(uri_str, app_key)
# print('sign =', sign_str.strip())
hash_md5 = hashlib.md5(sign_str.encode('UTF-8'))
return hash_md5.hexdigest().upper()
```
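A worked run of the four signing steps described in the `gen_sign_string` docstring, on a made-up payload and placeholder `app_key` (neither is a real credential):
```python
import hashlib
from urllib import parse

payload = {'app_id': 10000, 'time_stamp': 1600000000,
           'nonce_str': 'abc123', 'session': '42', 'question': 'hello'}
app_key = 'DUMMY_APP_KEY'  # placeholder only

params = sorted(payload.items())                      # 1. sort params by key
uri_str = parse.urlencode(params, encoding='UTF-8')   # 2. join as URL key=value pairs
sign_str = '{}&app_key={}'.format(uri_str, app_key)   # 3. append the app_key
sign = hashlib.md5(sign_str.encode('UTF-8')).hexdigest().upper()  # 4. uppercase MD5
print(sign)
```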
#### File: plugins/roll/__init__.py
```python
import re
from nonebot import on_command
from nonebot.adapters import Bot, Event
from nonebot.typing import T_State
from nonebot.adapters.cqhttp import MessageEvent
from .rand import get_rand
from .data import roll_dices
#region roll
roll_cmd = on_command('roll', block=True)
roll_cmd.__doc__ = """
roll
NGA 风格 ROLL 点
roll 一次点数100
/roll d100
roll 两次点数100和两次点数50
/roll 2d100+2d50
"""
@roll_cmd.handle()
async def roll_handle_first_receive(bot: Bot, event: MessageEvent,
state: T_State):
args = str(event.message).strip()
# 检查是否符合规则
match = re.match(r'^([\dd+\s]+?)$', args)
if args and match:
state['input'] = args
@roll_cmd.args_parser
async def roll_args_parser(bot: Bot, event: Event, state: T_State):
args = str(event.get_message()).strip()
# 检查是否符合规则
match = re.match(r'^([\dd+\s]+?)$', args)
if not args:
await roll_cmd.reject('ROLL 点方式不能为空呢,请重新输入')
if not match:
await roll_cmd.reject('请输入正确的参数 ~>_<~')
state[state['_current_key']] = args
@roll_cmd.got(
'input',
prompt='欢迎使用 NGA 风格 ROLL 点插件\n请问你想怎么 ROLL 点\n你可以输入 d100\n也可以输入 2d100+2d50')
async def roll_handle(bot: Bot, event: MessageEvent, state: T_State):
input_str = state['input']
str_data = roll_dices(input_str)
await roll_cmd.finish(str_data, at_sender=True)
#endregion
#region rand
rand_cmd = on_command('rand', block=True)
rand_cmd.__doc__ = """
rand
获得 0-100 的点数
/rand
获得一件事情的概率
/rand 今天捐钱的概率
"""
@rand_cmd.handle()
async def rand_handle(bot: Bot, event: MessageEvent, state: T_State):
args = str(event.message).strip()
str_data = get_rand(args)
await rand_cmd.finish(str_data, at_sender=True)
#endregion
``` |
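`roll_dices` itself lives in `.data` and is not shown above; as an illustration only (not the plugin's actual implementation), an NGA-style expression such as `2d100+2d50` could be evaluated like this:
```python
import random
import re

def roll_expression(expr: str) -> str:
    """Evaluate terms like '2d100+2d50'; a bare 'd100' rolls a single die."""
    results = []
    for term in expr.replace(' ', '').split('+'):
        m = re.fullmatch(r'(\d*)d(\d+)', term)
        if not m:
            return f'invalid term: {term}'
        count = int(m.group(1) or 1)
        sides = int(m.group(2))
        rolls = [random.randint(1, sides) for _ in range(count)]
        results.append(f'{term}: {rolls} -> {sum(rolls)}')
    return '\n'.join(results)

print(roll_expression('2d100+2d50'))
```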
{
"source": "58563528/nonebot-hk-reporter",
"score": 2
} |
#### File: plugins/nonebot_bison/config_manager.py
```python
from typing import Type
from nonebot import logger, on_command
from nonebot.adapters._bot import Bot as AbstractBot
from nonebot.adapters._event import Event as AbstractEvent
from nonebot.adapters.cqhttp import Bot, Event, GroupMessageEvent
from nonebot.adapters.cqhttp.message import Message
from nonebot.adapters.cqhttp.permission import GROUP_ADMIN, GROUP_MEMBER, GROUP_OWNER
from nonebot.matcher import Matcher
from nonebot.permission import Permission, SUPERUSER
from nonebot.rule import to_me
from nonebot.typing import T_State
from .config import Config, NoSuchSubscribeException
from .platform import check_sub_target, platform_manager
from .types import Target
from .utils import parse_text
def _gen_prompt_template(prompt: str):
if hasattr(Message, 'template'):
return Message.template(prompt)
return prompt
common_platform = [p.platform_name for p in \
filter(lambda platform: platform.enabled and platform.is_common,
platform_manager.values())
]
help_match = on_command('help', rule=to_me(), priority=5)
@help_match.handle()
async def send_help(bot: Bot, event: Event, state: T_State):
message = '使用方法:\n@bot 添加订阅(仅管理员)\n@bot 查询订阅\n@bot 删除订阅(仅管理员)'
await help_match.finish(Message(await parse_text(message)))
def do_add_sub(add_sub: Type[Matcher]):
@add_sub.handle()
async def init_promote(bot: Bot, event: Event, state: T_State):
state['_prompt'] = '请输入想要订阅的平台,目前支持,请输入冒号左边的名称:\n' + \
''.join(['{}:{}\n'.format(platform_name, platform_manager[platform_name].name) \
for platform_name in common_platform]) + \
'要查看全部平台请输入:“全部”'
async def parse_platform(bot: AbstractBot, event: AbstractEvent, state: T_State) -> None:
platform = str(event.get_message()).strip()
if platform == '全部':
message = '全部平台\n' + \
'\n'.join(['{}:{}'.format(platform_name, platform.name) \
for platform_name, platform in platform_manager.items()])
await add_sub.reject(message)
elif platform in platform_manager:
state['platform'] = platform
else:
await add_sub.reject('平台输入错误')
@add_sub.got('platform', _gen_prompt_template('{_prompt}'), parse_platform)
@add_sub.handle()
async def init_id(bot: Bot, event: Event, state: T_State):
if platform_manager[state['platform']].has_target:
state['_prompt'] = '请输入订阅用户的id,详情查阅https://nonebot-bison.vercel.app/usage/#%E6%89%80%E6%94%AF%E6%8C%81%E5%B9%B3%E5%8F%B0%E7%9A%84uid'
else:
state['id'] = 'default'
state['name'] = await platform_manager[state['platform']].get_target_name(Target(''))
async def parse_id(bot: AbstractBot, event: AbstractEvent, state: T_State):
target = str(event.get_message()).strip()
try:
name = await check_sub_target(state['platform'], target)
if not name:
await add_sub.reject('id输入错误')
state['id'] = target
state['name'] = name
except:
await add_sub.reject('id输入错误')
@add_sub.got('id', _gen_prompt_template('{_prompt}'), parse_id)
@add_sub.handle()
async def init_cat(bot: Bot, event: Event, state: T_State):
if not platform_manager[state['platform']].categories:
state['cats'] = []
return
state['_prompt'] = '请输入要订阅的类别,以空格分隔,支持的类别有:{}'.format(
' '.join(list(platform_manager[state['platform']].categories.values())))
async def parser_cats(bot: AbstractBot, event: AbstractEvent, state: T_State):
res = []
for cat in str(event.get_message()).strip().split():
if cat not in platform_manager[state['platform']].reverse_category:
await add_sub.reject('不支持 {}'.format(cat))
res.append(platform_manager[state['platform']].reverse_category[cat])
state['cats'] = res
@add_sub.got('cats', _gen_prompt_template('{_prompt}'), parser_cats)
@add_sub.handle()
async def init_tag(bot: Bot, event: Event, state: T_State):
if not platform_manager[state['platform']].enable_tag:
state['tags'] = []
return
state['_prompt'] = '请输入要订阅的tag,订阅所有tag输入"全部标签"'
async def parser_tags(bot: AbstractBot, event: AbstractEvent, state: T_State):
if str(event.get_message()).strip() == '全部标签':
state['tags'] = []
else:
state['tags'] = str(event.get_message()).strip().split()
@add_sub.got('tags', _gen_prompt_template('{_prompt}'), parser_tags)
@add_sub.handle()
async def add_sub_process(bot: Bot, event: Event, state: T_State):
config = Config()
config.add_subscribe(state.get('_user_id') or event.group_id, user_type='group',
target=state['id'],
target_name=state['name'], target_type=state['platform'],
cats=state.get('cats', []), tags=state.get('tags', []))
await add_sub.finish('添加 {} 成功'.format(state['name']))
def do_query_sub(query_sub: Type[Matcher]):
@query_sub.handle()
async def _(bot: Bot, event: Event, state: T_State):
config: Config = Config()
sub_list = config.list_subscribe(state.get('_user_id') or event.group_id, "group")
res = '订阅的帐号为:\n'
for sub in sub_list:
res += '{} {} {}'.format(sub['target_type'], sub['target_name'], sub['target'])
platform = platform_manager[sub['target_type']]
if platform.categories:
res += ' [{}]'.format(', '.join(map(lambda x: platform.categories[x], sub['cats'])))
if platform.enable_tag:
res += ' {}'.format(', '.join(sub['tags']))
res += '\n'
await query_sub.finish(Message(await parse_text(res)))
def do_del_sub(del_sub: Type[Matcher]):
@del_sub.handle()
async def send_list(bot: Bot, event: Event, state: T_State):
config: Config = Config()
sub_list = config.list_subscribe(state.get('_user_id') or event.group_id, "group")
res = '订阅的帐号为:\n'
state['sub_table'] = {}
for index, sub in enumerate(sub_list, 1):
state['sub_table'][index] = {'target_type': sub['target_type'], 'target': sub['target']}
res += '{} {} {} {}\n'.format(index, sub['target_type'], sub['target_name'], sub['target'])
platform = platform_manager[sub['target_type']]
if platform.categories:
res += ' [{}]'.format(', '.join(map(lambda x: platform.categories[x], sub['cats'])))
if platform.enable_tag:
res += ' {}'.format(', '.join(sub['tags']))
res += '\n'
res += '请输入要删除的订阅的序号'
await bot.send(event=event, message=Message(await parse_text(res)))
@del_sub.receive()
async def do_del(bot, event: Event, state: T_State):
try:
index = int(str(event.get_message()).strip())
config = Config()
config.del_subscribe(state.get('_user_id') or event.group_id, 'group', **state['sub_table'][index])
except Exception as e:
await del_sub.reject('删除错误')
logger.warning(e)
else:
await del_sub.finish('删除成功')
async def parse_group_number(bot: AbstractBot, event: AbstractEvent, state: T_State):
state[state["_current_key"]] = int(str(event.get_message()))
add_sub_matcher = on_command("添加订阅", rule=to_me(), permission=GROUP_ADMIN | GROUP_OWNER | SUPERUSER, priority=5)
do_add_sub(add_sub_matcher)
manage_add_sub_mather = on_command('管理-添加订阅', permission=SUPERUSER, priority=5)
@manage_add_sub_mather.got('_user_id', "群号", parse_group_number)
async def handle(bot: Bot, event: Event, state: T_State):
pass
do_add_sub(manage_add_sub_mather)
query_sub_macher = on_command("查询订阅", rule=to_me(), priority=5)
do_query_sub(query_sub_macher)
manage_query_sub_mather = on_command('管理-查询订阅', permission=SUPERUSER, priority=5)
@manage_query_sub_mather.got('_user_id', "群号", parse_group_number)
async def handle(bot: Bot, event: Event, state: T_State):
pass
do_query_sub(manage_query_sub_mather)
del_sub_macher = on_command("删除订阅", rule=to_me(), permission=GROUP_ADMIN | GROUP_OWNER | SUPERUSER, priority=5)
do_del_sub(del_sub_macher)
manage_del_sub_mather = on_command('管理-删除订阅', permission=SUPERUSER, priority=5)
@manage_del_sub_mather.got('_user_id', "群号", parse_group_number)
async def handle(bot: Bot, event: Event, state: T_State):
pass
do_del_sub(manage_del_sub_mather)
```
#### File: plugins/nonebot_bison/post.py
```python
import base64
from dataclasses import dataclass, field
from functools import reduce
from io import BytesIO
from typing import Optional, Union
from PIL import Image
import httpx
from nonebot import logger
from nonebot.adapters.cqhttp.message import Message, MessageSegment
from .plugin_config import plugin_config
from .utils import parse_text
@dataclass
class Post:
target_type: str
text: str
url: Optional[str] = None
target_name: Optional[str] = None
compress: bool = False
override_use_pic: Optional[bool] = None
pics: Union[list[Union[str,bytes]], list[str], list[bytes]] = field(default_factory=list)
extra_msg: list[Message] = field(default_factory=list)
_message: Optional[list] = None
def _use_pic(self):
if not self.override_use_pic is None:
return self.override_use_pic
return plugin_config.bison_use_pic
async def _pic_url_to_image(self, data: Union[str, bytes]) -> Image.Image:
pic_buffer = BytesIO()
if isinstance(data, str):
async with httpx.AsyncClient() as client:
res = await client.get(data)
pic_buffer.write(res.content)
else:
pic_buffer.write(data)
return Image.open(pic_buffer)
def _check_image_square(self, size: tuple[int, int]) -> bool:
return abs(size[0] - size[1]) / size[0] < 0.05
async def _pic_merge(self) -> None:
if len(self.pics) < 3:
return
first_image = await self._pic_url_to_image(self.pics[0])
if not self._check_image_square(first_image.size):
return
images: list[Image.Image] = [first_image]
# first row
for i in range(1, 3):
cur_img = await self._pic_url_to_image(self.pics[i])
if not self._check_image_square(cur_img.size):
return
if cur_img.size[1] != images[0].size[1]: # height not equal
return
images.append(cur_img)
_tmp = 0
x_coord = [0]
for i in range(3):
_tmp += images[i].size[0]
x_coord.append(_tmp)
y_coord = [0, first_image.size[1]]
async def process_row(row: int) -> bool:
if len(self.pics) < (row + 1) * 3:
return False
row_first_img = await self._pic_url_to_image(self.pics[row * 3])
if not self._check_image_square(row_first_img.size):
return False
if row_first_img.size[0] != images[0].size[0]:
return False
image_row: list[Image.Image] = [row_first_img]
for i in range(row * 3 + 1, row * 3 + 3):
cur_img = await self._pic_url_to_image(self.pics[i])
if not self._check_image_square(cur_img.size):
return False
if cur_img.size[1] != row_first_img.size[1]:
return False
if cur_img.size[0] != images[i % 3].size[0]:
return False
image_row.append(cur_img)
images.extend(image_row)
y_coord.append(y_coord[-1] + row_first_img.size[1])
return True
if await process_row(1):
matrix = (3,2)
else:
matrix = (3,1)
if await process_row(2):
matrix = (3,3)
logger.info('trigger merge image')
target = Image.new('RGB', (x_coord[-1], y_coord[-1]))
for y in range(matrix[1]):
for x in range(matrix[0]):
target.paste(images[y * matrix[0] + x], (
x_coord[x], y_coord[y], x_coord[x+1], y_coord[y+1]
))
target_io = BytesIO()
target.save(target_io, 'JPEG')
self.pics = self.pics[matrix[0] * matrix[1]: ]
self.pics.insert(0, target_io.getvalue())
async def generate_messages(self):
if self._message is None:
await self._pic_merge()
msgs = []
text = ''
if self.text:
if self._use_pic():
text += '{}'.format(self.text)
else:
text += '{}'.format(self.text if len(self.text) < 500 else self.text[:500] + '...')
text += '\n来源: {}'.format(self.target_type)
if self.target_name:
text += ' {}'.format(self.target_name)
if self._use_pic():
msgs.append(await parse_text(text))
if not self.target_type == 'rss' and self.url:
msgs.append(MessageSegment.text(self.url))
else:
if self.url:
text += ' \n详情: {}'.format(self.url)
msgs.append(MessageSegment.text(text))
for pic in self.pics:
# if isinstance(pic, bytes):
# pic = 'base64://' + base64.b64encode(pic).decode()
# msgs.append(Message("[CQ:image,file={url}]".format(url=pic)))
msgs.append(MessageSegment.image(pic))
if self.compress:
msgs = [reduce(lambda x, y: x.append(y), msgs, Message())]
msgs.extend(self.extra_msg)
self._message = msgs
return self._message
def __str__(self):
return 'type: {}\nfrom: {}\ntext: {}\nurl: {}\npic: {}'.format(
self.target_type,
self.target_name,
self.text if len(self.text) < 500 else self.text[:500] + '...',
self.url,
', '.join(map(lambda x: 'b64img' if isinstance(x, bytes) or x.startswith('base64') else x, self.pics))
)
```
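`_pic_merge` builds the merged grid from cumulative pixel offsets (`x_coord` / `y_coord`) and pastes each tile at its offset. A standalone toy of the same bookkeeping with three solid-colour squares (assumes Pillow is installed):
```python
from PIL import Image

tiles = [Image.new('RGB', (100, 100), c) for c in ('red', 'green', 'blue')]
x_coord = [0]
for t in tiles:                                  # cumulative x offsets, as in _pic_merge
    x_coord.append(x_coord[-1] + t.size[0])
canvas = Image.new('RGB', (x_coord[-1], tiles[0].size[1]))
for i, t in enumerate(tiles):
    canvas.paste(t, (x_coord[i], 0))             # paste each tile at its offset
canvas.save('merged_row.jpg', 'JPEG')
```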
#### File: plugins/nonebot_bison/send.py
```python
import time
from nonebot import logger
from nonebot.adapters.cqhttp.bot import Bot
from .plugin_config import plugin_config
QUEUE = []
LAST_SEND_TIME = time.time()
async def _do_send(bot: 'Bot', user: str, user_type: str, msg):
if user_type == 'group':
await bot.call_api('send_group_msg', group_id=user, message=msg)
elif user_type == 'private':
await bot.call_api('send_private_msg', user_id=user, message=msg)
async def do_send_msgs():
global LAST_SEND_TIME
if time.time() - LAST_SEND_TIME < 1.5:
return
if QUEUE:
bot, user, user_type, msg, retry_time = QUEUE.pop(0)
try:
await _do_send(bot, user, user_type, msg)
except Exception as e:
if retry_time > 0:
QUEUE.insert(0, (bot, user, user_type, msg, retry_time - 1))
else:
msg_str = str(msg)
if len(msg_str) > 50:
msg_str = msg_str[:50] + '...'
logger.warning(f'send msg err {e} {msg_str}')
LAST_SEND_TIME = time.time()
async def send_msgs(bot, user, user_type, msgs):
if plugin_config.bison_use_queue:
for msg in msgs:
QUEUE.append((bot, user, user_type, msg, 2))
else:
for msg in msgs:
await _do_send(bot, user, user_type, msg)
```
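`do_send_msgs` drains at most one queued message per call, keeps a 1.5-second gap via `LAST_SEND_TIME`, and re-queues a failed message while `retry_time` remains. How the function is scheduled is not shown above; the following is a standalone toy of the same throttle-and-retry pattern, not the plugin's actual wiring:
```python
import asyncio
import time

QUEUE: list[tuple[str, int]] = []  # (message, retries_left)
LAST_SEND = 0.0

async def fake_send(msg: str) -> None:
    print('sent:', msg)

async def drain_once() -> None:
    global LAST_SEND
    if time.time() - LAST_SEND < 1.5 or not QUEUE:  # respect the minimum spacing
        return
    msg, retries = QUEUE.pop(0)
    try:
        await fake_send(msg)
    except Exception:
        if retries > 0:
            QUEUE.insert(0, (msg, retries - 1))     # put it back for another attempt
    LAST_SEND = time.time()

async def main() -> None:
    QUEUE.extend((f'msg {i}', 2) for i in range(3))
    while QUEUE:
        await drain_once()
        await asyncio.sleep(0.2)

asyncio.run(main())
```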
#### File: nonebot-hk-reporter/tests/conftest.py
```python
import pytest
import nonebot
import typing
if typing.TYPE_CHECKING:
import sys
sys.path.append('./src/plugins')
import nonebot_bison
@pytest.fixture#(scope="module")
def plugin_module(tmpdir):
nonebot.init(bison_config_path=str(tmpdir))
nonebot.load_plugins('src/plugins')
plugins = nonebot.get_loaded_plugins()
plugin = list(filter(lambda x: x.name == 'nonebot_bison', plugins))[0]
return plugin.module
@pytest.fixture
def dummy_user_subinfo(plugin_module: 'nonebot_bison'):
user = plugin_module.types.User('123', 'group')
return plugin_module.types.UserSubInfo(
user=user,
category_getter=lambda _: [],
tag_getter=lambda _: []
)
```
#### File: tests/platforms/test_platform.py
```python
import sys
import typing
from typing import Any, Optional
import pytest
if typing.TYPE_CHECKING:
import sys
sys.path.append('./src/plugins')
import nonebot_bison
from nonebot_bison.types import *
from nonebot_bison.post import Post
from time import time
now = time()
passed = now - 3 * 60 * 60
raw_post_list_1 = [
{'id': 1, 'text': 'p1', 'date': now, 'tags': ['tag1'], 'category': 1}
]
raw_post_list_2 = raw_post_list_1 + [
{'id': 2, 'text': 'p2', 'date': now, 'tags': ['tag1'], 'category': 1},
{'id': 3, 'text': 'p3', 'date': now, 'tags': ['tag2'], 'category': 2},
{'id': 4, 'text': 'p4', 'date': now, 'tags': ['tag2'], 'category': 3}
]
@pytest.fixture
def dummy_user(plugin_module: 'nonebot_bison'):
user = plugin_module.types.User('123', 'group')
return user
@pytest.fixture
def user_info_factory(plugin_module: 'nonebot_bison', dummy_user):
def _user_info(category_getter, tag_getter):
return plugin_module.types.UserSubInfo(dummy_user, category_getter, tag_getter)
return _user_info
@pytest.fixture
def mock_platform_without_cats_tags(plugin_module: 'nonebot_bison'):
class MockPlatform(plugin_module.platform.platform.NewMessage,
plugin_module.platform.platform.TargetMixin):
platform_name = 'mock_platform'
name = 'Mock Platform'
enabled = True
is_common = True
schedule_interval = 10
enable_tag = False
categories = {}
def __init__(self):
self.sub_index = 0
super().__init__()
@staticmethod
async def get_target_name(_: 'Target'):
return 'MockPlatform'
def get_id(self, post: 'RawPost') -> Any:
return post['id']
def get_date(self, raw_post: 'RawPost') -> float:
return raw_post['date']
async def parse(self, raw_post: 'RawPost') -> 'Post':
return plugin_module.post.Post('mock_platform', raw_post['text'], 'http://t.tt/' + str(self.get_id(raw_post)), target_name='Mock')
async def get_sub_list(self, _: 'Target'):
if self.sub_index == 0:
self.sub_index += 1
return raw_post_list_1
else:
return raw_post_list_2
return MockPlatform()
@pytest.fixture
def mock_platform(plugin_module: 'nonebot_bison'):
class MockPlatform(plugin_module.platform.platform.NewMessage,
plugin_module.platform.platform.TargetMixin):
platform_name = 'mock_platform'
name = 'Mock Platform'
enabled = True
is_common = True
schedule_interval = 10
enable_tag = True
categories = {
1: '转发',
2: '视频',
}
def __init__(self):
self.sub_index = 0
super().__init__()
@staticmethod
async def get_target_name(_: 'Target'):
return 'MockPlatform'
def get_id(self, post: 'RawPost') -> Any:
return post['id']
def get_date(self, raw_post: 'RawPost') -> float:
return raw_post['date']
def get_tags(self, raw_post: 'RawPost') -> list['Tag']:
return raw_post['tags']
def get_category(self, raw_post: 'RawPost') -> 'Category':
return raw_post['category']
async def parse(self, raw_post: 'RawPost') -> 'Post':
return plugin_module.post.Post('mock_platform', raw_post['text'], 'http://t.tt/' + str(self.get_id(raw_post)), target_name='Mock')
async def get_sub_list(self, _: 'Target'):
if self.sub_index == 0:
self.sub_index += 1
return raw_post_list_1
else:
return raw_post_list_2
return MockPlatform()
@pytest.fixture
def mock_platform_no_target(plugin_module: 'nonebot_bison'):
class MockPlatform(plugin_module.platform.platform.NewMessage,
plugin_module.platform.platform.NoTargetMixin):
platform_name = 'mock_platform'
name = 'Mock Platform'
enabled = True
is_common = True
schedule_type = 'interval'
schedule_kw = {'seconds': 30}
enable_tag = True
categories = {
1: '转发',
2: '视频',
3: '不支持'
}
def __init__(self):
self.sub_index = 0
super().__init__()
@staticmethod
async def get_target_name(_: 'Target'):
return 'MockPlatform'
def get_id(self, post: 'RawPost') -> Any:
return post['id']
def get_date(self, raw_post: 'RawPost') -> float:
return raw_post['date']
def get_tags(self, raw_post: 'RawPost') -> list['Tag']:
return raw_post['tags']
def get_category(self, raw_post: 'RawPost') -> 'Category':
if raw_post['category'] == 3:
raise plugin_module.platform.platform.CategoryNotSupport()
return raw_post['category']
async def parse(self, raw_post: 'RawPost') -> 'Post':
return plugin_module.post.Post('mock_platform', raw_post['text'], 'http://t.tt/' + str(self.get_id(raw_post)), target_name='Mock')
async def get_sub_list(self, _: 'Target'):
if self.sub_index == 0:
self.sub_index += 1
return raw_post_list_1
else:
return raw_post_list_2
return MockPlatform()
@pytest.fixture
def mock_platform_no_target_2(plugin_module: 'nonebot_bison'):
class MockPlatform(plugin_module.platform.platform.NewMessage,
plugin_module.platform.platform.NoTargetMixin):
platform_name = 'mock_platform'
name = 'Mock Platform'
enabled = True
schedule_type = 'interval'
schedule_kw = {'seconds': 30}
is_common = True
enable_tag = True
categories = {
4: 'leixing4',
5: 'leixing5',
}
def __init__(self):
self.sub_index = 0
super().__init__()
@staticmethod
async def get_target_name(_: 'Target'):
return 'MockPlatform'
def get_id(self, post: 'RawPost') -> Any:
return post['id']
def get_date(self, raw_post: 'RawPost') -> float:
return raw_post['date']
def get_tags(self, raw_post: 'RawPost') -> list['Tag']:
return raw_post['tags']
def get_category(self, raw_post: 'RawPost') -> 'Category':
return raw_post['category']
async def parse(self, raw_post: 'RawPost') -> 'Post':
return plugin_module.post.Post('mock_platform_2', raw_post['text'], 'http://t.tt/' + str(self.get_id(raw_post)), target_name='Mock')
async def get_sub_list(self, _: 'Target'):
list_1 = [
{'id': 5, 'text': 'p5', 'date': now, 'tags': ['tag1'], 'category': 4}
]
list_2 = list_1 + [
{'id': 6, 'text': 'p6', 'date': now, 'tags': ['tag1'], 'category': 4},
{'id': 7, 'text': 'p7', 'date': now, 'tags': ['tag2'], 'category': 5},
]
if self.sub_index == 0:
self.sub_index += 1
return list_1
else:
return list_2
return MockPlatform()
@pytest.fixture
def mock_status_change(plugin_module: 'nonebot_bison'):
class MockPlatform(plugin_module.platform.platform.StatusChange,
plugin_module.platform.platform.NoTargetMixin):
platform_name = 'mock_platform'
name = 'Mock Platform'
enabled = True
is_common = True
enable_tag = False
schedule_type = 'interval'
schedule_kw = {'seconds': 10}
categories = {
1: '转发',
2: '视频',
}
def __init__(self):
self.sub_index = 0
super().__init__()
async def get_status(self, _: 'Target'):
if self.sub_index == 0:
self.sub_index += 1
return {'s': False}
elif self.sub_index == 1:
self.sub_index += 1
return {'s': True}
else:
return {'s': False}
def compare_status(self, target, old_status, new_status) -> list['RawPost']:
if old_status['s'] == False and new_status['s'] == True:
return [{'text': 'on', 'cat': 1}]
elif old_status['s'] == True and new_status['s'] == False:
return [{'text': 'off', 'cat': 2}]
return []
async def parse(self, raw_post) -> 'Post':
return plugin_module.post.Post('mock_status', raw_post['text'], '')
def get_category(self, raw_post):
return raw_post['cat']
return MockPlatform()
@pytest.mark.asyncio
async def test_new_message_target_without_cats_tags(mock_platform_without_cats_tags, user_info_factory):
res1 = await mock_platform_without_cats_tags.fetch_new_post('dummy', [user_info_factory(lambda _: [1,2], lambda _: [])])
assert(len(res1) == 0)
res2 = await mock_platform_without_cats_tags.fetch_new_post('dummy', [
user_info_factory(lambda _: [], lambda _: []),
])
assert(len(res2) == 1)
posts_1 = res2[0][1]
assert(len(posts_1) == 3)
id_set_1 = set(map(lambda x: x.text, posts_1))
assert('p2' in id_set_1 and 'p3' in id_set_1 and 'p4' in id_set_1)
@pytest.mark.asyncio
async def test_new_message_target(mock_platform, user_info_factory):
res1 = await mock_platform.fetch_new_post('dummy', [user_info_factory(lambda _: [1,2], lambda _: [])])
assert(len(res1) == 0)
res2 = await mock_platform.fetch_new_post('dummy', [
user_info_factory(lambda _: [1,2], lambda _: []),
user_info_factory(lambda _: [1], lambda _: []),
user_info_factory(lambda _: [1,2], lambda _: ['tag1'])
])
assert(len(res2) == 3)
posts_1 = res2[0][1]
posts_2 = res2[1][1]
posts_3 = res2[2][1]
assert(len(posts_1) == 2)
assert(len(posts_2) == 1)
assert(len(posts_3) == 1)
id_set_1 = set(map(lambda x: x.text, posts_1))
id_set_2 = set(map(lambda x: x.text, posts_2))
id_set_3 = set(map(lambda x: x.text, posts_3))
assert('p2' in id_set_1 and 'p3' in id_set_1)
assert('p2' in id_set_2)
assert('p2' in id_set_3)
@pytest.mark.asyncio
async def test_new_message_no_target(mock_platform_no_target, user_info_factory):
res1 = await mock_platform_no_target.fetch_new_post('dummy', [user_info_factory(lambda _: [1,2], lambda _: [])])
assert(len(res1) == 0)
res2 = await mock_platform_no_target.fetch_new_post('dummy', [
user_info_factory(lambda _: [1,2], lambda _: []),
user_info_factory(lambda _: [1], lambda _: []),
user_info_factory(lambda _: [1,2], lambda _: ['tag1'])
])
assert(len(res2) == 3)
posts_1 = res2[0][1]
posts_2 = res2[1][1]
posts_3 = res2[2][1]
assert(len(posts_1) == 2)
assert(len(posts_2) == 1)
assert(len(posts_3) == 1)
id_set_1 = set(map(lambda x: x.text, posts_1))
id_set_2 = set(map(lambda x: x.text, posts_2))
id_set_3 = set(map(lambda x: x.text, posts_3))
assert('p2' in id_set_1 and 'p3' in id_set_1)
assert('p2' in id_set_2)
assert('p2' in id_set_3)
res3 = await mock_platform_no_target.fetch_new_post('dummy', [user_info_factory(lambda _: [1,2], lambda _: [])])
assert(len(res3) == 0)
@pytest.mark.asyncio
async def test_status_change(mock_status_change, user_info_factory):
res1 = await mock_status_change.fetch_new_post('dummy', [user_info_factory(lambda _: [1,2], lambda _: [])])
assert(len(res1) == 0)
res2 = await mock_status_change.fetch_new_post('dummy', [
user_info_factory(lambda _: [1,2], lambda _:[])
])
assert(len(res2) == 1)
posts = res2[0][1]
assert(len(posts) == 1)
assert(posts[0].text == 'on')
res3 = await mock_status_change.fetch_new_post('dummy', [
user_info_factory(lambda _: [1,2], lambda _: []),
user_info_factory(lambda _: [1], lambda _: []),
])
assert(len(res3) == 2)
assert(len(res3[0][1]) == 1)
assert(res3[0][1][0].text == 'off')
assert(len(res3[1][1]) == 0)
res4 = await mock_status_change.fetch_new_post('dummy', [user_info_factory(lambda _: [1,2], lambda _: [])])
assert(len(res4) == 0)
@pytest.mark.asyncio
async def test_group(plugin_module: 'nonebot_bison', mock_platform_no_target, mock_platform_no_target_2, user_info_factory):
group_platform = plugin_module.platform.platform.NoTargetGroup([mock_platform_no_target, mock_platform_no_target_2])
res1 = await group_platform.fetch_new_post('dummy', [user_info_factory(lambda _: [1,4], lambda _: [])])
assert(len(res1) == 0)
res2 = await group_platform.fetch_new_post('dummy', [user_info_factory(lambda _: [1,4], lambda _: [])])
assert(len(res2) == 1)
posts = res2[0][1]
assert(len(posts) == 2)
id_set_2 = set(map(lambda x: x.text, posts))
assert('p2' in id_set_2 and 'p6' in id_set_2)
res3 = await group_platform.fetch_new_post('dummy', [user_info_factory(lambda _: [1,4], lambda _: [])])
assert(len(res3) == 0)
```
{
"source": "58563528/omega-miyabot",
"score": 2
}
#### File: plugins/miya_button/__init__.py
```python
import re
import os
from nonebot import MatcherGroup, logger
from nonebot.typing import T_State
from nonebot.rule import to_me
from nonebot.adapters.cqhttp.bot import Bot
from nonebot.adapters.cqhttp.message import MessageSegment
from nonebot.adapters.cqhttp.event import GroupMessageEvent
from nonebot.adapters.cqhttp.permission import GROUP
from omega_miya.utils.Omega_plugin_utils import init_permission_state
from .resources import miya_voices
"""
miya按钮bot实现版本
测试中
"""
button = MatcherGroup(
type='message',
rule=to_me(),
# 使用run_preprocessor拦截权限管理, 在default_state初始化所需权限
state=init_permission_state(
name='button',
command=True,
level=10),
permission=GROUP,
priority=100,
block=False)
miya_button = button.on_endswith(msg='喵一个')
@miya_button.handle()
async def handle_miya_button(bot: Bot, event: GroupMessageEvent, state: T_State):
arg = str(event.get_plaintext()).strip().lower()
voice = re.sub('喵一个', '', arg)
voice_file = miya_voices.get_voice(keyword=voice)
if not os.path.exists(voice_file):
await miya_button.send('喵?')
else:
msg = MessageSegment.record(file=f'file:///{voice_file}')
await miya_button.send(msg)
```
#### File: plugins/Omega_vocation/__init__.py
```python
import re
from datetime import datetime, timedelta
from nonebot import MatcherGroup, export, logger, require
from nonebot.typing import T_State
from nonebot.adapters.cqhttp.bot import Bot
from nonebot.adapters.cqhttp.event import GroupMessageEvent
from nonebot.adapters.cqhttp.permission import GROUP
from omega_miya.utils.Omega_Base import DBSkill, DBUser, DBGroup, DBTable
from omega_miya.utils.Omega_plugin_utils import init_export, init_permission_state, check_auth_node
# Custom plugin usage text
__plugin_name__ = '请假'
__plugin_usage__ = r'''【Omega 请假插件】
用来设置/查询自己以及群员的状态和假期
仅限群聊使用
**Permission**
Command
with AuthNode
**AuthNode**
basic
**Usage**
/我的状态
/重置状态
/我的假期
/请假 [时间] [理由]
/销假
/谁有空 [技能名称]
/谁在休假'''
# 声明本插件可配置的权限节点
__plugin_auth_node__ = [
'basic'
]
# Init plugin export
init_export(export(), __plugin_name__, __plugin_usage__, __plugin_auth_node__)
# 注册事件响应器
vocation = MatcherGroup(
type='message',
# 使用run_preprocessor拦截权限管理, 在default_state初始化所需权限
state=init_permission_state(
name='vocation',
command=True,
auth_node='basic'),
permission=GROUP,
priority=10,
block=True)
my_status = vocation.on_command('我的状态')
@my_status.handle()
async def handle_first_receive(bot: Bot, event: GroupMessageEvent, state: T_State):
user_id = event.user_id
user = DBUser(user_id=user_id)
result = await user.status()
if result.success():
status = result.result
if status == 1:
status = '请假中'
elif status == 2:
status = '工作中'
else:
status = '空闲中'
logger.info(f"my_status: {event.group_id}/{user_id}, Success, {result.info}")
await my_status.finish(f'你现在的状态是: 【{status}】')
else:
logger.error(f"my_status: {event.group_id}/{user_id}, Failed, {result.info}")
await my_status.finish('没有查询到你的状态信息QAQ, 请尝试使用【/重置状态】来解决问题')
# 注册事件响应器
reset_status = vocation.on_command('重置状态', aliases={'销假'})
@reset_status.handle()
async def handle_first_receive(bot: Bot, event: GroupMessageEvent, state: T_State):
user_id = event.user_id
user = DBUser(user_id=user_id)
result = await user.status_set(status=0)
if result.success():
logger.info(f"reset_status: {event.group_id}/{user_id}, Success, {result.info}")
await reset_status.finish('Success')
else:
logger.error(f"reset_status: {event.group_id}/{user_id}, Failed, {result.info}")
await reset_status.finish('Failed QAQ')
# 注册事件响应器
my_vocation = vocation.on_command('我的假期')
@my_vocation.handle()
async def handle_first_receive(bot: Bot, event: GroupMessageEvent, state: T_State):
user_id = event.user_id
user = DBUser(user_id=user_id)
result = await user.vocation_status()
if result.success():
status, stop_time = result.result
if status == 1:
msg = f'你的假期将持续到: 【{stop_time}】'
else:
msg = '你似乎并不在假期中呢~需要现在请假吗?'
logger.info(f"my_vocation: {event.group_id}/{user_id}, Success, {result.info}")
await my_vocation.finish(msg)
else:
logger.error(f"my_vocation: {event.group_id}/{user_id}, Failed, {result.info}")
await my_vocation.finish('没有查询到你的假期信息QAQ, 请尝试使用【/重置状态】来解决问题')
# 注册事件响应器
set_vocation = vocation.on_command('请假')
# 修改默认参数处理
@set_vocation.args_parser
async def parse(bot: Bot, event: GroupMessageEvent, state: T_State):
args = str(event.get_plaintext()).strip().lower().split()
if not args:
await set_vocation.reject('你似乎没有发送有效的参数呢QAQ, 请重新发送:')
state[state["_current_key"]] = args[0]
if state[state["_current_key"]] == '取消':
await set_vocation.finish('操作已取消')
@set_vocation.handle()
async def handle_first_receive(bot: Bot, event: GroupMessageEvent, state: T_State):
args = str(event.get_plaintext()).strip().lower().split()
if not args:
pass
elif args and len(args) == 1:
state['vocation_time'] = args[0]
state['reason'] = None
elif args and len(args) == 2:
state['vocation_time'] = args[0]
state['reason'] = args[1]
else:
await set_vocation.finish('参数错误QAQ')
@set_vocation.got('vocation_time', prompt='请输入你想请假的时间: \n仅支持数字+周/天/小时/分/分钟/秒')
async def handle_vocation_time(bot: Bot, event: GroupMessageEvent, state: T_State):
time = state['vocation_time']
add_time = timedelta()
if re.match(r'^\d+周$', time):
time = int(re.sub(r'周$', '', time))
add_time = timedelta(weeks=time)
elif re.match(r'^\d+天$', time):
time = int(re.sub(r'天$', '', time))
add_time = timedelta(days=time)
elif re.match(r'^\d+小时$', time):
time = int(re.sub(r'小时$', '', time))
add_time = timedelta(hours=time)
elif re.match(r'^\d+(分|分钟)$', time):
time = int(re.sub(r'(分|分钟)$', '', time))
add_time = timedelta(minutes=time)
elif re.match(r'^\d+秒$', time):
time = int(re.sub(r'秒$', '', time))
add_time = timedelta(seconds=time)
else:
await set_vocation.reject('仅支持数字+周/天/小时/分/分钟/秒, 请重新输入, 取消命令请发送【取消】:')
state['stop_at'] = datetime.now() + add_time
@set_vocation.got('stop_at', prompt='stop_at?')
@set_vocation.got('reason', prompt='请输入你的请假理由:')
async def handle_vocation_stop(bot: Bot, event: GroupMessageEvent, state: T_State):
user_id = event.user_id
user = DBUser(user_id=user_id)
stop_at = state['stop_at']
reason = state['reason']
result = await user.vocation_set(stop_time=stop_at, reason=reason)
if result.success():
logger.info(f"Group: {event.group_id}/{user_id}, set_vocation, Success, {result.info}")
await set_vocation.finish(f'请假成功! 你的假期将持续到【{stop_at.strftime("%Y-%m-%d %H:%M:%S")}】')
else:
logger.error(f"Group: {event.group_id}/{user_id}, set_vocation, Failed, {result.info}")
await set_vocation.finish('请假失败, 发生了意外的错误QAQ')
# 注册事件响应器
get_idle = vocation.on_command('谁有空')
# 修改默认参数处理
@get_idle.args_parser
async def parse(bot: Bot, event: GroupMessageEvent, state: T_State):
args = str(event.get_plaintext()).strip().lower().split()
if not args:
await get_idle.reject('你似乎没有发送有效的参数呢QAQ, 请重新发送:')
state[state["_current_key"]] = args[0]
if state[state["_current_key"]] == '取消':
await get_idle.finish('操作已取消')
@get_idle.handle()
async def handle_first_receive(bot: Bot, event: GroupMessageEvent, state: T_State):
args = str(event.get_plaintext()).strip().lower().split()
if not args:
state['skill'] = None
elif args and len(args) == 1:
state['skill'] = args[0]
else:
await get_idle.finish('参数错误QAQ')
@get_idle.got('skill', prompt='空闲技能组?')
async def handle_skill(bot: Bot, event: GroupMessageEvent, state: T_State):
skill = state['skill']
group_id = event.group_id
group = DBGroup(group_id=group_id)
if not skill:
result = await group.idle_member_list()
if result.success() and result.result:
msg = ''
for nickname, user_skill in result.result:
msg += f'\n【{nickname}{user_skill}】'
logger.info(f"Group: {event.group_id}, get_idle, Success, {result.info}")
await get_idle.finish(f'现在有空的人: \n{msg}')
elif result.success() and not result.result:
logger.info(f"Group: {event.group_id}, get_idle, Success, {result.info}")
await get_idle.finish(f'现在似乎没人有空呢QAQ')
else:
logger.error(f"Group: {event.group_id}, get_idle, Failed, {result.info}")
await get_idle.finish(f'似乎发生了点错误QAQ')
else:
skill_table = DBTable(table_name='Skill')
skill_res = await skill_table.list_col(col_name='name')
exist_skill = [x for x in skill_res.result]
if skill not in exist_skill:
await get_idle.reject(f'没有{skill}这个技能, 请重新输入, 取消命令请发送【取消】:')
result = await group.idle_skill_list(skill=DBSkill(name=skill))
if result.success() and result.result:
msg = ''
for nickname in result.result:
msg += f'\n【{nickname}】'
await get_idle.finish(f'现在有空的{skill}人: \n{msg}')
elif result.success() and not result.result:
logger.info(f"Group: {event.group_id}, get_idle, Success, {result.info}")
await get_idle.finish(f'现在似乎没有{skill}人有空呢QAQ')
else:
logger.error(f"Group: {event.group_id}, get_idle, Failed, {result.info}")
await get_idle.finish(f'似乎发生了点错误QAQ')
# 注册事件响应器
get_vocation = vocation.on_command('谁在休假')
@get_vocation.handle()
async def handle_first_receive(bot: Bot, event: GroupMessageEvent, state: T_State):
group_id = event.group_id
group = DBGroup(group_id=group_id)
result = await group.vocation_member_list()
if result.success() and result.result:
msg = ''
for nickname, stop_at in result.result:
msg += f'\n【{nickname}/休假到: {stop_at}】'
logger.info(f"Group: {event.group_id}, get_vocation, Success, {result.info}")
await get_vocation.finish(f'现在在休假的人: \n{msg}')
elif result.success() and not result.result:
logger.info(f"Group: {event.group_id}, get_vocation, Success, {result.info}")
await get_vocation.finish(f'现在似乎没有人休假呢~')
else:
logger.error(f"Group: {event.group_id}, get_vocation, Failed, {result.info}")
await get_vocation.finish(f'似乎发生了点错误QAQ')
# 启用检查假期的定时任务
scheduler = require("nonebot_plugin_apscheduler").scheduler
@scheduler.scheduled_job(
'cron',
# year=None,
# month=None,
# day='*/1',
# week=None,
# day_of_week=None,
# hour='4',
minute='*/10',
# second='*/10',
# start_date=None,
# end_date=None,
# timezone=None,
id='member_vocations_monitor',
coalesce=True,
misfire_grace_time=60
)
async def member_vocations_monitor():
logger.debug(f"member_vocations_monitor: vocation checking started")
from nonebot import get_bots
over_vocation_user = set()
for bot_id, bot in get_bots().items():
group_list = await bot.call_api('get_group_list')
for group in group_list:
group_id = group.get('group_id')
# 跳过不具备权限的组
auth_check_res = await check_auth_node(
auth_id=group_id, auth_type='group', auth_node='Omega_vocation.basic')
if auth_check_res != 1:
continue
logger.debug(f"member_vocations_monitor: checking group: {group_id}")
# 调用api获取群成员信息
group_member_list = await bot.call_api(api='get_group_member_list', group_id=group_id)
for user_info in group_member_list:
user_nickname = user_info['card']
if not user_nickname:
user_nickname = user_info['nickname']
user_qq = user_info['user_id']
user = DBUser(user_id=user_qq)
user_vocation_res = await user.vocation_status()
status, stop_time = user_vocation_res.result
if status == 1 and datetime.now() >= stop_time:
msg = f'【{user_nickname}】的假期已经结束啦~\n快给他/她安排工作吧!'
await bot.call_api(api='send_group_msg', group_id=group_id, message=msg)
over_vocation_user.add(user)
for user in over_vocation_user:
_res = await user.status_set(status=0)
if not _res.success():
logger.error(f"reset user status failed: {_res.info}")
logger.debug('member_vocations_monitor: vocation checking completed')
```
#### File: plugins/pixivsion_monitor/monitor.py
```python
import asyncio
from nonebot import logger, require, get_bots
from nonebot.adapters.cqhttp import MessageSegment
from omega_miya.utils.Omega_Base import DBSubscription, DBTable
from omega_miya.utils.pixiv_utils import PixivIllust, PixivisionArticle
from .utils import pixivsion_article_parse
from .block_tag import TAG_BLOCK_LIST
# 启用检查动态状态的定时任务
scheduler = require("nonebot_plugin_apscheduler").scheduler
@scheduler.scheduled_job(
'cron',
# year=None,
# month=None,
# day='*/1',
# week=None,
# day_of_week=None,
# hour='*/1',
minute='*/30',
# second='*/30',
# start_date=None,
# end_date=None,
# timezone=None,
id='pixivision_monitor',
coalesce=True,
misfire_grace_time=45
)
async def pixivision_monitor():
logger.debug(f"pixivision_monitor: checking started")
# 获取当前bot列表
bots = []
for bot_id, bot in get_bots().items():
bots.append(bot)
# 获取所有有通知权限的群组
t = DBTable(table_name='Group')
group_res = await t.list_col_with_condition('group_id', 'notice_permissions', 1)
all_notice_groups = [int(x) for x in group_res.result]
# 初始化tag黑名单
block_tag_id = []
block_tag_name = []
for block_tag in TAG_BLOCK_LIST:
block_tag_id.append(block_tag.get('id'))
block_tag_name.append(block_tag.get('name'))
# 提取数据库中已有article的id列表
t = DBTable(table_name='Pixivision')
pixivision_res = await t.list_col(col_name='aid')
exist_article = [int(x) for x in pixivision_res.result]
# 获取最新一页pixivision的article
new_article = []
articles_result = await PixivisionArticle.get_illustration_list()
if articles_result.error:
logger.error(f'pixivision_monitor: checking pixivision failed: {articles_result.info}')
return
for article in articles_result.result:
try:
article = dict(article)
article_tags_id = []
article_tags_name = []
for tag in article['tags']:
article_tags_id.append(int(tag['tag_id']))
article_tags_name.append(str(tag['tag_name']))
# 跳过黑名单tag的article
if list(set(article_tags_id) & set(block_tag_id)) or list(set(article_tags_name) & set(block_tag_name)):
continue
# 获取新的article内容
if int(article['id']) not in exist_article:
logger.info(f"pixivision_monitor: 检查到新的Pixivision article: {article['id']}")
new_article.append({'aid': int(article['id']), 'tags': article_tags_name})
except Exception as e:
logger.error(f'pixivision_monitor: an error occurred in checking pixivision: {repr(e)}')
continue
if not new_article:
logger.info(f'pixivision_monitor: checking completed, 没有新的article')
return
sub = DBSubscription(sub_type=8, sub_id=-1)
# 获取订阅了pixivision的所有群
sub_group_res = await sub.sub_group_list()
sub_group = sub_group_res.result
# 需通知的群
notice_group = list(set(all_notice_groups) & set(sub_group))
# 处理新的article
for article in new_article:
aid = int(article['aid'])
tags = list(article['tags'])
a_res = await pixivsion_article_parse(aid=aid, tags=tags)
if a_res.success():
if not notice_group:
continue
article_data = a_res.result
msg = f"新的Pixivision特辑!\n\n" \
f"《{article_data['title']}》\n\n{article_data['description']}\n{article_data['url']}"
for group_id in notice_group:
for _bot in bots:
try:
await _bot.call_api(api='send_group_msg', group_id=group_id, message=msg)
except Exception as e:
logger.warning(f"向群组: {group_id} 发送article简介信息失败, error: {repr(e)}")
continue
# 处理article中图片内容
tasks = []
for pid in article_data['illusts_list']:
tasks.append(PixivIllust(pid=pid).pic_2_base64())
p_res = await asyncio.gather(*tasks)
image_error = 0
for image_res in p_res:
if not image_res.success():
image_error += 1
continue
else:
img_seg = MessageSegment.image(image_res.result)
# 发送图片
for group_id in notice_group:
for _bot in bots:
try:
await _bot.call_api(api='send_group_msg', group_id=group_id, message=img_seg)
# 避免风控控制推送间隔
await asyncio.sleep(1)
except Exception as e:
logger.warning(f"向群组: {group_id} 发送图片内容失败, error: {repr(e)}")
continue
logger.info(f"article: {aid} 图片已发送完成, 失败: {image_error}")
else:
logger.error(f"article: {aid} 信息解析失败, info: {a_res.info}")
logger.info(f'pixivision_monitor: checking completed, 已处理新的article: {repr(new_article)}')
__all__ = [
'scheduler'
]
```
#### File: sticker_maker/utils/default_render.py
```python
from PIL import Image, ImageDraw, ImageFont
def stick_maker_temp_default(text: str, image_file: Image.Image, font_path: str, image_wight: int, image_height: int):
# 处理图片
draw = ImageDraw.Draw(image_file)
font_size = 72
font = ImageFont.truetype(font_path, font_size)
text_w, text_h = font.getsize_multiline(text)
while text_w >= image_wight:
font_size = font_size * 3 // 4
font = ImageFont.truetype(font_path, font_size)
text_w, text_h = font.getsize_multiline(text)
# 计算居中文字位置
text_coordinate = (((image_wight - text_w) // 2), 9 * (image_height - text_h) // 10)
# 为文字设置黑边
text_b_resize = 4
if font_size >= 72:
text_b_resize = 4
elif font_size >= 36:
text_b_resize = 3
elif font_size >= 24:
text_b_resize = 2
elif font_size < 12:
text_b_resize = 1
text_coordinate_b1 = (text_coordinate[0] + text_b_resize, text_coordinate[1])
text_coordinate_b2 = (text_coordinate[0] - text_b_resize, text_coordinate[1])
text_coordinate_b3 = (text_coordinate[0], text_coordinate[1] + text_b_resize)
text_coordinate_b4 = (text_coordinate[0], text_coordinate[1] - text_b_resize)
draw.multiline_text(text_coordinate_b1, text, font=font, fill=(0, 0, 0))
draw.multiline_text(text_coordinate_b2, text, font=font, fill=(0, 0, 0))
draw.multiline_text(text_coordinate_b3, text, font=font, fill=(0, 0, 0))
draw.multiline_text(text_coordinate_b4, text, font=font, fill=(0, 0, 0))
# 白字要后画,后画的在上层,不然就是黑滋在上面挡住了
draw.multiline_text(text_coordinate, text, font=font, fill=(255, 255, 255))
return image_file
def stick_maker_temp_littleangel(text: str, image_file: Image.Image, font_path: str, image_wight: int, image_height: int):
# 处理图片
background_w = image_wight + 100
background_h = image_height + 230
background = Image.new(mode="RGB", size=(background_w, background_h), color=(255, 255, 255))
# 处理粘贴位置 上留100像素,下留130像素
image_coordinate = (((background_w - image_wight) // 2), 100)
background.paste(image_file, image_coordinate)
draw = ImageDraw.Draw(background)
font_down_1 = ImageFont.truetype(font_path, 48)
text_down_1 = r'非常可爱!简直就是小天使'
text_down_1_w, text_down_1_h = font_down_1.getsize(text_down_1)
text_down_1_coordinate = (((background_w - text_down_1_w) // 2), background_h - 120)
draw.text(text_down_1_coordinate, text_down_1, font=font_down_1, fill=(0, 0, 0))
font_down_2 = ImageFont.truetype(font_path, 26)
text_down_2 = r'她没失踪也没怎么样 我只是觉得你们都该看一下'
text_down_2_w, text_down_2_h = font_down_2.getsize(text_down_2)
text_down_2_coordinate = (((background_w - text_down_2_w) // 2), background_h - 60)
draw.text(text_down_2_coordinate, text_down_2, font=font_down_2, fill=(0, 0, 0))
font_size_up = 72
font_up = ImageFont.truetype(font_path, font_size_up)
text_up = f'请问你们看到{text}了吗?'
text_up_w, text_up_h = font_up.getsize(text_up)
while text_up_w >= background_w:
font_size_up = font_size_up * 5 // 6
font_up = ImageFont.truetype(font_path, font_size_up)
text_up_w, text_up_h = font_up.getsize(text_up)
# 计算居中文字位置
text_up_coordinate = (((background_w - text_up_w) // 2), 25)
draw.text(text_up_coordinate, text_up, font=font_up, fill=(0, 0, 0))
return background
def stick_maker_temp_whitebg(text: str, image_file: Image.Image, font_path: str, image_wight: int, image_height: int):
# 处理文本
if image_wight > image_height:
font_size = 72
else:
font_size = 84
font = ImageFont.truetype(font_path, font_size)
text_w, text_h = font.getsize_multiline(text)
while text_w >= (image_wight * 8 // 9):
font_size = font_size * 7 // 8
font = ImageFont.truetype(font_path, font_size)
text_w, text_h = font.getsize_multiline(text)
# 处理图片
background_w = image_wight
background_h = image_height + round(text_h * 1.5)
background = Image.new(mode="RGB", size=(background_w, background_h), color=(255, 255, 255))
# 处理粘贴位置 顶头
image_coordinate = (0, 0)
background.paste(image_file, image_coordinate)
draw = ImageDraw.Draw(background)
# 计算居中文字位置
text_coordinate = (((background_w - text_w) // 2), image_height + round(text_h / 100) * round(text_h * 0.1))
draw.multiline_text(text_coordinate, text, font=font, fill=(0, 0, 0))
return background
def stick_maker_temp_blackbg(text: str, image_file: Image.Image, font_path: str, image_wight: int, image_height: int):
# 处理文本
if image_wight > image_height:
font_size = 96
else:
font_size = 108
font = ImageFont.truetype(font_path, font_size)
text_w, text_h = font.getsize_multiline(text)
while text_w >= (image_wight * 9 // 10):
font_size = font_size * 8 // 9
font = ImageFont.truetype(font_path, font_size)
text_w, text_h = font.getsize_multiline(text)
# 处理图片
background_w = image_wight + 150
background_h = image_height + 115 + round(text_h * 1.5)
background = Image.new(mode="RGB", size=(background_w, background_h), color=(0, 0, 0))
layer_1 = Image.new(mode="RGB", size=(image_wight + 12, image_height + 12), color=(255, 255, 255))
layer_2 = Image.new(mode="RGB", size=(image_wight + 10, image_height + 10), color=(0, 0, 0))
layer_3 = Image.new(mode="RGB", size=(image_wight + 6, image_height + 6), color=(255, 255, 255))
layer_4 = Image.new(mode="RGB", size=(image_wight + 4, image_height + 4), color=(0, 0, 0))
# 处理粘贴位置 留出黑边距离
background.paste(layer_1, (70, 70))
background.paste(layer_2, (71, 71))
background.paste(layer_3, (73, 73))
background.paste(layer_4, (74, 74))
background.paste(image_file, (76, 76))
draw = ImageDraw.Draw(background)
# 计算居中文字位置
text_coordinate = (((background_w - text_w) // 2),
image_height + 110 - round(text_h / 9) + round(text_h / 100) * round(text_h * 0.1))
draw.multiline_text(text_coordinate, text, font=font, fill=(255, 255, 255))
return background
__all__ = [
'stick_maker_temp_whitebg',
'stick_maker_temp_blackbg',
'stick_maker_temp_default',
'stick_maker_temp_littleangel'
]
```
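All four templates above share the same call shape: they take an already-opened PIL image plus its dimensions and return a new composed image. A minimal driver sketch follows; the import path, font file and file names are assumptions for illustration, not taken from the repo.
```python
from PIL import Image

# Assumed import path based on the file location shown above
from omega_miya.plugins.sticker_maker.utils.default_render import stick_maker_temp_whitebg


def render_demo(src_path: str, font_path: str, out_path: str) -> None:
    # The templates expect an opened PIL image plus its width/height
    image = Image.open(src_path).convert('RGB')
    width, height = image.size
    sticker = stick_maker_temp_whitebg(
        text='示例文字',
        image_file=image,
        font_path=font_path,
        image_wight=width,  # parameter name as spelled in the source
        image_height=height,
    )
    sticker.save(out_path, format='JPEG')


if __name__ == '__main__':
    render_demo('input.jpg', 'msyhbd.ttc', 'sticker_out.jpg')
```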
#### File: utils/Omega_Base/class_result.py
```python
from dataclasses import dataclass
from typing import Dict, List, Set, Tuple, Union, Any
@dataclass
class BaseResult:
error: bool
info: str
def success(self) -> bool:
if not self.error:
return True
else:
return False
class Result(object):
@dataclass
class DictResult(BaseResult):
result: dict
def __repr__(self):
return f'<DictResult(error={self.error}, info={self.info}, result={self.result})>'
@dataclass
class IntDictResult(BaseResult):
result: Dict[int, int]
def __repr__(self):
return f'<IntDictResult(error={self.error}, info={self.info}, result={self.result})>'
@dataclass
class TextDictResult(BaseResult):
result: Dict[str, str]
def __repr__(self):
return f'<TextDictResult(error={self.error}, info={self.info}, result={self.result})>'
@dataclass
class JsonDictResult(BaseResult):
result: Dict[str, Union[str, int, bool, list, dict]]
def __repr__(self):
return f'<JsonDictResult(error={self.error}, info={self.info}, result={self.result})>'
@dataclass
class ListResult(BaseResult):
result: list
def __repr__(self):
return f'<ListResult(error={self.error}, info={self.info}, result={self.result})>'
@dataclass
class IntListResult(BaseResult):
result: List[int]
def __repr__(self):
return f'<IntListResult(error={self.error}, info={self.info}, result={self.result})>'
@dataclass
class TextListResult(BaseResult):
result: List[str]
def __repr__(self):
return f'<TextListResult(error={self.error}, info={self.info}, result={self.result})>'
@dataclass
class DictListResult(BaseResult):
result: List[dict]
def __repr__(self):
return f'<DictListResult(error={self.error}, info={self.info}, result={self.result})>'
@dataclass
class SetResult(BaseResult):
result: set
def __repr__(self):
return f'<SetResult(error={self.error}, info={self.info}, result={self.result})>'
@dataclass
class IntSetResult(BaseResult):
result: Set[int]
def __repr__(self):
return f'<IntSetResult(error={self.error}, info={self.info}, result={self.result})>'
@dataclass
class TextSetResult(BaseResult):
result: Set[str]
def __repr__(self):
return f'<TextSetResult(error={self.error}, info={self.info}, result={self.result})>'
@dataclass
class TupleResult(BaseResult):
result: tuple
def __repr__(self):
return f'<TupleResult(error={self.error}, info={self.info}, result={self.result})>'
@dataclass
class IntTupleResult(BaseResult):
result: Tuple[int, ...]
def __repr__(self):
return f'<IntTupleResult(error={self.error}, info={self.info}, result={self.result})>'
@dataclass
class TextTupleResult(BaseResult):
result: Tuple[str, ...]
def __repr__(self):
return f'<TextTupleResult(error={self.error}, info={self.info}, result={self.result})>'
@dataclass
class IntResult(BaseResult):
result: int
def __repr__(self):
return f'<IntResult(error={self.error}, info={self.info}, result={self.result})>'
@dataclass
class TextResult(BaseResult):
result: str
def __repr__(self):
return f'<TextResult(error={self.error}, info={self.info}, result={self.result})>'
@dataclass
class BoolResult(BaseResult):
result: bool
def __repr__(self):
return f'<BoolResult(error={self.error}, info={self.info}, result={self.result})>'
@dataclass
class AnyResult(BaseResult):
result: Any
def __repr__(self):
return f'<AnyResult(error={self.error}, info={self.info}, result={self.result})>'
__all__ = [
'Result'
]
```
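Call sites treat these containers uniformly: build one with `error`/`info`/`result`, then branch on `success()`. A small illustrative sketch with made-up values (the import path follows the usage seen elsewhere in this repo):
```python
from omega_miya.utils.Omega_Base.class_result import Result


def parse_user_level(raw: dict) -> Result.IntResult:
    # Wrap a lookup so the caller always gets the same error/info/result triple
    try:
        return Result.IntResult(error=False, info='Success', result=int(raw['level']))
    except (KeyError, ValueError) as e:
        return Result.IntResult(error=True, info=repr(e), result=-1)


res = parse_user_level({'level': '3'})
if res.success():
    print(f'level: {res.result}')
else:
    print(f'failed: {res.info}')
```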
#### File: Omega_Base/model/pixivillust.py
```python
from typing import List
from omega_miya.utils.Omega_Base.database import NBdb
from omega_miya.utils.Omega_Base.class_result import Result
from omega_miya.utils.Omega_Base.tables import Pixiv, PixivT2I
from .pixivtag import DBPixivtag
from datetime import datetime
from sqlalchemy.future import select
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
from sqlalchemy.sql.expression import func
from sqlalchemy import or_
class DBPixivillust(object):
def __init__(self, pid: int):
self.pid = pid
async def id(self) -> Result.IntResult:
async_session = NBdb().get_async_session()
async with async_session() as session:
async with session.begin():
try:
session_result = await session.execute(
select(Pixiv.id).where(Pixiv.pid == self.pid)
)
pixiv_table_id = session_result.scalar_one()
result = Result.IntResult(error=False, info='Success', result=pixiv_table_id)
except NoResultFound:
result = Result.IntResult(error=True, info='NoResultFound', result=-1)
except MultipleResultsFound:
result = Result.IntResult(error=True, info='MultipleResultsFound', result=-1)
except Exception as e:
result = Result.IntResult(error=True, info=repr(e), result=-1)
return result
async def exist(self) -> bool:
result = await self.id()
return result.success()
async def add(self, uid: int, title: str, uname: str, nsfw_tag: int, tags: List[str], url: str) -> Result.IntResult:
# 将tag写入pixiv_tag表
for tag in tags:
_tag = DBPixivtag(tagname=tag)
await _tag.add()
tag_text = ','.join(tags)
# 将作品信息写入pixiv_illust表
async_session = NBdb().get_async_session()
async with async_session() as session:
try:
need_upgrade_pixivt2i = False
async with session.begin():
try:
session_result = await session.execute(
select(Pixiv).where(Pixiv.pid == self.pid)
)
exist_illust = session_result.scalar_one()
exist_illust.title = title
exist_illust.uname = uname
if nsfw_tag > exist_illust.nsfw_tag:
exist_illust.nsfw_tag = nsfw_tag
exist_illust.tags = tag_text
exist_illust.updated_at = datetime.now()
result = Result.IntResult(error=False, info='Exist illust updated', result=0)
except NoResultFound:
new_illust = Pixiv(pid=self.pid, uid=uid, title=title, uname=uname, url=url, nsfw_tag=nsfw_tag,
tags=tag_text, created_at=datetime.now())
session.add(new_illust)
need_upgrade_pixivt2i = True
result = Result.IntResult(error=False, info='Success added', result=0)
await session.commit()
# 写入tag_pixiv关联表
# 获取本作品在illust表中的id
id_result = await self.id()
if id_result.success() and need_upgrade_pixivt2i:
_illust_id = id_result.result
# 根据作品tag依次写入tag_illust表
async with session.begin():
for tag in tags:
_tag = DBPixivtag(tagname=tag)
_tag_id_res = await _tag.id()
if not _tag_id_res.success():
continue
_tag_id = _tag_id_res.result
try:
new_tag_illust = PixivT2I(illust_id=_illust_id, tag_id=_tag_id,
created_at=datetime.now())
session.add(new_tag_illust)
except Exception as e:
continue
await session.commit()
result = Result.IntResult(error=False, info='Success added with tags', result=0)
except MultipleResultsFound:
await session.rollback()
result = Result.IntResult(error=True, info='MultipleResultsFound', result=-1)
except Exception as e:
await session.rollback()
result = Result.IntResult(error=True, info=repr(e), result=-1)
return result
@classmethod
async def rand_illust(cls, num: int, nsfw_tag: int) -> Result.ListResult:
async_session = NBdb().get_async_session()
async with async_session() as session:
async with session.begin():
try:
session_result = await session.execute(
select(Pixiv.pid).
where(Pixiv.nsfw_tag == nsfw_tag).
order_by(func.random()).limit(num)
)
res = [x for x in session_result.scalars().all()]
result = Result.ListResult(error=False, info='Success', result=res)
except Exception as e:
result = Result.ListResult(error=True, info=repr(e), result=[])
return result
@classmethod
async def status(cls) -> Result.DictResult:
async_session = NBdb().get_async_session()
async with async_session() as session:
async with session.begin():
try:
session_result = await session.execute(select(func.count(Pixiv.id)))
all_count = session_result.scalar()
session_result = await session.execute(select(func.count(Pixiv.id)).where(Pixiv.nsfw_tag == 0))
moe_count = session_result.scalar()
session_result = await session.execute(select(func.count(Pixiv.id)).where(Pixiv.nsfw_tag == 1))
setu_count = session_result.scalar()
session_result = await session.execute(select(func.count(Pixiv.id)).where(Pixiv.nsfw_tag == 2))
r18_count = session_result.scalar()
res = {'total': int(all_count), 'moe': int(moe_count),
'setu': int(setu_count), 'r18': int(r18_count)}
result = Result.DictResult(error=False, info='Success', result=res)
except Exception as e:
result = Result.DictResult(error=True, info=repr(e), result={})
return result
@classmethod
async def list_illust(
cls, keywords: List[str], num: int, nsfw_tag: int, acc_mode: bool = False) -> Result.ListResult:
async_session = NBdb().get_async_session()
async with async_session() as session:
async with session.begin():
try:
if acc_mode:
# 构造查询, 精确搜索标题, 用户和tag
query = select(Pixiv.pid).where(Pixiv.nsfw_tag == nsfw_tag)
for keyword in keywords:
query = query.where(or_(
func.find_in_set(keyword, Pixiv.tags),
func.find_in_set(keyword, Pixiv.uname),
func.find_in_set(keyword, Pixiv.title)
))
query = query.order_by(func.random()).limit(num)
session_result = await session.execute(query)
res = [x for x in session_result.scalars().all()]
if not acc_mode or (acc_mode and not res):
# 构造查询, 模糊搜索标题, 用户和tag
query = select(Pixiv.pid).where(Pixiv.nsfw_tag == nsfw_tag)
for keyword in keywords:
query = query.where(or_(
Pixiv.tags.ilike(f'%{keyword}%'),
Pixiv.uname.ilike(f'%{keyword}%'),
Pixiv.title.ilike(f'%{keyword}%')
))
query = query.order_by(func.random()).limit(num)
session_result = await session.execute(query)
res = [x for x in session_result.scalars().all()]
result = Result.ListResult(error=False, info='Success', result=res)
except Exception as e:
result = Result.ListResult(error=True, info=repr(e), result=[])
return result
```
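The classmethods above (`rand_illust`, `status`, `list_illust`) open their own sessions, so a caller only needs a running event loop and the bot's database already configured. A hedged usage sketch, with the import path inferred from the imports in the file above:
```python
import asyncio

# Inferred from the module paths used inside the file above
from omega_miya.utils.Omega_Base.model.pixivillust import DBPixivillust


async def show_random_moe() -> None:
    status_res = await DBPixivillust.status()
    if status_res.success():
        print(f"collection status: {status_res.result}")
    # nsfw_tag=0 is the 'moe' bucket counted by status()
    pid_res = await DBPixivillust.rand_illust(num=3, nsfw_tag=0)
    if pid_res.success():
        for pid in pid_res.result:
            print(f'picked pid: {pid}')
    else:
        print(f'query failed: {pid_res.info}')


if __name__ == '__main__':
    asyncio.run(show_random_moe())
```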
#### File: utils/Omega_CoolDown/__init__.py
```python
from nonebot import get_plugin, get_driver, logger
from nonebot.adapters.cqhttp import MessageSegment, Message
from nonebot.exception import IgnoredException
from nonebot.message import run_preprocessor
from nonebot.typing import T_State
from nonebot.matcher import Matcher
from nonebot.adapters.cqhttp.bot import Bot
from nonebot.adapters.cqhttp.event import MessageEvent
from omega_miya.utils.Omega_plugin_utils import \
check_and_set_global_cool_down, check_and_set_plugin_cool_down, \
check_and_set_group_cool_down, check_and_set_user_cool_down, PluginCoolDown
from omega_miya.utils.Omega_Base import DBCoolDownEvent, DBAuth
@run_preprocessor
async def handle_plugin_cooldown(matcher: Matcher, bot: Bot, event: MessageEvent, state: T_State):
group_id = event.dict().get('group_id')
user_id = event.dict().get('user_id')
global_config = get_driver().config
superusers = global_config.superusers
# 忽略超级用户
if user_id in [int(x) for x in superusers]:
return
# 只处理message事件
if matcher.type != 'message':
return
# 处理插件冷却
# 冷却处理优先级: 全局>插件>群组>用户
# 冷却限制优先级: 用户>群组>插件>全局
plugin_name = matcher.module
plugin = get_plugin(plugin_name)
plugin_cool_down_list = plugin.export.get('cool_down')
# 只处理声明了__plugin_cool_down__的插件
if not plugin_cool_down_list:
return
# 检查用户或群组是否有skip_cd权限, 跳过冷却检查
skip_cd_auth_node = f'{plugin_name}.{PluginCoolDown.skip_auth_node}'
user_auth = DBAuth(auth_id=user_id, auth_type='user', auth_node=skip_cd_auth_node)
user_tag_res = await user_auth.tags_info()
if user_tag_res.result[0] == 1 and user_tag_res.result[1] == 0:
return
group_auth = DBAuth(auth_id=group_id, auth_type='group', auth_node=skip_cd_auth_node)
group_tag_res = await group_auth.tags_info()
if group_tag_res.result[0] == 1 and group_tag_res.result[1] == 0:
return
# 检查冷却情况
global_check = await DBCoolDownEvent.check_global_cool_down_event()
plugin_check = await DBCoolDownEvent.check_plugin_cool_down_event(plugin=plugin_name)
group_check = await DBCoolDownEvent.check_group_cool_down_event(plugin=plugin_name, group_id=group_id)
user_check = await DBCoolDownEvent.check_user_cool_down_event(plugin=plugin_name, user_id=user_id)
# 处理全局冷却
# 先检查是否已有全局冷却
if plugin_check.result == 1 or group_check.result == 1 or user_check.result == 1:
pass
elif global_check.result == 1:
await bot.send(event=event, message=Message(f'{MessageSegment.at(user_id=user_id)}命令冷却中!\n{global_check.info}'))
raise IgnoredException('全局命令冷却中')
elif global_check.result == 0:
pass
else:
logger.error(f'全局冷却事件异常! group: {group_id}, user: {user_id}, error: {global_check.info}')
# 然后再处理命令中存在的全局冷却
for time in [x.cool_down_time for x in plugin_cool_down_list if x.type == PluginCoolDown.global_type]:
# 若有插件、群组或用户冷却则交由其处理
if plugin_check.result == 1 or group_check.result == 1 or user_check.result == 1:
break
res = await check_and_set_global_cool_down(minutes=time)
if res.result == 1:
await bot.send(event=event, message=Message(f'{MessageSegment.at(user_id=user_id)}命令冷却中!\n{res.info}'))
raise IgnoredException('全局命令冷却中')
elif res.result == 0:
pass
else:
logger.error(f'全局冷却事件异常! group: {group_id}, user: {user_id}, error: {res.info}')
# 处理插件冷却
for time in [x.cool_down_time for x in plugin_cool_down_list if x.type == PluginCoolDown.plugin_type]:
# 若有群组或用户冷却则交由其处理
if group_check.result == 1 or user_check.result == 1:
break
res = await check_and_set_plugin_cool_down(minutes=time, plugin=plugin_name)
if res.result == 1:
await bot.send(event=event, message=Message(f'{MessageSegment.at(user_id=user_id)}命令冷却中!\n{res.info}'))
raise IgnoredException('插件命令冷却中')
elif res.result == 0:
pass
else:
logger.error(f'插件冷却事件异常! group: {group_id}, user: {user_id}, plugin: {plugin_name}, error: {res.info}')
# 处理群组冷却
for time in [x.cool_down_time for x in plugin_cool_down_list if x.type == PluginCoolDown.group_type]:
if not group_id:
break
# 若有用户冷却则交由其处理
if user_check.result == 1:
break
res = await check_and_set_group_cool_down(minutes=time, plugin=plugin_name, group_id=group_id)
if res.result == 1:
await bot.send(event=event, message=Message(f'{MessageSegment.at(user_id=user_id)}命令冷却中!\n{res.info}'))
raise IgnoredException('群组命令冷却中')
elif res.result == 0:
pass
else:
logger.error(f'群组冷却事件异常! group: {group_id}, user: {user_id}, plugin: {plugin_name}, error: {res.info}')
# 处理用户冷却
for time in [x.cool_down_time for x in plugin_cool_down_list if x.type == PluginCoolDown.user_type]:
if not user_id:
break
res = await check_and_set_user_cool_down(minutes=time, plugin=plugin_name, user_id=user_id)
if res.result == 1:
await bot.send(event=event, message=Message(f'{MessageSegment.at(user_id=user_id)}命令冷却中!\n{res.info}'))
raise IgnoredException('用户命令冷却中')
elif res.result == 0:
pass
else:
logger.error(f'用户冷却事件异常! group: {group_id}, user: {user_id}, plugin: {plugin_name}, error: {res.info}')
```
#### File: utils/Omega_history/__init__.py
```python
from nonebot import MatcherGroup, on_message, on_request, on_notice, logger
from nonebot.plugin import on
from nonebot.typing import T_State
from nonebot.adapters.cqhttp.bot import Bot
from nonebot.adapters.cqhttp.event import Event
from omega_miya.utils.Omega_Base import DBHistory
# 注册事件响应器, 处理MessageEvent
Message_history = MatcherGroup(type='message', priority=101, block=True)
message_history = Message_history.on_message()
@message_history.handle()
async def handle_message(bot: Bot, event: Event, state: T_State):
try:
message_id = event.dict().get('message_id')
user_name = event.dict().get('sender').get('card')
if not user_name:
user_name = event.dict().get('sender').get('nickname')
time = event.dict().get('time')
self_id = event.dict().get('self_id')
post_type = event.get_type()
detail_type = event.dict().get(f'{event.get_type()}_type')
sub_type = event.dict().get('sub_type')
group_id = event.dict().get('group_id')
user_id = event.dict().get('user_id')
raw_data = repr(event)
msg_data = str(event.dict().get('message'))
new_event = DBHistory(time=time, self_id=self_id, post_type=post_type, detail_type=detail_type)
res = await new_event.add(sub_type=sub_type, event_id=message_id, group_id=group_id, user_id=user_id,
user_name=user_name, raw_data=raw_data, msg_data=msg_data)
if res.error:
logger.error(f'Message history recording Failed with database error: {res.info}')
except Exception as e:
logger.error(f'Message history recording Failed, error: {repr(e)}')
# 注册事件响应器, 处理message_sent
message_sent_history = on(type='message_sent', priority=101, block=True)
@message_sent_history.handle()
async def handle_message_sent_history(bot: Bot, event: Event, state: T_State):
try:
user_name = event.dict().get('sender').get('card')
if not user_name:
user_name = event.dict().get('sender').get('nickname')
time = event.dict().get('time')
self_id = event.dict().get('self_id')
post_type = event.get_type()
detail_type = 'self_sent'
sub_type = 'self'
group_id = event.dict().get('group_id')
user_id = event.dict().get('user_id')
raw_data = repr(event)
msg_data = str(event.dict().get('message'))
new_event = DBHistory(time=time, self_id=self_id, post_type=post_type, detail_type=detail_type)
res = await new_event.add(sub_type=sub_type, group_id=group_id, user_id=user_id, user_name=user_name,
raw_data=raw_data, msg_data=msg_data)
if res.error:
logger.error(f'Self-sent Message history recording Failed with database error: {res.info}')
except Exception as e:
logger.error(f'Self-sent Message history recording Failed, error: {repr(e)}')
# 注册事件响应器, 处理NoticeEvent
notice_history = on_notice(priority=101, block=True)
@notice_history.handle()
async def handle_notice(bot: Bot, event: Event, state: T_State):
try:
time = event.dict().get('time')
self_id = event.dict().get('self_id')
post_type = event.get_type()
detail_type = event.dict().get(f'{event.get_type()}_type')
sub_type = event.dict().get('sub_type')
group_id = event.dict().get('group_id')
user_id = event.dict().get('user_id')
raw_data = repr(event)
msg_data = str(event.dict().get('message'))
new_event = DBHistory(time=time, self_id=self_id, post_type=post_type, detail_type=detail_type)
res = await new_event.add(sub_type=sub_type, group_id=group_id, user_id=user_id, user_name=None,
raw_data=raw_data, msg_data=msg_data)
if res.error:
logger.error(f'Notice history recording Failed with database error: {res.info}')
except Exception as e:
logger.error(f'Notice history recording Failed, error: {repr(e)}')
# 注册事件响应器, 处理RequestEvent
request_history = on_request(priority=101, block=True)
@request_history.handle()
async def handle_request(bot: Bot, event: Event, state: T_State):
try:
time = event.dict().get('time')
self_id = event.dict().get('self_id')
post_type = event.get_type()
detail_type = event.dict().get(f'{event.get_type()}_type')
sub_type = event.dict().get('sub_type')
group_id = event.dict().get('group_id')
user_id = event.dict().get('user_id')
raw_data = repr(event)
msg_data = str(event.dict().get('message'))
new_event = DBHistory(time=time, self_id=self_id, post_type=post_type, detail_type=detail_type)
res = await new_event.add(sub_type=sub_type, group_id=group_id, user_id=user_id, user_name=None,
raw_data=raw_data, msg_data=msg_data)
if res.error:
logger.error(f'Request history recording Failed with database error: {res.info}')
except Exception as e:
logger.error(f'Request history recording Failed, error: {repr(e)}')
```
#### File: utils/Omega_plugin_utils/__init__.py
```python
from typing import Optional
from nonebot.plugin import Export
from nonebot.typing import T_State
from .rules import *
from .encrypt import AESEncryptStr
from .cooldown import *
from .permission import *
from .http_fetcher import HttpFetcher
from .picture_encoder import PicEncoder
from .zip_utils import create_zip_file, create_7z_file
def init_export(
plugin_export: Export,
custom_name: str,
usage: str,
auth_node: list = None,
cool_down: list = None,
**kwargs: str) -> Export:
setattr(plugin_export, 'custom_name', custom_name)
setattr(plugin_export, 'usage', usage)
setattr(plugin_export, 'auth_node', auth_node)
setattr(plugin_export, 'cool_down', cool_down)
for key, value in kwargs.items():
setattr(plugin_export, key, value)
return plugin_export
def init_permission_state(
name: str,
notice: Optional[bool] = None,
command: Optional[bool] = None,
level: Optional[int] = None,
auth_node: Optional[str] = None) -> T_State:
return {
'_matcher': name,
'_notice_permission': notice,
'_command_permission': command,
'_permission_level': level,
'_auth_node': auth_node
}
__all__ = [
'init_export',
'init_permission_state',
'has_notice_permission',
'has_command_permission',
'has_auth_node',
'has_level_or_node',
'permission_level',
'has_friend_private_permission',
'AESEncryptStr',
'PluginCoolDown',
'check_and_set_global_cool_down',
'check_and_set_plugin_cool_down',
'check_and_set_group_cool_down',
'check_and_set_user_cool_down',
'check_notice_permission',
'check_command_permission',
'check_permission_level',
'check_auth_node',
'check_friend_private_permission',
'HttpFetcher',
'PicEncoder',
'create_zip_file',
'create_7z_file'
]
```
#### File: utils/Omega_plugin_utils/permission.py
```python
from omega_miya.utils.Omega_Base import DBFriend, DBGroup, DBAuth
async def check_notice_permission(group_id: int) -> bool:
res = await DBGroup(group_id=group_id).permission_notice()
if res.result == 1:
return True
else:
return False
async def check_command_permission(group_id: int) -> bool:
res = await DBGroup(group_id=group_id).permission_command()
if res.result == 1:
return True
else:
return False
async def check_permission_level(group_id: int, level: int) -> bool:
res = await DBGroup(group_id=group_id).permission_level()
if res.result >= level:
return True
else:
return False
async def check_auth_node(auth_id: int, auth_type: str, auth_node: str) -> int:
auth = DBAuth(auth_id=auth_id, auth_type=auth_type, auth_node=auth_node)
tag_res = await auth.tags_info()
allow_tag = tag_res.result[0]
deny_tag = tag_res.result[1]
if allow_tag == 1 and deny_tag == 0:
return 1
elif allow_tag == -2 and deny_tag == -2:
return 0
else:
return -1
async def check_friend_private_permission(user_id: int) -> bool:
res = await DBFriend(user_id=user_id).get_private_permission()
if res.error:
return False
elif res.result == 1:
return True
else:
return False
__all__ = [
'check_notice_permission',
'check_command_permission',
'check_permission_level',
'check_auth_node',
'check_friend_private_permission'
]
```
#### File: utils/tencent_cloud_api/cloud_api.py
```python
import json
import hashlib
import hmac
import datetime
from dataclasses import dataclass
from typing import Dict, Any
import nonebot
from omega_miya.utils.Omega_plugin_utils import HttpFetcher
global_config = nonebot.get_driver().config
SECRET_ID = global_config.secret_id
SECRET_KEY = global_config.secret_key
class TencentCloudApi(object):
@dataclass
class ApiRes:
error: bool
info: str
result: dict
def success(self) -> bool:
if not self.error:
return True
else:
return False
def __init__(self,
secret_id: str,
secret_key: str,
host: str):
self.__secret_id = secret_id
self.__secret_key = secret_key
self.__host = host
self.__endpoint = f'https://{host}'
self.__service = host.split('.')[0]
self.__headers = {
"Content-Type": "application/json",
"Host": self.__host
}
self.__request_timestamp = int(datetime.datetime.now().timestamp())
self.__date = datetime.datetime.utcfromtimestamp(self.__request_timestamp).strftime('%Y-%m-%d')
self.__credential_scope = f'{self.__date}/{self.__service}/tc3_request'
sort_signed_headers = [f'{x}'.lower() for x in self.__headers.keys()]
sort_signed_headers.sort()
self.__signed_headers = ';'.join(sort_signed_headers)
def __upgrade_signed_header(self,
action: str,
region: str,
version: str,
payload: Dict[str, Any]):
self.__headers = {
'Authorization': self.__sign_v3(payload=payload),
'Content-Type': 'application/json',
'Host': self.__host,
'X-TC-Action': action,
'X-TC-Region': region,
'X-TC-Timestamp': str(self.__request_timestamp),
'X-TC-Version': version
}
def __canonical_request(self,
payload: Dict[str, Any],
http_request_method: str = 'POST',
canonical_uri: str = '/',
canonical_query_string: str = '') -> str:
sort_headers = [f'{x}:{y}\n'.lower() for x, y in self.__headers.items()]
sort_headers.sort()
canonical_headers = ''.join(sort_headers)
payload_str = json.dumps(payload)
hashed_request_payload = hashlib.sha256(payload_str.encode('utf-8')).hexdigest().lower()
canonical_request = f'{http_request_method}\n' \
f'{canonical_uri}\n' \
f'{canonical_query_string}\n' \
f'{canonical_headers}\n' \
f'{self.__signed_headers}\n' \
f'{hashed_request_payload}'
return canonical_request
def __string_to_sign(self,
canonical_request: str,
algorithm: str = 'TC3-HMAC-SHA256') -> str:
hashed_canonical_request = hashlib.sha256(canonical_request.encode('utf-8')).hexdigest().lower()
string_to_sign = f'{algorithm}\n' \
f'{self.__request_timestamp}\n' \
f'{self.__credential_scope}\n' \
f'{hashed_canonical_request}'
return string_to_sign
def __sign_v3(self, payload: Dict[str, Any]) -> str:
# 计算签名摘要函数
def __sign(key, msg):
return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()
secret_date = __sign(f'TC3{self.__secret_key}'.encode("utf-8"), self.__date)
secret_service = __sign(secret_date, self.__service)
secret_signing = __sign(secret_service, "tc3_request")
canonical_request = self.__canonical_request(payload=payload)
string_to_sign = self.__string_to_sign(canonical_request)
signature = hmac.new(secret_signing, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest()
authorization = f'TC3-HMAC-SHA256 Credential={self.__secret_id}/{self.__credential_scope}, ' \
f'SignedHeaders={self.__signed_headers}, ' \
f'Signature={signature}'
return authorization
async def post_request(self, action: str, region: str, version: str, payload: Dict[str, Any]) -> ApiRes:
self.__upgrade_signed_header(action=action, region=region, version=version, payload=payload)
fetcher = HttpFetcher(timeout=10, flag=f'tencent_cloud_api_{action}', headers=self.__headers)
result = await fetcher.post_json(url=self.__endpoint, json=payload)
return self.ApiRes(error=result.error, info=result.info, result=result.result)
__all__ = [
'TencentCloudApi',
'SECRET_ID',
'SECRET_KEY'
]
```
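`post_request` builds the TC3-HMAC-SHA256 signature and sends a single POST, so callers only pick the product host and pass `action`/`region`/`version` plus the JSON payload. A hedged sketch using the machine-translation product; the host, action, version, region and payload fields below are assumptions for illustration and should be checked against the Tencent Cloud docs:
```python
import asyncio

from omega_miya.utils.tencent_cloud_api.cloud_api import TencentCloudApi, SECRET_ID, SECRET_KEY


async def translate_demo(text: str) -> None:
    # Hypothetical product parameters; verify against the official API reference
    api = TencentCloudApi(secret_id=SECRET_ID, secret_key=SECRET_KEY, host='tmt.tencentcloudapi.com')
    payload = {'SourceText': text, 'Source': 'auto', 'Target': 'zh', 'ProjectId': 0}
    res = await api.post_request(action='TextTranslate', region='ap-chengdu', version='2018-03-21', payload=payload)
    if res.success():
        print(res.result)
    else:
        print(f'request failed: {res.info}')


if __name__ == '__main__':
    asyncio.run(translate_demo('hello world'))
```
Since the request timestamp is fixed in `__init__`, a fresh `TencentCloudApi` instance should be created for each call rather than kept around.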
{
"source": "58563528/omega-miya",
"score": 2
}
#### File: plugins/http_cat/data_source.py
```python
import os
import pathlib
from nonebot import get_driver
from omega_miya.utils.Omega_plugin_utils import HttpFetcher
from omega_miya.utils.Omega_Base import Result
global_config = get_driver().config
TMP_PATH = global_config.tmp_path_
API_URL = 'https://http.cat/'
async def get_http_cat(http_code: int) -> Result.TextResult:
file_name = f'{http_code}.jpg'
folder_path = os.path.abspath(os.path.join(TMP_PATH, 'http_cat'))
file_path = os.path.abspath(os.path.join(folder_path, file_name))
if os.path.exists(file_path):
file_url = pathlib.Path(file_path).as_uri()
return Result.TextResult(error=False, info='Success, file exists', result=file_url)
url = f'{API_URL}{http_code}.jpg'
fetcher = HttpFetcher(timeout=10, flag='http_cat')
result = await fetcher.download_file(url=url, path=folder_path, file_name=file_name)
if result.success():
file_url = pathlib.Path(result.result).as_uri()
return Result.TextResult(error=False, info='Success', result=file_url)
else:
return Result.TextResult(error=True, info=result.info, result='')
```
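`get_http_cat` hands back a `file://` URI inside a `TextResult`, which can be dropped straight into an image segment. A short hypothetical sketch (the relative import assumes it sits next to the plugin's `__init__.py`):
```python
from nonebot.adapters.cqhttp.message import MessageSegment

from .data_source import get_http_cat  # assumed relative import inside the plugin package


async def http_cat_segment(http_code: int = 404) -> MessageSegment:
    res = await get_http_cat(http_code)
    if res.success():
        return MessageSegment.image(res.result)
    # Fall back to plain text when the download failed
    return MessageSegment.text(f'获取 {http_code} 图片失败QAQ')
```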
#### File: plugins/Omega_auto_manager/group_welcome_message.py
```python
from nonebot import logger
from nonebot.plugin import on_notice, CommandGroup
from nonebot.typing import T_State
from nonebot.rule import to_me
from nonebot.permission import SUPERUSER
from nonebot.adapters.cqhttp.permission import GROUP_ADMIN, GROUP_OWNER
from nonebot.adapters.cqhttp.bot import Bot
from nonebot.adapters.cqhttp.message import Message, MessageSegment
from nonebot.adapters.cqhttp.event import GroupMessageEvent, GroupIncreaseNoticeEvent
from omega_miya.utils.Omega_Base import DBBot, DBBotGroup
from omega_miya.utils.Omega_plugin_utils import OmegaRules
SETTING_NAME: str = 'group_welcome_message'
DEFAULT_WELCOME_MSG: str = '欢迎新朋友~\n进群请先看群公告~\n一起愉快地聊天吧!'
# 注册事件响应器
WelcomeMsg = CommandGroup(
'WelcomeMsg',
rule=to_me(),
permission=SUPERUSER | GROUP_ADMIN | GROUP_OWNER,
priority=10,
block=True
)
welcome_msg_set = WelcomeMsg.command('set', aliases={'设置欢迎消息'})
welcome_msg_clear = WelcomeMsg.command('clear', aliases={'清空欢迎消息'})
# 修改默认参数处理
@welcome_msg_set.args_parser
async def parse(bot: Bot, event: GroupMessageEvent, state: T_State):
args = str(event.get_message()).strip()
if not args:
await welcome_msg_set.reject('你似乎没有发送有效的参数呢QAQ, 请重新发送:')
state[state["_current_key"]] = args
if state[state["_current_key"]] == '取消':
await welcome_msg_set.finish('操作已取消')
@welcome_msg_set.got('welcome_msg', prompt='请发送你要设置的欢迎消息:')
async def handle_first_receive(bot: Bot, event: GroupMessageEvent, state: T_State):
welcome_msg = state['welcome_msg']
group_id = event.group_id
self_bot = DBBot(self_qq=int(bot.self_id))
group = DBBotGroup(group_id=group_id, self_bot=self_bot)
msg_set_result = await group.setting_set(setting_name=SETTING_NAME, main_config='Custom',
extra_config=welcome_msg, setting_info='自定义群组欢迎信息')
if msg_set_result.success():
logger.info(f'已为群组: {group_id} 设置自定义欢迎信息: {welcome_msg}')
await welcome_msg_set.finish(f'已为本群组设定了自定义欢迎信息!')
else:
logger.error(f'为群组: {group_id} 设置自定义欢迎信息失败, error info: {msg_set_result.info}')
await welcome_msg_set.finish(f'为本群组设定自定义欢迎信息失败了QAQ, 请稍后再试或联系管理员处理')
@welcome_msg_clear.handle()
async def handle_first_receive(bot: Bot, event: GroupMessageEvent, state: T_State):
group_id = event.group_id
self_bot = DBBot(self_qq=int(bot.self_id))
group = DBBotGroup(group_id=group_id, self_bot=self_bot)
msg_set_result = await group.setting_del(setting_name=SETTING_NAME)
if msg_set_result.success():
logger.info(f'已为群组: {group_id} 清除自定义欢迎信息')
await welcome_msg_clear.finish(f'已清除了本群组设定的自定义欢迎信息!')
else:
logger.error(f'为群组: {group_id} 清除自定义欢迎信息失败, error info: {msg_set_result.info}')
await welcome_msg_clear.finish(f'为本群组清除自定义欢迎信息失败了QAQ, 请稍后再试或联系管理员处理')
# 注册事件响应器, 新增群成员
group_increase = on_notice(priority=100, rule=OmegaRules.has_group_command_permission())
@group_increase.handle()
async def handle_group_increase(bot: Bot, event: GroupIncreaseNoticeEvent, state: T_State):
user_id = event.user_id
group_id = event.group_id
self_bot = DBBot(self_qq=int(bot.self_id))
group = DBBotGroup(group_id=group_id, self_bot=self_bot)
# 获取自定义欢迎信息
welcome_msg_result = await group.setting_get(setting_name=SETTING_NAME)
if welcome_msg_result.success():
main, second, extra = welcome_msg_result.result
if extra:
msg = extra
else:
msg = DEFAULT_WELCOME_MSG
else:
msg = DEFAULT_WELCOME_MSG
# 发送欢迎消息
at_seg = MessageSegment.at(user_id=user_id)
await bot.send(event=event, message=Message(at_seg).append(msg))
logger.info(f'群组: {group_id}, 有新用户: {user_id} 进群')
__all__ = [
'WelcomeMsg',
'group_increase'
]
```
#### File: plugins/Omega_email/imap.py
```python
import imaplib
import email
import hashlib
from email.header import Header
from typing import List
class Email(object):
def __init__(self, date: str, header: str, sender: str, to: str, body: str = '', html: str = ''):
self.date = date
self.header = header
self.sender = sender
self.to = to
self.body = body
self.html = html
hash_str = str([date, header, sender, to])
md5 = hashlib.md5()
md5.update(hash_str.encode('utf-8'))
_hash = md5.hexdigest()
self.hash = _hash
def __repr__(self):
return f'<Email(header={self.header}, _from={self.sender}, to={self.to}' \
f"\n\nbody={self.body}\n\nhtml={self.html})>"
class EmailImap(object):
def __init__(self, host: str, address: str, password: str, port: int = 993):
self.__mail = imaplib.IMAP4_SSL(host=host, port=port)
self.__address = address
self.__password = password
def __enter__(self):
"""enter方法,返回file_handler"""
self.__mail.login(self.__address, self.__password)
return self.__mail
def __exit__(self, exc_type, exc_val, exc_tb):
"""exit方法,关闭文件并返回True"""
self.__mail.select()
if self.__mail.state == 'SELECTED':
self.__mail.close()
self.__mail.logout()
return True
def get_mail_info(self, charset, *criteria) -> List[Email]:
self.__mail.login(self.__address, self.__password)
if self.__address.endswith('@163.com'):
# 添加163邮箱 IMAP ID 验证
imaplib.Commands['ID'] = ('AUTH',)
args = ("name", "omega", "contact", "<EMAIL>", "version", "1.0.2", "vendor", "pyimaplibclient")
typ, dat = self.__mail._simple_command('ID', '("' + '" "'.join(args) + '")')
self.__mail._untagged_response(typ, dat, 'ID')
self.__mail.select()
typ, msg_nums = self.__mail.search(charset, *criteria)
msg_nums = str(msg_nums[0], encoding='utf8')
result_list = []
# 遍历所有邮件
for num in msg_nums.split(' '):
if num == '':
continue
stat_code, data = self.__mail.fetch(num, 'RFC822')
msg = email.message_from_bytes(data[0][1])
# 解析邮件
# 日期
date = email.header.decode_header(msg.get('Date'))[0][0]
date = str(date)
# 标题
header, charset = email.header.decode_header(msg.get('subject'))[0]
header = str(header, encoding=charset)
# 发件人
sender_info = email.header.decode_header(msg.get('from'))
sender = ''
for sender_text, charset in sender_info:
if charset and type(sender_text) == bytes:
sender_text = str(sender_text, encoding=charset)
sender += sender_text
elif type(sender_text) == bytes:
sender_text = str(sender_text, encoding='utf8')
sender += sender_text
else:
sender += sender_text
# 收件人
receiver_info = email.header.decode_header(msg.get('to'))
receiver = ''
for receiver_text, charset in receiver_info:
if charset and type(receiver_text) == bytes:
receiver_text = str(receiver_text, encoding=charset)
receiver += receiver_text
elif type(receiver_text) == bytes:
receiver_text = str(receiver_text, encoding='utf8')
receiver += receiver_text
else:
receiver += receiver_text
body = None
html = None
for part in msg.walk():
if part.get_content_type() == "text/plain":
charset = part.get_content_charset()
body = part.get_payload(decode=True)
if not body:
continue
if charset and type(body) == bytes:
body = str(body, encoding=charset)
elif type(body) == bytes:
body = str(body, encoding='utf8')
else:
body = str(body)
body = body.replace(r' ', '\n')
elif part.get_content_type() == "text/html":
charset = part.get_content_charset()
html = part.get_payload(decode=True)
if not html:
continue
if charset and type(html) == bytes:
html = str(html, encoding=charset)
elif type(html) == bytes:
html = str(html, encoding='utf8')
else:
html = str(html)
html = html.replace(' ', '')
else:
pass
result_list.append(Email(date=date, header=header, sender=sender, to=receiver, body=body, html=html))
return result_list
```
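A minimal usage sketch of the `EmailImap` helper above (not part of the original file): the host, account and credentials are placeholders, and `get_mail_info` performs its own login, so it is called on the plain instance rather than through the context manager.
```python
# Hypothetical usage sketch; host, address and password are placeholders, and the
# import path is assumed from the file layout above.
from imap import EmailImap

client = EmailImap(host='imap.example.com', address='user@example.com', password='********')

# charset=None lets the server pick a default; 'UNSEEN' fetches unread mail.
for mail in client.get_mail_info(None, 'UNSEEN'):
    # Each Email carries an md5 of (date, header, sender, to) usable for de-duplication.
    print(mail.hash, mail.header, mail.sender)
```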
#### File: plugins/Omega_sign_in/__init__.py
```python
from nonebot import CommandGroup, logger, export, get_driver
from nonebot.typing import T_State
from nonebot.adapters.cqhttp.bot import Bot
from nonebot.adapters.cqhttp.event import GroupMessageEvent
from nonebot.adapters.cqhttp.permission import GROUP
from omega_miya.utils.Omega_plugin_utils import init_export, init_permission_state
from omega_miya.utils.Omega_Base import DBUser
from .config import Config
__global_config = get_driver().config
plugin_config = Config(**__global_config.dict())
FAVORABILITY_ALIAS = plugin_config.favorability_alias
ENERGY_ALIAS = plugin_config.energy_alias
CURRENCY_ALIAS = plugin_config.currency_alias
class SignInException(Exception):
pass
class DuplicateException(SignInException):
pass
class FailedException(SignInException):
pass
# Custom plugin usage text
__plugin_name__ = '签到'
__plugin_usage__ = r'''【Omega 签到插件】
轻量化签到插件
好感度系统基础支持
仅限群聊使用
**Permission**
Command & Lv.10
or AuthNode
**AuthNode**
basic
**Usage**
/签到'''
# Declare the permission nodes this plugin exposes
__plugin_auth_node__ = [
'basic'
]
# Init plugin export
init_export(export(), __plugin_name__, __plugin_usage__, __plugin_auth_node__)
SignIn = CommandGroup(
'SignIn',
# Permission checks are intercepted by run_preprocessor; required permissions are initialized in default_state
state=init_permission_state(
name='sign_in',
command=True,
level=10,
auth_node='basic'),
permission=GROUP,
priority=10,
block=True)
sign_in = SignIn.command('sign_in', aliases={'签到'})
@sign_in.handle()
async def handle_first_receive(bot: Bot, event: GroupMessageEvent, state: T_State):
user = DBUser(user_id=event.user_id)
try:
# Attempt sign-in
sign_in_result = await user.sign_in()
if sign_in_result.error:
raise FailedException(f'签到失败, {sign_in_result.info}')
elif sign_in_result.result == 1:
raise DuplicateException('重复签到')
# Query continuous sign-in days
sign_in_c_d_result = await user.sign_in_continuous_days()
if sign_in_c_d_result.error:
raise FailedException(f'查询连续签到时间失败, {sign_in_c_d_result.info}')
continuous_days = sign_in_c_d_result.result
# Try to add favorability for the user
if continuous_days < 7:
favorability_inc = 10
currency_inc = 1
elif continuous_days < 30:
favorability_inc = 30
currency_inc = 2
else:
favorability_inc = 50
currency_inc = 5
favorability_result = await user.favorability_add(favorability=favorability_inc, currency=currency_inc)
if favorability_result.error and favorability_result.info == 'NoResultFound':
favorability_result = await user.favorability_reset(favorability=favorability_inc, currency=currency_inc)
if favorability_result.error:
raise FailedException(f'增加好感度失败, {favorability_result.info}')
# Fetch current favorability status
favorability_status_result = await user.favorability_status()
if favorability_status_result.error:
raise FailedException(f'获取好感度信息失败, {favorability_status_result}')
status, mood, favorability, energy, currency, response_threshold = favorability_status_result.result
msg = f'签到成功! {FAVORABILITY_ALIAS}+{favorability_inc}, {CURRENCY_ALIAS}+{currency_inc}!\n\n' \
f'你已连续签到{continuous_days}天\n' \
f'当前{FAVORABILITY_ALIAS}: {favorability}\n' \
f'当前{CURRENCY_ALIAS}: {currency}'
logger.info(f'{event.group_id}/{event.user_id} 签到成功')
await sign_in.finish(msg)
except DuplicateException as e:
logger.info(f'{event.group_id}/{event.user_id} 重复签到, {str(e)}')
await sign_in.finish('你今天已经签到过了, 请明天再来吧~')
except FailedException as e:
logger.error(f'{event.group_id}/{event.user_id} 签到失败, {str(e)}')
await sign_in.finish('签到失败了QAQ, 请稍后再试或联系管理员处理')
```
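For reference, the continuous-sign-in reward tiers hard-coded in the handler above can be restated as a small helper; this is only an illustrative summary, not part of the plugin.
```python
def sign_in_rewards(continuous_days: int) -> tuple:
    """Illustrative restatement of the reward tiers used in handle_first_receive.

    Returns (favorability_inc, currency_inc).
    """
    if continuous_days < 7:
        return 10, 1
    elif continuous_days < 30:
        return 30, 2
    else:
        return 50, 5


assert sign_in_rewards(3) == (10, 1)
assert sign_in_rewards(15) == (30, 2)
assert sign_in_rewards(45) == (50, 5)
```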
#### File: plugins/repeater/__init__.py
```python
from typing import Dict
from nonebot import on_message
from nonebot.typing import T_State
from nonebot.exception import FinishedException
from nonebot.adapters.cqhttp.bot import Bot
from nonebot.adapters.cqhttp.event import GroupMessageEvent
from nonebot.adapters.cqhttp.permission import GROUP
from omega_miya.utils.Omega_plugin_utils import OmegaRules
from .data_source import REPLY_RULES
LAST_MSG: Dict[int, str] = {}
LAST_REPEAT_MSG: Dict[int, str] = {}
REPEAT_COUNT: Dict[int, int] = {}
repeater = on_message(rule=OmegaRules.has_group_command_permission(), permission=GROUP, priority=100, block=False)
@repeater.handle()
async def handle_ignore_msg(bot: Bot, event: GroupMessageEvent, state: T_State):
msg = event.raw_message
if msg.startswith('/'):
raise FinishedException
elif msg.startswith('!SU'):
raise FinishedException
@repeater.handle()
async def handle_auto_reply(bot: Bot, event: GroupMessageEvent, state: T_State):
# Handle auto-reply rules
msg = event.raw_message
group_id = event.group_id
check_res, reply_msg = REPLY_RULES.check_rule(group_id=group_id, message=msg)
if check_res:
await repeater.finish(reply_msg)
@repeater.handle()
async def handle_repeater(bot: Bot, event: GroupMessageEvent, state: T_State):
# Handle the repeater
global LAST_MSG, LAST_REPEAT_MSG, REPEAT_COUNT
group_id = event.group_id
try:
LAST_MSG[group_id]
except KeyError:
LAST_MSG[group_id] = ''
try:
LAST_REPEAT_MSG[group_id]
except KeyError:
LAST_REPEAT_MSG[group_id] = ''
message = event.get_message()
raw_msg = event.raw_message
# If this message differs from the previous one, or matches the last repeated message, reset the repeat counter and update LAST_MSG
if raw_msg != LAST_MSG[group_id] or raw_msg == LAST_REPEAT_MSG[group_id]:
LAST_MSG[group_id] = raw_msg
REPEAT_COUNT[group_id] = 0
return
# Otherwise it matches the previous message, so start counting repeats
else:
REPEAT_COUNT[group_id] += 1
LAST_REPEAT_MSG[group_id] = ''
# A repeat count of 2 means three identical messages in a row: trigger the repeat, record it in LAST_REPEAT_MSG and reset the counter
if REPEAT_COUNT[group_id] >= 2:
await repeater.send(message)
REPEAT_COUNT[group_id] = 0
LAST_MSG[group_id] = ''
LAST_REPEAT_MSG[group_id] = raw_msg
```
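The repeat logic above only depends on three per-group values. The standalone sketch below (hypothetical message stream, no bot involved) replays the same rules to show that a repeat fires once after three identical messages in a row and that the echoed text is not echoed again.
```python
# Standalone replay of the repeater rules from the handler above.
def simulate(messages):
    last_msg, last_repeat_msg, repeat_count = '', '', 0
    echoed = []
    for msg in messages:
        # Same branch structure as handle_repeater.
        if msg != last_msg or msg == last_repeat_msg:
            last_msg = msg
            repeat_count = 0
            continue
        repeat_count += 1
        last_repeat_msg = ''
        if repeat_count >= 2:
            echoed.append(msg)
            repeat_count = 0
            last_msg = ''
            last_repeat_msg = msg
    return echoed


assert simulate(['a', 'a', 'a', 'a', 'a']) == ['a']
```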
#### File: plugins/schedule_message/__init__.py
```python
import nonebot
import re
from datetime import datetime
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from nonebot import MatcherGroup, export, logger, require
from nonebot.typing import T_State
from nonebot.permission import SUPERUSER
from nonebot.adapters.cqhttp.permission import GROUP_ADMIN, GROUP_OWNER
from nonebot.adapters.cqhttp.bot import Bot
from nonebot.adapters.cqhttp.message import Message
from nonebot.adapters.cqhttp.event import GroupMessageEvent
from omega_miya.utils.Omega_Base import DBBot, DBBotGroup, Result
from omega_miya.utils.Omega_plugin_utils import init_export, init_permission_state
# Custom plugin usage text
__plugin_name__ = '定时消息'
__plugin_usage__ = r'''【定时消息】
设置群组定时通知消息
仅限群聊使用
**Permission**
Command & Lv.10
or AuthNode
**AuthNode**
basic
**Usage**
**GroupAdmin and SuperUser Only**
/设置定时消息
/查看定时消息
/删除定时消息'''
# Declare the permission nodes this plugin exposes
__plugin_auth_node__ = [
'basic'
]
# Init plugin export
init_export(export(), __plugin_name__, __plugin_usage__, __plugin_auth_node__)
driver = nonebot.get_driver()
scheduler: AsyncIOScheduler = require("nonebot_plugin_apscheduler").scheduler
# Register matchers
ScheduleMsg = MatcherGroup(
type='message',
# Permission checks are intercepted by run_preprocessor; required permissions are initialized in default_state
state=init_permission_state(
name='schedule_message',
command=True,
level=10,
auth_node='basic'),
permission=GROUP_ADMIN | GROUP_OWNER | SUPERUSER,
priority=10,
block=True)
set_schedule_message = ScheduleMsg.on_command('设置定时消息', aliases={'添加定时消息'})
list_schedule_message = ScheduleMsg.on_command('查看定时消息')
del_schedule_message = ScheduleMsg.on_command('删除定时消息', aliases={'移除定时消息'})
# Set-schedule-message section
# Override the default args_parser
@set_schedule_message.args_parser
async def parse(bot: Bot, event: GroupMessageEvent, state: T_State):
args = str(event.get_message()).strip()
if not args:
await set_schedule_message.reject('你似乎没有发送有效的参数呢QAQ, 请重新发送:')
state[state["_current_key"]] = args
if state[state["_current_key"]] == '取消':
await set_schedule_message.finish('操作已取消')
@set_schedule_message.got('mode', prompt='请发送设置定时消息的模式:\n【 cron / interval 】\n\n模式说明:\n'
'cron(闹钟) - 每天某个具体时间发送消息\n'
'interval(定时器) - 每间隔一定时间发送消息')
async def handle_mode(bot: Bot, event: GroupMessageEvent, state: T_State):
mode = state['mode']
if mode not in ['cron', 'interval']:
await set_schedule_message.finish('您发送的不是有效的模式QAQ')
if mode == 'interval':
state['repeat'] = 'all'
@set_schedule_message.got('name', prompt='请发送为当前定时任务设置的名称:')
async def handle_name(bot: Bot, event: GroupMessageEvent, state: T_State):
_name = state['name']
if len(_name) > 100:
await set_schedule_message.finish('设置的名称过长QAQ')
@set_schedule_message.got('time', prompt='请发送你要设置定时时间, 时间格式为24小时制四位数字:\n\n设置说明:\n'
'若模式为cron(闹钟), 则“1830”代表每天下午六点半发送定时消息\n'
'若模式为interval(定时器), 则“0025”代表每隔25分钟发送定时消息')
async def handle_time(bot: Bot, event: GroupMessageEvent, state: T_State):
time = state['time']
mode = state['mode']
try:
_time = datetime.strptime(time, '%H%M')
_hour = _time.hour
_minute = _time.minute
except ValueError:
await set_schedule_message.finish('输入的时间格式错误QAQ, 应该为24小时制四位数字')
return
if mode == 'interval' and _hour == 0 and _minute == 0:
await set_schedule_message.finish('输入的时间格式错误QAQ, interval模式不允许时间为0000')
return
state['hour'] = _hour
state['minute'] = _minute
@set_schedule_message.got('repeat', prompt='是否按星期重复?\n\n若只想在一周的某一天执行请以下日期中选择:\n'
'【mon/tue/wed/thu/fri/sat/sun】\n\n'
'若想每一天都执行请输入:\n【all】')
async def handle_repeat(bot: Bot, event: GroupMessageEvent, state: T_State):
repeat = state['repeat']
if repeat not in ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun', 'all']:
await set_schedule_message.finish('输入的日期格式错误QAQ, 请在【mon/tue/wed/thu/fri/sat/sun/all】中选择输入')
@set_schedule_message.got('message', prompt='请发送你要设置的消息内容:')
async def handle_message(bot: Bot, event: GroupMessageEvent, state: T_State):
message = state['message']
name = state['name']
mode = state['mode']
hour = state['hour']
minute = state['minute']
repeat = state['repeat']
group_id = event.group_id
self_bot = DBBot(self_qq=int(bot.self_id))
group = DBBotGroup(group_id=group_id, self_bot=self_bot)
try:
await add_scheduler(
group=group, schedule_name=name, mode=mode, hour=hour, minute=minute, repeat=repeat, message=message)
except Exception as e:
logger.error(f'为群组: {group_id} 设置群组定时消息失败任务, 添加计划任务时发生错误: {repr(e)}')
await set_schedule_message.finish(f'为本群组设定群组定时消息失败了QAQ, 请稍后再试或联系管理员处理')
msg_set_result = await add_db_group_schedule_message(
group=group, schedule_name=name, mode=mode, hour=hour, minute=minute, repeat=repeat, message=message)
if msg_set_result.success():
logger.info(f'已为群组: {group_id} 设置群组定时消息: {name}/{mode}/{hour}:{minute}')
await set_schedule_message.finish(f'已为本群组设定了群组定时消息:\n{name}/{mode}/{repeat}:{hour}:{minute}')
else:
logger.error(f'为群组: {group_id} 设置群组定时消息失败, error info: {msg_set_result.info}')
await set_schedule_message.finish(f'为本群组设定了群组定时消息失败了QAQ, 请稍后再试或联系管理员处理')
# List-schedule-message section
@list_schedule_message.handle()
async def handle(bot: Bot, event: GroupMessageEvent, state: T_State):
self_bot = DBBot(self_qq=int(bot.self_id))
group = DBBotGroup(group_id=event.group_id, self_bot=self_bot)
schedule_result = await list_db_group_schedule_message(group=group)
if schedule_result.error:
logger.error(f'Get group {event.group_id} message schedule list failed: {schedule_result.info}')
await list_schedule_message.finish(f'获取群定时消息失败了QAQ, 请稍后再试或联系管理员处理')
msg = f'本群已设置的定时消息任务:\n{"="*12}'
for _name, _mode, _time, _message in schedule_result.result:
_name = re.sub(r'^ScheduleMsg_', '', str(_name))
msg += f'\n【{_name}】 - {_mode}({_time})'
await list_schedule_message.finish(msg)
# Delete-schedule-message section
# Override the default args_parser
@del_schedule_message.args_parser
async def parse(bot: Bot, event: GroupMessageEvent, state: T_State):
args = str(event.get_message()).strip()
if not args:
await del_schedule_message.reject('你似乎没有发送有效的参数呢QAQ, 请重新发送:')
state[state["_current_key"]] = args
if state[state["_current_key"]] == '取消':
await del_schedule_message.finish('操作已取消')
@del_schedule_message.handle()
async def handle_jobs(bot: Bot, event: GroupMessageEvent, state: T_State):
self_bot = DBBot(self_qq=int(bot.self_id))
group = DBBotGroup(group_id=event.group_id, self_bot=self_bot)
schedule_result = await list_db_group_schedule_message(group=group)
if schedule_result.error:
logger.error(f'Get group {event.group_id} message schedule list failed: {schedule_result.info}')
await del_schedule_message.finish(f'获取群定时消息列表失败了QAQ, 请稍后再试或联系管理员处理')
msg = f'本群已设置的定时消息任务有:\n{"="*12}'
for _name, _mode, _time, _message in schedule_result.result:
_name = re.sub(r'^ScheduleMsg_', '', str(_name))
msg += f'\n【{_name}】 - {_mode}({_time})'
await del_schedule_message.send(msg)
@del_schedule_message.got('name', prompt='请发送将要移除的定时任务的名称:')
async def handle_remove(bot: Bot, event: GroupMessageEvent, state: T_State):
name = state['name']
group_id = event.group_id
self_bot = DBBot(self_qq=int(bot.self_id))
group = DBBotGroup(group_id=group_id, self_bot=self_bot)
try:
await remove_scheduler(group=group, schedule_name=name)
except Exception as e:
logger.error(f'移除群组: {group_id} 定时消息失败, 移除计划任务时发生错误: {repr(e)}')
await del_schedule_message.finish(f'移除群组定时消息失败了QAQ, 请稍后再试或联系管理员处理')
msg_del_result = await del_db_group_schedule_message(group=group, schedule_name=name)
if msg_del_result.success():
logger.info(f'已移除群组: {group_id} 群组定时消息: {name}')
await del_schedule_message.finish(f'已移除群组定时消息: {name}')
else:
logger.error(f'移除群组: {group_id} 群组定时消息失败, error info: {msg_del_result.info}')
await del_schedule_message.finish(f'移除群组定时消息失败了QAQ, 请稍后再试或联系管理员处理')
async def add_db_group_schedule_message(
group: DBBotGroup,
schedule_name: str,
mode: str,
hour: int,
minute: int,
repeat: str,
message: str) -> Result.IntResult:
# Build the setting ID for this scheduled job
_schedule_setting_id = f'ScheduleMsg_{schedule_name}'
schedule_set_result = await group.setting_set(setting_name=_schedule_setting_id, main_config=mode,
secondary_config=f'{repeat}:{hour}:{minute}',
extra_config=message, setting_info='群组定时消息')
return schedule_set_result
async def list_db_group_schedule_message(group: DBBotGroup) -> Result.ListResult:
exist_setting = await group.setting_list()
if exist_setting.error:
return Result.ListResult(error=True, info=f'Get config wrong: {exist_setting.info}', result=[])
else:
result = [x for x in exist_setting.result if str(x[0]).startswith('ScheduleMsg_')]
return Result.ListResult(error=False, info=f'Success', result=result)
async def del_db_group_schedule_message(group: DBBotGroup, schedule_name: str) -> Result.IntResult:
_schedule_setting_id = f'ScheduleMsg_{schedule_name}'
result = await group.setting_del(setting_name=_schedule_setting_id)
return result
async def add_scheduler(
group: DBBotGroup,
schedule_name: str,
mode: str,
hour: int,
minute: int,
repeat: str,
message: str):
global scheduler
_schedule_setting_id = f'ScheduleMsg_{group.self_bot.self_qq}_{schedule_name}'
self_bot: Bot = nonebot.get_bots().get(str(group.self_bot.self_qq), None)
if not self_bot:
raise ValueError('Can not get Bot')
async def _scheduler_handle():
await self_bot.send_group_msg(group_id=group.group_id, message=Message(f'【定时消息】\n{"="*12}\n{message}'))
if mode == 'cron':
if repeat == 'all':
scheduler.add_job(
_scheduler_handle,
'cron',
hour=hour,
minute=minute,
id=_schedule_setting_id,
coalesce=True,
misfire_grace_time=10
)
else:
scheduler.add_job(
_scheduler_handle,
'cron',
day_of_week=repeat,
hour=hour,
minute=minute,
id=_schedule_setting_id,
coalesce=True,
misfire_grace_time=10
)
elif mode == 'interval':
if hour == 0 and minute != 0:
scheduler.add_job(
_scheduler_handle,
'interval',
minutes=minute,
id=_schedule_setting_id,
coalesce=True,
misfire_grace_time=10
)
elif minute == 0:
scheduler.add_job(
_scheduler_handle,
'interval',
hours=hour,
id=_schedule_setting_id,
coalesce=True,
misfire_grace_time=10
)
else:
scheduler.add_job(
_scheduler_handle,
'interval',
hours=hour,
minutes=minute,
id=_schedule_setting_id,
coalesce=True,
misfire_grace_time=10
)
else:
raise ValueError(f'Unknown mode {mode}')
async def remove_scheduler(group: DBBotGroup, schedule_name: str):
global scheduler
_schedule_setting_id = f'ScheduleMsg_{group.self_bot.self_qq}_{schedule_name}'
scheduler.remove_job(_schedule_setting_id)
# Initialize the bot's scheduled messages on connect
@driver.on_bot_connect
async def init_bot_message_schedule(bot: Bot):
self_bot = DBBot(self_qq=int(bot.self_id))
group_list_result = await DBBotGroup.list_exist_bot_groups(self_bot=self_bot)
if group_list_result.error:
logger.error(f'Init bot message schedule failed, get bot group list failed: {group_list_result.info}')
for group in group_list_result.result:
_bot_group = DBBotGroup(group_id=group, self_bot=self_bot)
schedule_result = await list_db_group_schedule_message(group=_bot_group)
if schedule_result.error:
logger.error(f'Error occurred in init bot message schedule, '
f'get group {_bot_group.group_id} message schedule list failed: {schedule_result.info}')
continue
for _name, _mode, _time, _message in schedule_result.result:
_name = re.sub(r'^ScheduleMsg_', '', str(_name))
_repeat, _hour, _minute = [x for x in str(_time).split(':', maxsplit=3)]
_hour = int(_hour)
_minute = int(_minute)
try:
await add_scheduler(group=_bot_group, schedule_name=_name,
mode=_mode, hour=_hour, minute=_minute, repeat=_repeat, message=_message)
except Exception as e:
logger.error(f'Init bot message schedule failed, '
f'为群组: {_bot_group.group_id} 添加群组定时消息任务失败, 添加计划任务时发生错误: {repr(e)}')
continue
# Remove the bot's scheduled messages on disconnect
@driver.on_bot_disconnect
async def remove_bot_message_schedule(bot: Bot):
self_bot = DBBot(self_qq=int(bot.self_id))
group_list_result = await DBBotGroup.list_exist_bot_groups(self_bot=self_bot)
if group_list_result.error:
logger.error(f'Remove bot message schedule failed, get bot group list failed: {group_list_result.info}')
for group in group_list_result.result:
_bot_group = DBBotGroup(group_id=group, self_bot=self_bot)
schedule_result = await list_db_group_schedule_message(group=_bot_group)
if schedule_result.error:
logger.error(f'Error occurred in remove bot message schedule, '
f'get group {_bot_group.group_id} message schedule list failed: {schedule_result.info}')
continue
for _name, _mode, _time, _message in schedule_result.result:
_repeat, _hour, _minute = [x for x in str(_time).split(':', maxsplit=3)]
_hour = int(_hour)
_minute = int(_minute)
try:
await remove_scheduler(group=_bot_group, schedule_name=_name)
except Exception as e:
logger.error(f'Remove bot message schedule failed, '
f'移除群组: {_bot_group.group_id} 定时消息任务失败, 移除计划任务时发生错误: {repr(e)}')
continue
```
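Each schedule is persisted as a group setting whose secondary_config packs `repeat:hour:minute`. The sketch below mirrors how that string is built in handle_message and parsed back in init_bot_message_schedule; the values are illustrative only.
```python
# Illustrative round trip of the 'repeat:hour:minute' packing used above.
def pack_schedule(repeat: str, hour: int, minute: int) -> str:
    return f'{repeat}:{hour}:{minute}'


def unpack_schedule(packed: str) -> tuple:
    repeat, hour, minute = packed.split(':', maxsplit=2)
    return repeat, int(hour), int(minute)


packed = pack_schedule('all', 18, 30)        # a daily 18:30 cron message
assert unpack_schedule(packed) == ('all', 18, 30)
```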
#### File: plugins/search_image/__init__.py
```python
import random
import asyncio
from nonebot import on_command, export, logger, get_driver
from nonebot.typing import T_State
from nonebot.adapters.cqhttp.bot import Bot
from nonebot.adapters.cqhttp.event import MessageEvent, GroupMessageEvent, PrivateMessageEvent
from nonebot.adapters.cqhttp.permission import GROUP, PRIVATE_FRIEND
from nonebot.adapters.cqhttp import MessageSegment, Message
from omega_miya.utils.Omega_Base import DBBot
from omega_miya.utils.Omega_plugin_utils import init_export, init_permission_state, PicEncoder, PermissionChecker
from omega_miya.utils.pixiv_utils import PixivIllust
from .utils import SEARCH_ENGINE, HEADERS
from .config import Config
__global_config = get_driver().config
plugin_config = Config(**__global_config.dict())
ENABLE_SAUCENAO = plugin_config.enable_saucenao
ENABLE_IQDB = plugin_config.enable_iqdb
ENABLE_ASCII2D = plugin_config.enable_ascii2d
# Custom plugin usage text
__plugin_name__ = '识图'
__plugin_usage__ = r'''【识图助手】
使用SauceNAO/ascii2d识别各类图片、插画
群组/私聊可用
**Permission**
Friend Private
Command & Lv.50
or AuthNode
**AuthNode**
basic
**Usage**
/识图
**Hidden Command**
/再来点'''
# Declare the permission nodes this plugin exposes
__plugin_auth_node__ = [
'basic',
'recommend_image',
'allow_recommend_r18'
]
# Init plugin export
init_export(export(), __plugin_name__, __plugin_usage__, __plugin_auth_node__)
# Register matchers
search_image = on_command(
'识图',
aliases={'搜图'},
# Permission checks are intercepted by run_preprocessor; required permissions are initialized in default_state
state=init_permission_state(
name='search_image',
command=True,
level=50,
auth_node='basic'),
permission=GROUP | PRIVATE_FRIEND,
priority=20,
block=True)
# Override the default args_parser
@search_image.args_parser
async def parse(bot: Bot, event: MessageEvent, state: T_State):
args = str(event.get_message()).strip().split()
if not args:
await search_image.reject('你似乎没有发送有效的消息呢QAQ, 请重新发送:')
if state["_current_key"] == 'using_engine':
if args[0] == '是':
return
else:
await search_image.finish('操作已取消')
state[state["_current_key"]] = args[0]
if state[state["_current_key"]] == '取消':
await search_image.finish('操作已取消')
for msg_seg in event.message:
if msg_seg.type == 'image':
state[state["_current_key"]] = msg_seg.data.get('url')
return
@search_image.handle()
async def handle_first_receive(bot: Bot, event: MessageEvent, state: T_State):
# Image search engine switches
usable_engine = []
if ENABLE_SAUCENAO:
usable_engine.append('saucenao')
if ENABLE_IQDB:
usable_engine.append('iqdb')
if ENABLE_ASCII2D:
usable_engine.append('ascii2d')
state['using_engine'] = usable_engine.pop(0) if usable_engine else None
state['usable_engine'] = usable_engine
# Extract the image url; only the first image in the message is used
img_url = None
if event.reply:
for msg_seg in event.reply.message:
if msg_seg.type == 'image':
img_url = msg_seg.data.get('url')
break
else:
for msg_seg in event.message:
if msg_seg.type == 'image':
img_url = msg_seg.data.get('url')
break
if img_url:
state['image_url'] = img_url
return
args = str(event.get_plaintext()).strip().lower().split()
if args:
await search_image.finish('你发送的好像不是图片呢QAQ')
@search_image.got('image_url', prompt='请发送你想要识别的图片:')
async def handle_got_image(bot: Bot, event: MessageEvent, state: T_State):
image_url = state['image_url']
if not str(image_url).startswith('http'):
await search_image.finish('错误QAQ,你发送的不是有效的图片')
await search_image.send('获取识别结果中, 请稍后~')
@search_image.got('using_engine', prompt='使用识图引擎识图:')
async def handle_saucenao(bot: Bot, event: MessageEvent, state: T_State):
image_url = state['image_url']
using_engine = state['using_engine']
usable_engine = list(state['usable_engine'])
# Fetch the recognition result
search_engine = SEARCH_ENGINE.get(using_engine, None)
if using_engine and search_engine:
identify_result = await search_engine(image_url)
if identify_result.success() and identify_result.result:
# Got results, fall through to the result-parsing handler
pass
else:
# No results
if identify_result.error:
logger.warning(f'{using_engine}引擎获取识别结果失败: {identify_result.info}')
if usable_engine:
# Other search engines are still available
next_using_engine = usable_engine.pop(0)
msg = f'{using_engine}引擎没有找到相似度足够高的图片,是否继续使用{next_using_engine}引擎识别图片?\n\n【是/否】'
state['using_engine'] = next_using_engine
state['usable_engine'] = usable_engine
await search_image.reject(msg)
else:
# No search engines left to try
logger.info(f'{event.user_id} 使用了searchimage所有的识图引擎, 但没有找到相似的图片')
await search_image.finish('没有找到相似度足够高的图片QAQ')
else:
logger.error(f'获取识图引擎异常, using_engine: {using_engine}')
await search_image.finish('发生了意外的错误QAQ, 请稍后再试或联系管理员')
return
state['identify_result'] = identify_result.result
@search_image.handle()
async def handle_result(bot: Bot, event: MessageEvent, state: T_State):
if isinstance(event, GroupMessageEvent):
group_id = event.group_id
else:
group_id = 'Private event'
identify_result = state['identify_result']
try:
if identify_result:
for item in identify_result:
try:
if isinstance(item['ext_urls'], list):
ext_urls = '\n'.join(item['ext_urls'])
else:
ext_urls = item['ext_urls'].strip()
img_result = await PicEncoder(
pic_url=item['thumbnail'], headers=HEADERS).get_file(folder_flag='search_image')
if img_result.error:
msg = f"识别结果: {item['index_name']}\n\n相似度: {item['similarity']}\n资源链接: {ext_urls}"
await search_image.send(msg)
else:
img_seg = MessageSegment.image(img_result.result)
msg = f"识别结果: {item['index_name']}\n\n相似度: {item['similarity']}\n资源链接: {ext_urls}\n{img_seg}"
await search_image.send(Message(msg))
except Exception as e:
logger.warning(f'处理和发送识别结果时发生了错误: {repr(e)}')
continue
logger.info(f"{group_id} / {event.user_id} 使用searchimage成功搜索了一张图片")
return
else:
await search_image.send('没有找到相似度足够高的图片QAQ')
logger.info(f"{group_id} / {event.user_id} 使用了searchimage, 但没有找到相似的图片")
return
except Exception as e:
await search_image.send('识图失败, 发生了意外的错误QAQ, 请稍后重试')
logger.error(f"{group_id} / {event.user_id} 使用命令searchimage时发生了错误: {repr(e)}")
return
# Register matchers
recommend_image = on_command(  # find similar works via the pixiv related-works recommendation API
'再来点',
aliases={'多来点', '相似作品', '类似作品'},
# Permission checks are intercepted by run_preprocessor; required permissions are initialized in default_state
state=init_permission_state(
name='search_image_recommend_image',
command=True,
auth_node='recommend_image'),
permission=GROUP | PRIVATE_FRIEND,
priority=20,
block=True)
@recommend_image.handle()
async def handle_first_receive(bot: Bot, event: MessageEvent, state: T_State):
# Capture the image to match from the replied message
# Only the first qualifying link or image is used
if event.reply:
# Check links first
for msg_seg in event.reply.message:
if msg_seg.type == 'text':
text = msg_seg.data.get('text')
if pid := PixivIllust.parse_pid_from_url(text=text):
state['pid'] = pid
logger.debug(f"Recommend image | 已从消息段文本匹配到 pixiv url, pid: {pid}")
return
# A fragmented message may have split the link across segments
raw_text = event.reply.dict().get('raw_message')
if pid := PixivIllust.parse_pid_from_url(text=raw_text):
state['pid'] = pid
logger.debug(f"Recommend image | 已从消息 raw 文本匹配到 pixiv url, pid: {pid}")
return
# Nothing found, fall back to recognizing the image; only the saucenao api is used for accuracy
for msg_seg in event.reply.message:
if msg_seg.type == 'image':
img_url = msg_seg.data.get('url')
saucenao_search_engine = SEARCH_ENGINE.get('saucenao')
identify_result = await saucenao_search_engine(img_url)
# Match a pixiv work from the recognition result
for url_list in [x.get('ext_urls') for x in identify_result.result]:
for url in url_list:
if pid := PixivIllust.parse_pid_from_url(text=url):
state['pid'] = pid
logger.debug(f"Recommend image | 已从识别图片匹配到 pixiv url, pid: {pid}")
return
else:
logger.debug(f'Recommend image | 命令没有引用消息, 操作已取消')
await recommend_image.finish('没有引用需要查找的图片QAQ, 请使用本命令时直接回复相关消息')
@recommend_image.handle()
async def handle_illust_recommend(bot: Bot, event: MessageEvent, state: T_State):
pid = state.get('pid')
if not pid:
logger.debug(f'Recommend image | 没有匹配到图片pid, 操作已取消')
await recommend_image.finish('没有匹配到相关图片QAQ, 请确认搜索的图片是在 Pixiv 上的作品')
recommend_result = await PixivIllust(pid=pid).get_recommend(init_limit=36)
if recommend_result.error:
logger.warning(f'Recommend image | 获取相似作品信息失败, pid: {pid}, error: {recommend_result.info}')
await recommend_image.finish('获取相关作品信息失败QAQ, 原作品可能已经被删除')
# Fetch info for the recommended works
await recommend_image.send('稍等, 正在获取相似作品~')
pid_list = [x.get('id') for x in recommend_result.result.get('illusts') if x.get('illustType') == 0]
tasks = [PixivIllust(pid=x).get_illust_data() for x in pid_list]
recommend_illust_data_result = await asyncio.gather(*tasks)
# Check the r18 permission node
if isinstance(event, PrivateMessageEvent):
user_id = event.user_id
auth_checker = await PermissionChecker(self_bot=DBBot(self_qq=int(bot.self_id))). \
check_auth_node(auth_id=user_id, auth_type='user', auth_node='search_image.allow_recommend_r18')
elif isinstance(event, GroupMessageEvent):
group_id = event.group_id
auth_checker = await PermissionChecker(self_bot=DBBot(self_qq=int(bot.self_id))). \
check_auth_node(auth_id=group_id, auth_type='group', auth_node='search_image.allow_recommend_r18')
else:
auth_checker = 0
# Filter recommendations: at least 2k bookmarks, likes no less than half the bookmarks, like rate above 5%
if auth_checker == 1:
filtered_illust_data_result = [x for x in recommend_illust_data_result if (
x.success() and
2000 <= x.result.get('bookmark_count') <= 2 * x.result.get('like_count') and
x.result.get('view_count') <= 20 * x.result.get('like_count')
)]
else:
filtered_illust_data_result = [x for x in recommend_illust_data_result if (
x.success() and
not x.result.get('is_r18') and
2000 <= x.result.get('bookmark_count') <= 2 * x.result.get('like_count') and
x.result.get('view_count') <= 20 * x.result.get('like_count')
)]
# Randomly pick three from the filtered results
if len(filtered_illust_data_result) > 3:
illust_list = [PixivIllust(pid=x.result.get('pid')) for x in random.sample(filtered_illust_data_result, k=3)]
else:
illust_list = [PixivIllust(pid=x.result.get('pid')) for x in filtered_illust_data_result]
if not illust_list:
logger.info(f'Recommend image | 筛选结果为0, 没有找到符合要求的相似作品')
await recommend_image.finish('没有找到符合要求的相似作品QAQ')
# Download the images directly
tasks = [x.get_sending_msg() for x in illust_list]
illust_download_result = await asyncio.gather(*tasks)
for img, info in [x.result for x in illust_download_result if x.success()]:
img_seg = MessageSegment.image(file=img)
try:
await recommend_image.send(Message(img_seg).append(info))
except Exception as e:
logger.warning(f'Recommend image | 发送图片失败, error: {repr(e)}')
continue
logger.info(f'Recommend image | User: {event.user_id} 已获取相似图片')
```
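For clarity, the recommendation filter above (at least 2000 bookmarks, likes no lower than half the bookmarks, like rate above 5%) can be restated as a predicate. The field names follow the illust data dict used above; the helper itself is not part of the plugin.
```python
def is_recommendable(illust: dict, allow_r18: bool = False) -> bool:
    """Illustrative restatement of the filter applied to recommend results."""
    if not allow_r18 and illust.get('is_r18'):
        return False
    bookmarks = illust.get('bookmark_count', 0)
    likes = illust.get('like_count', 0)
    views = illust.get('view_count', 0)
    # bookmarks >= 2000, likes >= bookmarks / 2, like rate (likes / views) > 5%
    return 2000 <= bookmarks <= 2 * likes and views <= 20 * likes


assert is_recommendable({'is_r18': False, 'bookmark_count': 2500,
                         'like_count': 1500, 'view_count': 20000})
```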
#### File: plugins/tencent_cloud/__init__.py
```python
import re
from nonebot import MatcherGroup, logger, export
from nonebot.typing import T_State
from nonebot.adapters.cqhttp.bot import Bot
from nonebot.adapters.cqhttp.event import GroupMessageEvent
from nonebot.adapters.cqhttp.permission import GROUP
from omega_miya.utils.Omega_plugin_utils import init_export, init_permission_state, OmegaRules
from omega_miya.utils.tencent_cloud_api import TencentNLP, TencentTMT
# Custom plugin usage text
__plugin_name__ = 'TencentCloudCore'
__plugin_usage__ = r'''【TencentCloud API Support】
腾讯云API插件
测试中
**Permission**
Command & Lv.50
or AuthNode
**AuthNode**
basic
**Usage**
/翻译'''
# Declare the permission nodes this plugin exposes
__plugin_auth_node__ = [
'tmt',
'nlp'
]
# Init plugin export
init_export(export(), __plugin_name__, __plugin_usage__, __plugin_auth_node__)
tencent_cloud = MatcherGroup(
type='message',
permission=GROUP,
priority=100,
block=False)
translate = tencent_cloud.on_command(
'翻译',
aliases={'translate'},
# Permission checks are intercepted by run_preprocessor; required permissions are initialized in default_state
state=init_permission_state(
name='translate',
command=True,
level=30,
auth_node='tmt'),
priority=30,
block=True)
# Override the default args_parser
@translate.args_parser
async def parse(bot: Bot, event: GroupMessageEvent, state: T_State):
args = str(event.get_plaintext()).strip()
if not args:
await translate.reject('你似乎没有发送有效的参数呢QAQ, 请重新发送:')
state[state["_current_key"]] = args
if state[state["_current_key"]] == '取消':
await translate.finish('操作已取消')
@translate.handle()
async def handle_first_receive(bot: Bot, event: GroupMessageEvent, state: T_State):
args = str(event.get_plaintext()).strip()
if not args:
pass
else:
state['content'] = args
@translate.got('content', prompt='请发送需要翻译的内容:')
async def handle_translate(bot: Bot, event: GroupMessageEvent, state: T_State):
content = state['content']
translate_result = await TencentTMT().translate(source_text=content)
if translate_result.error:
await translate.finish('翻译失败了QAQ, 发生了意外的错误')
else:
await translate.finish(f"翻译结果:\n\n{translate_result.result.get('targettext')}")
nlp = tencent_cloud.on_message(
rule=OmegaRules.has_group_command_permission() & OmegaRules.has_level_or_node(30, 'tencent_cloud.nlp'))
@nlp.handle()
async def handle_nlp(bot: Bot, event: GroupMessageEvent, state: T_State):
arg = str(event.get_plaintext()).strip().lower()
# Ignore list
ignore_pattern = [
re.compile(r'喵一个'),
re.compile(r'^今天'),
re.compile(r'[这那谁你我他她它]个?是[(什么)谁啥]')
]
for pattern in ignore_pattern:
if re.search(pattern, arg):
await nlp.finish()
# Entity lookup via describe_entity
if re.match(r'^(你?知道)?(.{1,32}?)的(.{1,32}?)是(什么|谁|啥)吗?[??]?$', arg):
item, attr = re.findall(r'^(你?知道)?(.{1,32}?)的(.{1,32}?)是(什么|谁|啥)吗?[??]?$', arg)[0][1:3]
res = await TencentNLP().describe_entity(entity_name=item, attr=attr)
if not res.error and res.result:
await nlp.finish(f'{item}的{attr}是{res.result}')
else:
logger.warning(f'nlp handling describe entity failed: {res.info}')
elif re.match(r'^(你?知道)?(.{1,32}?)是(什么|谁|啥)吗?[??]?$', arg):
item = re.findall(r'^(你?知道)?(.{1,32}?)是(什么|谁|啥)吗?[??]?$', arg)[0][1]
res = await TencentNLP().describe_entity(entity_name=item)
if not res.error and res.result:
await nlp.finish(str(res.result))
else:
logger.warning(f'nlp handling describe entity failed: {res.info}')
```
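The entity-query branch relies on two regexes to pull the subject and attribute out of a natural question. The snippet below mirrors those patterns one-for-one and shows what they capture on made-up sample sentences.
```python
import re

# Same patterns as the nlp handler above, applied to sample questions.
attr_pattern = r'^(你?知道)?(.{1,32}?)的(.{1,32}?)是(什么|谁|啥)吗?[??]?$'
item_pattern = r'^(你?知道)?(.{1,32}?)是(什么|谁|啥)吗?[??]?$'

# "Do you know what X's Y is?" -> captures (X, Y)
item, attr = re.findall(attr_pattern, '你知道东京的人口是什么吗?')[0][1:3]
assert (item, attr) == ('东京', '人口')

# "Who is X?" -> captures X
item = re.findall(item_pattern, '沙耶是谁?')[0][1]
assert item == '沙耶'
```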
#### File: utils/dice_utils/exception.py
```python
class DiceBaseException(Exception):
pass
class CalculateException(DiceBaseException):
"""
Exception raised by the dice calculation module.
"""
def __init__(self, reason, expression):
self.reason = reason
self.expression = expression
def __repr__(self):
return f'<CalculateException, reason={self.reason}, expression={self.expression}>'
def __str__(self):
return self.__repr__()
```
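A short sketch of how the calculator module presumably surfaces this exception; the reason, expression and import path here are illustrative assumptions.
```python
# Hypothetical raise/catch cycle; values and import path are made up for illustration.
from exception import CalculateException

try:
    raise CalculateException(reason='division by zero', expression='1d6/0')
except CalculateException as e:
    print(e)  # <CalculateException, reason=division by zero, expression=1d6/0>
```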
#### File: Omega_Base/model/friend.py
```python
from typing import Optional
from datetime import datetime
from omega_miya.utils.Omega_Base.database import NBdb
from omega_miya.utils.Omega_Base.class_result import Result
from omega_miya.utils.Omega_Base.tables import Friends, User, Subscription, UserSub
from .user import DBUser
from .bot_self import DBBot
from .subscription import DBSubscription
from sqlalchemy.future import select
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
class DBFriend(DBUser):
def __init__(self, user_id: int, self_bot: DBBot):
super().__init__(user_id)
self.self_bot = self_bot
@classmethod
async def list_exist_friends(cls, self_bot: DBBot) -> Result.ListResult:
self_bot_id_result = await self_bot.id()
if self_bot_id_result.error:
return Result.ListResult(error=True, info='Bot not exist', result=[])
async_session = NBdb().get_async_session()
async with async_session() as session:
try:
async with session.begin():
session_result = await session.execute(
select(User.qq).
join(Friends).
where(User.id == Friends.user_id).
where(Friends.bot_self_id == self_bot_id_result.result)
)
exist_friends = [x for x in session_result.scalars().all()]
result = Result.ListResult(error=False, info='Success', result=exist_friends)
except NoResultFound:
result = Result.ListResult(error=True, info='NoResultFound', result=[])
except MultipleResultsFound:
result = Result.ListResult(error=True, info='MultipleResultsFound', result=[])
except Exception as e:
result = Result.ListResult(error=True, info=repr(e), result=[])
return result
@classmethod
async def list_exist_friends_by_private_permission(
cls, private_permission: int, self_bot: DBBot) -> Result.ListResult:
self_bot_id_result = await self_bot.id()
if self_bot_id_result.error:
return Result.ListResult(error=True, info='Bot not exist', result=[])
async_session = NBdb().get_async_session()
async with async_session() as session:
try:
async with session.begin():
session_result = await session.execute(
select(User.qq).
join(Friends).
where(User.id == Friends.user_id).
where(Friends.bot_self_id == self_bot_id_result.result).
where(Friends.private_permissions == private_permission)
)
exist_friends = [x for x in session_result.scalars().all()]
result = Result.ListResult(error=False, info='Success', result=exist_friends)
except NoResultFound:
result = Result.ListResult(error=True, info='NoResultFound', result=[])
except MultipleResultsFound:
result = Result.ListResult(error=True, info='MultipleResultsFound', result=[])
except Exception as e:
result = Result.ListResult(error=True, info=repr(e), result=[])
return result
async def friend_id(self) -> Result.IntResult:
user_id_result = await self.id()
if user_id_result.error:
return Result.IntResult(error=True, info='User not exist', result=-1)
self_bot_id_result = await self.self_bot.id()
if self_bot_id_result.error:
return Result.IntResult(error=True, info='Bot not exist', result=-1)
async_session = NBdb().get_async_session()
async with async_session() as session:
try:
async with session.begin():
session_result = await session.execute(
select(Friends.id).
where(Friends.bot_self_id == self_bot_id_result.result).
where(Friends.user_id == user_id_result.result)
)
friend_table_id = session_result.scalar_one()
result = Result.IntResult(error=False, info='Success', result=friend_table_id)
except NoResultFound:
result = Result.IntResult(error=True, info='NoResultFound', result=-1)
except MultipleResultsFound:
result = Result.IntResult(error=True, info='MultipleResultsFound', result=-1)
except Exception as e:
result = Result.IntResult(error=True, info=repr(e), result=-1)
return result
async def exist(self) -> bool:
result = await self.friend_id()
return result.success()
async def set_friend(
self, nickname: str, remark: Optional[str] = None, private_permissions: Optional[int] = None
) -> Result.IntResult:
user_id_result = await self.id()
if user_id_result.error:
return Result.IntResult(error=True, info='User not exist', result=-1)
self_bot_id_result = await self.self_bot.id()
if self_bot_id_result.error:
return Result.IntResult(error=True, info='Bot not exist', result=-1)
async_session = NBdb().get_async_session()
async with async_session() as session:
try:
async with session.begin():
try:
session_result = await session.execute(
select(Friends).
where(Friends.user_id == user_id_result.result).
where(Friends.bot_self_id == self_bot_id_result.result)
)
exist_friend = session_result.scalar_one()
exist_friend.nickname = nickname
exist_friend.remark = remark
if private_permissions:
exist_friend.private_permissions = private_permissions
exist_friend.updated_at = datetime.now()
result = Result.IntResult(error=False, info='Success upgraded', result=0)
except NoResultFound:
if private_permissions:
new_friend = Friends(user_id=user_id_result.result, bot_self_id=self_bot_id_result.result,
nickname=nickname, remark=remark,
private_permissions=private_permissions, created_at=datetime.now())
else:
new_friend = Friends(user_id=user_id_result.result, bot_self_id=self_bot_id_result.result,
nickname=nickname, remark=remark,
private_permissions=0, created_at=datetime.now())
session.add(new_friend)
result = Result.IntResult(error=False, info='Success added', result=0)
await session.commit()
except MultipleResultsFound:
await session.rollback()
result = Result.IntResult(error=True, info='MultipleResultsFound', result=-1)
except Exception as e:
await session.rollback()
result = Result.IntResult(error=True, info=repr(e), result=-1)
return result
async def del_friend(self) -> Result.IntResult:
friend_id_result = await self.friend_id()
if friend_id_result.error:
return Result.IntResult(error=True, info='Friend not exist', result=-1)
async_session = NBdb().get_async_session()
async with async_session() as session:
try:
async with session.begin():
# Delete this friend's record from the friends table
session_result = await session.execute(
select(Friends).
where(Friends.id == friend_id_result.result)
)
exist_friend = session_result.scalar_one()
await session.delete(exist_friend)
await session.commit()
result = Result.IntResult(error=False, info='Success', result=0)
except NoResultFound:
await session.rollback()
result = Result.IntResult(error=True, info='NoResultFound', result=-1)
except MultipleResultsFound:
await session.rollback()
result = Result.IntResult(error=True, info='MultipleResultsFound', result=-1)
except Exception as e:
await session.rollback()
result = Result.IntResult(error=True, info=repr(e), result=-1)
return result
async def set_private_permission(self, private_permissions: int) -> Result.IntResult:
user_id_result = await self.id()
if user_id_result.error:
return Result.IntResult(error=True, info='User not exist', result=-1)
self_bot_id_result = await self.self_bot.id()
if self_bot_id_result.error:
return Result.IntResult(error=True, info='Bot not exist', result=-1)
async_session = NBdb().get_async_session()
async with async_session() as session:
try:
async with session.begin():
session_result = await session.execute(
select(Friends).
where(Friends.user_id == user_id_result.result).
where(Friends.bot_self_id == self_bot_id_result.result)
)
exist_friend = session_result.scalar_one()
exist_friend.private_permissions = private_permissions
exist_friend.updated_at = datetime.now()
await session.commit()
result = Result.IntResult(error=False, info='Success upgraded', result=0)
except NoResultFound:
await session.rollback()
result = Result.IntResult(error=True, info='NoResultFound', result=-1)
except MultipleResultsFound:
await session.rollback()
result = Result.IntResult(error=True, info='MultipleResultsFound', result=-1)
except Exception as e:
await session.rollback()
result = Result.IntResult(error=True, info=repr(e), result=-1)
return result
async def get_private_permission(self) -> Result.IntResult:
user_id_result = await self.id()
if user_id_result.error:
return Result.IntResult(error=True, info='User not exist', result=-1)
self_bot_id_result = await self.self_bot.id()
if self_bot_id_result.error:
return Result.IntResult(error=True, info='Bot not exist', result=-1)
async_session = NBdb().get_async_session()
async with async_session() as session:
try:
async with session.begin():
session_result = await session.execute(
select(Friends.private_permissions).
where(Friends.user_id == user_id_result.result).
where(Friends.bot_self_id == self_bot_id_result.result)
)
private_permissions = session_result.scalar_one()
result = Result.IntResult(error=False, info='Success', result=private_permissions)
except NoResultFound:
result = Result.IntResult(error=True, info='NoResultFound', result=-2)
except MultipleResultsFound:
result = Result.IntResult(error=True, info='MultipleResultsFound', result=-1)
except Exception as e:
result = Result.IntResult(error=True, info=repr(e), result=-1)
return result
async def subscription_list(self) -> Result.TupleListResult:
"""
:return: Result: List[Tuple[sub_type, sub_id, up_name]]
"""
friend_id_result = await self.friend_id()
if friend_id_result.error:
return Result.TupleListResult(error=True, info='Friend not exist', result=[])
async_session = NBdb().get_async_session()
async with async_session() as session:
async with session.begin():
try:
session_result = await session.execute(
select(Subscription.sub_type, Subscription.sub_id, Subscription.up_name).
join(UserSub).
where(Subscription.id == UserSub.sub_id).
where(UserSub.user_id == friend_id_result.result)
)
res = [(x[0], x[1], x[2]) for x in session_result.all()]
result = Result.TupleListResult(error=False, info='Success', result=res)
except Exception as e:
result = Result.TupleListResult(error=True, info=repr(e), result=[])
return result
async def subscription_list_by_type(self, sub_type: int) -> Result.TupleListResult:
"""
:param sub_type: subscription type
:return: Result: List[Tuple[sub_id, up_name]]
"""
friend_id_result = await self.friend_id()
if friend_id_result.error:
return Result.TupleListResult(error=True, info='Friend not exist', result=[])
async_session = NBdb().get_async_session()
async with async_session() as session:
async with session.begin():
try:
session_result = await session.execute(
select(Subscription.sub_id, Subscription.up_name).
join(UserSub).
where(Subscription.sub_type == sub_type).
where(Subscription.id == UserSub.sub_id).
where(UserSub.user_id == friend_id_result.result)
)
res = [(x[0], x[1]) for x in session_result.all()]
result = Result.TupleListResult(error=False, info='Success', result=res)
except Exception as e:
result = Result.TupleListResult(error=True, info=repr(e), result=[])
return result
async def subscription_add(self, sub: DBSubscription, user_sub_info: str = None) -> Result.IntResult:
friend_id_result = await self.friend_id()
if friend_id_result.error:
return Result.IntResult(error=True, info='Friend not exist', result=-1)
sub_id_result = await sub.id()
if sub_id_result.error:
return Result.IntResult(error=True, info='Subscription not exist', result=-1)
async_session = NBdb().get_async_session()
async with async_session() as session:
try:
async with session.begin():
try:
session_result = await session.execute(
select(UserSub).
where(UserSub.user_id == friend_id_result.result).
where(UserSub.sub_id == sub_id_result.result)
)
# Subscription already exists, update its info
exist_subscription = session_result.scalar_one()
exist_subscription.user_sub_info = user_sub_info
exist_subscription.updated_at = datetime.now()
result = Result.IntResult(error=False, info='Success upgraded', result=0)
except NoResultFound:
subscription = UserSub(sub_id=sub_id_result.result, user_id=friend_id_result.result,
user_sub_info=user_sub_info, created_at=datetime.now())
session.add(subscription)
result = Result.IntResult(error=False, info='Success added', result=0)
await session.commit()
except MultipleResultsFound:
await session.rollback()
result = Result.IntResult(error=True, info='MultipleResultsFound', result=-1)
except Exception as e:
await session.rollback()
result = Result.IntResult(error=True, info=repr(e), result=-1)
return result
async def subscription_del(self, sub: DBSubscription) -> Result.IntResult:
friend_id_result = await self.friend_id()
if friend_id_result.error:
return Result.IntResult(error=True, info='Friend not exist', result=-1)
sub_id_result = await sub.id()
if sub_id_result.error:
return Result.IntResult(error=True, info='Subscription not exist', result=-1)
async_session = NBdb().get_async_session()
async with async_session() as session:
try:
async with session.begin():
session_result = await session.execute(
select(UserSub).
where(UserSub.user_id == friend_id_result.result).
where(UserSub.sub_id == sub_id_result.result)
)
exist_subscription = session_result.scalar_one()
await session.delete(exist_subscription)
await session.commit()
result = Result.IntResult(error=False, info='Success', result=0)
except NoResultFound:
await session.rollback()
result = Result.IntResult(error=True, info='NoResultFound', result=-1)
except MultipleResultsFound:
await session.rollback()
result = Result.IntResult(error=True, info='MultipleResultsFound', result=-1)
except Exception as e:
await session.rollback()
result = Result.IntResult(error=True, info=repr(e), result=-1)
return result
async def subscription_clear(self) -> Result.IntResult:
friend_id_result = await self.friend_id()
if friend_id_result.error:
return Result.IntResult(error=True, info='Friend not exist', result=-1)
async_session = NBdb().get_async_session()
async with async_session() as session:
try:
async with session.begin():
session_result = await session.execute(
select(UserSub).where(UserSub.user_id == friend_id_result.result)
)
for exist_user_sub in session_result.scalars().all():
await session.delete(exist_user_sub)
await session.commit()
result = Result.IntResult(error=False, info='Success', result=0)
except Exception as e:
await session.rollback()
result = Result.IntResult(error=True, info=repr(e), result=-1)
return result
async def subscription_clear_by_type(self, sub_type: int) -> Result.IntResult:
friend_id_result = await self.friend_id()
if friend_id_result.error:
return Result.IntResult(error=True, info='Friend not exist', result=-1)
async_session = NBdb().get_async_session()
async with async_session() as session:
try:
async with session.begin():
session_result = await session.execute(
select(UserSub).join(Subscription).
where(UserSub.sub_id == Subscription.id).
where(Subscription.sub_type == sub_type).
where(UserSub.user_id == friend_id_result.result)
)
for exist_user_sub in session_result.scalars().all():
await session.delete(exist_user_sub)
await session.commit()
result = Result.IntResult(error=False, info='Success', result=0)
except Exception as e:
await session.rollback()
result = Result.IntResult(error=True, info=repr(e), result=-1)
return result
```
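Every DBFriend helper returns a Result object, so callers check `.error` (or `.success()`) before touching `.result`. A rough async sketch follows, assuming a hypothetical bot account already present in the database and the import paths noted in the comments.
```python
import asyncio

# DBBot is exported from Omega_Base elsewhere in this project; the DBFriend import
# path is assumed from the file layout above.
from omega_miya.utils.Omega_Base import DBBot
from omega_miya.utils.Omega_Base.model.friend import DBFriend


async def dump_friend_subscriptions():
    self_bot = DBBot(self_qq=10000)  # hypothetical bot account
    friends_result = await DBFriend.list_exist_friends(self_bot=self_bot)
    if friends_result.error:
        print(f'query failed: {friends_result.info}')
        return
    for qq in friends_result.result:
        subs_result = await DBFriend(user_id=qq, self_bot=self_bot).subscription_list()
        if not subs_result.error:
            print(qq, subs_result.result)


asyncio.run(dump_friend_subscriptions())
```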
#### File: Omega_Base/model/mail.py
```python
from omega_miya.utils.Omega_Base.database import NBdb
from omega_miya.utils.Omega_Base.class_result import Result
from omega_miya.utils.Omega_Base.tables import Email, EmailBox, GroupEmailBox
from datetime import datetime
from sqlalchemy.future import select
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
class DBEmailBox(object):
def __init__(self, address: str):
self.address = address
async def id(self) -> Result.IntResult:
async_session = NBdb().get_async_session()
async with async_session() as session:
async with session.begin():
try:
session_result = await session.execute(
select(EmailBox.id).
where(EmailBox.address == self.address)
)
email_box_table_id = session_result.scalar_one()
result = Result.IntResult(error=False, info='Success', result=email_box_table_id)
except NoResultFound:
result = Result.IntResult(error=True, info='NoResultFound', result=-1)
except MultipleResultsFound:
result = Result.IntResult(error=True, info='MultipleResultsFound', result=-1)
except Exception as e:
result = Result.IntResult(error=True, info=repr(e), result=-1)
return result
async def exist(self) -> bool:
result = await self.id()
return result.success()
@classmethod
async def list(cls) -> Result.ListResult:
async_session = NBdb().get_async_session()
async with async_session() as session:
async with session.begin():
try:
session_result = await session.execute(
select(EmailBox.address).order_by(EmailBox.id)
)
res = [x for x in session_result.scalars().all()]
result = Result.ListResult(error=False, info='Success', result=res)
except Exception as e:
result = Result.ListResult(error=True, info=repr(e), result=[])
return result
async def get_info(self) -> Result.DictResult:
async_session = NBdb().get_async_session()
async with async_session() as session:
async with session.begin():
try:
session_result = await session.execute(
select(EmailBox).
where(EmailBox.address == self.address)
)
exist_box = session_result.scalar_one()
server_host = exist_box.server_host
port = exist_box.port
password = exist_box.password
res_dict = {'server_host': server_host, 'port': port, 'password': password}
result = Result.DictResult(error=False, info='Success', result=res_dict)
except NoResultFound:
result = Result.DictResult(error=True, info='NoResultFound', result={})
except MultipleResultsFound:
result = Result.DictResult(error=True, info='MultipleResultsFound', result={})
except Exception as e:
result = Result.DictResult(error=True, info=repr(e), result={})
return result
async def add(self, server_host: str, password: str, port: int = 993) -> Result.IntResult:
async_session = NBdb().get_async_session()
async with async_session() as session:
try:
async with session.begin():
try:
# Update if the mailbox already exists
session_result = await session.execute(
select(EmailBox).
where(EmailBox.address == self.address)
)
exist_box = session_result.scalar_one()
exist_box.server_host = server_host
exist_box.port = port
exist_box.password = password
exist_box.updated_at = datetime.now()
result = Result.IntResult(error=False, info='Success upgraded', result=0)
except NoResultFound:
new_box = EmailBox(address=self.address, server_host=server_host, password=password,
port=port, created_at=datetime.now())
session.add(new_box)
result = Result.IntResult(error=False, info='Success added', result=0)
await session.commit()
except MultipleResultsFound:
await session.rollback()
result = Result.IntResult(error=True, info='MultipleResultsFound', result=-1)
except Exception as e:
await session.rollback()
result = Result.IntResult(error=True, info=repr(e), result=-1)
return result
async def delete(self) -> Result.IntResult:
id_result = await self.id()
if id_result.error:
return Result.IntResult(error=True, info='EmailBox not exist', result=-1)
# Clear all groups bound to this mailbox first
await self.mailbox_group_clear()
async_session = NBdb().get_async_session()
async with async_session() as session:
try:
async with session.begin():
session_result = await session.execute(
select(EmailBox).
where(EmailBox.address == self.address)
)
exist_box = session_result.scalar_one()
await session.delete(exist_box)
await session.commit()
result = Result.IntResult(error=False, info='Success', result=0)
except NoResultFound:
await session.rollback()
result = Result.IntResult(error=True, info='NoResultFound', result=-1)
except MultipleResultsFound:
await session.rollback()
result = Result.IntResult(error=True, info='MultipleResultsFound', result=-1)
except Exception as e:
await session.rollback()
result = Result.IntResult(error=True, info=repr(e), result=-1)
return result
async def mailbox_group_clear(self) -> Result.IntResult:
id_result = await self.id()
if id_result.error:
return Result.IntResult(error=True, info='EmailBox not exist', result=-1)
async_session = NBdb().get_async_session()
async with async_session() as session:
try:
async with session.begin():
session_result = await session.execute(
select(GroupEmailBox).where(GroupEmailBox.email_box_id == id_result.result)
)
for exist_group_mailbox in session_result.scalars().all():
await session.delete(exist_group_mailbox)
await session.commit()
result = Result.IntResult(error=False, info='Success', result=0)
except Exception as e:
await session.rollback()
result = Result.IntResult(error=True, info=repr(e), result=-1)
return result
class DBEmail(object):
def __init__(self, mail_hash: str):
self.mail_hash = mail_hash
async def add(
self, date: str, header: str, sender: str, to: str, body: str = None, html: str = None
) -> Result.IntResult:
async_session = NBdb().get_async_session()
async with async_session() as session:
try:
async with session.begin():
new_email = Email(mail_hash=self.mail_hash, date=date, header=header, sender=sender, to=to,
body=body, html=html, created_at=datetime.now())
session.add(new_email)
await session.commit()
result = Result.IntResult(error=False, info='Success added', result=0)
except Exception as e:
await session.rollback()
result = Result.IntResult(error=True, info=repr(e), result=-1)
return result
```
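DBEmail stores messages under the md5 hash produced by the Email class in plugins/Omega_email/imap.py. Assuming the Email table enforces a unique mail_hash (not shown here) and the import paths given in the comments, duplicate fetches can be skipped simply by attempting the insert, as in this rough sketch.
```python
import asyncio

# Assumed import paths for the two helpers shown earlier in this document.
from omega_miya.plugins.Omega_email.imap import EmailImap
from omega_miya.utils.Omega_Base.model.mail import DBEmail


async def archive_unseen(host: str, address: str, password: str):
    client = EmailImap(host=host, address=address, password=password)
    # Blocking IMAP fetch; acceptable for a one-off sketch.
    for mail in client.get_mail_info(None, 'UNSEEN'):
        # If mail_hash is unique in the table (assumption), the insert fails for duplicates.
        result = await DBEmail(mail_hash=mail.hash).add(
            date=mail.date, header=mail.header, sender=mail.sender,
            to=mail.to, body=mail.body, html=mail.html)
        print(mail.hash, 'stored' if result.success() else f'skipped: {result.info}')


asyncio.run(archive_unseen('imap.example.com', 'user@example.com', '********'))
```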
#### File: Omega_Base/model/user.py
```python
from typing import List, Optional
from datetime import date, datetime
from dataclasses import dataclass
from omega_miya.utils.Omega_Base.database import NBdb
from omega_miya.utils.Omega_Base.class_result import Result
from omega_miya.utils.Omega_Base.tables import User, UserFavorability, UserSignIn, Skill, UserSkill, Vacation
from .skill import DBSkill
from sqlalchemy.future import select
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
class DBUser(object):
def __init__(self, user_id: int):
self.qq = user_id
@dataclass
class DateListResult(Result.AnyResult):
result: List[date]
def __repr__(self):
return f'<DateListResult(error={self.error}, info={self.info}, result={self.result})>'
async def id(self) -> Result.IntResult:
async_session = NBdb().get_async_session()
async with async_session() as session:
async with session.begin():
try:
session_result = await session.execute(
select(User.id).where(User.qq == self.qq)
)
user_table_id = session_result.scalar_one()
result = Result.IntResult(error=False, info='Success', result=user_table_id)
except NoResultFound:
result = Result.IntResult(error=True, info='NoResultFound', result=-1)
except MultipleResultsFound:
result = Result.IntResult(error=True, info='MultipleResultsFound', result=-1)
except Exception as e:
result = Result.IntResult(error=True, info=repr(e), result=-1)
return result
async def exist(self) -> bool:
result = await self.id()
return result.success()
async def nickname(self) -> Result.TextResult:
async_session = NBdb().get_async_session()
async with async_session() as session:
async with session.begin():
try:
session_result = await session.execute(
select(User.nickname).where(User.qq == self.qq)
)
user_nickname = session_result.scalar_one()
result = Result.TextResult(error=False, info='Success', result=user_nickname)
except NoResultFound:
result = Result.TextResult(error=True, info='NoResultFound', result='')
except MultipleResultsFound:
result = Result.TextResult(error=True, info='MultipleResultsFound', result='')
except Exception as e:
result = Result.TextResult(error=True, info=repr(e), result='')
return result
async def add(self, nickname: str, aliasname: str = None) -> Result.IntResult:
async_session = NBdb().get_async_session()
async with async_session() as session:
try:
async with session.begin():
try:
# User already exists, update the nickname in the user table
session_result = await session.execute(
select(User).where(User.qq == self.qq)
)
exist_user = session_result.scalar_one()
if exist_user.nickname == nickname:
result = Result.IntResult(error=False, info='Nickname not change', result=0)
else:
exist_user.nickname = nickname
exist_user.aliasname = aliasname
exist_user.updated_at = datetime.now()
result = Result.IntResult(error=False, info='Success upgraded', result=0)
except NoResultFound:
# Otherwise add a new user record
new_user = User(qq=self.qq, nickname=nickname, aliasname=aliasname, created_at=datetime.now())
session.add(new_user)
result = Result.IntResult(error=False, info='Success added', result=0)
await session.commit()
except MultipleResultsFound:
await session.rollback()
result = Result.IntResult(error=True, info='MultipleResultsFound', result=-1)
except Exception as e:
await session.rollback()
result = Result.IntResult(error=True, info=repr(e), result=-1)
return result
async def delete(self) -> Result.IntResult:
id_result = await self.id()
if id_result.error:
return Result.IntResult(error=True, info='User not exist', result=-1)
async_session = NBdb().get_async_session()
async with async_session() as session:
try:
async with session.begin():
# Delete the user from the user table
session_result = await session.execute(
select(User).where(User.qq == self.qq)
)
exist_user = session_result.scalar_one()
await session.delete(exist_user)
await session.commit()
result = Result.IntResult(error=False, info='Success Delete', result=0)
except NoResultFound:
await session.rollback()
result = Result.IntResult(error=True, info='NoResultFound', result=-1)
except MultipleResultsFound:
await session.rollback()
result = Result.IntResult(error=True, info='MultipleResultsFound', result=-1)
except Exception as e:
await session.rollback()
result = Result.IntResult(error=True, info=repr(e), result=-1)
return result
async def skill_list(self) -> Result.ListResult:
id_result = await self.id()
if id_result.error:
return Result.ListResult(error=True, info='User not exist', result=[])
async_session = NBdb().get_async_session()
async with async_session() as session:
async with session.begin():
try:
session_result = await session.execute(
select(Skill.name, UserSkill.skill_level).
join(UserSkill).
where(Skill.id == UserSkill.skill_id).
where(UserSkill.user_id == id_result.result)
)
res = [(x[0], x[1]) for x in session_result.all()]
result = Result.ListResult(error=False, info='Success', result=res)
except Exception as e:
result = Result.ListResult(error=True, info=repr(e), result=[])
return result
async def skill_add(self, skill: DBSkill, skill_level: int) -> Result.IntResult:
user_id_result = await self.id()
if user_id_result.error:
return Result.IntResult(error=True, info='User not exist', result=-1)
skill_id_result = await skill.id()
if skill_id_result.error:
return Result.IntResult(error=True, info='Skill not exist', result=-1)
async_session = NBdb().get_async_session()
async with async_session() as session:
try:
async with session.begin():
                    # Look up the user's existing skill record
                    try:
                        # Skill already recorded: just update its level
session_result = await session.execute(
select(UserSkill).
where(UserSkill.skill_id == skill_id_result.result).
where(UserSkill.user_id == user_id_result.result)
)
exist_skill = session_result.scalar_one()
exist_skill.skill_level = skill_level
exist_skill.updated_at = datetime.now()
result = Result.IntResult(error=False, info='Success upgraded', result=0)
except NoResultFound:
new_skill = UserSkill(user_id=user_id_result.result, skill_id=skill_id_result.result,
skill_level=skill_level, created_at=datetime.now())
session.add(new_skill)
result = Result.IntResult(error=False, info='Success added', result=0)
await session.commit()
except MultipleResultsFound:
await session.rollback()
result = Result.IntResult(error=True, info='MultipleResultsFound', result=-1)
except Exception as e:
await session.rollback()
result = Result.IntResult(error=True, info=repr(e), result=-1)
return result
async def skill_del(self, skill: DBSkill) -> Result.IntResult:
user_id_result = await self.id()
if user_id_result.error:
return Result.IntResult(error=True, info='User not exist', result=-1)
skill_id_result = await skill.id()
if skill_id_result.error:
return Result.IntResult(error=True, info='Skill not exist', result=-1)
async_session = NBdb().get_async_session()
async with async_session() as session:
try:
async with session.begin():
session_result = await session.execute(
select(UserSkill).
where(UserSkill.skill_id == skill_id_result.result).
where(UserSkill.user_id == user_id_result.result)
)
exist_skill = session_result.scalar_one()
await session.delete(exist_skill)
await session.commit()
result = Result.IntResult(error=False, info='Success', result=0)
except NoResultFound:
await session.rollback()
result = Result.IntResult(error=True, info='NoResultFound', result=-1)
except MultipleResultsFound:
await session.rollback()
result = Result.IntResult(error=True, info='MultipleResultsFound', result=-1)
except Exception as e:
await session.rollback()
result = Result.IntResult(error=True, info=repr(e), result=-1)
return result
async def skill_clear(self) -> Result.IntResult:
user_id_result = await self.id()
if user_id_result.error:
return Result.IntResult(error=True, info='User not exist', result=-1)
async_session = NBdb().get_async_session()
async with async_session() as session:
try:
async with session.begin():
session_result = await session.execute(
select(UserSkill).where(UserSkill.user_id == user_id_result.result)
)
for exist_skill in session_result.scalars().all():
await session.delete(exist_skill)
await session.commit()
result = Result.IntResult(error=False, info='Success', result=0)
except Exception as e:
await session.rollback()
result = Result.IntResult(error=True, info=repr(e), result=-1)
return result
async def status(self) -> Result.IntResult:
user_id_result = await self.id()
if user_id_result.error:
return Result.IntResult(error=True, info='User not exist', result=-1)
async_session = NBdb().get_async_session()
async with async_session() as session:
async with session.begin():
try:
session_result = await session.execute(
select(Vacation.status).where(Vacation.user_id == user_id_result.result)
)
res = session_result.scalar_one()
result = Result.IntResult(error=False, info='Success', result=res)
except Exception as e:
result = Result.IntResult(error=True, info=repr(e), result=-1)
return result
async def vacation_status(self) -> Result.ListResult:
user_id_result = await self.id()
if user_id_result.error:
return Result.ListResult(error=True, info='User not exist', result=[-1, None])
async_session = NBdb().get_async_session()
async with async_session() as session:
async with session.begin():
try:
session_result = await session.execute(
select(Vacation.status, Vacation.stop_at).
where(Vacation.user_id == user_id_result.result)
)
res = session_result.one()
result = Result.ListResult(error=False, info='Success', result=[res[0], res[1]])
except Exception as e:
result = Result.ListResult(error=True, info=repr(e), result=[-1, None])
return result
async def status_set(self, status: int) -> Result.IntResult:
user_id_result = await self.id()
if user_id_result.error:
return Result.IntResult(error=True, info='User not exist', result=-1)
async_session = NBdb().get_async_session()
async with async_session() as session:
try:
async with session.begin():
try:
session_result = await session.execute(
select(Vacation).where(Vacation.user_id == user_id_result.result)
)
exist_status = session_result.scalar_one()
exist_status.status = status
exist_status.stop_at = None
exist_status.reason = None
exist_status.updated_at = datetime.now()
result = Result.IntResult(error=False, info='Success upgraded', result=0)
except NoResultFound:
new_status = Vacation(user_id=user_id_result.result, status=status, created_at=datetime.now())
session.add(new_status)
result = Result.IntResult(error=False, info='Success set', result=0)
await session.commit()
except MultipleResultsFound:
await session.rollback()
result = Result.IntResult(error=True, info='MultipleResultsFound', result=-1)
except Exception as e:
await session.rollback()
result = Result.IntResult(error=True, info=repr(e), result=-1)
return result
async def vacation_set(self, stop_time: datetime, reason: str = None) -> Result.IntResult:
user_id_result = await self.id()
if user_id_result.error:
return Result.IntResult(error=True, info='User not exist', result=-1)
async_session = NBdb().get_async_session()
async with async_session() as session:
try:
async with session.begin():
try:
session_result = await session.execute(
select(Vacation).where(Vacation.user_id == user_id_result.result)
)
exist_status = session_result.scalar_one()
exist_status.status = 1
exist_status.stop_at = stop_time
exist_status.reason = reason
exist_status.updated_at = datetime.now()
result = Result.IntResult(error=False, info='Success upgraded', result=0)
except NoResultFound:
new_status = Vacation(user_id=user_id_result.result, status=1,
stop_at=stop_time, reason=reason, created_at=datetime.now())
session.add(new_status)
result = Result.IntResult(error=False, info='Success set', result=0)
await session.commit()
except MultipleResultsFound:
await session.rollback()
result = Result.IntResult(error=True, info='MultipleResultsFound', result=-1)
except Exception as e:
await session.rollback()
result = Result.IntResult(error=True, info=repr(e), result=-1)
return result
async def status_del(self) -> Result.IntResult:
user_id_result = await self.id()
if user_id_result.error:
return Result.IntResult(error=True, info='User not exist', result=-1)
async_session = NBdb().get_async_session()
async with async_session() as session:
try:
async with session.begin():
session_result = await session.execute(
select(Vacation).where(Vacation.user_id == user_id_result.result)
)
exist_status = session_result.scalar_one()
await session.delete(exist_status)
await session.commit()
result = Result.IntResult(error=False, info='Success', result=0)
except Exception as e:
await session.rollback()
result = Result.IntResult(error=True, info=repr(e), result=-1)
return result
async def sign_in(self, *, sign_in_info: Optional[str] = 'Normal sign in') -> Result.IntResult:
"""
        Daily sign-in.
        :param sign_in_info: sign-in note to record
        :return: IntResult
        1: already signed in today
        0: signed in successfully
        -1: error
"""
user_id_result = await self.id()
if user_id_result.error:
return Result.IntResult(error=True, info='User not exist', result=-1)
datetime_now = datetime.now()
date_now = datetime_now.date()
async_session = NBdb().get_async_session()
async with async_session() as session:
try:
async with session.begin():
try:
session_result = await session.execute(
select(UserSignIn).
where(UserSignIn.user_id == user_id_result.result).
where(UserSignIn.sign_in_date == date_now)
)
                        # A sign-in record for today already exists
exist_sign_in = session_result.scalar_one()
exist_sign_in.sign_in_info = 'Duplicate sign in'
exist_sign_in.updated_at = datetime.now()
result = Result.IntResult(error=False, info='Success upgraded', result=1)
except NoResultFound:
sign_in = UserSignIn(user_id=user_id_result.result, sign_in_date=date_now,
sign_in_info=sign_in_info, created_at=datetime.now())
session.add(sign_in)
result = Result.IntResult(error=False, info='Success added', result=0)
await session.commit()
except MultipleResultsFound:
await session.rollback()
result = Result.IntResult(error=True, info='MultipleResultsFound', result=-1)
except Exception as e:
await session.rollback()
result = Result.IntResult(error=True, info=repr(e), result=-1)
return result
async def sign_in_statistics(self) -> DateListResult:
"""
        Query all of this user's sign-in dates.
:return: Result: List[sign_in_date]
"""
user_id_result = await self.id()
if user_id_result.error:
return self.DateListResult(error=True, info='User not exist', result=[])
async_session = NBdb().get_async_session()
async with async_session() as session:
async with session.begin():
try:
session_result = await session.execute(
select(UserSignIn.sign_in_date).
where(UserSignIn.user_id == user_id_result.result)
)
res = [x for x in session_result.scalars().all()]
result = self.DateListResult(error=False, info='Success', result=res)
except Exception as e:
result = self.DateListResult(error=True, info=repr(e), result=[])
return result
async def sign_in_continuous_days(self) -> Result.IntResult:
"""
        Count the consecutive sign-in days ending today (0 if the user has not signed in today).
"""
sign_in_statistics_result = await self.sign_in_statistics()
if sign_in_statistics_result.error:
return Result.IntResult(error=True, info=sign_in_statistics_result.info, result=-1)
        # The user has never signed in
if not sign_in_statistics_result.result:
return Result.IntResult(error=False, info='Success with sign in not found', result=0)
datetime_now = datetime.now()
date_now = datetime_now.date()
date_now_toordinal = date_now.toordinal()
        # Convert the sign-in dates to ordinal integers for easy comparison
        all_sign_in_list = list(set([x.toordinal() for x in sign_in_statistics_result.result]))
        # Deduplicate, then sort in descending order (newest first)
        all_sign_in_list.sort(reverse=True)
        # If today is not the newest signed-in date, the user has not signed in today, so the streak is 0
        if date_now_toordinal != all_sign_in_list[0]:
            return Result.IntResult(error=False, info='Success with not sign in today', result=0)
        # Walk the dates from newest to oldest; once the index no longer equals (today - date), the streak broke there
for index, value in enumerate(all_sign_in_list):
if index != date_now_toordinal - value:
return Result.IntResult(error=False, info='Success with found interrupt', result=index)
else:
            # The loop finished without finding a gap, so every recorded day is continuous
return Result.IntResult(error=False, info='Success with all continuous', result=len(all_sign_in_list))
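    # Worked example (illustrative, not from the source): if today's ordinal is 738000 and the
    # recorded sign-in ordinals are [738000, 737999, 737997], indices 0 and 1 match their offsets
    # from today (0 and 1), while index 2 has offset 3, so the method returns a streak of 2 days.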
async def favorability_status(self) -> Result.TupleResult:
"""
        Query this user's favorability record.
:return: Result:
Tuple[status: str, mood: float, favorability: float, energy: float, currency: float, response_threshold: float]
"""
user_id_result = await self.id()
if user_id_result.error:
return Result.TupleResult(error=True, info='User not exist', result=())
async_session = NBdb().get_async_session()
async with async_session() as session:
async with session.begin():
try:
session_result = await session.execute(
select(UserFavorability.status,
UserFavorability.mood,
UserFavorability.favorability,
UserFavorability.energy,
UserFavorability.currency,
UserFavorability.response_threshold).
where(UserFavorability.user_id == user_id_result.result)
)
res = session_result.one()
result = Result.TupleResult(error=False, info='Success', result=res)
except NoResultFound:
result = Result.TupleResult(error=True, info='NoResultFound', result=())
except MultipleResultsFound:
result = Result.TupleResult(error=True, info='MultipleResultsFound', result=())
except Exception as e:
result = Result.TupleResult(error=True, info=repr(e), result=())
return result
async def favorability_reset(
self,
*,
status: str = 'normal',
mood: float = 0,
favorability: float = 0,
energy: float = 0,
currency: float = 0,
response_threshold: float = 0
) -> Result.IntResult:
user_id_result = await self.id()
if user_id_result.error:
return Result.IntResult(error=True, info='User not exist', result=-1)
async_session = NBdb().get_async_session()
async with async_session() as session:
try:
async with session.begin():
try:
session_result = await session.execute(
select(UserFavorability).
where(UserFavorability.user_id == user_id_result.result)
)
                        # A favorability record already exists; overwrite it
exist_favorability = session_result.scalar_one()
exist_favorability.status = status
exist_favorability.mood = mood
exist_favorability.favorability = favorability
exist_favorability.energy = energy
exist_favorability.currency = currency
exist_favorability.response_threshold = response_threshold
exist_favorability.updated_at = datetime.now()
result = Result.IntResult(error=False, info='Success upgraded', result=0)
except NoResultFound:
favorability = UserFavorability(
user_id=user_id_result.result, status=status, mood=mood, favorability=favorability,
energy=energy, currency=currency, response_threshold=response_threshold,
created_at=datetime.now())
session.add(favorability)
result = Result.IntResult(error=False, info='Success added', result=0)
await session.commit()
except MultipleResultsFound:
await session.rollback()
result = Result.IntResult(error=True, info='MultipleResultsFound', result=-1)
except Exception as e:
await session.rollback()
result = Result.IntResult(error=True, info=repr(e), result=-1)
return result
async def favorability_add(
self,
*,
status: Optional[str] = None,
mood: Optional[float] = None,
favorability: Optional[float] = None,
energy: Optional[float] = None,
currency: Optional[float] = None,
response_threshold: Optional[float] = None
) -> Result.IntResult:
user_id_result = await self.id()
if user_id_result.error:
return Result.IntResult(error=True, info='User not exist', result=-1)
async_session = NBdb().get_async_session()
async with async_session() as session:
try:
async with session.begin():
session_result = await session.execute(
select(UserFavorability).
where(UserFavorability.user_id == user_id_result.result)
)
                    # A favorability record must already exist; adjust it in place
exist_favorability = session_result.scalar_one()
if status:
exist_favorability.status = status
if mood:
exist_favorability.mood += mood
if favorability:
exist_favorability.favorability += favorability
if energy:
exist_favorability.energy += energy
if currency:
exist_favorability.currency += currency
if response_threshold:
exist_favorability.response_threshold += response_threshold
exist_favorability.updated_at = datetime.now()
result = Result.IntResult(error=False, info='Success upgraded', result=0)
await session.commit()
except NoResultFound:
await session.rollback()
result = Result.IntResult(error=True, info='NoResultFound', result=-1)
except MultipleResultsFound:
await session.rollback()
result = Result.IntResult(error=True, info='MultipleResultsFound', result=-1)
except Exception as e:
await session.rollback()
result = Result.IntResult(error=True, info=repr(e), result=-1)
return result
```
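A minimal usage sketch for the user wrapper above. The class name and constructor sit outside this excerpt, so `DBUser(qq=...)` is an assumption inferred from how `self.qq` is used; the method calls themselves match the definitions shown.

```python
# Hypothetical driver code; the class name, constructor signature and QQ number are assumptions.
import asyncio

async def demo():
    user = DBUser(qq=12345678)                      # assumed constructor
    if not await user.exist():
        await user.add(nickname='demo', aliasname=None)
    sign_result = await user.sign_in(sign_in_info='Manual sign in')
    if not sign_result.error:
        streak = await user.sign_in_continuous_days()
        print(f'sign-in code: {sign_result.result}, continuous days: {streak.result}')

asyncio.run(demo())
```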
#### File: utils/Omega_plugin_utils/__init__.py
```python
from typing import Optional
from nonebot.plugin import Export
from nonebot.typing import T_State
from .rules import OmegaRules
from .encrypt import AESEncryptStr
from .cooldown import PluginCoolDown
from .permission import PermissionChecker
from .http_fetcher import HttpFetcher
from .message_sender import MsgSender
from .picture_encoder import PicEncoder
from .picture_effector import PicEffector
from .process_utils import ProcessUtils
from .zip_utils import create_zip_file, create_7z_file
def init_export(
plugin_export: Export,
custom_name: str,
usage: str,
auth_node: list = None,
cool_down: list = None,
**kwargs: str) -> Export:
setattr(plugin_export, 'custom_name', custom_name)
setattr(plugin_export, 'usage', usage)
setattr(plugin_export, 'auth_node', auth_node)
setattr(plugin_export, 'cool_down', cool_down)
for key, value in kwargs.items():
setattr(plugin_export, key, value)
return plugin_export
def init_permission_state(
name: str,
notice: Optional[bool] = None,
command: Optional[bool] = None,
level: Optional[int] = None,
auth_node: Optional[str] = None) -> T_State:
return {
'_matcher': name,
'_notice_permission': notice,
'_command_permission': command,
'_permission_level': level,
'_auth_node': auth_node
}
__all__ = [
'init_export',
'init_permission_state',
'OmegaRules',
'AESEncryptStr',
'PluginCoolDown',
'PermissionChecker',
'HttpFetcher',
'MsgSender',
'PicEncoder',
'PicEffector',
'ProcessUtils',
'create_zip_file',
'create_7z_file'
]
```
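A sketch of how a plugin might consume these helpers, assuming the package is importable as `omega_miya.utils.Omega_plugin_utils` (consistent with the imports used elsewhere in this repo); the matcher and auth-node names are made up.

```python
# Hypothetical plugin-side usage; 'demo_plugin' and 'demo.basic' are illustrative names only.
from nonebot.plugin import Export
from omega_miya.utils.Omega_plugin_utils import init_export, init_permission_state

__plugin_export = init_export(
    plugin_export=Export(),
    custom_name='demo_plugin',
    usage='/demo',
    auth_node=['demo.basic'])

# State dict handed to a matcher so the permission checker can read it later.
demo_state = init_permission_state(
    name='demo_matcher',
    command=True,
    level=10,
    auth_node='demo.basic')
```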
#### File: utils/Omega_plugin_utils/message_sender.py
```python
from nonebot import logger
from typing import Optional, List, Union
from nonebot.adapters.cqhttp.bot import Bot
from nonebot.adapters.cqhttp.message import Message, MessageSegment
from omega_miya.utils.Omega_Base import DBBot, DBBotGroup, DBFriend, DBSubscription
class MsgSender(object):
def __init__(self, bot: Bot, log_flag: Optional[str] = 'DefaultSender'):
self.bot = bot
self.self_bot = DBBot(self_qq=int(bot.self_id))
self.log_flag = f'MsgSender/{log_flag}/Bot[{bot.self_id}]'
async def safe_broadcast_groups_subscription(
self, subscription: DBSubscription, message: Union[str, Message, MessageSegment]):
"""
向所有具有某个订阅且启用了通知权限 notice permission 的群组发送消息
"""
# 获取所有需要通知的群组
notice_group_res = await subscription.sub_group_list_by_notice_permission(self_bot=self.self_bot,
notice_permission=1)
if notice_group_res.error:
logger.opt(colors=True).error(
f'<Y><lw>{self.log_flag}</lw></Y> | Can not send subscription '
f'{subscription.sub_type}/{subscription.sub_id} broadcast message, '
f'getting sub group list with notice permission failed, error: {notice_group_res.info}')
return
for group_id in notice_group_res.result:
try:
await self.bot.send_group_msg(group_id=group_id, message=message)
except Exception as e:
logger.opt(colors=True).warning(
f'<Y><lw>{self.log_flag}</lw></Y> | Sending subscription '
f'{subscription.sub_type}/{subscription.sub_id} broadcast message '
f'to group: {group_id} failed, error: {repr(e)}')
continue
async def safe_broadcast_groups_subscription_node_custom(
self, subscription: DBSubscription, message_list: List[Union[str, Message, MessageSegment]],
*,
custom_nickname: str = '<PASSWORD>'
):
"""
向所有具有某个订阅且启用了通知权限 notice permission 的群组发送自定义转发消息节点
仅支持 cq-http
"""
# 获取所有需要通知的群组
notice_group_res = await subscription.sub_group_list_by_notice_permission(self_bot=self.self_bot,
notice_permission=1)
if notice_group_res.error:
logger.opt(colors=True).error(
f'<Y><lw>{self.log_flag}</lw></Y> | Can not send subscription '
f'{subscription.sub_type}/{subscription.sub_id} broadcast node_custom message, '
f'getting sub group list with notice permission failed, error: {notice_group_res.info}')
return
        # Build the custom forward-message nodes
custom_user_id = self.bot.self_id
node_message = []
for msg in message_list:
if not msg:
logger.opt(colors=True).warning(
f'<Y><lw>{self.log_flag}</lw></Y> | A None-type message in message_list.')
continue
node_message.append({
"type": "node",
"data": {
"name": custom_nickname,
"uin": custom_user_id,
"content": msg
}
})
for group_id in notice_group_res.result:
try:
await self.bot.send_group_forward_msg(group_id=group_id, messages=node_message)
except Exception as e:
logger.opt(colors=True).warning(
f'<Y><lw>{self.log_flag}</lw></Y> | Sending subscription '
f'{subscription.sub_type}/{subscription.sub_id} broadcast node_custom message '
f'to group: {group_id} failed, error: {repr(e)}')
continue
async def safe_send_group_node_custom(
self, group_id: int, message_list: List[Union[str, Message, MessageSegment]],
*,
custom_nickname: str = '<PASSWORD>'
):
"""
向某个群组发送自定义转发消息节点
仅支持 cq-http
"""
# 构造自定义消息节点
custom_user_id = self.bot.self_id
node_message = []
for msg in message_list:
if not msg:
logger.opt(colors=True).warning(
f'<Y><lw>{self.log_flag}</lw></Y> | A None-type message in message_list.')
continue
node_message.append({
"type": "node",
"data": {
"name": custom_nickname,
"user_id": custom_user_id,
"uin": custom_user_id,
"content": msg
}
})
try:
await self.bot.send_group_forward_msg(group_id=group_id, messages=node_message)
except Exception as e:
logger.opt(colors=True).warning(
f'<Y><lw>{self.log_flag}</lw></Y> | Sending node_custom message '
f'to group: {group_id} failed, error: {repr(e)}')
async def safe_broadcast_friends_subscription(
self, subscription: DBSubscription, message: Union[str, Message, MessageSegment]):
"""
向所有具有某个订阅且启用了通知权限 notice permission 的好友发送消息
"""
# 获取所有需要通知的好友
notice_friends_res = await subscription.sub_user_list_by_private_permission(self_bot=self.self_bot,
private_permission=1)
if notice_friends_res.error:
logger.opt(colors=True).error(
f'<Y><lw>{self.log_flag}</lw></Y> | Can not send subscription '
f'{subscription.sub_type}/{subscription.sub_id} broadcast message, '
f'getting sub friends list with private permission failed, error: {notice_friends_res.info}')
return
for user_id in notice_friends_res.result:
try:
await self.bot.send_private_msg(user_id=user_id, message=message)
except Exception as e:
logger.opt(colors=True).warning(
f'<Y><lw>{self.log_flag}</lw></Y> | Sending subscription '
f'{subscription.sub_type}/{subscription.sub_id} broadcast message '
f'to user: {user_id} failed, error: {repr(e)}')
continue
async def safe_send_msg_enabled_friends(self, message: Union[str, Message, MessageSegment]):
"""
向所有具有好友权限 private permission (已启用bot命令) 的好友发送消息
"""
# 获取所有启用 private permission 好友
enabled_friends_res = await DBFriend.list_exist_friends_by_private_permission(self_bot=self.self_bot,
private_permission=1)
if enabled_friends_res.error:
logger.opt(colors=True).error(
f'<Y><lw>{self.log_flag}</lw></Y> | Can not send message to friends, '
f'getting enabled friends list with private permission failed, error: {enabled_friends_res.info}')
return
for user_id in enabled_friends_res.result:
try:
await self.bot.send_private_msg(user_id=user_id, message=message)
except Exception as e:
logger.opt(colors=True).warning(
f'<Y><lw>{self.log_flag}</lw></Y> | Sending message to friend: {user_id} failed, error: {repr(e)}')
continue
async def safe_send_msg_all_friends(self, message: Union[str, Message, MessageSegment]):
"""
向所有好友发送消息
"""
# 获取数据库中所有好友
all_friends_res = await DBFriend.list_exist_friends(self_bot=self.self_bot)
if all_friends_res.error:
logger.opt(colors=True).error(
f'<Y><lw>{self.log_flag}</lw></Y> | Can not send message to friends, '
f'getting all friends list with private permission failed, error: {all_friends_res.info}')
return
for user_id in all_friends_res.result:
try:
await self.bot.send_private_msg(user_id=user_id, message=message)
except Exception as e:
logger.opt(colors=True).warning(
f'<Y><lw>{self.log_flag}</lw></Y> | Sending message to friend: {user_id} failed, error: {repr(e)}')
continue
async def safe_send_msg_enabled_command_groups(self, message: Union[str, Message, MessageSegment]):
"""
向所有具有命令权限 command permission 的群组发送消息
"""
# 获取所有需要通知的群组
command_group_res = await DBBotGroup.list_exist_bot_groups_by_command_permissions(self_bot=self.self_bot,
command_permissions=1)
if command_group_res.error:
logger.opt(colors=True).error(
f'<Y><lw>{self.log_flag}</lw></Y> | Can not send subscription message to command groups, '
f'getting command group list failed, error: {command_group_res.info}')
return
for group_id in command_group_res.result:
try:
await self.bot.send_group_msg(group_id=group_id, message=message)
except Exception as e:
logger.opt(colors=True).warning(
f'<Y><lw>{self.log_flag}</lw></Y> | Sending message to group: {group_id} failed, error: {repr(e)}')
continue
async def safe_send_msg_enabled_notice_groups(self, message: Union[str, Message, MessageSegment]):
"""
向所有具有通知权限 notice permission 的群组发送消息
"""
# 获取所有需要通知的群组
notice_group_res = await DBBotGroup.list_exist_bot_groups_by_notice_permissions(self_bot=self.self_bot,
notice_permissions=1)
if notice_group_res.error:
logger.opt(colors=True).error(
f'<Y><lw>{self.log_flag}</lw></Y> | Can not send subscription message to notice groups, '
f'getting notice group list failed, error: {notice_group_res.info}')
return
for group_id in notice_group_res.result:
try:
await self.bot.send_group_msg(group_id=group_id, message=message)
except Exception as e:
logger.opt(colors=True).warning(
f'<Y><lw>{self.log_flag}</lw></Y> | Sending message to group: {group_id} failed, error: {repr(e)}')
continue
async def safe_send_msg_permission_level_groups(
self, permission_level: int, message: Union[str, Message, MessageSegment]):
"""
向所有大于等于指定权限等级 permission level 的群组发送消息
"""
# 获取所有需要通知的群组
level_group_res = await DBBotGroup.list_exist_bot_groups_by_permission_level(self_bot=self.self_bot,
permission_level=permission_level)
if level_group_res.error:
logger.opt(colors=True).error(
f'<Y><lw>{self.log_flag}</lw></Y> | Can not send subscription message to groups had level, '
f'getting permission level group list failed, error: {level_group_res.info}')
return
for group_id in level_group_res.result:
try:
await self.bot.send_group_msg(group_id=group_id, message=message)
except Exception as e:
logger.opt(colors=True).warning(
f'<Y><lw>{self.log_flag}</lw></Y> | Sending message to group: {group_id} failed, error: {repr(e)}')
continue
async def safe_send_msg_all_groups(self, message: Union[str, Message, MessageSegment]):
"""
向所有群组发送消息
"""
# 获取所有需要通知的群组
all_group_res = await DBBotGroup.list_exist_bot_groups(self_bot=self.self_bot)
if all_group_res.error:
logger.opt(colors=True).error(
f'<Y><lw>{self.log_flag}</lw></Y> | Can not send subscription message to all groups, '
f'getting permission all group list failed, error: {all_group_res.info}')
return
for group_id in all_group_res.result:
try:
await self.bot.send_group_msg(group_id=group_id, message=message)
except Exception as e:
logger.opt(colors=True).warning(
f'<Y><lw>{self.log_flag}</lw></Y> | Sending message to group: {group_id} failed, error: {repr(e)}')
continue
__all__ = [
'MsgSender'
]
```
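A hedged usage sketch: `MsgSender` only needs a connected cq-http `Bot` instance, so a scheduled job might drive it like this (the subscription object and message text are placeholders).

```python
# Hypothetical scheduled-job snippet; assumes at least one bot is connected.
from nonebot import get_bots

async def broadcast_demo(subscription):
    for bot in get_bots().values():
        sender = MsgSender(bot=bot, log_flag='DemoJob')
        await sender.safe_broadcast_groups_subscription(subscription, message='New update available!')
```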
#### File: utils/text_to_img/__init__.py
```python
import os
import asyncio
from datetime import datetime
from nonebot import logger, get_driver
from PIL import Image, ImageDraw, ImageFont
from omega_miya.utils.Omega_Base import Result
global_config = get_driver().config
TMP_PATH = global_config.tmp_path_
FOLDER_PATH = os.path.abspath(os.path.dirname(__file__))
def __text_to_img(text: str, image_wight: int = 512) -> Image:
font_path = os.path.abspath(os.path.join(FOLDER_PATH, 'default_font.otf'))
if not os.path.exists(font_path):
raise ValueError('Font not found')
    # Render the main text body
font_main_size = image_wight // 25
font_main = ImageFont.truetype(font_path, font_main_size)
    # Split the text into lines based on rendered width
spl_num = 0
spl_list = []
for num in range(len(text)):
text_w = font_main.getsize_multiline(text[spl_num:num])[0]
if text_w >= image_wight * 0.78:
spl_list.append(text[spl_num:num])
spl_num = num
    else:
        # for-else: once the loop finishes, append the remaining tail as the last line
        spl_list.append(text[spl_num:])
test_main_fin = '\n' + '\n'.join(spl_list) + '\n'
    # Draw the text onto a transparent layer
text_w, text_h = font_main.getsize_multiline(test_main_fin)
text_main_img = Image.new(mode="RGBA", size=(text_w, text_h), color=(0, 0, 0, 0))
ImageDraw.Draw(text_main_img).multiline_text(xy=(0, 0), text=test_main_fin, font=font_main, fill=(0, 0, 0))
    # Create the background layer
    image_height = text_h + 100
    background = Image.new(mode="RGB", size=(image_wight, image_height), color=(255, 255, 255))
    # Paste the text layer onto the background
    background.paste(im=text_main_img, box=(image_wight // 10, 50), mask=text_main_img)
return background
async def text_to_img(text: str, image_wight: int = 512) -> Result.TextResult:
def __handle():
byte_img = __text_to_img(text, image_wight)
        # Make sure the output folder exists
img_folder_path = os.path.abspath(os.path.join(TMP_PATH, 'text_to_img'))
if not os.path.exists(img_folder_path):
os.makedirs(img_folder_path)
img_path = os.path.abspath(
os.path.join(img_folder_path, f"{hash(text)}_{datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.jpg"))
        # Save the image as JPEG
byte_img.save(img_path, 'JPEG')
return img_path
loop = asyncio.get_running_loop()
try:
path_result = await loop.run_in_executor(None, __handle)
path = os.path.abspath(path_result)
return Result.TextResult(error=False, info='Success', result=path)
except Exception as e:
logger.error(f'text_to_img failed, error: {repr(e)}')
return Result.TextResult(error=True, info=repr(e), result='')
__all__ = [
'text_to_img'
]
``` |
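A short caller sketch, assuming `Result.TextResult` exposes the `error`/`info`/`result` fields used above.

```python
# Hypothetical async caller; the text content is a placeholder.
async def render_demo():
    result = await text_to_img('A fairly long paragraph of text that should wrap onto several lines.')
    if not result.error:
        print('image saved to', result.result)
    else:
        print('render failed:', result.info)
```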
{
"source": "58565856/checkinpanel",
"score": 2
} |
#### File: 58565856/checkinpanel/ck_acfun.py
```python
import re
import requests
import urllib3
from notify_mtr import send
from utils import get_data
urllib3.disable_warnings()
class AcFun:
def __init__(self, check_items):
self.check_items = check_items
self.contentid = "27259341"
self.content_type = "application/x-www-form-urlencoded"
self.user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36 Edg/91.0.864.70"
def get_cookies(self, session, phone, password):
url = "https://id.app.acfun.cn/rest/app/login/signin"
headers = {
"Host": "id.app.acfun.cn",
"user-agent": "AcFun/6.39.0 (iPhone; iOS 14.3; Scale/2.00)",
"devicetype": "0",
"accept-language": "zh-Hans-CN;q=1, en-CN;q=0.9, ja-CN;q=0.8, zh-Hant-HK;q=0.7, io-Latn-CN;q=0.6",
"accept": "application/json",
"content-type": self.content_type,
}
data = f"password={password}&username={phone}"
response = session.post(url=url, data=data, headers=headers, verify=False)
acpasstoken = response.json().get("acPassToken")
auth_key = str(response.json().get("auth_key"))
if acpasstoken and auth_key:
cookies = {"acPasstoken": acpasstoken, "auth_key": auth_key}
return cookies
else:
return False
def get_token(self, session, cookies):
url = "https://id.app.acfun.cn/rest/web/token/get"
data = "sid=acfun.midground.api"
headers = {"Content-Type": self.content_type}
response = session.post(
url=url, cookies=cookies, data=data, headers=headers, verify=False
)
return response.json().get("acfun.midground.api_st")
def get_video(self, session):
url = "https://www.acfun.cn/rest/pc-direct/rank/channel"
data = "channelId=0&rankPeriod=DAY"
headers = {
"Content-Type": self.content_type,
"User-Agent": self.user_agent,
}
response = session.post(url=url, data=data, headers=headers, verify=False)
self.contentid = response.json().get("rankList")[0].get("contentId")
return self.contentid
def sign(self, session, cookies):
url = "https://www.acfun.cn/rest/pc-direct/user/signIn"
headers = {"User-Agent": self.user_agent}
response = session.post(url=url, cookies=cookies, headers=headers, verify=False)
return response.json().get("msg")
def danmu(self, session, cookies):
url = "https://www.acfun.cn/rest/pc-direct/new-danmaku/add"
data = {
"mode": "1",
"color": "16777215",
"size": "25",
"body": "123321",
"videoId": "26113662",
"position": "2719",
"type": "douga",
"id": "31224739",
"subChannelId": "1",
"subChannelName": "动画",
}
headers = {
"cookie": f"acPasstoken={cookies.get('acPasstoken')};auth_key={cookies.get('auth_key')}",
"referer": "https://www.acfun.cn/",
"User-Agent": self.user_agent,
}
res = session.get(
url=f"https://www.acfun.cn/v/ac{self.contentid}", headers=headers
)
videoId = re.findall('"currentVideoId":(\d+),', res.text)
subChannel = re.findall(
'{subChannelId:(\d+),subChannelName:"([\u4e00-\u9fa5]+)"}', res.text
)
if len(videoId) > 0:
data["videoId"] = videoId[0]
data["subChannelId"] = subChannel[0][0]
data["subChannelName"] = subChannel[0][1]
response = session.post(url=url, data=data, headers=headers, verify=False)
if response.json().get("result") == 0:
msg = "弹幕成功"
else:
msg = "弹幕失败"
return msg
def throwbanana(self, session, cookies):
url = "https://www.acfun.cn/rest/pc-direct/banana/throwBanana"
data = {"resourceId": self.contentid, "count": "1", "resourceType": "2"}
headers = {
"cookie": f"acPasstoken={cookies.get('acPasstoken')};auth_key={cookies.get('auth_key')}",
"referer": "https://www.acfun.cn/",
"User-Agent": self.user_agent,
}
response = session.post(url=url, data=data, headers=headers, verify=False)
if response.json().get("result") == 0:
msg = "香蕉成功"
else:
msg = "香蕉失败"
return msg
def like(self, session, token):
like_url = "https://api.kuaishouzt.com/rest/zt/interact/add"
unlike_url = "https://api.kuaishouzt.com/rest/zt/interact/delete"
headers = {
"Content-Type": self.content_type,
"User-Agent": self.user_agent,
}
cookies = {"acfun.midground.api_st": token, "kpn": "ACFUN_APP"}
body = f"interactType=1&objectId={self.contentid}&objectType=2&subBiz=mainApp"
response = session.post(
url=like_url, cookies=cookies, data=body, headers=headers, verify=False
)
session.post(
url=unlike_url, cookies=cookies, data=body, headers=headers, verify=False
)
if response.json().get("result") == 1:
msg = "点赞成功"
else:
msg = "点赞失败"
return msg
def share(self, session, cookies):
url = "https://api-ipv6.acfunchina.com/rest/app/task/reportTaskAction?taskType=1&market=tencent&product=ACFUN_APP&appMode=0"
headers = {"Content-Type": self.content_type}
response = session.get(url=url, cookies=cookies, headers=headers, verify=False)
if response.json().get("result") == 0:
msg = "分享成功"
else:
msg = "分享失败"
return msg
def main(self):
msg_all = ""
for check_item in self.check_items:
phone = check_item.get("phone")
password = check_item.get("password")
session = requests.session()
self.get_video(session=session)
cookies = self.get_cookies(session=session, phone=phone, password=password)
token = self.get_token(session=session, cookies=cookies)
sign_msg = self.sign(session=session, cookies=cookies)
like_msg = self.like(session=session, token=token)
share_msg = self.share(session=session, cookies=cookies)
danmu_msg = self.danmu(session=session, cookies=cookies)
throwbanana_msg = self.throwbanana(session=session, cookies=cookies)
msg = (
f"帐号信息: *******{phone[-4:]}\n"
f"签到状态: {sign_msg}\n"
f"点赞任务: {like_msg}\n"
f"弹幕任务: {danmu_msg}\n"
f"香蕉任务: {throwbanana_msg}\n"
f"分享任务: {share_msg}"
)
msg_all += msg + "\n\n"
return msg_all
if __name__ == "__main__":
data = get_data()
_check_items = data.get("ACFUN", [])
res = AcFun(check_items=_check_items).main()
send("AcFun", res)
```
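The script is normally fed by `get_data()` from `utils` (not shown here); a minimal sketch of driving the class directly with a made-up account entry looks like this.

```python
# Hypothetical direct invocation; phone and password are placeholders.
check_items = [{"phone": "13800000000", "password": "example-password"}]
print(AcFun(check_items=check_items).main())
```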
#### File: 58565856/checkinpanel/ck_airport.py
```python
import json
import re
import traceback
import requests
import urllib3
from notify_mtr import send
from utils import get_data
urllib3.disable_warnings()
class SspanelQd(object):
def __init__(self, check_items):
self.check_items = check_items
@staticmethod
def checkin(url, email, password):
url = url.rstrip("/")
email = email.split("@")
if len(email) > 1:
email = email[0] + "%40" + email[1]
else:
email = email[0]
session = requests.session()
"""
        The except blocks below catch requests exceptions so that a transient
        network failure does not abort the remaining check-ins.
"""
try:
session.get(url, verify=False)
except requests.exceptions.ConnectionError:
msg = url + "\n" + "网络不通"
return msg
except requests.exceptions.ChunkedEncodingError:
msg = url + "\n" + "分块编码错误"
return msg
except Exception:
msg = url + "\n" + "未知错误,请查看日志"
print(f"未知错误,错误信息:\n{traceback.format_exc()}")
return msg
login_url = url + "/auth/login"
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
}
post_data = "email=" + email + "&passwd=" + password + "&code="
post_data = post_data.encode()
try:
res = session.post(login_url, post_data, headers=headers, verify=False)
res_str = res.text.encode("utf-8").decode("unicode_escape")
print(f"{url} 接口登录返回信息:{res_str}")
res_dict = json.loads(res_str)
if res_dict.get("ret") == 0:
msg = url + "\n" + str(res_dict.get("msg"))
return msg
except Exception:
msg = url + "\n" + "登录失败,请查看日志"
print(f"登录失败,错误信息:\n{traceback.format_exc()}")
return msg
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36",
"Referer": url + "/user",
}
try:
response = session.post(
url + "/user/checkin", headers=headers, verify=False
)
res_str = response.text.encode("utf-8").decode("unicode_escape")
print(f"{url} 接口签到返回信息:{res_str}")
res_dict = json.loads(res_str)
check_msg = res_dict.get("msg")
if check_msg:
msg = url + "\n" + str(check_msg)
else:
msg = url + "\n" + str(res_dict)
except Exception:
msg = url + "\n" + "签到失败,请查看日志"
print(f"签到失败,错误信息:\n{traceback.format_exc()}")
info_url = url + "/user"
response = session.get(info_url, verify=False)
"""
        The info parsing below only works with the editXY theme.
"""
try:
level = re.findall(r'\["Class", "(.*?)"],', response.text)[0]
day = re.findall(r'\["Class_Expire", "(.*)"],', response.text)[0]
rest = re.findall(r'\["Unused_Traffic", "(.*?)"]', response.text)[0]
msg = (
url
+ "\n- 今日签到信息:"
+ str(msg)
+ "\n- 用户等级:"
+ str(level)
+ "\n- 到期时间:"
+ str(day)
+ "\n- 剩余流量:"
+ str(rest)
)
except Exception:
pass
return msg
def main(self):
msg_all = ""
for check_item in self.check_items:
            # Airport panel URL
            url = str(check_item.get("url"))
            # Login credentials
            email = str(check_item.get("email"))
            password = str(check_item.get("password"))
if url and email and password:
msg = self.checkin(url=url, email=email, password=password)
else:
msg = "配置错误"
msg_all += msg + "\n\n"
return msg_all
if __name__ == "__main__":
data = get_data()
_check_items = data.get("AIRPORT", [])
res = SspanelQd(check_items=_check_items).main()
send("机场签到", res)
```
#### File: 58565856/checkinpanel/ck_csdn.py
```python
import requests
from notify_mtr import send
from utils import get_data
class CSDN:
def __init__(self, check_items):
self.check_items = check_items
self.headers = {
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 11_2_2) AppleWebKit/537.36 (KHTML, like Gecko)"
"Chrome/88.0.4324.182 Safari/537.36 Edg/88.0.705.74"
}
def sign(self, cookies):
url = "https://me.csdn.net/api/LuckyDraw_v2/signIn"
response = requests.get(url=url, headers=self.headers, cookies=cookies).json()
if response.get("code") == 200:
msg = response.get("data").get("msg")
else:
msg = "签到失败"
print(response)
return msg
def draw(self, cookies):
url = "https://me.csdn.net/api/LuckyDraw_v2/goodluck"
response = requests.get(url=url, headers=self.headers, cookies=cookies).json()
if response.get("code") == 200:
if response.get("data").get("prize_title") != None:
msg = f", {response.get('data').get('prize_title')}"
else:
msg = f"{response.get('data').get('msg')}"
else:
msg = "抽奖失败\n"
return msg
def main(self):
msg_all = ""
for check_item in self.check_items:
cookie = {
item.split("=")[0]: item.split("=")[1]
for item in check_item.get("cookie").split("; ")
}
try:
user_name = cookie.get("UserName", "")
except Exception as e:
print(f"获取用户信息失败: {e}")
user_name = "未获取到用户信息"
sign_msg = self.sign(cookies=cookie)
draw_msg = self.draw(cookies=cookie)
msg = f"帐号信息: {user_name}\n签到信息: {sign_msg}\n抽奖结果: {draw_msg}"
msg_all += msg + "\n\n"
return msg_all
if __name__ == "__main__":
data = get_data()
_check_items = data.get("CSDN", [])
res = CSDN(check_items=_check_items).main()
send("CSDN", res)
```
#### File: 58565856/checkinpanel/ck_lecloud.py
```python
import requests
from notify_mtr import send
from utils import get_data
class LECloud:
def __init__(self, check_items):
self.check_items = check_items
self.total_size = ""
def userinfo(self, cookie):
url = "https://pimapi.lenovomm.com/userspaceapi/storage/userinfo"
headers = {
"cookie": cookie,
"user-agent": "Mozilla/5.0 (Linux; Android 11; PCAM00 Build/RKQ1.201217.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/83.0.4103.106 Mobile Safari/537.36 com.lenovo.leos.cloud.sync/6.3.0.99",
}
res = requests.post(url=url, headers=headers)
if "error" in res.text:
print("cookie 失效")
else:
self.total_size = res.json().get("data", {}).get("totalSize") // 1048576
    # Sign in to claim daily storage space
def addspace(self, cookie):
url = "https://pim.lenovo.com/lesynch5/userspaceapi/v4/addspace"
headers = {
"cookie": cookie,
"user-agent": "Mozilla/5.0 (Linux; Android 11; PCAM00 Build/RKQ1.201217.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/83.0.4103.106 Mobile Safari/537.36 com.lenovo.leos.cloud.sync/6.3.0.99",
}
res = requests.get(url=url, headers=headers)
if "spaceadd" in res.text:
data = res.json()
if "lastspaceadd" in res.text:
                msg = f'今日已获{data.get("lastspaceadd")}M, 总空间{self.total_size}M'
else:
msg = f'获得{data.get("spaceadd")}M, 总空间{self.total_size + data.get("spaceadd")}M'
        else:
            # No "spaceadd" in the response; assume the check-in failed (e.g. expired cookie)
            msg = "签到失败"
        return msg
def main(self):
msg_all = ""
for check_item in self.check_items:
cookie = check_item.get("cookie")
            self.userinfo(cookie)
msg = self.addspace(cookie)
msg_all += msg + "\n\n"
return msg_all
if __name__ == "__main__":
data = get_data()
_check_items = data.get("LECLOUD", [])
res = LECloud(check_items=_check_items).main()
send("联想乐云", res)
```
#### File: 58565856/checkinpanel/ck_smzdm.py
```python
from urllib.parse import quote, unquote
import requests
from notify_mtr import send
from utils import get_data
class Smzdm:
def __init__(self, check_items):
self.check_items = check_items
@staticmethod
def sign(session):
try:
current = session.get(
url="https://zhiyou.smzdm.com/user/info/jsonp_get_current"
).json()
if current["checkin"]["has_checkin"]:
msg = (
f"用户信息: {current.get('nickname', '')}\n目前积分: {current.get('point', '')}\n"
f"经验值: {current.get('exp', '')}\n金币: {current.get('gold', '')}\n"
f"碎银子: {current.get('silver', '')}\n威望: {current.get('prestige', '')}\n"
f"等级: {current.get('level', '')}\n"
f"已经签到: {current.get('checkin', {}).get('daily_checkin_num', '')} 天"
)
else:
response = (
session.get(
url="https://zhiyou.smzdm.com/user/checkin/jsonp_checkin"
)
.json()
.get("data", {})
)
msg = (
f"用户信息: {current.get('nickname', '')}\n目前积分: {response.get('point', '')}\n"
f"增加积分: {response.get('add_point', '')}\n经验值: {response.get('exp', '')}\n"
f"金币: {response.get('gold', '')}\n威望: {response.get('prestige', '')}\n"
f"等级: {response.get('rank', '')}\n"
f"已经签到: {response.get('checkin_num', {})} 天"
)
except Exception as e:
msg = f"签到状态: 签到失败\n错误信息: {e},请重新获取 cookie"
return msg
def main(self):
msg_all = ""
for check_item in self.check_items:
cookie = {
item.split("=")[0]: quote(unquote(item.split("=")[1]))
for item in check_item.get("cookie").split("; ")
if item.split("=")[0] == "sess"
}
session = requests.session()
requests.utils.add_dict_to_cookiejar(session.cookies, cookie)
session.headers.update(
{
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "zh-CN,zh;q=0.9",
"Connection": "keep-alive",
"Host": "zhiyou.smzdm.com",
"Referer": "https://www.smzdm.com/",
"Sec-Fetch-Dest": "script",
"Sec-Fetch-Mode": "no-cors",
"Sec-Fetch-Site": "same-site",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36",
}
)
sign_msg = self.sign(session=session)
msg = f"{sign_msg}"
msg_all += msg + "\n\n"
return msg_all
if __name__ == "__main__":
data = get_data()
_check_items = data.get("SMZDM", [])
res = Smzdm(check_items=_check_items).main()
send("什么值得买", res)
```
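Note that `main()` keeps only the `sess` key from the configured cookie string, so each entry just needs a valid `sess` value. A sketch with a placeholder cookie:

```python
# Hypothetical direct invocation; the sess value is a placeholder.
check_items = [{"cookie": "sess=AAAA1234567890abcdef"}]
print(Smzdm(check_items=check_items).main())
```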
#### File: 58565856/checkinpanel/ck_wzyd.py
```python
from urllib import parse
import requests
from notify_mtr import send
from utils import get_data
class WZYD:
def __init__(self, check_items):
self.check_items = check_items
@staticmethod
def sign(data):
response = requests.post(
url="https://ssl.kohsocialapp.qq.com:10001/play/h5sign", data=data
).json()
try:
if response["result"] == 0:
msg = "签到成功"
else:
msg = response["returnMsg"]
except Exception:
msg = "请求失败,请检查接口"
return msg
def main(self):
msg_all = ""
for check_item in self.check_items:
data = check_item.get("data")
data = {k: v[0] for k, v in parse.parse_qs(data).items()}
try:
user_id = data.get("userId", "")
except Exception as e:
print(f"获取用户信息失败: {e}")
user_id = "未获取到用户信息"
sign_msg = self.sign(data=data)
msg = f"帐号信息: {user_id}\n签到信息: {sign_msg}"
msg_all += msg + "\n\n"
return msg_all
if __name__ == "__main__":
data = get_data()
_check_items = data.get("WZYD", [])
res = WZYD(check_items=_check_items).main()
send("王者营地", res)
``` |
{
"source": "589hero/putting-nerf-on-a-diet",
"score": 3
} |
#### File: putting-nerf-on-a-diet/nerf/model_utils.py
```python
import functools
from typing import Any, Callable
from flax import linen as nn
import jax
from jax import lax
from jax import random
import jax.numpy as jnp
class MLP(nn.Module):
"""A simple MLP."""
net_depth: int = 8 # The depth of the first part of MLP.
net_width: int = 256 # The width of the first part of MLP.
net_depth_condition: int = 1 # The depth of the second part of MLP.
net_width_condition: int = 128 # The width of the second part of MLP.
net_activation: Callable[..., Any] = nn.relu # The activation function.
skip_layer: int = 4 # The layer to add skip layers to.
num_rgb_channels: int = 3 # The number of RGB channels.
num_sigma_channels: int = 1 # The number of sigma channels.
@nn.compact
def __call__(self, x, condition=None):
"""
Evaluate the MLP.
Args:
x: jnp.ndarray(float32), [batch, num_samples, feature], points.
condition: jnp.ndarray(float32), [batch, feature], if not None, this
variable will be part of the input to the second part of the MLP
concatenated with the output vector of the first part of the MLP. If
None, only the first part of the MLP will be used with input x. In the
original paper, this variable is the view direction.
Returns:
raw_rgb: jnp.ndarray(float32), with a shape of
[batch, num_samples, num_rgb_channels].
raw_sigma: jnp.ndarray(float32), with a shape of
[batch, num_samples, num_sigma_channels].
"""
feature_dim = x.shape[-1]
num_samples = x.shape[1]
x = x.reshape([-1, feature_dim])
dense_layer = functools.partial(
nn.Dense, kernel_init=jax.nn.initializers.glorot_uniform())
inputs = x
dtype = x.dtype
for i in range(self.net_depth):
x = dense_layer(self.net_width, dtype = dtype)(x)
x = self.net_activation(x)
if i % self.skip_layer == 0 and i > 0:
x = jnp.concatenate([x, inputs], axis=-1)
raw_sigma = dense_layer(self.num_sigma_channels, dtype = dtype)(x).reshape(
[-1, num_samples, self.num_sigma_channels])
if condition is not None:
# Output of the first part of MLP.
bottleneck = dense_layer(self.net_width, dtype = dtype)(x)
# Broadcast condition from [batch, feature] to
# [batch, num_samples, feature] since all the samples along the same ray
# have the same viewdir.
condition = jnp.tile(condition[:, None, :], (1, num_samples, 1))
# Collapse the [batch, num_samples, feature] tensor to
# [batch * num_samples, feature] so that it can be fed into nn.Dense.
condition = condition.reshape([-1, condition.shape[-1]])
x = jnp.concatenate([bottleneck, condition], axis=-1)
# Here use 1 extra layer to align with the original nerf model.
for i in range(self.net_depth_condition):
x = dense_layer(self.net_width_condition, dtype = dtype)(x)
x = self.net_activation(x)
raw_rgb = dense_layer(self.num_rgb_channels, dtype = dtype)(x).reshape(
[-1, num_samples, self.num_rgb_channels])
return raw_rgb, raw_sigma
def cast_rays(z_vals, origins, directions):
return origins[..., None, :] + z_vals[..., None] * directions[..., None, :]
def sample_along_rays(key, origins, directions, num_samples, near, far,
randomized, lindisp):
"""
Stratified sampling along the rays.
Args:
key: jnp.ndarray, random generator key.
origins: jnp.ndarray(float32), [batch_size, 3], ray origins.
directions: jnp.ndarray(float32), [batch_size, 3], ray directions.
num_samples: int.
near: float, near clip.
far: float, far clip.
randomized: bool, use randomized stratified sampling.
lindisp: bool, sampling linearly in disparity rather than depth.
Returns:
z_vals: jnp.ndarray, [batch_size, num_samples], sampled z values.
points: jnp.ndarray, [batch_size, num_samples, 3], sampled points.
"""
batch_size = origins.shape[0]
dtype = origins.dtype
t_vals = jnp.linspace(0., 1., num_samples, dtype = dtype)
if lindisp:
z_vals = 1. / (1. / near * (1. - t_vals) + 1. / far * t_vals)
else:
z_vals = near * (1. - t_vals) + far * t_vals
if randomized:
mids = .5 * (z_vals[..., 1:] + z_vals[..., :-1])
upper = jnp.concatenate([mids, z_vals[..., -1:]], -1)
lower = jnp.concatenate([z_vals[..., :1], mids], -1)
t_rand = random.uniform(key, [batch_size, num_samples])
z_vals = lower + (upper - lower) * t_rand
else:
# Broadcast z_vals to make the returned shape consistent.
z_vals = jnp.broadcast_to(z_vals[None, ...], [batch_size, num_samples]).astype(dtype)
coords = cast_rays(z_vals, origins, directions)
return z_vals, coords
def posenc(x, min_deg, max_deg, legacy_posenc_order=False):
"""
Cat x with a positional encoding of x with scales 2^[min_deg, max_deg-1].
Instead of computing [sin(x), cos(x)], we use the trig identity
cos(x) = sin(x + pi/2) and do one vectorized call to sin([x, x+pi/2]).
Args:
x: jnp.ndarray, variables to be encoded. Note that x should be in [-pi, pi].
min_deg: int, the minimum (inclusive) degree of the encoding.
max_deg: int, the maximum (exclusive) degree of the encoding.
legacy_posenc_order: bool, keep the same ordering as the original tf code.
Returns:
encoded: jnp.ndarray, encoded variables.
"""
if min_deg == max_deg:
return x
dtype = x.dtype
scales = jnp.array([2 ** i for i in range(min_deg, max_deg)], dtype = dtype)
if legacy_posenc_order:
xb = x[..., None, :] * scales[:, None]
four_feat = jnp.reshape(
jnp.sin(jnp.stack([xb, xb + 0.5 * jnp.pi], -2)),
list(x.shape[:-1]) + [-1])
else:
xb = jnp.reshape((x[..., None, :] * scales[:, None]),
list(x.shape[:-1]) + [-1])
four_feat = jnp.sin(jnp.concatenate([xb, xb + 0.5 * jnp.pi], axis=-1))
return jnp.concatenate([x] + [four_feat], axis=-1)
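# In formula form, posenc returns the NeRF positional encoding
#     gamma(x) = (x, sin(2^min_deg * x), ..., sin(2^(max_deg-1) * x),
#                    cos(2^min_deg * x), ..., cos(2^(max_deg-1) * x)),
# where the cos terms come from the single vectorized jnp.sin call via sin(t + pi/2),
# so the output has dim(x) * (1 + 2 * (max_deg - min_deg)) features.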
def volumetric_rendering(rgb, sigma, z_vals, dirs, white_bkgd):
"""
Volumetric Rendering Function.
Args:
rgb: jnp.ndarray(float32), color, [batch_size, num_samples, 3]
sigma: jnp.ndarray(float32), density, [batch_size, num_samples, 1].
z_vals: jnp.ndarray(float32), [batch_size, num_samples].
dirs: jnp.ndarray(float32), [batch_size, 3].
white_bkgd: bool.
Returns:
comp_rgb: jnp.ndarray(float32), [batch_size, 3].
disp: jnp.ndarray(float32), [batch_size].
acc: jnp.ndarray(float32), [batch_size].
weights: jnp.ndarray(float32), [batch_size, num_samples]
"""
dtype = rgb.dtype
eps = jnp.array(1e-10, dtype = dtype)
dists = jnp.concatenate([
z_vals[..., 1:] - z_vals[..., :-1],
jnp.broadcast_to(jnp.array([1e10]),#, dtype = dtype),
z_vals[..., :1].shape)
], -1)
dists = dists * jnp.linalg.norm(dirs[..., None, :], axis=-1)
# Note that we're quietly turning sigma from [..., 0] to [...].
alpha = 1.0 - jnp.exp(-sigma[..., 0] * dists)
accum_prod = jnp.concatenate([
jnp.ones_like(alpha[..., :1], alpha.dtype),
jnp.cumprod(1.0 - alpha[..., :-1] + eps, axis=-1)
],
axis=-1)
weights = alpha * accum_prod
weights = weights.astype(dtype)
comp_rgb = (weights[..., None] * rgb).sum(axis=-2)
depth = (weights * z_vals).sum(axis=-1)
acc = weights.sum(axis=-1)
# Equivalent to (but slightly more efficient and stable than):
# disp = 1 / max(eps, where(acc > eps, depth / acc, 0))
inv_eps = 1 / eps
disp = acc / depth
disp = jnp.where((disp > 0) & (disp < inv_eps) & (acc > eps), disp, inv_eps)
if white_bkgd:
comp_rgb = comp_rgb + (1. - acc[..., None])
return comp_rgb, disp, acc, weights
def piecewise_constant_pdf(key, bins, weights, num_samples, randomized):
"""
Piecewise-Constant PDF sampling.
Args:
key: jnp.ndarray(float32), [2,], random number generator.
bins: jnp.ndarray(float32), [batch_size, num_bins + 1].
weights: jnp.ndarray(float32), [batch_size, num_bins].
num_samples: int, the number of samples.
randomized: bool, use randomized samples.
Returns:
z_samples: jnp.ndarray(float32), [batch_size, num_samples].
"""
# Pad each weight vector (only if necessary) to bring its sum to `eps`. This
# avoids NaNs when the input is zeros or small, but has no effect otherwise.
dtype = bins.dtype
eps = 1e-5
weight_sum = jnp.sum(weights, axis=-1, keepdims=True)
padding = jnp.maximum(0, eps - weight_sum)
weights += padding / weights.shape[-1]
weight_sum += padding
# Compute the PDF and CDF for each weight vector, while ensuring that the CDF
# starts with exactly 0 and ends with exactly 1.
pdf = weights / weight_sum
cdf = jnp.minimum(1, jnp.cumsum(pdf[..., :-1], axis=-1))
cdf = jnp.concatenate([
jnp.zeros(list(cdf.shape[:-1]) + [1], dtype = dtype), cdf,
jnp.ones(list(cdf.shape[:-1]) + [1], dtype = dtype)
],
axis=-1)
# Draw uniform samples.
if randomized:
# Note that `u` is in [0, 1) --- it can be zero, but it can never be 1.
u = random.uniform(key, list(cdf.shape[:-1]) + [num_samples])
else:
# Match the behavior of random.uniform() by spanning [0, 1-eps].
u = jnp.linspace(0., 1. - jnp.finfo(dtype).eps, num_samples, dtype = dtype)
u = jnp.broadcast_to(u, list(cdf.shape[:-1]) + [num_samples])
# Identify the location in `cdf` that corresponds to a random sample.
# The final `True` index in `mask` will be the start of the sampled interval.
mask = u[..., None, :] >= cdf[..., :, None]
def find_interval(x):
# Grab the value where `mask` switches from True to False, and vice versa.
# This approach takes advantage of the fact that `x` is sorted.
x0 = jnp.max(jnp.where(mask, x[..., None], x[..., :1, None]), -2)
x1 = jnp.min(jnp.where(~mask, x[..., None], x[..., -1:, None]), -2)
return x0, x1
bins_g0, bins_g1 = find_interval(bins)
cdf_g0, cdf_g1 = find_interval(cdf)
t = jnp.clip(jnp.nan_to_num((u - cdf_g0) / (cdf_g1 - cdf_g0), 0), 0, 1)
samples = bins_g0 + t * (bins_g1 - bins_g0)
# Prevent gradient from backprop-ing through `samples`.
return lax.stop_gradient(samples)
def sample_pdf(key, bins, weights, origins, directions, z_vals, num_samples,
randomized):
"""
Hierarchical sampling.
Args:
key: jnp.ndarray(float32), [2,], random number generator.
bins: jnp.ndarray(float32), [batch_size, num_bins + 1].
weights: jnp.ndarray(float32), [batch_size, num_bins].
origins: jnp.ndarray(float32), [batch_size, 3], ray origins.
directions: jnp.ndarray(float32), [batch_size, 3], ray directions.
z_vals: jnp.ndarray(float32), [batch_size, num_coarse_samples].
num_samples: int, the number of samples.
randomized: bool, use randomized samples.
Returns:
z_vals: jnp.ndarray(float32),
[batch_size, num_coarse_samples + num_fine_samples].
points: jnp.ndarray(float32),
[batch_size, num_coarse_samples + num_fine_samples, 3].
"""
z_samples = piecewise_constant_pdf(key, bins, weights, num_samples,
randomized)
# Compute united z_vals and sample points
z_vals = jnp.sort(jnp.concatenate([z_vals, z_samples], axis=-1), axis=-1)
coords = cast_rays(z_vals, origins, directions)
return z_vals, coords
def add_gaussian_noise(key, raw, noise_std, randomized):
"""
Adds gaussian noise to `raw`, which can used to regularize it.
Args:
key: jnp.ndarray(float32), [2,], random number generator.
raw: jnp.ndarray(float32), arbitrary shape.
noise_std: float, The standard deviation of the noise to be added.
randomized: bool, add noise if randomized is True.
Returns:
raw + noise: jnp.ndarray(float32), with the same shape as `raw`.
"""
if (noise_std is not None) and randomized:
return raw + random.normal(key, raw.shape, dtype=raw.dtype) * noise_std
else:
return raw
``` |
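For reference, `volumetric_rendering` above implements the standard NeRF quadrature; in the code's own terms (`alpha`, `weights`, `comp_rgb`):

$$\alpha_i = 1 - e^{-\sigma_i \delta_i}, \qquad w_i = \alpha_i \prod_{j<i} (1 - \alpha_j), \qquad \hat{C}(\mathbf{r}) = \sum_i w_i\, \mathbf{c}_i,$$

where $\delta_i$ is the gap between adjacent `z_vals` scaled by the ray direction norm, `acc` is $\sum_i w_i$, and the disparity estimate `acc / depth` falls back to $1/\epsilon$ whenever it lies outside $(0, 1/\epsilon)$.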
{
"source": "58DBA/ansible",
"score": 3
} |
#### File: ansible/playbooks/groups.py
```python
num = 16
target = 32
step = 5
iplist = []
groups = []
port = 7301
##################################
def getIpListFromFile(ipfile):
"""
get the ip list from file.
"""
with open(ipfile,'r') as f :
for line in f.readlines():
iplist.append(line.strip())
return(iplist)
##################################
def getIpGroups(target,step,iplist):
"""
get the ip group from iplist
"""
for time in range(0, target):
cur_g=[]
groups.append(cur_g)
for index in range(0, step):
cur_num = (time + index) % num
cur_g.append(iplist[cur_num])
return(groups)
##################################
def createInventoryFile(inventory, iplist, primary, secondary):
"""
create inventory file for playbook
"""
inventory_info="""#
# This is %s for playbook
[mongod]
%s
[primary]
%s
[secondary]
%s
""" % (inventory, '\n'.join(iplist), primary, '\n'.join(secondary))
return(inventory_info)
##################################
if __name__ == '__main__':
try:
fiplist=getIpListFromFile('./inventory/uc_hosts.txt')
groups=getIpGroups(target=32,step=5,iplist=fiplist)
for group in groups:
#inventory_file="./inventory/inventory_%s" % port
#inventory_info=createInventoryFile(inventory_file,
# group,
# group[0],
# group[1:])
#with open(inventory_file, "w") as f:
# f.write(inventory_info)
#print(inventory_file)
#print(group)
#print(inventory_info)
#print(">>" * 40)
print("sh.addShard(\"%s/%s:%s\")") % (port, group[0], port)
port+=1
except Exception as e:
print e
```
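For clarity on the grouping logic: `getIpGroups` slides a window of `step` consecutive hosts over the list, wrapping indices modulo the module-level `num = 16`, so the host file is expected to contain exactly 16 addresses. A non-executed illustration:

```python
# Illustration only, based on the defaults above (num = 16, step = 5, target = 32):
# group 0  -> iplist[0..4]
# group 1  -> iplist[1..5]
# ...
# group 15 -> iplist[15], iplist[0..3]
# Groups 16..31 repeat groups 0..15 because (time + index) % 16 wraps around,
# so each host appears in 10 of the 32 shard replica sets (5 distinct windows, each emitted twice).
```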
#### File: ansible/playbooks/playbook.py
```python
from collections import namedtuple
from ansible.parsing.dataloader import DataLoader
from ansible.vars import VariableManager
from ansible.inventory import Inventory
from ansible.playbook.play import Play
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.plugins.callback import CallbackBase
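# NOTE: the imports above target the legacy (pre-2.4) Ansible Python API;
# later releases reorganized these classes under ansible.vars.manager and
# ansible.inventory.manager.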
# Create a callback object so we can capture the output
class ResultsCollector(CallbackBase):
def __init__(self, *args, **kwargs):
super(ResultsCollector, self).__init__(*args, **kwargs)
self.host_ok = {}
self.host_unreachable = {}
self.host_failed = {}
def v2_runner_on_unreachable(self, result):
self.host_unreachable[result._host.get_name()] = result
def v2_runner_on_ok(self, result, *args, **kwargs):
self.host_ok[result._host.get_name()] = result
def v2_runner_on_failed(self, result, *args, **kwargs):
self.host_failed[result._host.get_name()] = result
def main():
Options = namedtuple('Options', ['connection','module_path', 'forks', 'remote_user',
'private_key_file', 'ssh_common_args', 'ssh_extra_args', 'sftp_extra_args',
'scp_extra_args', 'become', 'become_method', 'become_user', 'verbosity', 'check'])
# initialize needed objects
variable_manager = VariableManager()
loader = DataLoader()
options = Options(connection='smart', module_path='/usr/share/ansible', forks=100,
remote_user=None, private_key_file=None, ssh_common_args=None, ssh_extra_args=None,
sftp_extra_args=None, scp_extra_args=None, become=None, become_method=None,
become_user=None, verbosity=None, check=False)
passwords = dict()
# create inventory and pass to var manager
    host_list = ['localhost']  # placeholder inventory; adjust for your environment
    inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=host_list)
variable_manager.set_inventory(inventory)
# create play with tasks
    play_source = dict(
        name = "Ansible Play",
        hosts = 'localhost',  # placeholder target pattern
        gather_facts = 'no',
        tasks = [ dict(action=dict(module='command', args=dict(cmd='/usr/bin/uptime'))) ]
    )
play = Play().load(play_source, variable_manager=variable_manager, loader=loader)
# actually run it
tqm = None
callback = ResultsCollector()
try:
tqm = TaskQueueManager(
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
options=options,
passwords=passwords,
)
tqm._stdout_callback = callback
result = tqm.run(play)
finally:
if tqm is not None:
tqm.cleanup()
# print "UP ***********"
for host, result in callback.host_ok.items():
        print('{} >>> {}'.format(host, result._result['stdout']))
# print "FAILED *******"
# for host, result in callback.host_failed.items():
# print '{} >>> {}'.format(host, result._result['msg'])
# print "DOWN *********"
# for host, result in callback.host_unreachable.items():
# print '{} >>> {}'.format(host, result._result['msg'])
if __name__ == '__main__':
main()
``` |
{
"source": "592767809/ykdl",
"score": 2
} |
#### File: extractors/bilibili/live.py
```python
from .._common import *
class BiliLive(Extractor):
name = 'Bilibili live (哔哩哔哩直播)'
profile_2_id = {
'杜比': 'unknown', # FIXME: placeholder
'4K': '4K',
'原画': 'OG',
'蓝光': 'BD',
'超清': 'TD',
'高清': 'HD',
'流畅': 'SD'
}
def live_status(self):
id = match1(self.url, '/(\d+)')
data = get_response(
'https://api.live.bilibili.com/room/v1/Room/room_init',
params={'id': id}, cache=False).json()
assert data['code'] == 0, data['msg']
data = data['data']
self.vid = data['room_id'], str(data['uid'])
live_status = data['live_status']
assert not data['is_locked'], '房间已封禁'
assert not data['encrypted'], '房间已加密'
assert live_status > 0, '主播正在觅食......'
return live_status
def list_only(self):
return self.live_status() == 2
def prepare(self):
info = MediaInfo(self.name, True)
room_id, uid = self.vid
data = get_response(
'https://api.live.bilibili.com/room/v1/Room/get_status_info_by_uids',
params={'uids[]': uid}, cache=False).json()
assert data['code'] == 0, data['msg']
data = data['data'][uid]
info.title = '{data[title]} - {data[uname]}'.format(**vars())
info.add_comment(data['tag_name'])
g_qn_desc = None
aqlts = set()
aqlts_p = set()
size = float('inf')
def get_live_info(qn=1):
data = get_response(
'https://api.live.bilibili.com/xlive/web-room/v2/index/getRoomPlayInfo',
params={
'room_id': room_id,
'protocol': '0,1', # 0 = http_stream, 1 = http_hls
'format': '0,1,2',
'codec': '0,1', # 0 = avc, 1 = hevc
'qn': qn,
'platform': 'web',
'ptype': 8,
'dolby': 5
}, cache=False).json()
assert data['code'] == 0, data['msg']
data = data['data']['playurl_info']['playurl']
nonlocal g_qn_desc, aqlts
if g_qn_desc is None:
g_qn_desc = {x['qn']: x['desc'] for x in data['g_qn_desc']}
qlt = None
for stream in data['stream']:
for format in stream['format']:
for codec in format['codec']:
aqlts.update(x for x in codec['accept_qn']
if x not in aqlts_p)
if qlt is None:
qlt = codec['current_qn']
prf = g_qn_desc[qlt]
st = self.profile_2_id[prf]
if 'http_hls' in stream['protocol_name']:
ext = 'm3u8'
st += '-hls'
else:
ext = format['format_name']
if codec['codec_name'] == 'hevc':
st += '-h265'
if st in info.streams:
self.logger.debug('skip stream: [ %s %s %s ]',
stream['protocol_name'],
format['format_name'],
codec['codec_name'],)
continue
url_info = random.choice(codec['url_info'])
url = url_info['host'] + codec['base_url'] + url_info['extra']
info.streams[st] = {
'container': ext,
'video_profile': prf,
'src' : [url],
'size': size
}
if qn == 1:
aqlts.remove(qlt)
aqlts_p.add(qlt)
while aqlts:
qlt = aqlts.pop()
aqlts_p.add(qlt)
get_live_info(qlt)
get_live_info()
        info.extra.referer = 'https://live.bilibili.com/'
return info
def prepare_list(self):
from .video import site
if self.vid is None:
self.live_status()
room_id, uid = self.vid
self.start = -1 # skip is not allowed
while True:
data = get_response(
'https://api.live.bilibili.com/live/getRoundPlayVideo',
params={'room_id': room_id}, cache=False).json()
assert data['code'] == 0, data['msg']
bvid_url = data['data'].get('bvid_url')
assert bvid_url, '轮播结束'
info = site.parser(bvid_url)
info.site = '哔哩哔哩轮播'
info.title = '(轮播) ' + info.title
yield info
site = BiliLive()
```
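To make the nested `stream → format → codec` walk in `prepare()` easier to follow, here is a toy, self-contained version run against a hand-written dict; the field names mirror the payload the extractor parses, but every value is made up:

```python
# Illustrative only: walks a fake playurl payload the same way prepare() does.
playurl = {
    'stream': [{
        'protocol_name': 'http_hls',
        'format': [{
            'format_name': 'ts',
            'codec': [{
                'codec_name': 'avc',
                'current_qn': 10000,
                'accept_qn': [10000, 400, 250],
                'base_url': '/live.m3u8',
                'url_info': [{'host': 'https://cdn.example.invalid', 'extra': '?sig=x'}],
            }],
        }],
    }],
}
for stream in playurl['stream']:
    for fmt in stream['format']:
        for codec in fmt['codec']:
            url = codec['url_info'][0]['host'] + codec['base_url'] + codec['url_info'][0]['extra']
            print(stream['protocol_name'], fmt['format_name'], codec['codec_name'],
                  codec['accept_qn'], url)
```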
#### File: ykdl/extractors/_byted.py
```python
from ._common import *
js_dom = '''
var window = this,
document = {{referrer: '{url}'}},
location = {{href: '{url}', protocol: 'https'}},
navigator = {{userAgent: '{ua}'}};
'''
js_acrawler = None
def get_cookies_d(url):
return {c.name: c.value
for c in get_cookies(urlsplit(url).hostname, '/')}
def cd2cs(cd):
return ';'.join('='.join(kv) for kv in cd.items())
def get_signer(url):
assert JSEngine, "No JS Interpreter found, can't load byted acrawler!"
global js_acrawler
if js_acrawler is None:
js_acrawler = get_pkgdata_str(__name__, '_byted_acrawler.js',
'https://lf3-cdn-tos.bytescm.com/obj/rc-web-sdk/acrawler.js')
js_ctx = JSEngine(js_dom.format(url=url, ua=fake_headers['User-Agent']))
js_ctx.append(js_acrawler)
def sign(*args):
return js_ctx.call('byted_acrawler.sign', *args)
return sign
def get_signed_cookies(url):
assert JSEngine, "No JS Interpreter found, can't load byted acrawler!"
install_cookie()
get_response(url)
cookies = get_cookies_d(url)
cookies.update({
'__ac_signature': get_signer(url)('', cookies['__ac_nonce']),
'__ac_referer': '__ac_blank'
})
uninstall_cookie()
return cd2cs(cookies)
def get_ttwid_cookies(url):
install_cookie()
get_response(url)
cookies = get_cookies_d(url)
cookies['ttwid_date'] = '1'
get_response(url, headers={'Cookie': cd2cs(cookies)})
cookies = get_cookies_d(url)
uninstall_cookie()
return cd2cs(cookies)
cookies = None
_get_content = get_content
def get_content(url):
global cookies
if cookies is None:
if 'douyin.' in url:
cookies = get_signed_cookies(url)
elif 'ixigua.' in url:
cookies = get_ttwid_cookies(url)
return _get_content(url, headers={'Cookie': cookies})
```
#### File: extractors/le/__init__.py
```python
from .._common import *
def get_extractor(url):
add_header('User-Agent',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) '
'AppleWebKit/603.1.30 (KHTML, like Gecko) '
'Version/10.1 Safari/603.1.30')
if 'lunbo' in url:
from . import lunbo as s
elif match(url, '(live[\./]|/izt/)'):
from . import live as s
elif 'bcloud' in url:
from . import letvcloud as s
else:
from . import le as s
return s.site, url
```
#### File: extractors/le/le.py
```python
from .._common import *
def calcTimeKey(t):
ror = lambda val, r_bits: ((val & (2**32-1)) >> r_bits%32) | \
(val << (32-(r_bits%32)) & (2**32-1))
magic = 185025305
return ror(t, magic % 17) ^ magic
def decode(data):
version = data[0:5]
if version.lower() == b'vc_01':
#get real m3u8
loc2 = bytearray(data[5:])
length = len(loc2)
loc4 = [0]*(2*length)
for i in range(length):
loc4[2*i] = loc2[i] >> 4
            loc4[2*i+1] = loc2[i] & 15
        loc6 = loc4[len(loc4)-11:] + loc4[:len(loc4)-11]
        loc7 = bytearray(length)
        for i in range(length):
            loc7[i] = (loc6[2*i] << 4) + loc6[2*i+1]
return loc7
else:
# directly return
return data
class Letv(Extractor):
name = '乐视 (Letv)'
stream_2_id_profile = {
'1080p': ['BD', '1080P'],
'1300': ['TD', '超清'],
'1000': ['HD', '高清'],
'720p': ['SD', '标清'],
'350': ['LD', '流畅']
}
__STREAM_TEMP__ = []
def prepare(self):
info = MediaInfo(self.name)
stream_temp = {st: None for st in self.stream_2_id_profile.keys()}
self.__STREAM_TEMP__.append(stream_temp)
if not self.vid:
self.vid = match1(self.url, 'vplay/(\d+).html', '#record/(\d+)')
#normal process
data = get_response('http://player-pc.le.com/mms/out/video/playJson',
params={
'id': self.vid,
'platid': 1,
'splatid': 105,
'format': 1,
'tkey': calcTimeKey(int(time.time())),
'domain': 'www.le.com',
'region': 'cn',
'source': 1000,
'accessyx': 1
}).json()['msgs']
info.title = data['playurl']['title']
for stream, sdp in data['playurl']['dispatch'].items():
s_url = data['playurl']['domain'][0] + sdp[0]
data2 = get_response(s_url, params={
'm3v': 1,
'termid': 1,
'format': 1,
'hwtype': 'un',
'ostype': 'MacOS10.12.4',
'p1': 1,
'p2': 10,
'p3': '-',
'expect': '3',
'tn': random.random(),
'vid': self.vid,
'uuid': hash.sha1(s_url) + '_0',
'tss': 'ios'
}).json()
# hold on ! more things to do
# to decode m3u8 (encoded)
m3u8 = get_content(data2['location'],
params={
'r': int(time.time() * 1000),
'appid': 500
},
charset='ignore')
m3u8_list = decode(m3u8)
stream_id, video_profile = self.stream_2_id_profile[stream]
info.streams[stream_id] = {
'container': 'm3u8',
'video_profile': video_profile,
'size' : 0
}
stream_temp[stream] = compact_tempfile(mode='w+b', suffix='.m3u8')
stream_temp[stream].write(m3u8_list)
info.streams[stream_id]['src'] = [stream_temp[stream].name]
stream_temp[stream].flush()
return info
def prepare_list(self):
html = get_content(self.url)
return matchall(html, 'vid="(\d+)"')
site = Letv()
```
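`calcTimeKey` above is just a 32-bit rotate-right of the timestamp followed by an XOR with a fixed magic number; a quick standalone re-derivation (purely illustrative, no network access needed):

```python
def ror32(val, r_bits):
    # rotate a 32-bit value right by r_bits
    r_bits %= 32
    return ((val & 0xFFFFFFFF) >> r_bits) | ((val << (32 - r_bits)) & 0xFFFFFFFF)

magic = 185025305
t = 1700000000                    # any Unix timestamp
tkey = ror32(t, magic % 17) ^ magic
print(tkey)
```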
#### File: extractors/le/live.py
```python
from .._common import *
def get_playback(vid):
from .le import Letv
site = Letv()
site.name = 'Le Live(乐视直播回看)'
site.vid = vid
return site.prepare()
class LeLive(Extractor):
name = 'Le Live(乐视直播)'
stream_2_id_profile = {
'flv_1080p3m': ['BD', '1080p'],
'flv_1080p' : ['BD', '1080p'],
'flv_1300' : ['TD', '超清'],
'flv_1000' : ['HD', '高清'],
'flv_720p' : ['SD', '标清'],
'flv_350' : ['LD', '流畅']
}
def prepare(self):
self.vid = match1(self.url, 'd=(\d+)', 'live/(\d+)')
if '/izt/' in self.url:
vid = self.vid
if not vid:
html = get_content(self.url)
vid = match1(html, 'vid\s*:\s*"(\d+)",', 'vid="(\d+)"')
return get_playback(vid)
else:
if not self.vid:
html = get_content(self.url)
self.vid = match1(html, 'liveId\s*:\s*"(\d+)"')
live_data = get_response(
'http://api.live.letv.com/v1/liveRoom/single/1001',
params={'id': self.vid}).json()
if live_data.get('status') != 2:
return get_playback(live_data['recordingId'])
            # if the live video is dead we already returned above; the following code only runs for an ongoing stream
live_data = get_response(
'http://player.pc.le.com/player/startup_by_pid/1001/'
+ self.vid,
params={'host': 'live.le.com'}).json()
info = MediaInfo(self.name, True)
info.title = live_data['title']
for st in live_data['rows']:
stream, profile = self.stream_2_id_profile[st['rateType']]
data = get_response(st['streamUrl'],
params={
'format': 1,
'expect': 2,
'termid': 1,
'platid': 10,
'playid': 1,
'sign': 'live_web',
'splatid': 1001,
'vkit': 20161017,
'station': self.vid
}).json()
src = data['location']
info.streams[stream] = {
'container': 'm3u8',
'video_profile': profile,
'size' : float('inf'),
'src' : [src]
}
return info
def prepare_list(self):
html = get_content(self.url)
vids = matchall(html, 'vid="(\d+)"')
# fake urls
return ['http://live.le.com/izt/vid={}'.format(vid) for vid in vids]
site = LeLive()
```
#### File: extractors/sina/video.py
```python
from .._common import *
def get_realurl(url, vid):
resp = get_response(url, params={'vid': vid})
if resp.locations:
return resp.url
else:
return matchall(resp, 'CDATA\[([^\]]+)')[1]
class Sina(Extractor):
name = '新浪视频 (sina)'
def prepare(self):
info = MediaInfo(self.name)
if not self.vid:
self.vid = match1(self.url, 'video_id=(\d+)',
'#(\d{5,})',
'(\d{5,})\.swf')
if not self.vid:
html = get_content(self.url)
self.vid = match1(html, 'video_id[\'"]?\s*[:=]\s*[\'"]?(\d+)')
assert self.vid, "can't get vid"
data = get_response('http://s.video.sina.com.cn/video/h5play',
params={'video_id': self.vid}).json()
data = data['data']
info.title = data['title']
for t in ['mp4', 'flv', '3gp']:
video_info = data['videos'].get(t)
if video_info:
break
for profile in video_info:
v = video_info[profile]
r_url = get_realurl(v['file_api'], v['file_id'])
info.streams[profile] = {
'container': v['type'],
'video_profile': profile,
'src': [r_url],
'size' : 0
}
return info
def prepare_list(self):
html = get_content(self.url)
return matchall(html, 'video_id: ([^,]+)')
site = Sina()
```
#### File: ykdl/extractors/yinyuetai.py
```python
from ._common import *
class YinYueTai(Extractor):
name = '音悦台 (YinYueTai)'
def prepare(self):
info = MediaInfo(self.name)
info.extra.referer = 'https://www.yinyuetai.com/'
if not self.vid:
self.vid = match1(self.url,'\Wid=(\d+)')
data = get_response('https://data.yinyuetai.com/video/getVideoInfo',
params={'id': self.vid}).json()
assert not data['delFlag'], 'MTV has been deleted!'
info.title = data['videoName']
info.artist = data['artistName']
url = data['videoUrl']
info.streams['current'] = {
'container': url_info(url)[1],
'video_profile': 'current',
'src' : [url]
}
return info
site = YinYueTai()
``` |
{
"source": "59-29/bot",
"score": 4
} |
#### File: 59-29/bot/commands.py
```python
from discord.ext import commands
import discord
import collections
import datetime
import time
import pprint
import sys
def find_color(ctx):
"""Find the bot's rendered color. If it's the default color or we're in a DM, return Discord's "greyple" color"""
try:
if ctx.guild.me.color == discord.Color.default():
color = discord.Color.greyple()
else:
color = ctx.guild.me.color
except AttributeError: #* If it's a DM channel
color = discord.Color.greyple()
return color
class Commands(commands.Cog):
"""Commands for the N-Word Counter"""
def __init__(self, bot):
self.bot = bot
@commands.command()
async def help(self, ctx):
"""This help command!"""
cmds = sorted([c for c in self.bot.commands if not c.hidden], key=lambda c: c.name)
embed = discord.Embed(
title="N-Word Counter: Help Command",
description="I keep track of every time a user says the N-word, hard R or not. I'm a "
"pretty simple bot to use. My prefix is an @mention, meaning you'll have "
f"to put {self.bot.user.mention} before every command."
"\n\nHere's a short list of my commands:",
color=find_color(ctx))
embed.set_footer(
text="Note: I don't count N-words said in the past before I joined this server")
for c in cmds:
embed.add_field(name=c.name, value=c.help, inline=False)
await ctx.send(embed=embed)
@commands.command(aliases=["info"])
async def about(self, ctx):
"""Some basic info about me"""
embed = discord.Embed(
title=str(self.bot.user), description=self.bot.app_info.description +
f"\n\n**User/Client ID**: {self.bot.app_info.id}", color=find_color(ctx))
embed.set_thumbnail(url=self.bot.app_info.icon_url)
embed.add_field(name="Owner", value=self.bot.app_info.owner)
embed.add_field(name="Server Count", value=len(self.bot.guilds))
embed.add_field(name="User Count", value=len(self.bot.users))
embed.add_field(
name="Language",
value=f"Python {sys.version_info[0]}.{sys.version_info[1]}.{sys.version_info[2]}")
embed.add_field(
name="Library", value="[discord.py](https://github.com/Rapptz/discord.py)")
embed.add_field(
name="License",
value="[CC0 1.0 Universal](https://creativecommons.org/publicdomain/zero/1.0/)")
embed.add_field(
name="Source Code", value="https://github.com/NWordCounter/bot", inline=False)
await ctx.send(embed=embed)
@commands.command()
async def count(self, ctx, user: discord.User=None):
"""Get the number of times a user has said the N-Word
Format like this: `count <@mention user>`
If you don't mention a user, I'll get **your** N-word count
"""
if user is None:
user = ctx.author
if user == self.bot.user:
return await ctx.send("You crazy? I'd never, ever say the ***nigga***-word")
if user.bot:
return await ctx.send(
"I don't count N-Words said by bots. Can you imagine how hectic that would be?")
try:
count = self.bot.nwords[user.id]
except:
return await ctx.send(f"{user.mention} has not said the N-word yet. Good for them")
if count["total"]:
msg = (f"{user.mention} has said the N-word **{count['total']:,} "
f"time{'' if count['total'] == 1 else 's'}**")
if count["hard_r"]:
msg += f", __{count['hard_r']:,} of which had a hard-R__"
if "last_time" in count:
since_last = count["total"] - count["last_time"]
if since_last:
msg += (f".\n\nThey've said the N-word __{since_last:,} "
f"time{'' if since_last == 1 else 's'}__ since they were last "
"investigated")
await ctx.send(msg)
self.bot.nwords[user.id]["last_time"] = self.bot.nwords[user.id]["total"]
else:
await ctx.send(f"{user.mention} has not said the N-word yet. Good for them")
@count.error
async def count_error(self, ctx, exc):
if isinstance(exc, commands.BadArgument):
return await ctx.send(exc)
@commands.command()
async def invite(self, ctx):
"""Sends an invite link so you can invite me to your own server"""
await ctx.send("Here's my invite link so I can count N-words on your server too:\n"
f"https://discordapp.com/oauth2/authorize?client_id={self.bot.app_info.id}"
"&scope=bot&permissions=8")
@commands.command()
async def stats(self, ctx):
"""View my statistics"""
await ctx.channel.trigger_typing()
uptime = datetime.datetime.utcnow() - self.bot.started_at
#* This code was copied from my other bot, MAT
        y = int(uptime.total_seconds()) // 31557600 #* Number of seconds in 365.25 days
mo = int(uptime.total_seconds()) // 2592000 % 12 #* Number of seconds in 30 days
d = int(uptime.total_seconds()) // 86400 % 30 #* Number of seconds in 1 day
h = int(uptime.total_seconds()) // 3600 % 24 #* Number of seconds in 1 hour
mi = int(uptime.total_seconds()) // 60 % 60 #* etc.
se = int(uptime.total_seconds()) % 60
frmtd_uptime = []
if y != 0:
frmtd_uptime.append(f"{y}y")
if mo != 0:
frmtd_uptime.append(f"{mo}mo")
if d != 0:
frmtd_uptime.append(f"{d}d")
if h != 0:
frmtd_uptime.append(f"{h}hr")
if mi != 0:
frmtd_uptime.append(f"{mi}m")
if se != 0:
frmtd_uptime.append(f"{se}s")
embed = discord.Embed(
description=f"User ID: {self.bot.user.id}",
timestamp=datetime.datetime.utcnow(),
color=find_color(ctx))
embed.add_field(name="Server Count", value=f"{len(self.bot.guilds):,} servers")
embed.add_field(name="User Count", value=f"{len(self.bot.users):,} unique users")
embed.add_field(
name="Channel Count",
value=f"{len(list(self.bot.get_all_channels()) + self.bot.private_channels):,} "
"channels")
embed.add_field(
name="Memory Usage",
value=f"{round(self.bot.process.memory_info().rss / 1000000, 2)} MB")
embed.add_field(name="Latency/Ping", value=f"{round(self.bot.latency * 1000, 2)}ms")
embed.add_field(name="Uptime", value=" ".join(frmtd_uptime) + " since last restart")
embed.add_field(
name="Number of Users Who Have Said the N-Word",
value=f"{len(self.bot.nwords):,}",
inline=False)
embed.add_field(
name="Total N-Words Counted",
value=f"{self.bot.nwords[0]['total']:,} "
f"({self.bot.nwords[0]['hard_r']:,} with hard-R)",
inline=False)
embed.set_author(name="N-Word Counter Bot: Statistics", icon_url=self.bot.user.avatar_url)
embed.set_footer(text="These statistics are accurate as of:")
await ctx.send(embed=embed)
@commands.command(aliases=["leaderboard", "high"])
@commands.guild_only()
async def top(self, ctx, param: str=None):
"""See the leaderboard of the top N-word users of this server. Do `top global` to see the top users across all servers
Note: If a user said N-words on another server that I'm also on, those will be taken into account
"""
await ctx.channel.trigger_typing()
def create_leaderboard():
leaderboard = {}
if param == "global":
for u, n in self.bot.nwords.items():
if self.bot.get_user(u):
leaderboard.update({self.bot.get_user(u): n["total"]})
leaderboard = dict(collections.Counter(leaderboard).most_common(10))
else:
for m in ctx.guild.members:
if m.id in self.bot.nwords and not m.bot:
if self.bot.nwords[m.id]["total"]:
leaderboard.update({m: self.bot.nwords[m.id]["total"]})
leaderboard = dict(collections.Counter(leaderboard).most_common(10))
return leaderboard
leaderboard = await self.bot.loop.run_in_executor(None, create_leaderboard)
if not len(leaderboard):
return await ctx.send("No one on this server has said the N-word yet")
description = "\n"
counter = 1
for m, c in leaderboard.items():
description += (f"**{counter}.** {m if param == 'global' else m.mention} - __{c:,} "
f"time{'' if c == 1 else 's'}__ ({self.bot.nwords[m.id]['hard_r']:,} "
"with hard-R)\n")
counter += 1
description = description.replace("**1.**", ":first_place:").replace("**2.**", ":second_place:").replace("**3.**", ":third_place:")
embed = discord.Embed(description=description, color=find_color(ctx),
timestamp=datetime.datetime.utcnow())
if param == "global":
embed.set_author(
name=f"Top N-Word Users of All Time")
else:
embed.set_author(
name=f"Top N-Word Users of {ctx.guild.name}", icon_url=ctx.guild.icon_url)
embed.set_footer(
text="These listings are accurate as of:", icon_url=self.bot.user.avatar_url)
await ctx.send(embed=embed)
@top.error
async def top_error(self, ctx, exc):
if isinstance(exc, commands.NoPrivateMessage):
return await ctx.send(exc)
@commands.command(hidden=True)
@commands.is_owner()
async def edit(self, ctx, user_id: int, total: int, hard_r: int, last_time: int=None):
"""Edit a user's entry in the dict or add a new one"""
if last_time:
self.bot.nwords[user_id] = {"id": user_id, "total": total, "hard_r": hard_r, "last_time": last_time}
else:
self.bot.nwords[user_id] = {"id": user_id, "total": total, "hard_r": hard_r}
await ctx.send("Done")
@commands.command(hidden=True)
@commands.is_owner()
async def pop(self, ctx, user_id: int):
"""Delete a user's entry from the dict"""
try:
self.bot.nwords.pop(user_id)
await ctx.send("Done")
except KeyError as e:
await ctx.send(f"KeyError: ```{e}```")
@commands.command(hidden=True)
@commands.is_owner()
async def execute(self, ctx, *, query):
"""Execute a query in the database"""
try:
with ctx.channel.typing():
async with self.bot.pool.acquire() as conn:
result = await conn.execute(query)
await ctx.send(f"Query complete:```{result}```")
except Exception as e:
await ctx.send(f"Query failed:```{e}```")
@commands.command(hidden=True)
@commands.is_owner()
async def fetch(self, ctx, *, query):
"""Run a query in the database and fetch the result"""
try:
with ctx.channel.typing():
async with self.bot.pool.acquire() as conn:
result = await conn.fetch(query)
fmtd_result = pprint.pformat([dict(i) for i in result])
await ctx.send(f"Query complete:```{fmtd_result}```")
except Exception as e:
await ctx.send(f"Query failed:```{e}```")
@commands.command(aliases=["resetstatus"], hidden=True)
@commands.is_owner()
async def restartstatus(self, ctx):
await self.bot.change_presence(status=discord.Status.dnd, activity=discord.Activity(
name=f"for N-Words on {len(self.bot.guilds)} servers",
type=discord.ActivityType.watching))
await ctx.send("Reset playing status")
@commands.command(hidden=True)
@commands.is_owner()
async def setstatus(self, ctx, status):
"""Change the bot's presence"""
if status.startswith("on"):
await self.bot.change_presence(status=discord.Status.online)
elif status.startswith("id"):
await self.bot.change_presence(status=discord.Status.idle)
elif status.startswith("d"):
await self.bot.change_presence(status=discord.Status.dnd)
elif status.startswith("off") or status.startswith("in"):
await self.bot.change_presence(status=discord.Status.invisible)
else:
await ctx.send("Invalid status")
await ctx.send("Set new status")
@commands.command(hidden=True)
@commands.is_owner()
async def updatedb(self, ctx):
temp = await ctx.send("Manually updating... This may take a few minutes... Please wait...")
with ctx.channel.typing():
start = time.perf_counter()
async with self.bot.pool.acquire() as conn:
await conn.execute("""
INSERT INTO nwords
(id)
VALUES {}
ON CONFLICT
DO NOTHING
;""".format(", ".join([f"({u})" for u in self.bot.nwords])))
for data in self.bot.nwords.copy().values():
await conn.execute("""
UPDATE nwords
SET total = {},
hard_r = {}
WHERE id = {}
;""".format(data["total"], data["hard_r"], data["id"]))
delta = time.perf_counter() - start
mi = int(delta) // 60
sec = int(delta) % 60
ms = round(delta * 1000 % 1000)
await temp.delete()
await ctx.send(f"Finished updating database ({mi}m {sec}s {ms}ms)")
def setup(bot):
bot.add_cog(Commands(bot))
``` |
{
"source": "594422814/ContrastCorr",
"score": 2
} |
#### File: ContrastCorr/libs/model.py
```python
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from libs.net_utils import NLM_NC_woSoft
from libs.utils import *
from libs.autoencoder import encoder3, decoder3, encoder_res18, encoder_res50
import pdb
class NLM_woSoft(nn.Module):
"""
Non-local mean layer w/o softmax on affinity
"""
def __init__(self):
super(NLM_woSoft, self).__init__()
def forward(self, in1, in2):
n,c,h,w = in1.size()
in1 = in1.view(n,c,-1)
in2 = in2.view(n,c,-1)
affinity = torch.bmm(in1.permute(0,2,1), in2)
return affinity
def transform(aff, frame1):
"""
Given aff, copy from frame1 to construct frame2.
INPUTS:
- aff: (h*w)*(h*w) affinity matrix
- frame1: n*c*h*w feature map
"""
b,c,h,w = frame1.size()
frame1 = frame1.view(b,c,-1)
frame2 = torch.bmm(frame1, aff)
return frame2.view(b,c,h,w)
class normalize(nn.Module):
"""Given mean: (R, G, B) and std: (R, G, B),
will normalize each channel of the torch.*Tensor, i.e.
channel = (channel - mean) / std
"""
def __init__(self, mean, std = (1.0,1.0,1.0)):
super(normalize, self).__init__()
self.mean = nn.Parameter(torch.FloatTensor(mean).cuda(), requires_grad=False)
self.std = nn.Parameter(torch.FloatTensor(std).cuda(), requires_grad=False)
def forward(self, frames):
b,c,h,w = frames.size()
frames = (frames - self.mean.view(1,3,1,1).repeat(b,1,h,w))/self.std.view(1,3,1,1).repeat(b,1,h,w)
return frames
def create_flat_grid(F_size, GPU=True):
"""
INPUTS:
- F_size: feature size
OUTPUT:
- return a standard grid coordinate
"""
b, c, h, w = F_size
theta = torch.tensor([[1,0,0],[0,1,0]])
theta = theta.unsqueeze(0).repeat(b,1,1)
theta = theta.float()
# grid is a uniform grid with left top (-1,1) and right bottom (1,1)
# b * (h*w) * 2
grid = torch.nn.functional.affine_grid(theta, F_size)
grid[:,:,:,0] = (grid[:,:,:,0]+1)/2 * w
grid[:,:,:,1] = (grid[:,:,:,1]+1)/2 * h
grid_flat = grid.view(b,-1,2)
if(GPU):
grid_flat = grid_flat.cuda()
return grid_flat
class Model_switchGTfixdot_swCC_Res(nn.Module):
def __init__(self, encoder_dir = None, decoder_dir = None,
temp = None, pretrainRes = False, uselayer=4):
'''
        For switchable concentration loss
Using Resnet18
'''
super(Model_switchGTfixdot_swCC_Res, self).__init__()
self.gray_encoder = encoder_res18(pretrained = pretrainRes, uselayer=uselayer)
# self.gray_encoder = encoder_res50(pretrained = pretrainRes, uselayer=uselayer)
self.rgb_encoder = encoder3(reduce = True)
# self.nlm = NLM_woSoft()
# testing stage: mutual correlation for affinity computation
self.nlm = NLM_NC_woSoft()
self.decoder = decoder3(reduce = True)
self.temp = temp
self.softmax = nn.Softmax(dim=1)
self.cos_window = torch.Tensor(np.outer(np.hanning(40), np.hanning(40))).cuda()
self.normalize = normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
self.R = 8 # window size
self.P = self.R * 2 + 1
self.topk = 5
if(not encoder_dir is None):
print("Using pretrained encoders: %s."%encoder_dir)
self.rgb_encoder.load_state_dict(torch.load(encoder_dir))
if(not decoder_dir is None):
print("Using pretrained decoders: %s."%decoder_dir)
self.decoder.load_state_dict(torch.load(decoder_dir))
for param in self.decoder.parameters():
param.requires_grad = False
for param in self.rgb_encoder.parameters():
param.requires_grad = False
def forward(self, gray1, gray2, color1=None, color2=None):
gray1 = (gray1 + 1) / 2
gray2 = (gray2 + 1) / 2
gray1 = self.normalize(gray1)
gray2 = self.normalize(gray2)
Fgray1 = self.gray_encoder(gray1)
Fgray2 = self.gray_encoder(gray2)
aff = self.nlm(Fgray1, Fgray2)
aff_norm = self.softmax(aff * self.temp)
if(color1 is None):
# for testing
return aff_norm, Fgray1, Fgray2
Fcolor1 = self.rgb_encoder(color1)
Fcolor2 = self.rgb_encoder(color2)
Fcolor2_est = transform(aff_norm, Fcolor1)
pred2 = self.decoder(Fcolor2_est)
Fcolor1_est = transform(aff_norm.transpose(1,2), Fcolor2)
pred1 = self.decoder(Fcolor1_est)
return pred1, pred2, aff_norm, aff, Fgray1, Fgray2
def propagate_neighbor_frames(self, gray2, gray1, mask):
# propagate the mask of gray1 to gray2
gray1 = (gray1 + 1) / 2
gray2 = (gray2 + 1) / 2
gray1 = self.normalize(gray1)
gray2 = self.normalize(gray2)
Fgray1 = self.gray_encoder(gray1)
Fgray2 = self.gray_encoder(gray2)
# prepare mask
b, feat_c, feat_h, feat_w = Fgray1.size()
_, mask_c, _, _ = mask.size()
pad_mask = F.pad(mask, (self.R, self.R, self.R, self.R), mode='replicate')
window_mask = F.unfold(pad_mask, kernel_size=self.P)
window_mask = window_mask.reshape([b, mask_c, self.P*self.P, feat_h*feat_w])
# affinity
pad_Fgray1 = F.pad(Fgray1, (self.R, self.R, self.R, self.R), mode='constant', value=0)
window_Fgray1 = F.unfold(pad_Fgray1, kernel_size=self.P)
window_Fgray1 = window_Fgray1.reshape([b, feat_c, self.P*self.P, feat_h*feat_w])
Fgray2 = Fgray2.reshape([b, feat_c, 1, -1]) # [B, C, 1, window_num]
aff = (Fgray2 * window_Fgray1).sum(dim=1)
aff[aff == 0] = -1e10 # discount padding at edge for softmax
aff = F.softmax(aff*self.temp, dim=1) # temp: 1
# top-k selection
b, N1, N2 = aff.size()
tk_val, tk_idx = torch.topk(aff, dim = 1, k = self.topk)
tk_val_min, _ = torch.min(tk_val, dim=1)
tk_val_min = tk_val_min.view(b, 1, N2)
aff[tk_val_min > aff] = 0
aff = aff.unsqueeze(1)
# predicted mask of gray2
out = (aff * window_mask).sum(dim=2).reshape([b, mask_c, feat_h, feat_w])
return out
```
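For intuition, a tiny self-contained sketch of the affinity-based feature copy that `NLM_woSoft` plus `transform` implement above, using random tensors (shapes only, no trained weights):

```python
import torch
import torch.nn.functional as F

b, c, h, w = 2, 16, 8, 8
feat1 = torch.randn(b, c, h, w)      # reference-frame features
feat2 = torch.randn(b, c, h, w)      # target-frame features

# pixel-to-pixel affinity, softmax over the reference axis (dim=1), as in the model
aff = torch.bmm(feat1.flatten(2).transpose(1, 2), feat2.flatten(2))   # b, h*w, h*w
aff = F.softmax(aff, dim=1)

# "copy" reference features into the target layout, as transform() does
feat2_est = torch.bmm(feat1.flatten(2), aff).view(b, c, h, w)
print(feat2_est.shape)               # torch.Size([2, 16, 8, 8])
```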
#### File: 594422814/ContrastCorr/model.py
```python
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from libs.net_utils import NLM, NLM_dot, NLM_woSoft, NLM_NC_woSoft, Batch_Contrastive
from torchvision.models import resnet18
from libs.autoencoder import encoder3, decoder3, encoder_res18, encoder_res50
from libs.utils import *
import pdb
def transform(aff, frame1):
"""
Given aff, copy from frame1 to construct frame2.
INPUTS:
- aff: (h*w)*(h*w) affinity matrix
- frame1: n*c*h*w feature map
"""
b,c,h,w = frame1.size()
frame1 = frame1.view(b,c,-1)
frame2 = torch.bmm(frame1, aff)
return frame2.view(b,c,h,w)
class normalize(nn.Module):
"""Given mean: (R, G, B) and std: (R, G, B),
will normalize each channel of the torch.*Tensor, i.e.
channel = (channel - mean) / std
"""
def __init__(self, mean, std = (1.0,1.0,1.0)):
super(normalize, self).__init__()
self.mean = nn.Parameter(torch.FloatTensor(mean).cuda(), requires_grad=False)
self.std = nn.Parameter(torch.FloatTensor(std).cuda(), requires_grad=False)
def forward(self, frames):
b,c,h,w = frames.size()
frames = (frames - self.mean.view(1,3,1,1).repeat(b,1,h,w))/self.std.view(1,3,1,1).repeat(b,1,h,w)
return frames
def create_flat_grid(F_size, GPU=True):
"""
INPUTS:
- F_size: feature size
OUTPUT:
- return a standard grid coordinate
"""
b, c, h, w = F_size
theta = torch.tensor([[1,0,0],[0,1,0]])
theta = theta.unsqueeze(0).repeat(b,1,1)
theta = theta.float()
# grid is a uniform grid with left top (-1,1) and right bottom (1,1)
# b * (h*w) * 2
grid = torch.nn.functional.affine_grid(theta, F_size)
grid[:,:,:,0] = (grid[:,:,:,0]+1)/2 * w
grid[:,:,:,1] = (grid[:,:,:,1]+1)/2 * h
grid_flat = grid.view(b,-1,2)
if(GPU):
grid_flat = grid_flat.cuda()
return grid_flat
def coords2bbox(coords, patch_size, h_tar, w_tar):
"""
INPUTS:
- coords: coordinates of pixels in the next frame
- patch_size: patch size
- h_tar: target image height
    - w_tar: target image width
"""
b = coords.size(0)
center = torch.mean(coords, dim=1) # b * 2
center_repeat = center.unsqueeze(1).repeat(1,coords.size(1),1)
dis_x = torch.sqrt(torch.pow(coords[:,:,0] - center_repeat[:,:,0], 2))
dis_x = torch.mean(dis_x, dim=1).detach()
dis_y = torch.sqrt(torch.pow(coords[:,:,1] - center_repeat[:,:,1], 2))
dis_y = torch.mean(dis_y, dim=1).detach()
left = (center[:,0] - dis_x*2).view(b,1)
left[left < 0] = 0
right = (center[:,0] + dis_x*2).view(b,1)
right[right > w_tar] = w_tar
top = (center[:,1] - dis_y*2).view(b,1)
top[top < 0] = 0
bottom = (center[:,1] + dis_y*2).view(b,1)
bottom[bottom > h_tar] = h_tar
new_center = torch.cat((left,right,top,bottom),dim=1)
return new_center
def dropout2d(img1, img2):
# drop same layers for all images
if np.random.random() < 0.3:
return img1, img2
drop_ch_num = int(np.random.choice(np.arange(1, 3), 1))
drop_ch_ind = np.random.choice(np.arange(3), drop_ch_num, replace=False)
for dropout_ch in drop_ch_ind:
img1[:, dropout_ch] = 0
img2[:, dropout_ch] = 0
img1 *= (3 / (3 - drop_ch_num))
img2 *= (3 / (3 - drop_ch_num))
return img1, img2
class track_match_comb(nn.Module):
def __init__(self, pretrained, encoder_dir = None, decoder_dir = None, temp=1, Resnet = "r18", color_switch=True, coord_switch=True, contrastive=True):
super(track_match_comb, self).__init__()
if Resnet in "r18":
self.gray_encoder = encoder_res18(pretrained=pretrained, uselayer=4)
elif Resnet in "r50":
self.gray_encoder = encoder_res50(pretrained=pretrained, uselayer=4)
self.rgb_encoder = encoder3(reduce=True)
self.decoder = decoder3(reduce=True)
self.rgb_encoder.load_state_dict(torch.load(encoder_dir))
self.decoder.load_state_dict(torch.load(decoder_dir))
for param in self.decoder.parameters():
param.requires_grad = False
for param in self.rgb_encoder.parameters():
param.requires_grad = False
self.nlm = NLM_woSoft()
self.cont_model = Batch_Contrastive(temp=temp)
self.normalize = normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
self.softmax = nn.Softmax(dim=1)
self.temp = temp
self.grid_flat = None
self.grid_flat_crop = None
self.color_switch = color_switch
self.coord_switch = coord_switch
self.contrastive_flag = contrastive
self.self_reconstruction = True
def forward(self, img_ref, img_tar, warm_up=True, patch_size=None):
n, c, h_ref, w_ref = img_ref.size() # [b, 3, 256, 256]
n, c, h_tar, w_tar = img_tar.size()
# UVC algorithm uses the gray-scale images as the inputs.
# We do not use the gray-scale image but randomly drop the image channel, which slightly outperforms the gray-scale baseline.
gray_ref = copy.deepcopy(img_ref)
gray_tar = copy.deepcopy(img_tar)
gray_ref = (gray_ref + 1) / 2
gray_tar = (gray_tar + 1) / 2
gray_ref = self.normalize(gray_ref)
gray_tar = self.normalize(gray_tar)
# following CorrFlow method, channel dropout
gray_ref, gray_tar = dropout2d(gray_ref, gray_tar)
Fgray1 = self.gray_encoder(gray_ref)
Fgray2 = self.gray_encoder(gray_tar)
Fcolor1 = self.rgb_encoder(img_ref)
output = []
if warm_up:
aff = self.nlm(Fgray1, Fgray2)
aff_norm = self.softmax(aff * self.temp)
Fcolor2_est = transform(aff_norm, Fcolor1)
color2_est = self.decoder(Fcolor2_est)
output.append(color2_est)
output.append(aff)
if self.color_switch:
Fcolor2 = self.rgb_encoder(img_tar)
Fcolor1_est = transform(aff_norm.transpose(1,2), Fcolor2)
color1_est = self.decoder(Fcolor1_est)
output.append(color1_est)
if self.self_reconstruction:
self_aff1 = self.nlm(Fgray1, Fgray1)
self_aff1 -= (torch.eye(self_aff1.size(-1)).unsqueeze(0) * 1e10).cuda()
self_aff1_norm = self.softmax(self_aff1)
Fcolor1_reconstruct = transform(self_aff1_norm, Fcolor1)
color1_reconstruct = self.decoder(Fcolor1_reconstruct)
output.append(color1_reconstruct)
else:
if(self.grid_flat is None):
self.grid_flat = create_flat_grid(Fgray2.size())
aff_ref_tar = self.nlm(Fgray1, Fgray2)
aff_ref_tar = torch.nn.functional.softmax(aff_ref_tar * self.temp, dim=2)
coords = torch.bmm(aff_ref_tar, self.grid_flat)
center = torch.mean(coords, dim=1) # b * 2
# new_c = center2bbox(center, patch_size, h_tar, w_tar)
new_c = center2bbox(center, patch_size, Fgray2.size(2), Fgray2.size(3))
# print("center2bbox:", new_c, h_tar, w_tar)
Fgray2_crop = diff_crop(Fgray2, new_c[:,0], new_c[:,2], new_c[:,1], new_c[:,3], patch_size[1], patch_size[0])
# print("HERE: ", Fgray2.size(), Fgray1.size(), Fgray2_crop.size())
aff_p = self.nlm(Fgray1, Fgray2_crop)
aff_norm = self.softmax(aff_p * self.temp)
Fcolor2_est = transform(aff_norm, Fcolor1)
color2_est = self.decoder(Fcolor2_est)
Fcolor2_full = self.rgb_encoder(img_tar)
Fcolor2_crop = diff_crop(Fcolor2_full, new_c[:,0], new_c[:,2], new_c[:,1], new_c[:,3], patch_size[1], patch_size[0])
output.append(color2_est)
output.append(aff_p)
output.append(new_c*8)
output.append(coords)
            # color orthogonal
if self.color_switch:
Fcolor1_est = transform(aff_norm.transpose(1,2), Fcolor2_crop)
color1_est = self.decoder(Fcolor1_est)
output.append(color1_est)
            # coord orthogonal
if self.coord_switch:
aff_norm_tran = self.softmax(aff_p.permute(0,2,1) * self.temp)
if self.grid_flat_crop is None:
                    # assumption: the grid size should match the cropped target feature map
                    self.grid_flat_crop = create_flat_grid(Fgray2_crop.size()).permute(0,2,1).detach()
C12 = torch.bmm(self.grid_flat_crop, aff_norm)
C11 = torch.bmm(C12, aff_norm_tran)
output.append(self.grid_flat_crop)
output.append(C11)
if self.self_reconstruction:
self_aff1 = self.nlm(Fgray1, Fgray1)
self_aff1 -= (torch.eye(self_aff1.size(-1)).unsqueeze(0) * 1e10).cuda()
self_aff1_norm = self.softmax(self_aff1)
Fcolor1_reconstruct = transform(self_aff1_norm, Fcolor1)
color1_reconstruct = self.decoder(Fcolor1_reconstruct)
output.append(color1_reconstruct)
if self.contrastive_flag:
# contrastive loss on a pair of features
Fcolor2_est_batch, sparse_loss = self.cont_model(Fgray2_crop, Fgray1, Fcolor1)
Fcolor2_est_batch = self.decoder(Fcolor2_est_batch)
output.append(Fcolor2_est_batch)
output.append(sparse_loss)
return output
```
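A quick standalone check of the channel-dropout-and-rescale trick in `dropout2d` above, on dummy all-ones images (purely illustrative):

```python
import numpy as np
import torch

img = torch.ones(2, 3, 4, 4)                               # dummy batch of all-ones RGB images
drop_ch = np.random.choice(3, 2, replace=False).tolist()   # drop two of the three channels
img[:, drop_ch] = 0
img *= 3 / (3 - len(drop_ch))                              # rescale to keep the expected magnitude
print(img.sum(dim=(1, 2, 3)))                              # tensor([48., 48.]) — same as before dropping
```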
#### File: 594422814/ContrastCorr/train_trackingnet.py
```python
import os
import cv2
import sys
import time
import copy
import torch
import logging
import argparse
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from libs.loader_trackingnet import VidListv1, VidListv2
import torch.backends.cudnn as cudnn
import libs.transforms_multi as transforms
from model import track_match_comb as Model
from libs.loss import L1_loss
from libs.concentration_loss import ConcentrationSwitchLoss as ConcentrationLoss
from libs.train_utils import save_vis, AverageMeter, save_checkpoint, log_current
from libs.utils import diff_crop
import pdb
FORMAT = "[%(asctime)-15s %(filename)s:%(lineno)d %(funcName)s] %(message)s"
logging.basicConfig(format=FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
############################## helper functions ##############################
def parse_args():
parser = argparse.ArgumentParser(description='')
# file/folder pathes
parser.add_argument("--videoRoot", type=str, default="/data1/trackingnet/", help='train video path') ### change the dataset path
parser.add_argument("--encoder_dir",type=str, default='weights/encoder_single_gpu.pth', help="pretrained encoder")
parser.add_argument("--decoder_dir",type=str, default='weights/decoder_single_gpu.pth', help="pretrained decoder")
parser.add_argument('--resume', type=str, default='', metavar='PATH', help='path to latest checkpoint (default: none)')
parser.add_argument("-c","--savedir",type=str,default="trackingnet_contrastive/", help='checkpoints path')
parser.add_argument("--Resnet", type=str, default="r18", help="choose from r18 or r50")
# main parameters
parser.add_argument("--pretrainRes",action="store_true")
parser.add_argument("--batchsize",type=int, default=16, help="batchsize")
parser.add_argument('--workers', type=int, default=16)
parser.add_argument("--patch_size", type=int, default=256, help="crop size for localization.")
parser.add_argument("--full_size", type=int, default=640, help="full size for one frame.")
parser.add_argument("--rotate",type=int,default=10, help='degree to rotate training images')
parser.add_argument("--scale",type=float,default=1.2, help='random scale')
parser.add_argument("--lr",type=float,default=1e-4, help='learning rate')
parser.add_argument('--lr-mode', type=str, default='step') # poly step
parser.add_argument("--window_len",type=int,default=2, help='number of images (2 for pair and 3 for triple)')
parser.add_argument("--log_interval",type=int,default=10, help='')
parser.add_argument("--save_interval",type=int,default=200, help='save model every x epoch')
parser.add_argument("--momentum",type=float,default=0.9, help='momentum')
parser.add_argument("--weight_decay",type=float,default=0.005, help='weight decay')
parser.add_argument("--device", type=int, default=2, help="0~device_count-1 for single GPU, device_count for dataparallel.")
parser.add_argument("--temp", type=int, default=1, help="temprature for softmax.")
# set epoches
parser.add_argument("--wepoch",type=int,default=100, help='warmup epoch')
parser.add_argument("--nepoch",type=int,default=200, help='max epoch')
parser.add_argument("--step",type=int,default=40, help='step for lr')
# concenration regularization
parser.add_argument("--lc",type=float,default=0.3, help='weight of concentration loss')
parser.add_argument("--lc_win",type=int,default=8, help='win_len for concentration loss')
# orthorganal regularization
parser.add_argument("--color_switch",type=float,default=0.1, help='weight of color switch loss')
parser.add_argument("--coord_switch",type=float,default=0, help='weight of color switch loss')
# contrastive loss
parser.add_argument("--contrastive",type=float,default=1, help='weight of contrastive loss')
print("Begin parser arguments.")
args = parser.parse_args()
assert args.videoRoot is not None
if not os.path.exists(args.savedir):
os.mkdir(args.savedir)
args.savepatch = os.path.join(args.savedir,'savepatch')
args.logfile = open(os.path.join(args.savedir,"logargs.txt"),"w")
args.multiGPU = args.device == torch.cuda.device_count()
if not args.multiGPU:
torch.cuda.set_device(args.device)
if not os.path.exists(args.savepatch):
os.mkdir(args.savepatch)
args.vis = True
if args.color_switch > 0:
args.color_switch_flag = True
else:
args.color_switch_flag = False
if args.coord_switch > 0:
args.coord_switch_flag = True
else:
args.coord_switch_flag = False
if args.contrastive > 0:
args.contrastive_flag = True
else:
args.contrastive_flag = False
try:
from tensorboardX import SummaryWriter
global writer
writer = SummaryWriter()
except ImportError:
args.vis = False
print(' '.join(sys.argv))
print('\n')
args.logfile.write(' '.join(sys.argv))
args.logfile.write('\n')
for k, v in args.__dict__.items():
print(k, ':', v)
args.logfile.write('{}:{}\n'.format(k,v))
args.logfile.close()
return args
def adjust_learning_rate(args, optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
if args.lr_mode == 'step':
lr = args.lr * (0.5 ** (epoch // args.step))
elif args.lr_mode == 'poly':
lr = args.lr * (1 - epoch / args.nepoch) ** 0.9
else:
raise ValueError('Unknown lr mode {}'.format(args.lr_mode))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
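# e.g. with --lr 1e-4 and --step 40 in 'step' mode, epochs 0-39 use 1e-4,
# epochs 40-79 use 5e-5, epochs 80-119 use 2.5e-5, and so on.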
def create_loader(args):
dataset_train_warm = VidListv1(args.videoRoot, args.patch_size, args.rotate, args.scale)
dataset_train = VidListv2(args.videoRoot, args.patch_size, args.window_len, args.rotate, args.scale, args.full_size)
if args.multiGPU:
train_loader_warm = torch.utils.data.DataLoader(
dataset_train_warm, batch_size=args.batchsize, shuffle = True, num_workers=args.workers, pin_memory=True, drop_last=True)
train_loader = torch.utils.data.DataLoader(
dataset_train, batch_size=args.batchsize, shuffle = True, num_workers=args.workers, pin_memory=True, drop_last=True)
else:
train_loader_warm = torch.utils.data.DataLoader(
dataset_train_warm, batch_size=args.batchsize, shuffle = True, num_workers=0, drop_last=True)
train_loader = torch.utils.data.DataLoader(
dataset_train, batch_size=args.batchsize, shuffle = True, num_workers=0, drop_last=True)
return train_loader_warm, train_loader
def train(args):
loader_warm, loader = create_loader(args)
cudnn.benchmark = True
best_loss = 1e10
start_epoch = 0
model = Model(args.pretrainRes, args.encoder_dir, args.decoder_dir, temp = args.temp, Resnet = args.Resnet,
color_switch = args.color_switch_flag, coord_switch = args.coord_switch_flag, contrastive = args.contrastive_flag)
if args.multiGPU:
model = torch.nn.DataParallel(model).cuda()
closs = ConcentrationLoss(win_len=args.lc_win, stride=args.lc_win,
F_size=torch.Size((args.batchsize//torch.cuda.device_count(),2, args.patch_size//8, args.patch_size//8)), temp = args.temp)
closs = nn.DataParallel(closs).cuda()
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model._modules['module'].parameters()),args.lr)
else:
closs = ConcentrationLoss(win_len=args.lc_win, stride=args.lc_win,
F_size=torch.Size((args.batchsize,2,
args.patch_size//8,
args.patch_size//8)), temp = args.temp)
model.cuda()
closs.cuda()
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),args.lr)
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch']
best_loss = checkpoint['best_loss']
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{} ({})' (epoch {})"
.format(args.resume, best_loss, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
for epoch in range(start_epoch, args.nepoch):
if epoch < args.wepoch:
lr = adjust_learning_rate(args, optimizer, epoch)
print("Base lr for epoch {}: {}.".format(epoch, optimizer.param_groups[0]['lr']))
best_loss = train_iter(args, loader_warm, model, closs, optimizer, epoch, best_loss)
else:
lr = adjust_learning_rate(args, optimizer, epoch-args.wepoch)
print("Base lr for epoch {}: {}.".format(epoch, optimizer.param_groups[0]['lr']))
best_loss = train_iter(args, loader, model, closs, optimizer, epoch, best_loss)
def forward(frame1, frame2, model, warm_up, patch_size=None):
n, c, h, w = frame1.size()
if warm_up:
output = model(frame1, frame2)
else:
output = model(frame1, frame2, warm_up=False, patch_size=[patch_size//8, patch_size//8])
new_c = output[2]
color2_gt = diff_crop(frame2, new_c[:,0], new_c[:,2], new_c[:,1], new_c[:,3],
patch_size, patch_size)
output.append(color2_gt)
return output
def train_iter(args, loader, model, closs, optimizer, epoch, best_loss):
losses = AverageMeter()
batch_time = AverageMeter()
c_losses = AverageMeter()
MSE_Loss = torch.nn.MSELoss(reduction = 'mean')
p_losses = AverageMeter()
model.train()
end = time.time()
if args.coord_switch_flag:
coord_switch_loss = nn.L1Loss()
sc_losses = AverageMeter()
if args.contrastive_flag:
cont_losses = AverageMeter()
s_losses = AverageMeter()
if epoch < 1 or (epoch>=args.wepoch and epoch< args.wepoch+2):
thr = None
else:
thr = 2.5
for i,frames in enumerate(loader):
frame1_var = frames[0].cuda()
frame2_var = frames[1].cuda()
if epoch < args.wepoch:
output = forward(frame1_var, frame2_var, model, warm_up=True)
color2_est = output[0]
aff = output[1]
b,x,_ = aff.size()
color1_est = None
if args.color_switch_flag:
color1_est = output[2]
loss_ = L1_loss(color2_est, frame2_var, 10, 10, thr=thr, pred1=color1_est, frame1_var = frame1_var)
# frame self reconstruction
reconstruction_loss = L1_loss(output[-1], frame1_var, 10, 10, thr=thr)
if epoch >= 10 and args.lc > 0:
constraint_loss = torch.sum(closs(aff.view(b,1,x,x))) * args.lc
c_losses.update(constraint_loss.item(), frame1_var.size(0))
loss = loss_ + reconstruction_loss + constraint_loss
else:
loss = loss_ + reconstruction_loss
if(i % args.log_interval == 0):
save_vis(color2_est, frame2_var, frame1_var, frame2_var, args.savepatch)
else:
output = forward(frame1_var, frame2_var, model, warm_up=False, patch_size = args.patch_size)
color2_est = output[0]
aff = output[1]
new_c = output[2]
coords = output[3]
Fcolor2_crop = output[-1]
            b,x,_ = aff.size()
color1_est = None
count = 3
constraint_loss = torch.sum(closs(aff.view(b,1,x,x))) * args.lc
c_losses.update(constraint_loss.item(), frame1_var.size(0))
if args.color_switch_flag:
count += 1
color1_est = output[count]
# intra-video transformation
loss_color = L1_loss(color2_est, Fcolor2_crop, 10, 10, thr=thr, pred1=color1_est, frame1_var = frame1_var)
reconstruction_loss = L1_loss(output[-4], frame1_var, 10, 10, thr=thr)
loss_ = loss_color + reconstruction_loss + constraint_loss
if args.coord_switch_flag:
count += 1
grids = output[count]
C11 = output[count+1]
loss_coord = args.coord_switch * coord_switch_loss(C11, grids)
loss = loss_ + loss_coord
sc_losses.update(loss_coord.item(), frame1_var.size(0))
else:
loss = loss_
if args.contrastive_flag:
sparse_loss = output[-2]
color2_est_batch = output[-3]
# intra- and inter-video transformation consistency
loss_color2_contrastive = L1_loss(color2_est_batch, color2_est, 10, 10, thr=thr)
# sparsity constraint for the batch-level affinity
sparse_loss = sparse_loss.mean() * args.contrastive
# total loss
loss = loss + sparse_loss + loss_color2_contrastive
cont_losses.update(loss_color2_contrastive.item(), frame1_var.size(0))
s_losses.update(sparse_loss.item(), frame1_var.size(0))
if(i % args.log_interval == 0):
save_vis(color2_est, Fcolor2_crop, frame1_var, frame2_var, args.savepatch, new_c)
losses.update(loss.item(), frame1_var.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
if epoch >= args.wepoch and args.contrastive_flag:
logger.info('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Total Loss {loss.val:.4f} ({loss.avg:.4f})\t '
'Contrastive Loss {ct_loss.val:.4f} ({ct_loss.avg:.4f})\t '
'Sparse Loss {s_loss.val:.4f} ({s_loss.avg:.4f})\t '
'Constraint Loss {c_loss.val:.4f} ({c_loss.avg:.4f})\t '.format(
epoch, i+1, len(loader), batch_time=batch_time, loss=losses, ct_loss=cont_losses, s_loss=s_losses, c_loss=c_losses))
else:
logger.info('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Total Loss {loss.val:.4f} ({loss.avg:.4f})\t '
'Constraint Loss {c_loss.val:.4f} ({c_loss.avg:.4f})\t '.format(
epoch, i+1, len(loader), batch_time=batch_time, loss=losses, c_loss=c_losses))
if((i + 1) % args.save_interval == 0):
is_best = losses.avg < best_loss
best_loss = min(losses.avg, best_loss)
checkpoint_path = os.path.join(args.savedir, 'checkpoint_latest.pth.tar')
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'best_loss': best_loss,
}, is_best, filename=checkpoint_path, savedir = args.savedir)
log_current(epoch, losses.avg, best_loss, filename = "log_current.txt", savedir=args.savedir)
return best_loss
if __name__ == '__main__':
args = parse_args()
train(args)
writer.close()
``` |
{
"source": "594422814/TransformerTrack",
"score": 2
} |
#### File: models/target_classifier/multihead_attention.py
```python
import torch.nn as nn
import torch
import math
import torch.nn.functional as F
import numpy as np
import pdb
class MultiheadAttention(nn.Module):
def __init__(self, feature_dim=512, n_head=8, key_feature_dim=64):
super(MultiheadAttention, self).__init__()
self.Nh = n_head
self.head = nn.ModuleList()
for N in range(self.Nh):
self.head.append(RelationUnit(feature_dim, key_feature_dim))
# self.out_conv = nn.Linear(n_head*key_feature_dim, feature_dim) # bias=False
def forward(self, query=None, key=None, value=None):
isFirst = True
for N in range(self.Nh):
if(isFirst):
concat = self.head[N](query, key, value)
isFirst = False
else:
concat = torch.cat((concat, self.head[N](query, key, value)), -1)
# output = self.out_conv(concat)
output = concat
return output
class RelationUnit(nn.Module):
def __init__(self, feature_dim=512, key_feature_dim=64):
super(RelationUnit, self).__init__()
self.temp = 30
self.WK = nn.Linear(feature_dim, key_feature_dim) # bias=False
# self.WQ = nn.Linear(feature_dim, key_feature_dim)
self.WV = nn.Linear(feature_dim, feature_dim)
# Init weights
for m in self.WK.modules():
m.weight.data.normal_(0, math.sqrt(2. / m.out_features))
# m.weight.data.normal_(1. / m.in_features, math.sqrt(2. / m.out_features))
if m.bias is not None:
m.bias.data.zero_()
'''
for m in self.WQ.modules():
m.weight.data.normal_(0, math.sqrt(2. / m.out_features))
# m.weight.data.normal_(1. / m.in_features, math.sqrt(2. / m.out_features))
if m.bias is not None:
m.bias.data.zero_()
'''
for m in self.WV.modules():
m.weight.data.normal_(0, math.sqrt(2. / m.out_features))
# m.weight.data.normal_(1. / m.in_features, math.sqrt(2. / m.out_features))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, query=None, key=None, value=None):
w_k = self.WK(key)
w_k = F.normalize(w_k, p=2, dim=-1)
w_k = w_k.permute(1,2,0) # Batch, Dim, Len_1
w_q = self.WK(query)
w_q = F.normalize(w_q, p=2, dim=-1)
w_q = w_q.permute(1,0,2) # Batch, Len_2, Dim
dot_prod = torch.bmm(w_q, w_k) # Batch, Len_2, Len_1
affinity = F.softmax(dot_prod*self.temp, dim=-1)
w_v = value.permute(1,0,2) # Batch, Len_1, Dim
output = torch.bmm(affinity, w_v) # Batch, Len_2, Dim
output = output.permute(1,0,2)
return output
```
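For reference, a minimal sketch of the normalized dot-product attention that `RelationUnit` computes, with the learned `WK`/`WV` projections omitted and random tensors standing in for features (layout is Len x Batch x Dim, as in the module):

```python
import torch
import torch.nn.functional as F

L_kv, L_q, B, D = 6, 4, 2, 64
key   = torch.randn(L_kv, B, D)
query = torch.randn(L_q, B, D)
value = torch.randn(L_kv, B, D)

k = F.normalize(key, p=2, dim=-1).permute(1, 2, 0)      # B, D, L_kv
q = F.normalize(query, p=2, dim=-1).permute(1, 0, 2)    # B, L_q, D
affinity = F.softmax(torch.bmm(q, k) * 30, dim=-1)      # B, L_q, L_kv (temp = 30)
out = torch.bmm(affinity, value.permute(1, 0, 2))       # B, L_q, D
out = out.permute(1, 0, 2)                              # L_q, B, D
print(out.shape)                                        # torch.Size([4, 2, 64])
```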
#### File: TransformerTrack/pytracking/GOT10k_UAV.py
```python
from got10k.trackers import Tracker as GOT_Tracker
from got10k.experiments import ExperimentUAV123
import numpy as np
import os
import sys
import argparse
import importlib
import pdb
env_path = os.path.join(os.path.dirname(__file__), '..')
if env_path not in sys.path:
sys.path.append(env_path)
from pytracking.evaluation import Sequence, Tracker
parser = argparse.ArgumentParser(description='Run UAV.')
parser.add_argument('--tracker_name', type=str, default='trdimp')
parser.add_argument('--tracker_param', type=str, default='trdimp') # trsiam / trdimp
parser.add_argument('--run_id', type=int, default=None)
parser.add_argument('--debug', type=int, default=0, help='Debug level.')
args = parser.parse_args()
TrTracker = Tracker(args.tracker_name, args.tracker_param, args.run_id)
class GOT_Tracker(GOT_Tracker):
def __init__(self):
super(GOT_Tracker, self).__init__(name='GOT_Tracker')
self.tracker = TrTracker.tracker_class(TrTracker.get_parameters())
def init(self, image, box):
image = np.array(image)
self.tracker.initialize(image, box)
def update(self, image):
image = np.array(image)
self.box = self.tracker.track(image)
return self.box
if __name__ == '__main__':
# setup tracker
tracker = GOT_Tracker()
# run experiments on UAV
experiment = ExperimentUAV123('/data3/wangning/UAV123', version='UAV123') ### version='UAV123' 'UAV20L'
experiment.run(tracker, visualize=False)
# report performance
experiment.report([tracker.name])
``` |
{
"source": "594zyc/alfworld",
"score": 2
} |
#### File: agents/controller/oracle.py
```python
import os
import cv2
import sys
import json
import re
import random
import traceback
import numpy as np
import gen.constants as constants
from collections import Counter
from agents.controller.base import BaseAgent
from agents.utils.misc import extract_admissible_commands_with_heuristics
class OracleAgent(BaseAgent):
def __init__(self, env, traj_data, traj_root,
load_receps=False, debug=False,
goal_desc_human_anns_prob=0.0,
use_gt_relations=False):
self.openable_points = self.get_openable_points(traj_data)
self.use_gt_relations = use_gt_relations
self.exploration_frames = []
super().__init__(env, traj_data, traj_root,
load_receps=load_receps, debug=debug,
goal_desc_human_anns_prob=goal_desc_human_anns_prob)
def get_openable_points(self, traj_data):
scene_num = traj_data['scene']['scene_num']
openable_json_file = os.path.join(os.environ['ALFRED_ROOT'], 'gen/layouts/FloorPlan%d-openable.json' % scene_num)
with open(openable_json_file, 'r') as f:
openable_points = json.load(f)
return openable_points
def get_obj_cls_from_metadata(self, name):
objs = [obj for obj in self.env.last_event.metadata['objects'] if obj['visible'] and name in obj['objectType']]
return objs[0] if len(objs) > 0 else None
def get_obj_id_from_metadata(self, object_id):
objs = [obj for obj in self.env.last_event.metadata['objects'] if object_id == obj['objectId']]
return objs[0] if len(objs) > 0 else None
def get_num_interactable_objs(self, recep_id):
return len([obj for obj in self.env.last_event.metadata['objects'] if obj['visible'] and obj['parentReceptacles'] and recep_id in obj['parentReceptacles']])
def get_exploration_frames(self):
return self.exploration_frames
# use pre-computed openable points from ALFRED to store receptacle locations
def explore_scene(self):
agent_height = self.env.last_event.metadata['agent']['position']['y']
for object_id, point in self.openable_points.items():
action = {'action': 'TeleportFull',
'x': point[0],
'y': agent_height,
'z': point[1],
'rotateOnTeleport': False,
'rotation': point[2],
'horizon': point[3]}
event = self.env.step(action)
if event.metadata['lastActionSuccess']:
self.exploration_frames.append(np.array(self.env.last_event.frame[:,:,::-1]))
instance_segs = np.array(self.env.last_event.instance_segmentation_frame)
color_to_object_id = self.env.last_event.color_to_object_id
# find unique instance segs
color_count = Counter()
for x in range(instance_segs.shape[0]):
for y in range(instance_segs.shape[1]):
color = instance_segs[y, x]
color_count[tuple(color)] += 1
for color, num_pixels in color_count.most_common():
if color in color_to_object_id:
object_id = color_to_object_id[color]
object_type = object_id.split('|')[0]
if "Basin" in object_id:
object_type += "Basin"
if object_type in self.STATIC_RECEPTACLES:
if object_id not in self.receptacles:
self.receptacles[object_id] = {
'object_id': object_id,
'object_type': object_type,
'locs': action,
'num_pixels': num_pixels,
'num_id': "%s %d" % (object_type.lower(), self.get_next_num_id(object_type, self.receptacles)),
'closed': True if object_type in constants.OPENABLE_CLASS_LIST else None
}
elif object_id in self.receptacles and num_pixels > self.receptacles[object_id]['num_pixels']:
self.receptacles[object_id]['locs'] = action # .append(action)
self.receptacles[object_id]['num_pixels'] = num_pixels
# self.save_receps()
    # ground-truth instance segmentations (with consistent object IDs) from THOR
def get_instance_seg(self):
instance_segs = np.array(self.env.last_event.instance_segmentation_frame)
inst_color_to_object_id = self.env.last_event.color_to_object_id
# find unique instance segs
inst_color_count = Counter()
for x in range(instance_segs.shape[0]):
for y in range(instance_segs.shape[1]):
color = instance_segs[y, x]
inst_color_count[tuple(color)] += 1
return inst_color_count, inst_color_to_object_id
# ground-truth object state info maintained by ThorEnv
def get_object_state(self, object_id):
is_clean = object_id in self.env.cleaned_objects
is_hot = object_id in self.env.heated_objects
is_cool = object_id in self.env.cooled_objects
is_sliced = 'Sliced' in object_id
return is_clean, is_cool, is_hot, is_sliced
def get_admissible_commands(self):
return extract_admissible_commands_with_heuristics(self.intro, self.frame_desc, self.feedback,
self.curr_recep, self.inventory)
def print_frame(self, recep, loc):
inst_color_count, inst_color_to_object_id = self.get_instance_seg()
recep_object_id = recep['object_id']
# for each unique seg add to object dictionary if it's more visible than before
visible_objects = []
for color, num_pixels in inst_color_count.most_common():
if color in inst_color_to_object_id:
object_id = inst_color_to_object_id[color]
object_type = object_id.split("|")[0]
object_metadata = self.get_obj_id_from_metadata(object_id)
is_obj_in_recep = (object_metadata and object_metadata['parentReceptacles'] and len(object_metadata['parentReceptacles']) > 0 and recep_object_id in object_metadata['parentReceptacles'])
if object_type in self.OBJECTS and object_metadata and (not self.use_gt_relations or is_obj_in_recep):
if object_id not in self.objects:
self.objects[object_id] = {
'object_id': object_id,
'object_type': object_type,
'parent': recep['object_id'],
'loc': loc,
'num_pixels': num_pixels,
'num_id': "%s %d" % (object_type.lower() if "Sliced" not in object_id else "sliced-%s" % object_type.lower(),
self.get_next_num_id(object_type, self.objects))
}
elif object_id in self.objects and num_pixels > self.objects[object_id]['num_pixels']:
self.objects[object_id]['loc'] = loc
self.objects[object_id]['num_pixels'] = num_pixels
if self.objects[object_id]['num_id'] not in self.inventory:
visible_objects.append(self.objects[object_id]['num_id'])
visible_objects_with_articles = ["a %s," % vo for vo in visible_objects]
feedback = ""
if len(visible_objects) > 0:
feedback = "On the %s, you see %s" % (recep['num_id'], self.fix_and_comma_in_the_end(' '.join(visible_objects_with_articles)))
elif not recep['closed'] and len(visible_objects) == 0:
feedback = "On the %s, you see nothing." % (recep['num_id'])
return visible_objects, feedback
def step(self, action_str):
event = None
self.feedback = "Nothing happens."
try:
cmd = self.parse_command(action_str)
if cmd['action'] == self.Action.GOTO:
target = cmd['tar']
recep = self.get_object(target, self.receptacles)
if recep and recep['num_id'] == self.curr_recep:
return self.feedback
self.curr_loc = recep['locs']
event = self.navigate(self.curr_loc)
self.curr_recep = recep['num_id']
self.visible_objects, self.feedback = self.print_frame(recep, self.curr_loc)
# feedback conditions
loc_id = list(self.receptacles.keys()).index(recep['object_id'])
loc_feedback = "You arrive at loc %s. " % loc_id
state_feedback = "The {} is {}. ".format(self.curr_recep, "closed" if recep['closed'] else "open") if recep['closed'] is not None else ""
loc_state_feedback = loc_feedback + state_feedback
self.feedback = loc_state_feedback + self.feedback if "closed" not in state_feedback else loc_state_feedback
self.frame_desc = str(self.feedback)
elif cmd['action'] == self.Action.PICK:
obj, rel, tar = cmd['obj'], cmd['rel'], cmd['tar']
if obj in self.visible_objects:
object = self.get_object(obj, self.objects)
event = self.env.step({'action': "PickupObject",
'objectId': object['object_id'],
'forceAction': True})
self.inventory.append(object['num_id'])
self.feedback = "You pick up the %s from the %s." % (obj, tar)
elif cmd['action'] == self.Action.PUT:
obj, rel, tar = cmd['obj'], cmd['rel'], cmd['tar']
recep = self.get_object(tar, self.receptacles)
event = self.env.step({'action': "PutObject",
'objectId': self.env.last_event.metadata['inventoryObjects'][0]['objectId'],
'receptacleObjectId': recep['object_id'],
'forceAction': True})
if event.metadata['lastActionSuccess']:
self.inventory.pop()
self.feedback = "You put the %s %s the %s." % (obj, rel, tar)
elif cmd['action'] == self.Action.OPEN:
target = cmd['tar']
recep = self.get_object(target, self.receptacles)
event = self.env.step({'action': "OpenObject",
'objectId': recep['object_id'],
'forceAction': True})
self.receptacles[recep['object_id']]['closed'] = False
self.visible_objects, self.feedback = self.print_frame(recep, self.curr_loc)
action_feedback = "You open the %s. The %s is open. " % (target, target)
self.feedback = action_feedback + self.feedback.replace("On the %s" % target, "In it")
self.frame_desc = str(self.feedback)
elif cmd['action'] == self.Action.CLOSE:
target = cmd['tar']
recep = self.get_object(target, self.receptacles)
event = self.env.step({'action': "CloseObject",
'objectId': recep['object_id'],
'forceAction': True})
self.receptacles[recep['object_id']]['closed'] = True
self.feedback = "You close the %s." % target
elif cmd['action'] == self.Action.TOGGLE:
target = cmd['tar']
obj = self.get_object(target, self.objects)
event = self.env.step({'action': "ToggleObjectOn",
'objectId': obj['object_id'],
'forceAction': True})
self.feedback = "You turn on the %s." % target
elif cmd['action'] == self.Action.HEAT:
obj, rel, tar = cmd['obj'], cmd['rel'], cmd['tar']
obj_id = self.env.last_event.metadata['inventoryObjects'][0]['objectId']
recep = self.get_object(tar, self.receptacles)
                # open the microwave, put the object in, heat it, take the object back out, close the microwave
events = []
events.append(self.env.step({'action': 'OpenObject', 'objectId': recep['object_id'], 'forceAction': True}))
events.append(self.env.step({'action': 'PutObject', 'objectId': obj_id, 'receptacleObjectId': recep['object_id'], 'forceAction': True}))
events.append(self.env.step({'action': 'CloseObject', 'objectId': recep['object_id'], 'forceAction': True}))
events.append(self.env.step({'action': 'ToggleObjectOn', 'objectId': recep['object_id'], 'forceAction': True}))
events.append(self.env.step({'action': 'Pass'}))
events.append(self.env.step({'action': 'ToggleObjectOff', 'objectId': recep['object_id'], 'forceAction': True}))
events.append(self.env.step({'action': 'OpenObject', 'objectId': recep['object_id'], 'forceAction': True}))
events.append(self.env.step({'action': 'PickupObject', 'objectId': obj_id, 'forceAction': True}))
events.append(self.env.step({'action': 'CloseObject', 'objectId': recep['object_id'], 'forceAction': True}))
if all(e.metadata['lastActionSuccess'] for e in events) and self.curr_recep == tar:
self.feedback = "You heat the %s using the %s." % (obj, tar)
elif cmd['action'] == self.Action.CLEAN:
obj, rel, tar = cmd['obj'], cmd['rel'], cmd['tar']
object = self.env.last_event.metadata['inventoryObjects'][0]
sink = self.get_obj_cls_from_metadata('BathtubBasin' if "bathtubbasin" in tar else "SinkBasin")
faucet = self.get_obj_cls_from_metadata('Faucet')
# put the object in the sink, turn on the faucet, turn off the faucet, pickup the object
events = []
events.append(self.env.step({'action': 'PutObject', 'objectId': object['objectId'], 'receptacleObjectId': sink['objectId'], 'forceAction': True}))
events.append(self.env.step({'action': 'ToggleObjectOn', 'objectId': faucet['objectId'], 'forceAction': True}))
events.append(self.env.step({'action': 'Pass'}))
events.append(self.env.step({'action': 'ToggleObjectOff', 'objectId': faucet['objectId'], 'forceAction': True}))
events.append(self.env.step({'action': 'PickupObject', 'objectId': object['objectId'], 'forceAction': True}))
if all(e.metadata['lastActionSuccess'] for e in events) and self.curr_recep == tar:
self.feedback = "You clean the %s using the %s." % (obj, tar)
elif cmd['action'] == self.Action.COOL:
obj, rel, tar = cmd['obj'], cmd['rel'], cmd['tar']
object = self.env.last_event.metadata['inventoryObjects'][0]
fridge = self.get_obj_cls_from_metadata('Fridge')
# open the fridge, put the object inside, close the fridge, open the fridge, pickup the object
events = []
events.append(self.env.step({'action': 'OpenObject', 'objectId': fridge['objectId'], 'forceAction': True}))
events.append(self.env.step({'action': 'PutObject', 'objectId': object['objectId'], 'receptacleObjectId': fridge['objectId'], 'forceAction': True}))
events.append(self.env.step({'action': 'CloseObject', 'objectId': fridge['objectId'], 'forceAction': True}))
events.append(self.env.step({'action': 'Pass'}))
events.append(self.env.step({'action': 'OpenObject', 'objectId': fridge['objectId'], 'forceAction': True}))
events.append(self.env.step({'action': 'PickupObject', 'objectId': object['objectId'], 'forceAction': True}))
events.append(self.env.step({'action': 'CloseObject', 'objectId': fridge['objectId'], 'forceAction': True}))
if all(e.metadata['lastActionSuccess'] for e in events) and self.curr_recep == tar:
self.feedback = "You cool the %s using the %s." % (obj, tar)
elif cmd['action'] == self.Action.SLICE:
obj, rel, tar = cmd['obj'], cmd['rel'], cmd['tar']
object = self.get_object(obj, self.objects)
inventory_objects = self.env.last_event.metadata['inventoryObjects']
if 'Knife' in inventory_objects[0]['objectType']:
event = self.env.step({'action': "SliceObject",
'objectId': object['object_id']})
self.feedback = "You slice %s with the %s" % (obj, tar)
elif cmd['action'] == self.Action.INVENTORY:
if len(self.inventory) > 0:
self.feedback = "You are carrying: a %s" % (self.inventory[0])
else:
self.feedback = "You are not carrying anything."
elif cmd['action'] == self.Action.EXAMINE:
target = cmd['tar']
receptacle = self.get_object(target, self.receptacles)
object = self.get_object(target, self.objects)
if receptacle:
self.visible_objects, self.feedback = self.print_frame(receptacle, self.curr_loc)
self.frame_desc = str(self.feedback)
elif object:
self.feedback = self.print_object(object)
elif cmd['action'] == self.Action.LOOK:
if self.curr_recep == "nothing":
self.feedback = "You are in the middle of a room. Looking quickly around you, you see nothing."
else:
self.feedback = "You are facing the %s. Next to it, you see nothing." % self.curr_recep
except:
if self.debug:
print(traceback.format_exc())
if event and not event.metadata['lastActionSuccess']:
self.feedback = "Nothing happens."
if self.debug:
print(self.feedback)
return self.feedback
```
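The nested pixel loops in `explore_scene` and `get_instance_seg` above count segmentation colors one pixel at a time in Python, which is slow for a full frame. Below is a minimal vectorized sketch of the same per-color count, assuming an H x W x 3 segmentation array like THOR's `instance_segmentation_frame`; the helper name `count_seg_colors` is illustrative and not part of the repo.
```python
import numpy as np
from collections import Counter

def count_seg_colors(instance_segs: np.ndarray) -> Counter:
    # Flatten H x W x C into (H*W, C) rows and count identical rows in one pass.
    flat = instance_segs.reshape(-1, instance_segs.shape[-1])
    colors, counts = np.unique(flat, axis=0, return_counts=True)
    # Return the same Counter-of-color-tuples structure the agent code iterates over.
    return Counter({tuple(int(c) for c in color): int(n)
                    for color, n in zip(colors, counts)})
```
The result can replace the `Counter` built by the loops and be consumed by the existing `most_common()` iteration unchanged.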
#### File: agents/dqn/train_dqn.py
```python
import datetime
import os
import random
import time
import copy
import json
import glob
import importlib
import numpy as np
import sys
sys.path.insert(0, os.environ['ALFRED_ROOT'])
sys.path.insert(0, os.path.join(os.environ['ALFRED_ROOT'], 'agents'))
from agent import TextDQNAgent
import modules.generic as generic
from eval import evaluate_dqn
from modules.generic import HistoryScoreCache, EpisodicCountingMemory, ObjCentricEpisodicMemory
from agents.utils.misc import extract_admissible_commands
os.environ["TOKENIZERS_PARALLELISM"] = "false"
def train():
time_1 = datetime.datetime.now()
config = generic.load_config()
agent = TextDQNAgent(config)
env_type = config["env"]["type"]
id_eval_env, num_id_eval_game = None, 0
ood_eval_env, num_ood_eval_game = None, 0
if env_type == "Hybrid":
thor = getattr(importlib.import_module("environment"), "AlfredThorEnv")(config)
tw = getattr(importlib.import_module("environment"), "AlfredTWEnv")(config)
thor_env = thor.init_env(batch_size=agent.batch_size)
tw_env = tw.init_env(batch_size=agent.batch_size)
else:
alfred_env = getattr(importlib.import_module("environment"), config["env"]["type"])(config, train_eval="train")
env = alfred_env.init_env(batch_size=agent.batch_size)
if agent.run_eval:
# in distribution
if config['dataset']['eval_id_data_path'] is not None:
alfred_env = getattr(importlib.import_module("environment"), config["general"]["evaluate"]["env"]["type"])(config, train_eval="eval_in_distribution")
id_eval_env = alfred_env.init_env(batch_size=agent.eval_batch_size)
num_id_eval_game = alfred_env.num_games
# out of distribution
if config['dataset']['eval_ood_data_path'] is not None:
alfred_env = getattr(importlib.import_module("environment"), config["general"]["evaluate"]["env"]["type"])(config, train_eval="eval_out_of_distribution")
ood_eval_env = alfred_env.init_env(batch_size=agent.eval_batch_size)
num_ood_eval_game = alfred_env.num_games
output_dir = config["general"]["save_path"]
data_dir = config["general"]["save_path"]
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# visdom
if config["general"]["visdom"]:
import visdom
viz = visdom.Visdom()
reward_win, step_win = None, None
dqn_loss_win = None
viz_game_points, viz_step, viz_overall_rewards = [], [], []
viz_id_eval_game_points, viz_id_eval_step = [], []
viz_ood_eval_game_points, viz_ood_eval_step = [], []
viz_dqn_loss = []
step_in_total = 0
episode_no = 0
running_avg_game_points = HistoryScoreCache(capacity=500)
running_avg_overall_rewards = HistoryScoreCache(capacity=500)
running_avg_game_steps = HistoryScoreCache(capacity=500)
running_avg_dqn_loss = HistoryScoreCache(capacity=500)
json_file_name = agent.experiment_tag.replace(" ", "_")
best_performance_so_far, best_ood_performance_so_far = 0.0, 0.0
episodic_counting_memory = EpisodicCountingMemory() # episodic counting based memory
obj_centric_episodic_counting_memory = ObjCentricEpisodicMemory()
# load model from checkpoint
if agent.load_pretrained:
if os.path.exists(data_dir + "/" + agent.load_from_tag + ".pt"):
agent.load_pretrained_model(data_dir + "/" + agent.load_from_tag + ".pt")
agent.update_target_net()
while(True):
if episode_no > agent.max_episode:
break
# hybrid env switching
if env_type == "Hybrid":
if random.uniform(0, 1) < config["env"]["hybrid_tw_prob"]:
env = tw_env
else:
env = thor_env
np.random.seed(episode_no)
env.seed(episode_no)
obs, infos = env.reset()
batch_size = len(obs)
agent.train()
agent.init(batch_size)
episodic_counting_memory.reset() # reset episodic counting based memory
obj_centric_episodic_counting_memory.reset() # reset object centric episodic counting based memory
previous_dynamics = None
chosen_actions = []
prev_step_dones, prev_rewards = [], []
for _ in range(batch_size):
chosen_actions.append("restart")
prev_step_dones.append(0.0)
prev_rewards.append(0.0)
observation_strings = list(obs)
task_desc_strings, observation_strings = agent.get_task_and_obs(observation_strings)
task_desc_strings = agent.preprocess_task(task_desc_strings)
observation_strings = agent.preprocess_observation(observation_strings)
first_sight_strings = copy.deepcopy(observation_strings)
agent.observation_pool.push_first_sight(first_sight_strings)
if agent.action_space == "exhaustive":
action_candidate_list = [extract_admissible_commands(intro, obs) for intro, obs in zip(first_sight_strings, observation_strings)]
else:
action_candidate_list = list(infos["admissible_commands"])
action_candidate_list = agent.preprocess_action_candidates(action_candidate_list)
observation_only = observation_strings
        observation_strings = [item + " [SEP] " + a for item, a in zip(observation_strings, chosen_actions)] # append the action chosen at the previous step to the observation
episodic_counting_memory.push(observation_only) # update init observation into memory
obj_centric_episodic_counting_memory.push(observation_only)
        # transitions must be stored in the replay memory in order, so we cache
        # what the agent returns at each step and push the whole sequence into
        # memory at the end of the game.
transition_cache = []
still_running_mask = []
sequence_game_rewards, sequence_count_rewards, sequence_novel_object_rewards, sequence_game_points = [], [], [], []
print_actions = []
act_randomly = False if agent.noisy_net else episode_no < agent.learn_start_from_this_episode
for step_no in range(agent.max_nb_steps_per_episode):
# push obs into observation pool
agent.observation_pool.push_batch(observation_strings)
# get most recent k observations
most_recent_observation_strings = agent.observation_pool.get()
if agent.noisy_net:
agent.reset_noise() # Draw a new set of noisy weights
# predict actions
if agent.action_space == "generation":
chosen_actions, chosen_indices, current_dynamics = agent.command_generation_act(most_recent_observation_strings, task_desc_strings, previous_dynamics, random=act_randomly)
elif agent.action_space == "beam_search_choice":
chosen_actions, chosen_indices, current_dynamics, action_candidate_list = agent.beam_search_choice_act(most_recent_observation_strings, task_desc_strings, previous_dynamics, random=act_randomly)
elif agent.action_space in ["admissible", "exhaustive"]:
chosen_actions, chosen_indices, current_dynamics = agent.admissible_commands_act(most_recent_observation_strings, task_desc_strings, action_candidate_list, previous_dynamics, random=act_randomly)
else:
raise NotImplementedError()
replay_info = [most_recent_observation_strings, task_desc_strings, action_candidate_list, chosen_indices]
transition_cache.append(replay_info)
obs, _, dones, infos = env.step(chosen_actions)
scores = [float(item) for item in infos["won"]]
dones = [float(item) for item in dones]
observation_strings = list(obs)
observation_strings = agent.preprocess_observation(observation_strings)
if agent.action_space == "exhaustive":
action_candidate_list = [extract_admissible_commands(intro, obs) for intro, obs in zip(first_sight_strings, observation_strings)]
else:
action_candidate_list = list(infos["admissible_commands"])
action_candidate_list = agent.preprocess_action_candidates(action_candidate_list)
observation_only = observation_strings
            observation_strings = [item + " [SEP] " + a for item, a in zip(observation_strings, chosen_actions)] # append the action chosen at the previous step to the observation
seeing_new_states = episodic_counting_memory.is_a_new_state(observation_only)
seeing_new_objects = obj_centric_episodic_counting_memory.get_object_novelty_reward(observation_only)
episodic_counting_memory.push(observation_only) # update new observation into memory
obj_centric_episodic_counting_memory.push(observation_only) # update new observation into memory
previous_dynamics = current_dynamics
if agent.noisy_net and step_in_total % agent.update_per_k_game_steps == 0:
agent.reset_noise() # Draw a new set of noisy weights
if episode_no >= agent.learn_start_from_this_episode and step_in_total % agent.update_per_k_game_steps == 0:
dqn_loss, _ = agent.update_dqn()
if dqn_loss is not None:
running_avg_dqn_loss.push(dqn_loss)
if step_no == agent.max_nb_steps_per_episode - 1:
# terminate the game because DQN requires one extra step
dones = [1.0 for _ in dones]
step_in_total += 1
still_running = [1.0 - float(item) for item in prev_step_dones] # list of float
prev_step_dones = dones
step_rewards = [float(curr) - float(prev) for curr, prev in zip(scores, prev_rewards)] # list of float
count_rewards = [r * agent.count_reward_lambda for r in seeing_new_states] # list of float
novel_object_rewards = [r * agent.novel_object_reward_lambda for r in seeing_new_objects] # list of novel object rewards
sequence_game_points.append(copy.copy(step_rewards))
prev_rewards = scores
still_running_mask.append(still_running)
sequence_game_rewards.append(step_rewards)
sequence_count_rewards.append(count_rewards)
sequence_novel_object_rewards.append(novel_object_rewards)
print_actions.append(chosen_actions[0] if still_running[0] else "--")
# if all ended, break
if np.sum(still_running) == 0:
break
still_running_mask_np = np.array(still_running_mask)
game_rewards_np = np.array(sequence_game_rewards) * still_running_mask_np # step x batch
count_rewards_np = np.array(sequence_count_rewards) * still_running_mask_np # step x batch
novel_object_rewards_np = np.array(sequence_novel_object_rewards) * still_running_mask_np
game_points_np = np.array(sequence_game_points) * still_running_mask_np # step x batch
game_rewards_pt = generic.to_pt(game_rewards_np, enable_cuda=False, type='float') # step x batch
count_rewards_pt = generic.to_pt(count_rewards_np, enable_cuda=False, type='float') # step x batch
novel_object_rewards_pt = generic.to_pt(novel_object_rewards_np, enable_cuda=False, type='float')
# push experience into replay buffer (dqn)
avg_reward_in_replay_buffer = agent.dqn_memory.get_avg_rewards()
for b in range(game_rewards_np.shape[1]):
if still_running_mask_np.shape[0] == agent.max_nb_steps_per_episode and still_running_mask_np[-1][b] != 0:
# need to pad one transition
avg_reward = game_rewards_np[:, b].tolist() + [0.0]
_need_pad = True
else:
avg_reward = game_rewards_np[:, b]
_need_pad = False
avg_reward = np.mean(avg_reward)
is_prior = avg_reward >= avg_reward_in_replay_buffer
mem = []
for i in range(game_rewards_np.shape[0]):
observation_strings, task_strings, action_candidate_list, chosen_indices = transition_cache[i]
mem.append([observation_strings[b],
task_strings[b],
action_candidate_list[b],
chosen_indices[b],
game_rewards_pt[i][b], count_rewards_pt[i][b], novel_object_rewards_pt[i][b]])
if still_running_mask_np[i][b] == 0.0:
break
if _need_pad:
observation_strings, task_strings, action_candidate_list, chosen_indices = transition_cache[-1]
mem.append([observation_strings[b],
task_strings[b],
action_candidate_list[b],
chosen_indices[b],
game_rewards_pt[-1][b] * 0.0, count_rewards_pt[-1][b] * 0.0, novel_object_rewards_pt[-1][b] * 0.0])
agent.dqn_memory.push(is_prior, avg_reward, mem)
for b in range(batch_size):
running_avg_game_points.push(np.sum(game_points_np, 0)[b])
running_avg_overall_rewards.push(np.sum(game_rewards_np, 0)[b] + np.sum(count_rewards_np, 0)[b] + np.sum(novel_object_rewards_np, 0)[b])
running_avg_game_steps.push(np.sum(still_running_mask_np, 0)[b])
# finish game
agent.finish_of_episode(episode_no, batch_size)
episode_no += batch_size
if episode_no < agent.learn_start_from_this_episode:
continue
if agent.report_frequency == 0 or (episode_no % agent.report_frequency > (episode_no - batch_size) % agent.report_frequency):
continue
time_2 = datetime.datetime.now()
print("Episode: {:3d} | time spent: {:s} | dqn loss: {:2.3f} | overall rewards: {:2.3f}/{:2.3f} | game points: {:2.3f}/{:2.3f} | used steps: {:2.3f}/{:2.3f}".format(episode_no, str(time_2 - time_1).rsplit(".")[0], running_avg_dqn_loss.get_avg(), np.mean(np.sum(game_rewards_np, 0) + np.sum(count_rewards_np, 0) + np.sum(novel_object_rewards_np, 0)), running_avg_overall_rewards.get_avg(), np.mean(np.sum(game_points_np, 0)), running_avg_game_points.get_avg(), np.mean(np.sum(still_running_mask_np, 0)), running_avg_game_steps.get_avg()))
# print(game_id + ": " + " | ".join(print_actions))
print(" | ".join(print_actions))
# evaluate
id_eval_game_points, id_eval_game_step = 0.0, 0.0
ood_eval_game_points, ood_eval_game_step = 0.0, 0.0
if agent.run_eval:
if id_eval_env is not None:
id_eval_res = evaluate_dqn(id_eval_env, agent, num_id_eval_game)
id_eval_game_points, id_eval_game_step = id_eval_res['average_points'], id_eval_res['average_steps']
if ood_eval_env is not None:
ood_eval_res = evaluate_dqn(ood_eval_env, agent, num_ood_eval_game)
ood_eval_game_points, ood_eval_game_step = ood_eval_res['average_points'], ood_eval_res['average_steps']
if id_eval_game_points >= best_performance_so_far:
best_performance_so_far = id_eval_game_points
agent.save_model_to_path(output_dir + "/" + agent.experiment_tag + "_id.pt")
if ood_eval_game_points >= best_ood_performance_so_far:
best_ood_performance_so_far = ood_eval_game_points
agent.save_model_to_path(output_dir + "/" + agent.experiment_tag + "_ood.pt")
else:
if running_avg_game_points.get_avg() >= best_performance_so_far:
best_performance_so_far = running_avg_game_points.get_avg()
agent.save_model_to_path(output_dir + "/" + agent.experiment_tag + ".pt")
# plot using visdom
if config["general"]["visdom"]:
viz_game_points.append(running_avg_game_points.get_avg())
viz_overall_rewards.append(running_avg_overall_rewards.get_avg())
viz_step.append(running_avg_game_steps.get_avg())
viz_dqn_loss.append(running_avg_dqn_loss.get_avg())
viz_id_eval_game_points.append(id_eval_game_points)
viz_id_eval_step.append(id_eval_game_step)
viz_ood_eval_game_points.append(ood_eval_game_points)
viz_ood_eval_step.append(ood_eval_game_step)
viz_x = np.arange(len(viz_game_points)).tolist()
if reward_win is None:
reward_win = viz.line(X=viz_x, Y=viz_game_points,
opts=dict(title=agent.experiment_tag + "_game_points"),
name="game points")
viz.line(X=viz_x, Y=viz_overall_rewards,
opts=dict(title=agent.experiment_tag + "_overall_rewards"),
win=reward_win, update='append', name="overall rewards")
viz.line(X=viz_x, Y=viz_id_eval_game_points,
opts=dict(title=agent.experiment_tag + "_id_eval_game_points"),
win=reward_win, update='append', name="id eval game points")
viz.line(X=viz_x, Y=viz_ood_eval_game_points,
opts=dict(title=agent.experiment_tag + "_ood_eval_game_points"),
win=reward_win, update='append', name="ood eval game points")
else:
viz.line(X=[len(viz_game_points) - 1], Y=[viz_game_points[-1]],
opts=dict(title=agent.experiment_tag + "_game_points"),
win=reward_win,
update='append', name="game points")
viz.line(X=[len(viz_overall_rewards) - 1], Y=[viz_overall_rewards[-1]],
opts=dict(title=agent.experiment_tag + "_overall_rewards"),
win=reward_win,
update='append', name="overall rewards")
viz.line(X=[len(viz_id_eval_game_points) - 1], Y=[viz_id_eval_game_points[-1]],
opts=dict(title=agent.experiment_tag + "_id_eval_game_points"),
win=reward_win,
update='append', name="id eval game points")
viz.line(X=[len(viz_ood_eval_game_points) - 1], Y=[viz_ood_eval_game_points[-1]],
opts=dict(title=agent.experiment_tag + "_ood_eval_game_points"),
win=reward_win,
update='append', name="ood eval game points")
if step_win is None:
step_win = viz.line(X=viz_x, Y=viz_step,
opts=dict(title=agent.experiment_tag + "_step"),
name="step")
viz.line(X=viz_x, Y=viz_id_eval_step,
opts=dict(title=agent.experiment_tag + "_id_eval_step"),
win=step_win, update='append', name="id eval step")
viz.line(X=viz_x, Y=viz_ood_eval_step,
opts=dict(title=agent.experiment_tag + "_ood_eval_step"),
win=step_win, update='append', name="ood eval step")
else:
viz.line(X=[len(viz_step) - 1], Y=[viz_step[-1]],
opts=dict(title=agent.experiment_tag + "_step"),
win=step_win,
update='append', name="step")
viz.line(X=[len(viz_id_eval_step) - 1], Y=[viz_id_eval_step[-1]],
opts=dict(title=agent.experiment_tag + "_id_eval_step"),
win=step_win,
update='append', name="id eval step")
viz.line(X=[len(viz_ood_eval_step) - 1], Y=[viz_ood_eval_step[-1]],
opts=dict(title=agent.experiment_tag + "_ood_eval_step"),
win=step_win,
update='append', name="ood eval step")
if dqn_loss_win is None:
dqn_loss_win = viz.line(X=viz_x, Y=viz_dqn_loss,
opts=dict(title=agent.experiment_tag + "_dqn_loss"),
name="dqn loss")
else:
viz.line(X=[len(viz_dqn_loss) - 1], Y=[viz_dqn_loss[-1]],
opts=dict(title=agent.experiment_tag + "_dqn_loss"),
win=dqn_loss_win,
update='append', name="dqn loss")
# write accuracies down into file
_s = json.dumps({"time spent": str(time_2 - time_1).rsplit(".")[0],
"dqn loss": str(running_avg_dqn_loss.get_avg()),
"overall rewards": str(running_avg_overall_rewards.get_avg()),
"train game points": str(running_avg_game_points.get_avg()),
"train steps": str(running_avg_game_steps.get_avg()),
"id eval game points": str(id_eval_game_points),
"id eval steps": str(id_eval_game_step),
"ood eval game points": str(ood_eval_game_points),
"ood eval steps": str(ood_eval_game_step)})
with open(output_dir + "/" + json_file_name + '.json', 'a+') as outfile:
outfile.write(_s + '\n')
outfile.flush()
agent.save_model_to_path(output_dir + "/" + agent.experiment_tag + "_final.pt")
if __name__ == '__main__':
train()
```
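The count-based exploration bonus in the loop above comes from `EpisodicCountingMemory`, which is imported from `modules.generic` and not shown here. The toy sketch below only illustrates the `seeing_new_states` signal that gets scaled by `count_reward_lambda`; the real class (and its object-centric variant) may be implemented differently.
```python
class ToyEpisodicCountingMemory:
    """Illustrative stand-in: novelty = 1.0 for an observation string not yet seen this episode."""
    def __init__(self):
        self.seen = []  # one set of observation strings per batch element
    def reset(self):
        self.seen = []
    def push(self, observation_strings):
        # Grow the per-episode sets lazily to the batch size, then record the observations.
        while len(self.seen) < len(observation_strings):
            self.seen.append(set())
        for i, obs in enumerate(observation_strings):
            self.seen[i].add(obs)
    def is_a_new_state(self, observation_strings):
        # Mirrors the call order in the training loop: reset -> push(initial obs)
        # -> [is_a_new_state(new obs) -> push(new obs)] at each step.
        return [0.0 if obs in self.seen[i] else 1.0
                for i, obs in enumerate(observation_strings)]
```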
#### File: agents/detector/mrcnn.py
```python
import torch
import torchvision
from torchvision.models.detection.rpn import AnchorGenerator, RPNHead
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor
import os
import sys
sys.path.insert(0, os.environ["ALFRED_ROOT"])
import gen.constants as constants
def get_model_instance_segmentation(num_classes):
    # load an instance segmentation model pre-trained on COCO
model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
anchor_generator = AnchorGenerator(
sizes=tuple([(4, 8, 16, 32, 64, 128, 256, 512) for _ in range(5)]),
aspect_ratios=tuple([(0.25, 0.5, 1.0, 2.0) for _ in range(5)]))
model.rpn.anchor_generator = anchor_generator
# 256 because that's the number of features that FPN returns
model.rpn.head = RPNHead(256, anchor_generator.num_anchors_per_location()[0])
# get number of input features for the classifier
in_features = model.roi_heads.box_predictor.cls_score.in_features
# replace the pre-trained head with a new one
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
# now get the number of input features for the mask classifier
in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
hidden_layer = 256
# and replace the mask predictor with a new one
model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,
hidden_layer,
num_classes)
return model
def load_pretrained_model(path, num_classes):
mask_rcnn = get_model_instance_segmentation(num_classes)
mask_rcnn.load_state_dict(torch.load(path))
return mask_rcnn
```
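A minimal inference sketch for the detector returned by `load_pretrained_model` above. The checkpoint path and `num_classes` are placeholders (they must match whatever the checkpoint was trained with), and the zero image stands in for an AI2-THOR RGB frame.
```python
import numpy as np
import torch

# Placeholder path and class count; use the repo's own checkpoint and class list.
model = load_pretrained_model("agents/detector/models/mrcnn.pth", num_classes=106)
model.eval()
frame = np.zeros((300, 300, 3), dtype=np.uint8)  # stand-in for env.last_event.frame
img = torch.from_numpy(frame).permute(2, 0, 1).float() / 255.0  # HWC uint8 -> CHW float in [0, 1]
with torch.no_grad():
    pred = model([img])[0]  # torchvision detection models take a list of image tensors
keep = pred["scores"] > 0.5  # simple confidence threshold
boxes, labels, masks = pred["boxes"][keep], pred["labels"][keep], pred["masks"][keep]
```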
#### File: alfworld/scripts/play_alfred_thor.py
```python
import os
import sys
sys.path.insert(0, os.environ['ALFRED_ROOT'])
import json
import argparse
from env.thor_env import ThorEnv
from agents.detector.mrcnn import load_pretrained_model
from agents.controller import OracleAgent, OracleAStarAgent, MaskRCNNAgent, MaskRCNNAStarAgent
def setup_scene(env, traj_data, r_idx, args, reward_type='dense'):
# scene setup
scene_num = traj_data['scene']['scene_num']
object_poses = traj_data['scene']['object_poses']
dirty_and_empty = traj_data['scene']['dirty_and_empty']
object_toggles = traj_data['scene']['object_toggles']
scene_name = 'FloorPlan%d' % scene_num
env.reset(scene_name)
env.restore_scene(object_poses, object_toggles, dirty_and_empty)
# initialize to start position
env.step(dict(traj_data['scene']['init_action']))
# print goal instr
print("Task: %s" % (traj_data['turk_annotations']['anns'][r_idx]['task_desc']))
# setup task for reward
env.set_task(traj_data, args, reward_type=reward_type)
def main(args):
# start THOR
env = ThorEnv()
# load traj_data
root = args.problem
json_file = os.path.join(root, 'traj_data.json')
with open(json_file, 'r') as f:
traj_data = json.load(f)
# setup scene
setup_scene(env, traj_data, 0, args)
# choose controller
if args.controller == "oracle":
AgentModule = OracleAgent
agent = AgentModule(env, traj_data, traj_root=root, load_receps=args.load_receps, debug=args.debug)
elif args.controller == "oracle_astar":
AgentModule = OracleAStarAgent
agent = AgentModule(env, traj_data, traj_root=root, load_receps=args.load_receps, debug=args.debug)
elif args.controller == "mrcnn":
AgentModule = MaskRCNNAgent
mask_rcnn = load_pretrained_model('./agents/detector/models/mrcnn.pth')
agent = AgentModule(env, traj_data, traj_root=root,
pretrained_model=mask_rcnn,
load_receps=args.load_receps, debug=args.debug)
elif args.controller == "mrcnn_astar":
AgentModule = MaskRCNNAStarAgent
mask_rcnn = load_pretrained_model('./agents/detector/models/mrcnn.pth')
agent = AgentModule(env, traj_data, traj_root=root,
pretrained_model=mask_rcnn,
load_receps=args.load_receps, debug=args.debug)
else:
raise NotImplementedError()
print(agent.feedback)
while True:
cmd = input()
if cmd == "ipdb":
from ipdb import set_trace; set_trace()
continue
agent.step(cmd)
if not args.debug:
print(agent.feedback)
done = env.get_goal_satisfied()
if done:
print("You won!")
break
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("problem", help="Path to folder containing pddl and traj_data files")
parser.add_argument("--controller", default="oracle", choices=["oracle", "oracle_astar", "mrcnn", "mrcnn_astar"])
parser.add_argument("--debug", action="store_true")
parser.add_argument('--load_receps', action="store_true")
parser.add_argument('--reward_config', type=str, default="agents/config/rewards.json")
args = parser.parse_args()
main(args)
```
#### File: alfworld/scripts/play_alfred_tw.py
```python
import os
import json
import glob
import argparse
import textworld
from textworld.agents import HumanAgent
import gym
import textworld.gym
import sys
sys.path.append(os.environ["ALFRED_ROOT"])
from agents.utils.misc import Demangler, get_templated_task_desc, clean_alfred_facts, add_task_to_grammar
class AlfredDemangler(textworld.core.Wrapper):
def load(self, *args, **kwargs):
super().load(*args, **kwargs)
demangler = Demangler(game_infos=self._game.infos)
for info in self._game.infos.values():
info.name = demangler.demangle_alfred_name(info.id)
def main(args):
GAME_LOGIC = {
"pddl_domain": open(args.domain).read(),
"grammar": "\n".join(open(f).read() for f in glob.glob("data/textworld_data/logic/*.twl2")),
}
# load state and trajectory files
pddl_file = os.path.join(args.problem, 'initial_state.pddl')
json_file = os.path.join(args.problem, 'traj_data.json')
with open(json_file, 'r') as f:
traj_data = json.load(f)
GAME_LOGIC['grammar'] = add_task_to_grammar(GAME_LOGIC['grammar'], traj_data)
# dump game file
gamedata = dict(**GAME_LOGIC, pddl_problem=open(pddl_file).read())
gamefile = os.path.join(os.path.dirname(pddl_file), 'game.tw-pddl')
json.dump(gamedata, open(gamefile, "w"))
# register a new Gym environment.
infos = textworld.EnvInfos(won=True, admissible_commands=True)
env_id = textworld.gym.register_game(gamefile, infos,
max_episode_steps=1000000,
wrappers=[AlfredDemangler])
# reset env
env = gym.make(env_id)
obs, infos = env.reset()
# human agent
agent = HumanAgent(True)
agent.reset(env)
while True:
print(obs)
cmd = agent.act(infos, 0, False)
if cmd == "ipdb":
from ipdb import set_trace; set_trace()
continue
obs, score, done, infos = env.step(cmd)
if done:
print("You won!")
break
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("problem", help="Path to folder containing pddl and traj_data files")
parser.add_argument("--domain",
default=os.environ.get("ALFRED_ROOT", ".") + "/gen/planner/domains/PutTaskExtended_domain.pddl",
help="Path to a PDDL file describing the domain."
" Default: `%(default)s`.")
args = parser.parse_args()
main(args)
``` |
{
"source": "594zyc/CMCC_DialogSystem",
"score": 2
} |
#### File: 594zyc/CMCC_DialogSystem/Agent.py
```python
import os
import sys
import time
import argparse
import logging
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(BASE_DIR, '../..'))
os.environ["CUDA_VISIBLE_DEVICES"]="1"
from DM.DST.StateTracking import DialogStateTracker
from DM.policy.RuleMapping import RulePolicy
from data.DataManager import DataManager
from NLU.NLUManager import NLUManager
from NLG.NLGManager import rule_based_NLG
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--print', type=bool, default=True, help='print details')
FLAGS= parser.parse_args()
UserPersonal = {
    "已购业务": ["180元档幸福流量年包", "18元4G飞享套餐升级版"], # this should be the full information dict of the purchased services
"套餐使用情况": "剩余流量 11.10 GB,剩余通话 0 分钟,话费余额 110.20 元,本月已产生话费 247.29 元",
"号码": "18811369685",
"归属地" : "北京",
"品牌": "动感地带",
"是否转品牌过渡期": "否",
"话费查询": "话费余额 110.20 元",
"流量查询": "剩余流量 11.10 GB",
"订购时间": "订购时间 2017-04-04, 生效时间 2017-05-01",
"是否停机": "否",
"话费充值": "请登录网上营业厅、微厅或 APP 充值",
"流量充值": "请登录网上营业厅、微厅或 APP 充值",
"账单查询": "请登录网上营业厅、微厅或 APP 查询"
}
NLU_save_path_dict = {
'domain': os.path.join(BASE_DIR, 'NLU/DomDect/model/ckpt'),
'useract': os.path.join(BASE_DIR, 'NLU/UserAct/model/ckpt'),
'slotfilling': os.path.join(BASE_DIR, 'NLU/SlotFilling/model/ckpt'),
'entity': os.path.join(BASE_DIR, 'NLU/ER/entity_list.txt'),
'sentiment': os.path.join(BASE_DIR, 'NLU/SentiDect')
}
class DialogAgent:
def __init__(self):
self.history_savedir = None
self.detail_savedir = None
self.logger = None
self.user = self.create_user()
self.rule_policy = RulePolicy()
self.dst = DialogStateTracker(UserPersonal, FLAGS.print, self.logger)
self.data_manager = DataManager(os.path.join(BASE_DIR, 'data/tmp'))
self.nlu_manager = NLUManager(NLU_save_path_dict)
# self.nlg_template = NLG_template
self.turn_num = 1
self.dialog_history = []
def create_user(self):
user_name = input("请输入您的用户名:")
user_path = os.path.join(BASE_DIR, 'user', user_name)
log_path = os.path.join(user_path, 'log')
if not os.path.exists(user_path):
os.mkdir(user_path)
os.mkdir(log_path)
self.history_savedir = user_path + '/dialogs.txt'
log_name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
self.detail_savedir = log_path +'/' + log_name + '.log'
self.logger = self.create_logger(self.detail_savedir)
return user_name
def create_logger(self, logdir):
fmt = '%(message)s'
# datefmt = "%y-%m-%d %H:%M:%S"
logging.basicConfig(level=logging.INFO,
format=fmt)
# datefmt=datefmt)
logger = logging.getLogger('mylogger')
logger.setLevel(logging.INFO)
fh = logging.FileHandler(logdir)
fh.setLevel(logging.INFO)
logger.addHandler(fh)
return logger
def run(self):
if FLAGS.print:
self.logger.info('对话记录时间:'+time.strftime("%Y-%m-%d %H:%M:%S",
time.localtime()))
try:
while True:
user_utter = input("用户输入:")
if FLAGS.print:
with open(self.detail_savedir, 'a') as f:
f.write('-------------- Turn ' + str(self.turn_num) + '--------------\n')
f.write('用户:' + user_utter + '\n')
self.dialog_history.append('用户:' + user_utter)
if user_utter in ['restart' , '重来' , '重新开始']:
self.dst = DialogStateTracker(UserPersonal, FLAGS.print, self.logger)
self.rule_policy = RulePolicy()
if FLAGS.print:
self.logger.info('对话状态已重置')
else:
print('对话状态已重置')
continue
if '再见' in user_utter or '结束' in user_utter or '谢谢' in user_utter:
self.close()
break
nlu_results = self.nlu_manager.get_NLU_results(user_utter, self.data_manager)
self.dst.update(nlu_results, self.rule_policy, self.data_manager)
reply = rule_based_NLG(self.dst)
if FLAGS.print:
self.logger.info('系统:' + reply + '\n')
else:
print('系统:', reply, '\n')
self.dialog_history.append('系统:' + reply)
self.turn_num += 1
except KeyboardInterrupt:
self.close()
def close(self):
self.nlu_manager.close()
reply = '感谢您的使用,再见!'
if FLAGS.print:
self.logger.info('系统:' + reply + '\n')
else:
print('系统:', reply, '\n')
with open(os.path.join(BASE_DIR, self.history_savedir), 'a') as f:
f.write('对话记录时间:')
f.write(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())+'\n\n')
for dialog in self.dialog_history:
dialog = '\n'.join(dialog.split())
f.write(dialog+'\n\n')
f.write('系统:感谢您的使用,再见!\n')
f.write('————————————————————————————————\n')
if __name__ == '__main__':
agent = DialogAgent()
agent.run()
```
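A side note on `create_logger` above: `logging.getLogger('mylogger')` returns a process-wide singleton, so if the function were ever called more than once in the same process (e.g., several `DialogAgent` instances), each call would attach another `FileHandler` and every line would be written multiple times. A small hedged guard, assuming the same logger name and handler setup as above:
```python
import logging

def create_logger_once(logdir, name='mylogger'):
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    # Only attach a file handler the first time; repeated calls reuse the logger as-is.
    if not logger.handlers:
        fh = logging.FileHandler(logdir)
        fh.setLevel(logging.INFO)
        logger.addHandler(fh)
    return logger
```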
#### File: DomDect/model/model.py
```python
import tensorflow as tf
# ids in DialogData20180613 that correspond to requestable slots, used to guide training-data generation
class DomainModel:
"""
    A simple CNN.
    Input: a text description; output: one of high / medium / low / none
    for fee (功能费), data (流量), and call time (通话).
"""
def __init__(self, name,
max_sent_length=40,
                 max_word_length=6, # unused, since the charCNN takes the mean
word_embed_size=20,
char_embed_size=20,
hidden_size=60,
learning_rate=0.01,
word_feature_map=100, # window 1,2,3
dropout_rate = 0.7,
noise_scale = 0,
LSR_mix_rate = 0.1,
):
self.name = name
with tf.device('/gpu:0'), tf.variable_scope(name_or_scope=self.name,
initializer=tf.truncated_normal_initializer(0, 0.1)):
self.is_training = tf.placeholder(dtype=tf.bool)
self.word_emb_matrix = tf.placeholder(dtype=tf.float32,
shape=[None, max_sent_length, word_embed_size])
self.char_emb_matrix = tf.placeholder(dtype=tf.float32,
shape=[None, max_sent_length, char_embed_size])
self.input = tf.concat([self.word_emb_matrix, self.char_emb_matrix], 2)
self.input_noise = noise_scale * tf.random_normal(tf.shape(self.input))
self.input = tf.cond(self.is_training, lambda: self.input + self.input_noise,
lambda: self.input )
self.output = tf.placeholder(dtype=tf.int32, shape=(None))
self.batch_size = tf.shape(self.word_emb_matrix)[0]
self.dropout_rate = dropout_rate
self.LSR = LSR_mix_rate
def conv_relu(inputs, filters, kernel, poolsize):
conv = tf.layers.conv1d(
inputs=inputs,
filters=filters,
kernel_size=kernel,
strides=1,
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.random_normal_initializer(0, 0.01)
)
# conv_bn = tf.contrib.layers.batch_norm(conv, is_training=self.is_training)
# conv_bn_activated = tf.nn.relu(conv_bn)
# print('conv:', conv.get_shape())
pool = tf.layers.max_pooling1d(
inputs=conv,
pool_size=poolsize,
strides=1,
)
# print('pool:', pool.get_shape())
_pool = tf.squeeze(pool, [1])
# print('_pool:', _pool.get_shape())
return _pool
def cnn(inputs, maxlength):
with tf.variable_scope("winsize2"):
conv2 = conv_relu(inputs, word_feature_map, 1, maxlength)
with tf.variable_scope("winsize3"):
conv3 = conv_relu(inputs, word_feature_map, 2, maxlength)
with tf.variable_scope("winsize4"):
conv4 = conv_relu(inputs, word_feature_map, 3, maxlength)
return tf.concat([conv2, conv3, conv4], 1)
with tf.variable_scope("CNN_output"):
self.feature = cnn(self.input, max_sent_length)
# self.feature = tf.layers.batch_normalization(self.feature, training=self.is_training)
# self.feature =
self.feature = tf.layers.dropout(self.feature, rate=self.dropout_rate,
training=self.is_training)
# print('cnn_output:', self.feature.get_shape())
# with tf.variable_scope("hidden"):
# self.hidden_layer = tf.layers.dense(inputs=self.feature, units=hidden_size,
# activation=tf.nn.relu)
# # self.hidden_layer = tf.layers.batch_normalization(self.hidden_layer, training=self.is_training)
# self.hidden_layer = tf.layers.dropout(self.hidden_layer , rate=self.dropout_rate,
# training=self.is_training)
with tf.variable_scope("projection"):
self.final_output_logits = tf.layers.dense(inputs=self.feature ,
units=7,
activation=tf.nn.relu)
# print(self.final_output_logits.get_shape())
with tf.variable_scope("loss"):
self.loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.final_output_logits,
labels=self.output)
# print(self.loss.get_shape())
# self.final_loss = tf.reduce_mean(self.loss)
# print(self.final_loss.get_shape())
with tf.variable_scope("train"):
self.lr = tf.Variable(learning_rate, trainable=False)
self.new_lr = tf.placeholder(dtype=tf.float32, shape=[], name="new_learning_rate")
self.lr_update = tf.assign(self.lr, self.new_lr)
self.tvars = tf.get_collection(key=tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)
self.optimizer = tf.train.AdamOptimizer(self.lr)
self.l2_loss = [tf.nn.l2_loss(v) for v in self.tvars]
self.unbalance_loss = self.loss
self.final_loss = tf.reduce_mean(self.loss)*(1-self.LSR) + 1/7*self.LSR
# tf.reduce_mean(self.loss) + 0.001 * tf.add_n(self.l2_loss)
self.train_op = self.optimizer.minimize(self.final_loss)
with tf.variable_scope("predict"):
# probability
self.probs = tf.nn.softmax(self.final_output_logits)
# predict
self.predict = tf.argmax(self.final_output_logits, axis=1)
# print(self.predict.get_shape())
self.correct = tf.equal(tf.cast(self.predict, tf.int32),
tf.cast(self.output, tf.int32))
self.accuracy = tf.reduce_mean(tf.cast(self.correct, tf.float32))
def assign_lr(self, session, lr_value):
session.run(self.lr_update, feed_dict={self.new_lr: lr_value})
if __name__ == '__main__':
informable_slots = DomainModel(name='test')
```
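One detail of `final_loss` above: the `1/7*self.LSR` term is a constant with respect to the model parameters, so the expression effectively rescales the mean cross-entropy by `(1 - LSR)` rather than smoothing the targets. For reference, a small NumPy sketch of standard label smoothing over the 7 output classes (an illustration of the usual formulation, not the repo's implementation):
```python
import numpy as np

def label_smoothed_ce(logits, labels, eps=0.1, num_classes=7):
    """Cross-entropy against the smoothed target (1 - eps) * one_hot + eps / K."""
    logits = logits - logits.max(axis=1, keepdims=True)  # numerical stability
    log_probs = logits - np.log(np.exp(logits).sum(axis=1, keepdims=True))
    target = (1.0 - eps) * np.eye(num_classes)[labels] + eps / num_classes
    return float(-(target * log_probs).sum(axis=1).mean())
```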
#### File: DomDect/model/train.py
```python
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(BASE_DIR, '../../..'))
import tensorflow as tf
import copy, pprint
from data.DataManager import DataManager
from NLU.DomDect.model.input_data import *
from NLU.DomDect.model.model import *
os.environ["CUDA_VISIBLE_DEVICES"]="0"
def train(domain_dataset):
"""
    Train the model; it must be trained and saved before it can be used.
    :param data_tmp_path: location of the data tmp folder
"""
print('载入 Domain 模型...')
model = DomainModel("DomDect")
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
tf_config.allow_soft_placement=True
with tf.Session(config=tf_config) as sess:
sess.run(tf.group(tf.global_variables_initializer()))
saver = tf.train.Saver()
# saver.restore(sess, "./ckpt/model.ckpt")
        # train the domain detector model
average_loss = 0
average_accu = 0
display_step = 100
valid_data, valid_output = domain_dataset.produce_valid_data()
valid_char_emb, valid_word_emb, seqlen = data_manager.sent2num(valid_data, 40, 6)
for step in range(40000):
batch_data, batch_output = domain_dataset.next_train_batch()
train_char_emb, train_word_emb, seqlen = data_manager.sent2num(batch_data, 40, 6)
_, training_loss, training_accu = sess.run([model.train_op, model.final_loss, model.accuracy],
feed_dict={
model.char_emb_matrix: train_char_emb,
model.word_emb_matrix: train_word_emb,
model.output: batch_output,
model.is_training: True
})
average_loss += training_loss / display_step
average_accu += training_accu / display_step
if step % display_step == 0:
valid_loss, valid_accu = sess.run([model.final_loss, model.accuracy],
feed_dict={
model.char_emb_matrix: valid_char_emb,
model.word_emb_matrix: valid_word_emb,
model.output: valid_output,
model.is_training: False
})
print("step % 4d, train - loss: %0.4f accu: %0.4f, valid - loss: %.4f accu: %.4f"
% (step, average_loss, average_accu, valid_loss, valid_accu))
average_loss = 0
average_accu = 0
if step == 20000:
model.assign_lr(sess, 0.001)
if step == 30000:
model.assign_lr(sess, 0.0001)
        # save the model
saver.save(sess, "./ckpt/model.ckpt")
def evaluate(domain_dataset):
"""
    Evaluate the trained model per domain; it must have been trained and saved first.
    :param data_tmp_path: location of the data tmp folder
"""
print('载入 Domain 模型...')
model = DomainModel("DomDect")
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
tf_config.allow_soft_placement=True
with tf.Session(config=tf_config) as sess:
sess.run(tf.group(tf.global_variables_initializer()))
saver = tf.train.Saver()
saver.restore(sess, "./ckpt/model.ckpt")
        # evaluate the domain detector model on each domain
for domain in domains:
step_count = 0
average_loss = 0
average_accu = 0
domain_dataset.batch_id[domain] = 0
while True:
step_count += 1
batch_data, batch_output, end_flag = domain_dataset.test_by_domain(domain, 100)
char_emb_matrix, word_emb_matrix, seqlen = data_manager.sent2num(batch_data, 40, 6)
loss, accu, predictions = sess.run(
[model.final_loss, model.accuracy, model.predict],
feed_dict={ model.char_emb_matrix: char_emb_matrix,
model.word_emb_matrix: word_emb_matrix,
model.output: batch_output,
model.is_training: False})
average_loss += loss
average_accu += accu
# print(testing_accu)
if end_flag:
average_loss /= step_count
average_accu /= step_count
break
print("domain:"+domain+ ", loss %0.4f, accu %0.4f" % (average_loss, average_accu))
print("domain:"+domain+ ", num %d" %len(domain_dataset.valid_data[domain]))
with open('test_result.txt', 'a') as f:
f.write("domain:"+domain+ ", loss %0.4f, accu %0.4f\n" % (average_loss, average_accu))
with open('test_result.txt', 'a') as f:
f.write('\n')
if __name__ == '__main__':
print('载入数据管理器...')
data_manager = DataManager('../../../data/tmp')
print('载入训练数据...')
domain_dataset = generate_domain_dataset(data_manager.DialogData,
domain_data_ids)
train(domain_dataset)
tf.reset_default_graph()
evaluate(domain_dataset)
```
#### File: SlotFilling/model/train.py
```python
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(BASE_DIR, '../../..'))
import tensorflow as tf
import numpy as np
import copy, pprint
from data.DataManager import DataManager
from NLU.SlotFilling.model.input_data import InformableSlotDataset, RequestableSlotDataset
from NLU.SlotFilling.model.model import InformableSlotDector, RequestableSlotDector
# ids in DialogData20180613 that correspond to requestable slots, used to guide training-data generation
All_requestable_slots = {
'已购业务':['id1', 'id14'],
'订购时间':['id2', 'id14'],
'使用情况':['id3'],
'号码':['id4'],
'归属地':['id5'],
'品牌':['id6'],
'是否转品牌过渡期':['id7'],
'是否停机':['id8'],
'账单查询':['id9'],
'话费充值':['id10','id15','id16'],
'流量充值':['id11','id15','id17'],
'话费查询':['id12','id16','id18'],
'流量查询':['id13','id17','id18'],
'功能费':['id22','id24','id28','id30','id71','id38'],
'套餐内容_国内主叫':['id20','id26','id72','id149', 'id43'],
'套餐内容_国内流量':['id19','id23','id25','id29','id75','id152','id68','id48'],
'产品介绍':['id76'],
'计费方式':['id77'],
'适用品牌':['id70'],
'套餐内容_国内短信':['id21','id27','id73','id150'],
'套餐内容_国内彩信':['id74','id151'],
'套餐内容_其他功能':['id78'],
'套餐内容':['id79'],
'超出处理_国内主叫':['id80','id153'],
'超出处理_国内流量':['id81'],
'超出处理':['id82','id115'],
'结转规则_国内主叫':['id83'],
'结转规则_国内流量':['id84'],
'结转规则_赠送流量':['id85'],
'结转规则':['id86'],
'是否全国接听免费':['id87'],
'能否结转滚存':['id88'],
'能否分享':['id89','id91','id126','id127','id128'],
'能否转赠':['id90','id92'],
'转户转品牌管理':['id93'],
'停机销号管理':['id94'],
'赠送优惠活动':['id95'],
'使用限制':['id96','id113'],
'使用有效期':['id97'],
'使用方式设置':['id98','id114'],
'封顶规则':['id99'],
'限速说明':['id100'],
'受理时间':['id101'],
'互斥业务':['id102','id129','id130'],
'开通客户限制':['id103','id131','id132'],
'累次叠加规则':['id104'],
'开通方式':['id105'],
'开通生效规则':['id106'],
'是否到期自动取消':['id107'],
'能否变更或取消':['id108'],
'取消方式':['id109'],
'取消变更生效规则':['id110'],
'变更方式':['id111'],
'密码重置方式':['id112'],
'激活方式':['id116'],
'副卡数量上限':['id125'],
'主卡添加成员':['id134'],
'主卡删除成员':['id135'],
'副卡成员主动退出':['id136'],
'主卡查询副卡':['id137'],
'副卡查询主卡':['id138'],
'恢复流量功能':['id139'],
'开通方向':['id148']
}
All_requestable_slots_order = dict(zip(All_requestable_slots.keys(), range(len(All_requestable_slots.keys()))))
def extract_informable_data(DialogData, dict):
dialog_data = copy.deepcopy(DialogData)
high_data = []
for id in dict['高']:
high_data.extend(dialog_data[id]["用户回复示例"])
del dialog_data[id]
medium_data = []
for id in dict['中']:
medium_data.extend(dialog_data[id]["用户回复示例"])
del dialog_data[id]
low_data = []
for id in dict['低']:
low_data.extend(dialog_data[id]["用户回复示例"])
del dialog_data[id]
none_data = []
for id, item in dialog_data.items():
none_data.extend(item["用户回复示例"])
return high_data, medium_data, low_data, none_data
def extract_requestable_data(DialogData, list):
dialog_data = copy.deepcopy(DialogData)
positive_data = []
negative_data = []
for id in list:
positive_data.extend(dialog_data[id]["用户回复示例"])
del dialog_data[id]
for id, item in dialog_data.items():
negative_data.extend(item["用户回复示例"])
return positive_data, negative_data
def generate_dataset(DialogData):
"""
    Generate the training datasets for the informable and requestable slots.
"""
    # training data for the fee (功能费) slot
informable_slot_dataset_cost = InformableSlotDataset(
*extract_informable_data(DialogData,
{"高": ["id36", "id51"],
"中": ["id35", "id50"],
"低": ["id34", "id49"]}))
    # training data for the call-duration (通话时长) slot
informable_slot_dataset_time = InformableSlotDataset(
*extract_informable_data(DialogData,
{"高": ["id39", "id52"],
"中": ["id40", "id53"],
"低": ["id41", "id54"]}))
    # training data for the data-volume (流量) slot
informable_slot_dataset_data = InformableSlotDataset(
*extract_informable_data(DialogData,
{"高": ["id44", "id55"],
"中": ["id45", "id56"],
"低": ["id46", "id57"]}))
informable_slot_datasets = {
"功能费":informable_slot_dataset_cost,
"流量": informable_slot_dataset_data,
"通话时长":informable_slot_dataset_time
}
requestable_slot_datasets = {}
for k,v in All_requestable_slots.items():
requestable_slot_datasets[k] = \
RequestableSlotDataset(*extract_requestable_data(DialogData, v))
return informable_slot_datasets,requestable_slot_datasets
def get_F1score(correct, predict):
"""
correct like [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
predict like [0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0]
:return: F1
"""
hit = 0
precision_set = 0
recall_set = 0
for i in range(len(correct)):
if correct[i] == predict[i] and predict[i] == 0:
hit += 1
if correct[i] == 0 :
precision_set += 1
if predict[i] == 0:
recall_set += 1
return 2*hit/(precision_set + recall_set)
def train_informable(data_tmp_path):
"""
    Train the informable-slot detectors; they must be trained and saved before use.
    Training uses early stopping.
    :param data_tmp_path: location of the data tmp folder
"""
print('载入数据管理器...')
data_manager = DataManager(data_tmp_path)
print('载入训练数据...')
informable_slot_datasets, requestable_slot_datasets = generate_dataset(data_manager.DialogData)
print('载入 informable slot detector ...')
init_learning_rate = 0.005
    informable_batch_ratios = { # minibatch ratios for the different slots
"通话时长": [2, 8, 8, 8],
"流量": [4, 8, 8, 8],
"功能费": [4, 8, 8, 8]
}
graph = tf.Graph()
with graph.as_default():
informable_slots_models = {
"功能费": InformableSlotDector('cost', learning_rate=init_learning_rate),
"流量": InformableSlotDector('data', learning_rate=init_learning_rate),
"通话时长": InformableSlotDector('time', learning_rate=init_learning_rate),
}
with tf.Session(graph=graph,config=tf.ConfigProto(
allow_soft_placement=True)) as sess:
sess.run(tf.group(tf.global_variables_initializer()))
saver = tf.train.Saver()
# saver.restore(sess, "./ckpt/informable/model.ckpt")
        # train the informable slot detectors
informable_slots_accus = []
for slot , model in informable_slots_models.items():
learning_rate = init_learning_rate
average_loss = 0
best_accu = 0
tolerance = 20
tolerance_count = 0
display_step = 10
for step in range(5000):
step += 1
batch_data, batch_output = informable_slot_datasets[slot].next_batch(informable_batch_ratios[slot])
char_emb_matrix, word_emb_matrix, seqlen = data_manager.sent2num(batch_data)
_, training_loss = sess.run([model.train_op, model.final_loss],
feed_dict={
model.char_emb_matrix: char_emb_matrix,
model.word_emb_matrix: word_emb_matrix,
model.output: batch_output
})
average_loss += training_loss / display_step
if step % display_step == 0:
batch_data, batch_output = informable_slot_datasets[slot].get_testset()
char_emb_matrix, word_emb_matrix, seqlen = data_manager.sent2num(batch_data)
pred, accu = sess.run([model.predict, model.accuracy],
feed_dict={
model.char_emb_matrix: char_emb_matrix,
model.word_emb_matrix: word_emb_matrix,
model.output: batch_output
})
if best_accu < accu:
best_accu = accu
tolerance_count = 0
if not os.path.exists("./ckpt/informable/"):
os.makedirs("./ckpt/informable/")
saver.save(sess, "./ckpt/informable/model.ckpt")
if tolerance_count == tolerance:
break
print("%s, step % 4d, loss %0.4f, accu %0.4f" % (slot, step, average_loss, accu))
average_loss = 0
tolerance_count += 1
learning_rate = max(learning_rate*0.95, 0.0001)
sess.run(model.update_lr, feed_dict={model.new_lr: learning_rate})
print("informable slot: %s, best accu %0.4f" % (slot, best_accu))
informable_slots_accus.append(best_accu)
def train_requestable(data_tmp_path):
"""
    Train the requestable-slot detectors; they must be trained and saved before use.
    Training uses early stopping.
    :param data_tmp_path: location of the data tmp folder
"""
print('载入数据管理器...')
data_manager = DataManager(data_tmp_path)
print('载入训练数据...')
informable_slot_datasets, requestable_slot_datasets = generate_dataset(data_manager.DialogData)
print('载入 requestable slot detector...')
init_learning_rate = 0.005
graph = tf.Graph()
with graph.as_default():
requestable_slots_models = {}
for k, v in All_requestable_slots_order.items():
requestable_slots_models[k] = RequestableSlotDector(str(v), learning_rate=init_learning_rate)
with tf.Session(graph=graph,config=tf.ConfigProto(
allow_soft_placement=True)) as sess:
sess.run(tf.group(tf.global_variables_initializer()))
saver = tf.train.Saver()
# saver.restore(sess, "./ckpt/requestable/model.ckpt")
        # train the requestable slot detectors
requestable_slots_F1s = {}
for slot, model in requestable_slots_models.items():
average_loss = 0
learning_rate = init_learning_rate
best_F1 = 0
tolerance = 30
tolerance_count = 0
display_step = 10
for step in range(5000):
step += 1
batch_data, batch_output = requestable_slot_datasets[slot].next_batch()
char_emb_matrix, word_emb_matrix, seqlen = data_manager.sent2num(batch_data)
_, training_loss = sess.run([model.train_op, model.final_loss],
feed_dict={
model.char_emb_matrix: char_emb_matrix,
model.word_emb_matrix: word_emb_matrix,
model.output: batch_output
})
average_loss += training_loss / display_step
if step % display_step == 0:
batch_data, batch_output = requestable_slot_datasets[slot].get_testset()
char_emb_matrix, word_emb_matrix, seqlen = data_manager.sent2num(batch_data)
pred, accu = sess.run([model.predict, model.accuracy],
feed_dict={
model.char_emb_matrix: char_emb_matrix,
model.word_emb_matrix: word_emb_matrix,
model.output: batch_output
})
F1 = get_F1score(batch_output, pred.tolist())
if best_F1 < F1:
best_F1 = F1
tolerance_count = 0
if not os.path.exists("./ckpt/requestable/"):
os.makedirs("./ckpt/requestable/")
saver.save(sess, "./ckpt/requestable/model.ckpt")
if tolerance_count == tolerance:
break
print("%s, step % 4d, loss %0.4f, F1 %0.4f, accu %0.4f" % (slot, step, average_loss, F1, accu))
average_loss = 0
tolerance_count += 1
learning_rate = max(learning_rate * 0.98, 0.001)
sess.run(model.update_lr, feed_dict={model.new_lr: learning_rate})
print("requestable slot: %s, best F1 %0.4f" % (slot, best_F1))
requestable_slots_F1s[slot] = best_F1
print(requestable_slots_F1s)
print(sum(requestable_slots_F1s.values())/len(requestable_slots_F1s.values()))
if __name__ == '__main__':
# train_informable('../../../data/tmp')
# tf.reset_default_graph()
train_requestable('../../../data/tmp')
``` |
{
"source": "594zyc/HiTUT",
"score": 2
} |
#### File: HiTUT/data/dataset.py
```python
import os, sys, json, pickle, io, time, random, copy
import h5py
import pprint
import threading, queue
from tqdm import tqdm
from collections import Counter
from transformers import MobileBertTokenizer
import cv2
from PIL import Image
import numpy as np
import revtok
import torch
sys.path.append(os.path.join(os.environ['ALFRED_ROOT']))
from gen.constants import *
from gen.utils.py_util import remove_spaces_and_lower
from gen.utils.bb_util import bb_IoU
from models.config.configs import Config
from models.utils.vocab import Vocab
from models.nn.mrcnn import MaskRCNNDetector
from models.utils.bert_utils import get_bert_tknz, mmt_word_ids_to_bert_ids
lock = threading.Lock()
class AlfredPyTorchDataset(torch.utils.data.Dataset):
def __init__(self, alfred_data, split, task_type, args):
self.data = alfred_data
self.split = split
self.task_type = task_type
self.args = args
self.detector_type = args.detector_type
self.topk = args.topk_objs
self.max_length = args.max_enc_length
self.image_size = args.image_size
self.action_vocab = self.data.dec_in_vocab
self.level = task_type.split('_')[0]
self.low_data = task_type.split('_')[1] if self.level == 'low' else None
self.dataset = self.get_data_instances()
# if self.args.use_bert:
# with open('data/full_2.1.0_pp/language_%s.json'%self.split, 'r') as f:
# self.language_data = json.load(f)
# self.resnet18_feats = h5py.File('data/resnet18_feats.hdf5', 'r')
# with open('data/full_2.1.0_pp/img_map_%s.json'%split, 'r') as f:
# self.img_map = json.load(f)
def get_data_instances(self):
det_sp = os.path.join(self.data.pp_path, '%s_det_res_%s.json'%(self.split, self.args.detector_type))
with open(det_sp, 'rb') as f:
self.obj_det_res = json.load(f)
if self.level == 'high':
sp = os.path.join(self.data.pp_path, '%s_high_action_instances.json'%(self.split))
elif self.level == 'low':
sp = os.path.join(self.data.pp_path, '%s_low_action_instances_%s.json'%(self.split, self.low_data))
if self.args.train_one_shot and self.split == 'train':
sp = sp.replace('instances', 'seed')
if not os.path.exists(sp) or not os.path.exists(det_sp):
self.data.prepare_data_instances()
with open(sp, 'rb') as f:
self.dataset = json.load(f)
if self.split == 'train':
if self.args.train_one_shot:
total_len = 209331 if self.low_data == 'mani' else 983260
self.args.train_proportion = len(self.dataset) / total_len * 100
elif self.args.train_proportion != 100:
random.shuffle(self.dataset)
prop = int(len(self.dataset) * self.args.train_proportion / 100)+1
self.dataset = self.dataset[:prop]
# if self.args.low_data == 'navi':
# self.dataset_new = []
# add_list = []
# for idx, d in enumerate(self.dataset):
# if d['actype_output'] == 12:
# add_list.append(d)
# self.dataset_new.append(d)
# elif d['actype_output'] == 0 and random.random()>0.5:
# pass
# else:
# self.dataset_new.append(d)
# self.dataset = self.dataset_new + add_list
print('%s: %s action data instance: #%d'%(self.split, self.level, len(self)))
return self.dataset
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
# get a data instance
args = self.args
instance = self.dataset[idx]
# record the item in which data are transformed to PyTorch Tensors
item = {'batch_type': self.task_type}
item['interact'] = instance['interact'] if 'interact' in instance else 0
for i in ['path', 'high_idx', 'low_idx']:
if i in instance:
item[i] = instance[i]
# process the visual input
task_path = instance['path']
ins_obj_det_res = self.obj_det_res[task_path+str(instance['vision_input'])]
vis_len = args.topk_objs + 1
obj_num = min(vis_len - 1, len(ins_obj_det_res['score']))
vis_feat_col = np.zeros((vis_len, 7)) # vis_len x 7
vis_cls_col = np.zeros((vis_len, ), dtype=int) # vis_len
if not args.disable_feat_vis:
for obj_idx in range(obj_num):
bbox = [i/self.image_size for i in ins_obj_det_res['bbox'][obj_idx]]
cls_idx = self.action_vocab.w2id(ins_obj_det_res['class'][obj_idx])
vis_feat_col[obj_idx+1][:4] = bbox
vis_feat_col[obj_idx+1][4] = bbox[2]-bbox[0]
vis_feat_col[obj_idx+1][5] = bbox[3]-bbox[1]
vis_feat_col[obj_idx+1][6] = ins_obj_det_res['score'][obj_idx] # 1d
vis_cls_col[obj_idx + 1] = cls_idx
vis_cls_col[0] = 1
item['vision_feats'] = vis_feat_col.astype(np.float32) # np: vis_len x 7
item['vision_cls'] = vis_cls_col # np: vis_len
# history visual input
if self.args.enable_feat_vis_his and 'navi' in self.task_type:
his_len = args.history_max_length - 1
max_obj_num = 10
his_vis_feat = np.zeros((his_len, max_obj_num, 7), dtype=np.float32) # his_len x max_obj_num x 7
his_vis_cls = np.zeros((his_len, max_obj_num), dtype=int) # his_len x max_obj_num
for his_idx, img_idx in enumerate(instance['vis_history_input'][-his_len:]):
his_ins_obj_det_res = self.obj_det_res[task_path+str(img_idx)]
obj_num = min(max_obj_num, len(his_ins_obj_det_res['class']))
for obj_idx in range(obj_num):
bbox = [i/self.image_size for i in his_ins_obj_det_res['bbox'][obj_idx]]
cls_idx = self.action_vocab.w2id(his_ins_obj_det_res['class'][obj_idx])
his_vis_feat[his_idx][obj_idx] = bbox + [bbox[2]-bbox[0], bbox[3]-bbox[1], his_ins_obj_det_res['score'][obj_idx]]
his_vis_cls[his_idx][obj_idx] = cls_idx
item['his_vis_feat'] = his_vis_feat
item['his_vis_cls'] = his_vis_cls
# dp = os.path.join('../../moca', task_path.replace('full_2.1.0_pp', 'json_feat_2.1.0'), 'feat_conv.pt')
# img_idx = self.img_map[task_path][str(instance['vision_input'])]
# item['resnet18feat'] = self.resnet18_feats[dp][img_idx]
# process mask selection label
label = ins_obj_det_res['label'] # None or an obj index
if label is None or instance['arg_output'] == 105:
label = -1
elif isinstance(label, int):
label += 1
if label >= vis_len:
label = 0
elif item['interact']:
            # leave vis idx 0 as the indicator that it is unclear how to ground the object
label = 0
item['mask_label'] = label # int
# process the language input
lang_len = args.lang_max_length
lang_widx = np.zeros((lang_len,), dtype=int) # lang_len
# if not self.args.use_bert:
# lang_widx[:2] = self.data.vocab.w2id('[SEP]')
if not args.disable_feat_lang:
wids = instance['lang_input']
if self.args.use_bert:
wids = mmt_word_ids_to_bert_ids(wids[1:-1], self.data.vocab, self.data.bert_tknz)
actual_len = min(lang_len, len(wids))
if actual_len != len(wids):
print('warning: %d truncated to %s'%(len(wids), lang_len))
lang_widx[:actual_len] = wids[:actual_len]
if not self.args.use_bert:
lang_widx[lang_widx >= self.data.vocab.vocab_size] = 0
item['lang_input'] = lang_widx #np: lang_len
# else:
# if self.level == 'high':
# item['lang_input'] = random.choice(self.language_data[task_path]['goal'])
# else:
# item['lang_input'] = random.choice(self.language_data[task_path]['instr'])[instance['high_idx']]
# process history actions
action_len = args.history_max_length * 2 #*2 because an action consists of a type and an arg
action_seq = np.zeros((action_len,), dtype=int) # max_his_len*2
if not args.disable_feat_action_his:
for aidx, a in enumerate(instance['actype_history_input'][-args.history_max_length:]):
action_seq[aidx*2] = a
action_seq[aidx*2+1] = instance['arg_history_input'][-args.history_max_length:][aidx]
item['action_history_input'] = action_seq #np: max_his_len*2
# action type/arg labels
item['actype_label'] = instance['actype_output'] #int
item['arg_label'] = instance['arg_output'] #int
if 'navi' in self.task_type:
item['arg_label'] = -1 # do not predict arguments for non-interactable low actions
        # process the sequence mask
seq_mask = np.zeros((args.max_enc_length,), dtype=int) + 1 # max_enc_length
offset = 1
for l, seq in [(vis_len, vis_cls_col), (lang_len, lang_widx) , (action_len, action_seq)]:
seq_mask[offset: offset+l] = (seq!=0).astype(int)
offset += l
assert offset == args.max_enc_length
item['seq_mask'] = seq_mask #np: max_enc_length
item['arg_pos'] = seq_mask.nonzero()[0][-1]
item['type_pos'] = item['arg_pos'] - 1
if 'navi' in self.task_type:
if self.args.auxiliary_loss_navi:
item['visible_label'] = instance['visible']
item['reached_label'] = instance['reached']
item['progress_label'] = instance['progress']
if self.args.enable_feat_posture:
item['rotation'] =int(instance['rotation'])
item['horizon'] = int(instance['horizon'])%12
return item
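# Usage sketch (not part of the original file): how this dataset is typically
# consumed. `alfred_data` is an AlfredDataset instance (defined below) and
# `args` the parsed config from models/config/configs.py; numeric fields are
# stacked into tensors by the default collate_fn, string fields become lists.
#
#   navi_set = AlfredPyTorchDataset(alfred_data, 'train', 'low_navi', args)
#   loader = torch.utils.data.DataLoader(navi_set, batch_size=args.batch, shuffle=True)
#   for batch in loader:
#       type_labels = batch['actype_label']   # tensor of shape [batch]
#       seq_masks = batch['seq_mask']         # tensor of shape [batch, max_enc_length]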
class AlfredDataset(object):
def __init__(self, args):
self.args = args
self.raw_path = args.raw_data
self.pp_path = args.pp_data # preprocessed data saving path
self.image_size = args.image_size
with open(self.args.splits) as f:
self.dataset_splits = json.load(f)
pprint.pprint({k: len(v) for k, v in self.dataset_splits.items()})
if not os.path.isdir(self.pp_path):
os.makedirs(self.pp_path)
# load/construct vocabularies
self.prepare_vocab()
# preprocess data
if args.preprocess:
if not args.skip_detection:
# load trajectory images recorded in the full dataset
self.image_hdf_path = args.img_data
self.image_data = h5py.File(self.image_hdf_path, 'r')
# self.image_data.visit(lambda x: print(x))
self.mrcnn = MaskRCNNDetector(args, ['all', 'sep'])
self.batch_size = 10
self.init_statistics()
self.preprocess_data()
self.prepare_data_instances()
self.save_statistics()
def init_statistics(self):
self.stats = {
'goal length': Counter(),
'instr length': Counter(),
'high action steps': Counter(),
'low action steps': Counter(),
'detection num': Counter(),
'object num': Counter(),
'receptacle num': Counter(),
}
self.interact_num = 0
self.good_detect_num = {'all':0, 'sep':0}
def save_statistics(self):
for k,v in self.stats.items():
if isinstance(v, dict):
self.stats[k] = dict(sorted(v.items(), key=lambda item: item[0]))
with open(os.path.join(self.pp_path, 'statistics.json'), 'w') as f:
json.dump(self.stats, f, indent=2)
print('interact_num:', int(self.interact_num/2))
print('good_detect_num:', self.good_detect_num)
def prepare_vocab(self):
# vocab save/load paths
self.language_vocab_save = os.path.join(self.pp_path, 'vocab')
self.dec_in_vocab_save = os.path.join(self.pp_path, 'dec_in_vocab')
self.dec_out_vocab_high_save = os.path.join(self.pp_path, 'dec_out_vocab_high')
self.dec_out_vocab_low_save = os.path.join(self.pp_path, 'dec_out_vocab_low')
self.dec_out_vocab_args_save = os.path.join(self.pp_path, 'dec_out_vocab_arg')
preprocess_vocab = not os.path.exists(self.language_vocab_save+'.w2id.json')
# preprocess_vocab= True
if preprocess_vocab:
# natural language vocabulary (word <-> idx for encoder)
self.vocab = Vocab(self.args.vocab_size, special_tokens=[PAD, UNK, SEP, SOS, 'None'])
print('Constructing vocabulary for natural language')
for k, d in self.dataset_splits.items():
if 'test' in k:
continue # should not see test sets even for vocabulary construction
print(' - dataset: {}'.format(k))
for task in tqdm(d):
# load json file
json_path = os.path.join(self.args.raw_data, k, task['task'], 'traj_data.json')
with open(json_path) as f:
traj_raw = json.load(f)
self.process_language(traj_raw, {}, 0, for_vocab_construction=True)
# save vocab in data path
self.vocab.construct(self.language_vocab_save)
print('Constructing vocabularies for encoder/decoder actions and objects')
# decoder input (action/object names <-> idx)
task_tokens = [PAD, UNK, SOS]+list(ACTION_TO_WORDS.keys())
task_tokens += ALL_OBJECTS+['None']
self.dec_in_vocab = Vocab(special_tokens=task_tokens)
self.dec_in_vocab.construct(self.dec_in_vocab_save)
# high-level decoder action output (high actions <-> idx)
self.dec_out_vocab_high = Vocab(special_tokens=HIGH_ACTIONS)
self.dec_out_vocab_high.construct(self.dec_out_vocab_high_save)
            # low-level decoder action output (low actions <-> idx)
self.dec_out_vocab_low = Vocab(special_tokens=LOW_ACTIONS)
self.dec_out_vocab_low.construct(self.dec_out_vocab_low_save)
# decoder arguments output (object names <-> idx)
self.dec_out_vocab_arg = Vocab(special_tokens=ACTION_ARGS)
self.dec_out_vocab_arg.construct(self.dec_out_vocab_args_save)
else:
print('Loading vocabularies')
self.vocab = Vocab()
self.vocab.load(self.language_vocab_save, self.args.vocab_size)
self.dec_in_vocab = Vocab()
self.dec_in_vocab.load(self.dec_in_vocab_save)
self.dec_out_vocab_high = Vocab()
self.dec_out_vocab_high.load(self.dec_out_vocab_high_save)
self.dec_out_vocab_low = Vocab()
self.dec_out_vocab_low.load(self.dec_out_vocab_low_save)
self.dec_out_vocab_arg = Vocab()
self.dec_out_vocab_arg.load(self.dec_out_vocab_args_save)
if self.args.use_bert:
self.bert_tknz = get_bert_tknz(self.args)
def preprocess_data(self):
'''
saves preprocessed data as jsons in specified folder
'''
if self.args.num_threads in [0,1]:
for k, d in self.dataset_splits.items():
print('Preprocessing {}'.format(k))
# debugging:
if self.args.fast_epoch:
d = d[:10]
for task in tqdm(d):
self.preprocess_traj(k, task)
else:
task_queue = queue.Queue()
for k, d in self.dataset_splits.items():
if 'tests' in k:
continue
if self.args.fast_epoch:
d = d[:30]
for task in d:
task_queue.put((k, task))
pbar = tqdm(total=task_queue.qsize())
# start threads
threads = []
for n in range(self.args.num_threads):
thread = threading.Thread(target=run, args=(self.preprocess_traj, task_queue, pbar))
threads.append(thread)
thread.start()
for t in threads:
t.join()
def preprocess_traj(self, k, task):
train_mode = 'test' not in k
# load json file
json_path = os.path.join(self.args.raw_data, k, task['task'], 'traj_data.json')
with open(json_path) as f:
traj_raw = json.load(f)
# check if preprocessing storage folder exists
pp_save_path = os.path.join(self.pp_path, k, task['task'])
if not os.path.isdir(pp_save_path):
os.makedirs(pp_save_path)
traj_pp = {}
# root & split
traj_pp['raw_path'] = os.path.join(self.raw_path, k, task['task'])
traj_pp['pp_path'] = pp_save_path
traj_pp['split'] = k
traj_pp['repeat_idx'] = r_idx = task['repeat_idx'] # index of the annotation for each trajectory
# preprocess language
self.process_language(traj_raw, traj_pp, r_idx)
# for train/valid splits only
if train_mode:
self.process_actions(traj_raw, traj_pp)
self.process_images(traj_raw, traj_pp)
# save preprocessed json
pp_json_path = os.path.join(pp_save_path, "ann_%d.json" % r_idx)
with open(pp_json_path, 'w') as f:
json.dump(traj_pp, f, indent=2)
def tokenize(self, lang):
return [SEP] + revtok.tokenize(remove_spaces_and_lower(lang)) + [SEP]
def process_language(self, ex, traj, r_idx, for_vocab_construction=False):
# goal instruction
task_desc = ex['turk_annotations']['anns'][r_idx]['task_desc']
# step-by-step instructions
high_descs = ex['turk_annotations']['anns'][r_idx]['high_descs']
# tokenize language
traj['lang'] = {
'repeat_idx': r_idx,
'goal_tokenize': self.tokenize(task_desc),
'instr_tokenize': [self.tokenize(x) for x in high_descs]
}
if for_vocab_construction:
# add to vocab
for w in traj['lang']['goal_tokenize']:
self.vocab.add_word_counts(w)
for instr in traj['lang']['instr_tokenize']:
for w in instr:
self.vocab.add_word_counts(w)
return
self.stats['goal length'][len(traj['lang']['goal_tokenize'])] += 1
for instr in traj['lang']['instr_tokenize']:
self.stats['instr length'][len(instr)] += 1
# word2idx
traj['lang']['goal'] = self.vocab.seq_encode(traj['lang']['goal_tokenize'])
traj['lang']['instr'] = [self.vocab.seq_encode(x) for x in traj['lang']['instr_tokenize']]
def process_actions(self, ex, traj):
def get_normalized_arg(a, level):
if level == 'high':
arg = a['discrete_action']['args']
if arg == [] or arg == ['']:
return 'None'
else:
arg =arg[-1] #argument for action PutObject is the receptacle (2nd item in the list)
elif level == 'low':
if 'objectId' in a['api_action']:
if a['api_action']['action'] == 'PutObject':
arg = a['api_action']['receptacleObjectId'].split('|')[0]
elif len(a['api_action']['objectId'].split('|')) == 4:
arg = a['api_action']['objectId'].split('|')[0]
else:
arg = a['api_action']['objectId'].split('|')[4].split('_')[0]
else:
return 'None'
if arg in OBJECTS_LOWER_TO_UPPER:
arg = OBJECTS_LOWER_TO_UPPER[arg]
# fix high argument for sliced objects
if level == 'high' and arg in {'Apple', 'Bread', 'Lettuce', 'Potato', 'Tomato'} and \
'objectId' in a['planner_action'] and 'Sliced' in a['planner_action']['objectId']:
arg += 'Sliced'
return arg
def fix_missing_high_pddl_end_action(ex):
'''
appends a terminal action to a sequence of high-level actions
'''
if ex['plan']['high_pddl'][-1]['planner_action']['action'] != 'End':
ex['plan']['high_pddl'].append({
'discrete_action': {'action': 'NoOp', 'args': []},
'planner_action': {'value': 1, 'action': 'End'},
'high_idx': len(ex['plan']['high_pddl'])
})
# deal with missing end high-level action
fix_missing_high_pddl_end_action(ex)
# process high-level actions
picked = None
actions, args = [SOS], ['None']
for idx, a in enumerate(ex['plan']['high_pddl']):
high_action = a['discrete_action']['action']
high_arg = get_normalized_arg(a, 'high')
            # change destinations into ones that can be inferred
# e.g. For task "clean a knife" turn GotoLocation(SideTable) to GotoLocation(Knife)
if high_action == 'GotoLocation' and idx+1 < len(ex['plan']['high_pddl']):
next_a = ex['plan']['high_pddl'][idx+1]
if next_a['discrete_action']['action'] == 'PickupObject':
next_high_arg = get_normalized_arg(next_a, 'high')
high_arg = next_high_arg
# fix argument of sliced object for Clean, Cool and Heat
if high_action == 'PickupObject':
picked = high_arg
if high_action == 'PutObject':
picked = None
if picked is not None and 'Sliced' in picked and picked[:-6] == high_arg:
high_arg = picked
actions.append(high_action)
args.append(high_arg)
self.stats['high action steps'][len(actions)] += 1
# high actions to action decoder input ids (including all task special tokens)
traj['high'] = {}
traj['high']['dec_in_high_actions'] = self.dec_in_vocab.seq_encode(actions)
traj['high']['dec_in_high_args'] = self.dec_in_vocab.seq_encode(args)
# high actions to high action decoder output ids
traj['high']['dec_out_high_actions'] = self.dec_out_vocab_high.seq_encode(actions)[1:]
traj['high']['dec_out_high_args'] = self.dec_out_vocab_arg.seq_encode(args)[1:]
# process low-level actions
num_hl_actions = len(ex['plan']['high_pddl'])
# temporally aligned with HL actions
traj['low'] = {}
traj['low']['dec_in_low_actions'] = [list() for _ in range(num_hl_actions)]
traj['low']['dec_in_low_args'] = [list() for _ in range(num_hl_actions)]
traj['low']['dec_out_low_actions'] = [list() for _ in range(num_hl_actions)]
traj['low']['dec_out_low_args'] = [list() for _ in range(num_hl_actions)]
traj['low']['bbox'] = [list() for _ in range(num_hl_actions)]
traj['low']['centroid'] = [list() for _ in range(num_hl_actions)]
traj['low']['mask'] = [list() for _ in range(num_hl_actions)]
traj['low']['interact'] = [list() for _ in range(num_hl_actions)]
low_actions = [list() for _ in range(num_hl_actions)]
low_args = [list() for _ in range(num_hl_actions)]
prev_high_idx = -1
for idx, a in enumerate(ex['plan']['low_actions']):
# high-level action index (subgoals)
high_idx = a['high_idx']
if high_idx != prev_high_idx:
                # add NoOp to indicate the termination of low-level action prediction
low_actions[prev_high_idx].append('NoOp')
low_args[prev_high_idx].append('None')
# add the high-level action name as the first input of low-level action decoding
high_action = ex['plan']['high_pddl'][high_idx]
high_arg = get_normalized_arg(high_action, 'high')
low_actions[high_idx].append(high_action['discrete_action']['action'])
low_args[high_idx].append(high_arg)
prev_high_idx = high_idx
low_arg = get_normalized_arg(a, 'low')
low_action = a['discrete_action']['action']
if '_' in low_action:
low_action = low_action.split('_')[0]
low_actions[high_idx].append(low_action)
low_args[high_idx].append(low_arg)
# low-level bounding box (not used in the model)
if 'bbox' in a['discrete_action']['args']:
traj['low']['bbox'][high_idx].append(a['discrete_action']['args']['bbox'])
xmin, ymin, xmax, ymax = [float(x) if x != 'NULL' else -1 for x in a['discrete_action']['args']['bbox']]
traj['low']['centroid'][high_idx].append([
(xmin + (xmax - xmin) / 2) / self.image_size,
(ymin + (ymax - ymin) / 2) / self.image_size,
])
else:
traj['low']['bbox'][high_idx].append([])
traj['low']['centroid'][high_idx].append([])
# low-level interaction mask (Note: this mask needs to be decompressed)
mask = a['discrete_action']['args']['mask'] if 'mask' in a['discrete_action']['args'] else None
traj['low']['mask'][high_idx].append(mask)
# interaction validity
has_interact = 0 if low_action in NON_INTERACT_ACTIONS else 1
traj['low']['interact'][high_idx].append(has_interact)
# add termination indicator for the last low-level action sequence
low_actions[high_idx].append('NoOp')
low_args[high_idx].append('None')
for high_idx in range(num_hl_actions):
actions, args = low_actions[high_idx], low_args[high_idx]
traj['low']['dec_in_low_actions'][high_idx] = self.dec_in_vocab.seq_encode(actions)
traj['low']['dec_in_low_args'][high_idx] = self.dec_in_vocab.seq_encode(args)
traj['low']['dec_out_low_actions'][high_idx] = self.dec_out_vocab_low.seq_encode(actions)[1:]
traj['low']['dec_out_low_args'][high_idx] = self.dec_out_vocab_arg.seq_encode(args)[1:]
self.stats['low action steps'][len(actions)] += 1
# check alignment between step-by-step language and action sequence segments
action_low_seg_len = num_hl_actions
lang_instr_seg_len = len(traj['lang']['instr'])
seg_len_diff = action_low_seg_len - lang_instr_seg_len
if seg_len_diff != 1:
assert (seg_len_diff == 2) # sometimes the alignment is off by one ¯\_(ツ)_/¯
# print('Non align data file:', traj['raw_path'])
            # Because 1) this misalignment only occurs in a few trajectories and 2) merging is troublesome,
# we simply duplicate the last language instruction to align
traj['lang']['instr_tokenize'].append(traj['lang']['instr_tokenize'][-1])
traj['lang']['instr'].append(traj['lang']['instr'][-1])
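    # Illustrative sketch (not taken from the data): for a simple pick-and-place
    # task the high-level sequence built above looks roughly like
    #   actions: [SOS, GotoLocation, PickupObject, GotoLocation, PutObject, NoOp]
    #   args:    [None, Apple,       Apple,        Fridge,       Fridge,    None]
    # and each high-level action owns its own low-level action list terminated by NoOp.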
def process_images(self, ex, traj):
def name2id(img_name):
return int(img_name.split('.')[0])
num_hl_actions = len(ex['plan']['high_pddl'])
traj['high']['images'] = []
traj['low']['images'] = [list() for _ in range(num_hl_actions)]
prev_high_idx, prev_low_idx = -1, -1
for img in ex['images']:
high_idx, low_idx = img['high_idx'], img['low_idx']
if high_idx != prev_high_idx:
# reach a new high action, use the current image as the visual observation
# 1) to predict the current high action
# 2) to predict the termination low action of the previous high action
traj['high']['images'].append(name2id(img['image_name']))
if prev_high_idx >= 0:
traj['low']['images'][prev_high_idx].append(name2id(img['image_name']))
prev_high_idx = high_idx
if low_idx != prev_low_idx:
# reach a new low action, use the current image as the visual observation
# to predict the current low action
traj['low']['images'][high_idx].append(name2id(img['image_name']))
prev_low_idx = low_idx
# add the last frame for predicting termination action NoOp
traj['high']['images'].append(name2id(img['image_name']))
traj['low']['images'][high_idx].append(name2id(img['image_name']))
# length check
assert(len(traj['high']['images']) == len(traj['high']['dec_in_high_actions']) - 1)
for hi in range(num_hl_actions):
assert(len(traj['low']['images'][hi]) == len(traj['low']['dec_in_low_actions'][hi]) - 1)
# use mask rcnn for object detection
if traj['repeat_idx'] != 0 or self.args.skip_detection:
            return  # object detection only needs to run once per trajectory, shared across its annotations
all_imgs = [i for i in traj['high']['images']]
for img_list in traj['low']['images']:
all_imgs += img_list
all_imgs = sorted(list(set(all_imgs)))
dp = traj['raw_path'].replace('data/', '')
results_masks, results_others = {}, {}
for model_type in ['all', 'sep']:
for idx in range(0, len(all_imgs), self.batch_size):
batch = all_imgs[idx: idx + self.batch_size]
img_path_batch = [self.image_data[dp][b] for b in batch]
                lock.acquire()  # serialize Mask R-CNN inference across preprocessing threads
if model_type == 'all':
masks, boxes, classes, scores = self.mrcnn.get_mrcnn_preds_all(img_path_batch)
else:
masks, boxes, classes, scores = self.mrcnn.get_mrcnn_preds_sep(img_path_batch)
lock.release()
# results_masks += masks
for i, img_name in enumerate(batch):
results_others[img_name] = {
'bbox': [[int(coord) for coord in box] for box in boxes[i]],
'score': [float(s) for s in scores[i]],
'class': classes[i],
'label': None,
}
results_masks[img_name] = [np.packbits(m) for m in masks[i]]
self.stats['detection num'][len(classes[i])] += 1
self.stats['object num'][len([j for j in classes[i] if j in OBJECTS_DETECTOR])] += 1
self.stats['receptacle num'][len([j for j in classes[i] if j in STATIC_RECEPTACLES])] += 1
# get object grounding labels
for hidx, bbox_seq in enumerate(traj['low']['bbox']):
for lidx, gt in enumerate(bbox_seq):
if gt:
self.interact_num += 1
img_idx = traj['low']['images'][hidx][lidx]
preds = results_others[img_idx]['bbox']
if not preds:
continue
max_iou = -1
for obj_idx, pred in enumerate(preds):
iou = bb_IoU(pred, gt)
if iou > max_iou:
max_iou = iou
best_obj_id, best_pred = obj_idx, pred
true_cls = ACTION_ARGS[traj['low']['dec_out_low_args'][hidx][lidx]]
try:
pred_cls = results_others[img_idx]['class'][best_obj_id]
except:
print('-'*10)
print('traj:', traj['raw_path'], 'img:', img_idx)
if max_iou > 0.7 or true_cls in pred_cls or pred_cls in true_cls:
results_others[img_idx]['label'] = best_obj_id
self.good_detect_num[model_type] += 1
# else:
# print('-'*30)
# print('traj:', traj['raw_path'], 'img:', img_idx)
# print('iou: %.3f'%max_iou)
# print('true class:', true_cls)
# print('pred class:', pred_cls)
# print('true:', gt)
# print('pred:', best_pred)
# save object detection results
pp_save_path = traj['pp_path']
pk_save_path = os.path.join(pp_save_path, "masks_%s.pkl"%model_type)
json_save_path = os.path.join(pp_save_path, "bbox_cls_scores_%s.json"%model_type)
with open(pk_save_path, 'wb') as f:
pickle.dump(results_masks, f)
with open(json_save_path, 'w') as f:
json.dump(results_others, f, indent=4)
def prepare_data_instances(self):
statistic = {'horizon': Counter() ,'rotation': Counter(), 'mani': Counter(), 'navi': Counter()}
mani_subgoal = {}
for split, tasks in self.dataset_splits.items():
if 'test' in split:
continue
print('Preparing %s data instances'%split)
high_instances = []
low_instances_mani = []
low_instances_navi = []
low_mani_seed = []
low_navi_seed = []
det_res_all = {}
det_res_sep = {}
cc=0
for task in tqdm(tasks):
# cc+=1
# if cc == 30:
# break
task_path = os.path.join(self.pp_path, split, task['task'])
traj_path = os.path.join(task_path, 'ann_%d.json'%task['repeat_idx'])
with open(traj_path, 'r') as f:
traj = json.load(f)
with open(os.path.join(traj['raw_path'], 'traj_data.json'), 'r') as f:
traj_raw = json.load(f)
init_action = traj_raw['scene']['init_action']
obj_det_path_all = os.path.join(task_path, 'bbox_cls_scores_all.json')
obj_det_path_sep = os.path.join(task_path, 'bbox_cls_scores_sep.json')
with open(obj_det_path_all, 'r') as f:
obj_det_all = json.load(f)
with open(obj_det_path_sep, 'r') as f:
obj_det_sep = json.load(f)
for img_idx in obj_det_all:
det_res_all[task_path+img_idx] = obj_det_all[img_idx]
det_res_sep[task_path+img_idx] = obj_det_sep[img_idx]
# process the vision input
num_high_without_NoOp = len(traj['lang']['instr'])
for hidx in range(num_high_without_NoOp + 1):
lang_input = traj['lang']['goal'] # list of int
vision_input = traj['high']['images'][hidx]
actype_history_input = traj['high']['dec_in_high_actions'][:hidx+1]
arg_history_input = traj['high']['dec_in_high_args'][:hidx+1]
actype_output = traj['high']['dec_out_high_actions'][hidx]
arg_output = traj['high']['dec_out_high_args'][hidx]
instance = {
'path': task_path,
'high_idx': hidx,
'lang_input': lang_input,
'vision_input': vision_input,
'actype_history_input': actype_history_input,
'arg_history_input': arg_history_input,
'actype_output': actype_output,
'arg_output': arg_output,
}
high_instances.append(instance)
horizon = init_action['horizon']
rotation = init_action['rotation']
for hidx in range(num_high_without_NoOp):
# check the category of subgoal
sg_type = self.dec_in_vocab.id2w(traj['low']['dec_in_low_actions'][hidx][0])
sg_arg = self.dec_in_vocab.id2w(traj['low']['dec_in_low_args'][hidx][0])
subgoal = '%s(%s)'%(sg_type, sg_arg)
low_action_seq = ' '.join(self.dec_in_vocab.seq_decode(traj['low']['dec_in_low_actions'][hidx][1:]))
add_to_mani = split == 'train' and sg_type != 'GotoLocation' and subgoal not in statistic['mani']
add_to_navi = split == 'train' and sg_type == 'GotoLocation' and subgoal not in statistic['navi']
if add_to_mani:
if sg_type not in mani_subgoal:
mani_subgoal[sg_type] = Counter()
mani_subgoal[sg_type][low_action_seq] += 1
if mani_subgoal[sg_type][low_action_seq] == 1:
mani_subgoal[sg_type][low_action_seq+' (objs)'] = [sg_arg]
elif sg_arg not in mani_subgoal[sg_type][low_action_seq+' (objs)']:
mani_subgoal[sg_type][low_action_seq+' (objs)'].append(sg_arg)
lang_input = traj['lang']['instr'][hidx]
num_low_steps = len(traj['low']['dec_out_low_actions'][hidx])
vis_history = []
for low_idx in range(num_low_steps):
vision_input = traj['low']['images'][hidx][low_idx]
actype_history_input = traj['low']['dec_in_low_actions'][hidx][:low_idx+1]
arg_history_input = traj['low']['dec_in_low_args'][hidx][:low_idx+1]
actype_output = traj['low']['dec_out_low_actions'][hidx][low_idx]
arg_output = traj['low']['dec_out_low_args'][hidx][low_idx]
try:
interact = traj['low']['interact'][hidx][low_idx]
except:
interact = 0
if actype_history_input[0] == 3: # gotolocation
target_obj = self.dec_in_vocab.id2w(arg_history_input[0])
detected_objs = obj_det_sep[str(vision_input)]['class']
visible = 0 if target_obj not in detected_objs else 1
reached = 0
if low_idx == (num_low_steps - 1):
reached = 1
if low_idx == (num_low_steps - 2):
action = self.dec_out_vocab_low.id2w(traj['low']['dec_out_low_actions'][hidx][low_idx])
if action in {'LookDown', 'LookUp'} and visible:
reached = 1
progress = (low_idx+1)/num_low_steps
else:
visible, reached, progress = -1, -1, -1
instance = {
'path': task_path,
'high_idx': hidx,
'low_idx': low_idx,
'interact': interact,
'lang_input': lang_input,
'vision_input': vision_input,
'actype_history_input': actype_history_input,
'arg_history_input': arg_history_input,
'vis_history_input': copy.deepcopy(vis_history),
'actype_output': actype_output,
'arg_output': arg_output,
'visible': visible,
'reached': reached,
'progress': progress,
'rotation': (rotation%360)/90,
'horizon': horizon/15,
}
statistic['horizon'][horizon] += 1
statistic['rotation'][rotation] += 1
if self.dec_out_vocab_low.id2w(actype_output) == 'RotateRight':
rotation += 90
elif self.dec_out_vocab_low.id2w(actype_output) == 'RotateLeft':
rotation -= 90
elif self.dec_out_vocab_low.id2w(actype_output) == 'LookUp':
horizon += 15
elif self.dec_out_vocab_low.id2w(actype_output) == 'LookDown':
horizon -= 15
vis_history.append(vision_input)
if actype_history_input[0] == 3: # gotolocation
low_instances_navi.append(instance)
else:
low_instances_mani.append(instance)
if add_to_mani:
low_mani_seed.append(instance)
if add_to_navi:
low_navi_seed.append(instance)
statistic['mani'][subgoal] += 1
statistic['navi'][subgoal] += 1
print('high len:', len(high_instances))
print('low mani len:', len(low_instances_mani))
print('low navi len:', len(low_instances_navi))
statistic['%s high len'%split] = len(high_instances)
statistic['%s low-mani len'%split] = len(low_instances_mani)
statistic['%s low-navi len'%split] = len(low_instances_navi)
if split == 'train':
statistic['train low-navi seed len'] = len(low_navi_seed)
statistic['train low-mani seed len'] = len(low_mani_seed)
high_save_path = os.path.join(self.pp_path, '%s_high_action_instances.json'%split)
with open(high_save_path, 'w') as f:
json.dump(high_instances, f, indent=2)
low_save_path = os.path.join(self.pp_path, '%s_low_action_instances_mani.json'%split)
with open(low_save_path, 'w') as f:
json.dump(low_instances_mani, f, indent=2)
with open(low_save_path.replace('mani', 'navi'), 'w') as f:
json.dump(low_instances_navi, f, indent=2)
if split == 'train':
low_save_path = os.path.join(self.pp_path, '%s_low_action_seed_mani.json'%split)
with open(low_save_path, 'w') as f:
json.dump(low_mani_seed, f, indent=2)
with open(low_save_path.replace('mani', 'navi'), 'w') as f:
json.dump(low_navi_seed, f, indent=2)
with open(os.path.join(self.pp_path, 'mani_subgoals.json'), 'w') as f:
json.dump(mani_subgoal, f, indent=2)
det_all_save_path = os.path.join(self.pp_path, '%s_det_res_all.json'%split)
with open(det_all_save_path, 'w') as f:
json.dump(det_res_all, f, indent=2)
det_sep_save_path = os.path.join(self.pp_path, '%s_det_res_sep.json'%split)
with open(det_sep_save_path, 'w') as f:
json.dump(det_res_sep, f, indent=2)
for k,v in statistic.items():
if isinstance(v, dict):
statistic[k] = dict(sorted(v.items(), key=lambda item: item[0]))
with open(os.path.join(self.pp_path, 'data_statistics.json'), 'w') as f:
json.dump(statistic, f, indent=2)
def run(func, task_queue, pbar):
while task_queue.qsize() > 0:
lock.acquire()
try:
k, task = task_queue.get(False)
except:
lock.release()
return
lock.release()
func(k,task)
pbar.update(1)
if __name__ == '__main__':
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
# os.system("taskset -p 0xffffffff %d" % os.getpid())
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
Config(parser)
parser.add_argument('--skip_detection', action='store_true')
args = parser.parse_args()
pprint.pprint(args)
dataset = AlfredDataset(args)
# dataset.prepare_data_instances()
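    # Example invocation (sketch; flags are defined in models/config/configs.py
    # plus --skip_detection above, and the paths shown are only the config defaults):
    #   ALFRED_ROOT=. python data/dataset.py --preprocess --num_threads 4 --gpu \
    #       --raw_data data/full_2.1.0 --pp_data data/full_2.1.0_pp \
    #       --img_data data/full_2.1.0_imgs.hdf5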
```
#### File: models/config/configs.py
```python
import logging, os, time
def Config(parser):
# general settings
parser.add_argument('--seed', default=123, help='random seed', type=int)
parser.add_argument('--raw_data', default='data/full_2.1.0', help='dataset folder')
parser.add_argument('--pp_data', default='data/full_2.1.0_pp', help='folder name for preprocessed data')
parser.add_argument('--img_data', default='data/full_2.1.0_imgs.hdf5', help='path of the hdf5 data file storing the images of full_2.1.0')
parser.add_argument('--splits', default='data/splits/oct21.json', help='json file containing train/dev/test splits')
parser.add_argument('--all_detector_path', default='models/detector/maskrcnn_all.pth',
help='path of loading pretrained mask rcnn model for all 105 objects')
parser.add_argument('--obj_detector_path', default='models/detector/mrcnn_object.pth',
help='path of loading pretrained mask rcnn model for all 73 movable objects')
parser.add_argument('--rec_detector_path', default='models/detector/mrcnn_receptacle.pth',
help='path of loading pretrained mask rcnn model for all 32 static receptacles')
parser.add_argument('--preprocess', action='store_true', help='store preprocessed data to json files')
parser.add_argument('--exp_temp', default='exp', help='temp experimental sub directory for saving models and logs')
parser.add_argument('--name_temp', default='exp', help='temp experimental name for saving models and logs')
parser.add_argument('--use_templated_goals', help='use templated goals instead of human-annotated goal descriptions', action='store_true')
parser.add_argument('--image_size', default=300, type=int, help='image pixel size (assuming square shape eg: 300x300)')
parser.add_argument('--vocab_size', default=1500, type=int, help='vocabulary size')
# model settings
parser.add_argument('--max_enc_length', default=160, type=int, help='maximum length of encoder input')
parser.add_argument('--use_bert', action='store_true', help='use a pretrained bert model as the encoder')
parser.add_argument('--bert_model', default='bert', choices=['bert', 'albert', 'mobilebert', 'roberta'], help='which pretrained bert to use')
parser.add_argument('--bert_lr_schedule', action='store_true', help='use a warmup-linear-decay lr scheduler for bert')
parser.add_argument('--enc_layer_num', default=4, type=int)
parser.add_argument('--head_num', default=4, type=int)
parser.add_argument('--hidden_dim', default=512, type=int)
parser.add_argument('--inner_dim', default=2048, type=int)
parser.add_argument('--dropout', default=0.0, type=float, help='dropout rate for self-attention block hidden states')
parser.add_argument('--drop_input', default=0.0, type=float, help='dropout rate for input feats')
parser.add_argument('--topk_objs', default=30, type=int, help='use top k object proposals detected by the object detector module')
    parser.add_argument('--confidence', default=0.4, help='object proposals with confidence lower than this score will be pruned', type=float)
parser.add_argument('--detector_type', default='sep', type=str, choices=['all', 'sep'],
help='use a single mrcnn model to detect all the objects or two mrcnn models to detect movable objects and static receptacles separately')
parser.add_argument('--lang_max_length', default=64, type=int, help='cutoff to language input to lang_max_length')
parser.add_argument('--history_max_length', default=32, type=int, help='only keep the history_max_length most recent actions')
parser.add_argument('--emb_dim', default=300, type=int, help='word embedding size')
parser.add_argument('--emb_init', default='xavier', help='word embedding initialization weights')
parser.add_argument('--emb_freeze', action='store_true', help='freeze word embedding')
parser.add_argument('--pred_head_pos', default='cls', type=str, choices=['cls', 'sep'],
                        help='use the first [CLS] output to make all predictions (cls) or use the final action positions (sep)')
# training settings
parser.add_argument('--train_level', default='mix', type=str, choices=['mix', 'low', 'high'],
                        help='train the model on low-level data only, high-level data only, or a mix of both')
parser.add_argument('--train_proportion', default=100, type=int, help='percentage of training data to use')
parser.add_argument('--train_one_shot', action='store_true', help='use one-shot seed data to train')
parser.add_argument('--valid_metric', default='type', type=str, choices=['type','arg', 'mask'],
help='validation metric to select the best model')
parser.add_argument('--low_data', default='all', type=str, choices=['all', 'mani', 'navi'],
                        help='which low-level data to train on: all, manipulation (mani) only, or navigation (navi) only')
parser.add_argument('--resume', help='load a checkpoint')
parser.add_argument('--batch', help='batch size', default=512, type=int)
parser.add_argument('--epoch', help='number of epochs', default=50, type=int)
parser.add_argument('--early_stop', help='validation check fail time before early stop training', default=5, type=int)
parser.add_argument('--optimizer', default='adam', type=str, choices=['adam'], help='optimizer type')
parser.add_argument('--weigh_loss', action='store_true',
help='weigh each loss term based on its uncertainty. Credit to Kendall et al CVPR18 paper')
parser.add_argument('--focal_loss', help='use focal loss', action='store_true')
parser.add_argument('--focal_gamma', default=2, type=float, help='gamma in focal loss')
parser.add_argument('--beta2', default=0.999, type=float, help='beta2 for adam optimizer')
parser.add_argument('--lr', default=1e-4, type=float, help='optimizer learning rate')
parser.add_argument('--lr_scheduler', default='step', type=str, choices=['step', 'noam'], help='lr_scheduler type')
parser.add_argument('--step_decay_epoch', default=2, type=int, help='num epoch to step decay learning rate')
parser.add_argument('--step_decay_factor', default=0.5, type=float, help='decay factor of learning rate each step')
parser.add_argument('--noam_lr_factor', default=0.1, type=float,
help='optimizer learning rate factor for noam')
parser.add_argument('--noam_warmup_iter', default=2000, type=int,
help='warmup iteration number for step/noam')
parser.add_argument('--auxiliary_loss_navi', help='additional navigation loss', action='store_true')
parser.add_argument('--random_skip', help='random skip some data of each epoch', action='store_true')
parser.add_argument('--disable_feat_lang', help='do not use language features as input', action='store_true')
parser.add_argument('--disable_feat_vis', help='do not use visual features as input', action='store_true')
parser.add_argument('--disable_feat_action_his', help='do not use action history features as input', action='store_true')
parser.add_argument('--enable_feat_vis_his', help='use additional history visual features as input', action='store_true')
parser.add_argument('--enable_feat_posture', help='use additional agent posture features as input', action='store_true')
parser.add_argument('--num_threads', default=0, type=int, help='enable multi-threading parallelism for data preprocessing if num_thread >0')
parser.add_argument('--gpu', help='use gpu', action='store_true')
parser.add_argument('--fast_epoch', action='store_true', help='fast epoch during debugging')
parser.add_argument('--debug', dest='debug', action='store_true')
# model_structure_parameters = [
# 'enc_layer_num', 'head_num', 'hidden_dim', 'emb_dim', 'train_level', 'low_data', 'inner_dim',
# ]
# return model_structure_parameters
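    # Usage sketch (mirrors data/dataset.py): Config only registers arguments on
    # an existing parser, so callers build the parser themselves, e.g.
    #   parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    #   Config(parser)
    #   args = parser.parse_args()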
```
#### File: models/model/mmt.py
```python
import os, sys, collections, time, logging
import numpy as np
import revtok # tokenizer
import torch
from torch import nn
from torch.nn import functional as F
sys.path.append(os.path.join(os.environ['ALFRED_ROOT']))
from models.nn.nn_utils import drop_input
from models.nn.transformer import Encoder
from models.nn.embedding import WordEmbedding, SpecialEmbedding
from models.utils.bert_utils import get_bert_model, mmt_word_ids_to_bert_ids
from gen.constants import *
class MultiModalTransformer(nn.Module):
def __init__(self, args, dataset):
super().__init__()
self.args = args
self.dataset = dataset
self.vocab = dataset.vocab
self.bert_tknz = dataset.bert_tknz if args.use_bert else None
self.action_vocab = dataset.dec_in_vocab
self.dec_vocabs = {
'type_high': self.dataset.dec_out_vocab_high,
'type_low': self.dataset.dec_out_vocab_low,
'arg': self.dataset.dec_out_vocab_arg,
}
self.can_navi = args.train_level !='high' and args.low_data != 'mani'
self.device = torch.device('cuda') if args.gpu else torch.device('cpu')
self.seq_len = self.args.max_enc_length
self.emb_dim = args.emb_dim
self.image_size = self.args.image_size
self.vis_len = args.topk_objs + 1
self.lang_len = args.lang_max_length
self.his_len = args.history_max_length * 2
assert self.seq_len == (1+self.vis_len+self.lang_len+self.his_len)
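        # with the default config (topk_objs=30, lang_max_length=64,
        # history_max_length=32) this is 1 + 31 + 64 + 64 = 160 = max_enc_length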
# 0: pad, 1: vision, 2: language, 3: action history, 4: cls (optional, may be used
# to obtain a state representation)
self.modal_indicator = self.totensor([4]+[1]*self.vis_len+[2]*self.lang_len+[3]*self.his_len)
# self.print_parameters()
self.high_actions = self.totensor(self.action_vocab.seq_encode(HIGH_ACTIONS))
self.low_actions = self.totensor(self.action_vocab.seq_encode(LOW_ACTIONS))
self.objs = self.totensor(self.action_vocab.seq_encode(ACTION_ARGS))
self.position_indexes = self.totensor(list(range(args.max_enc_length)))
self.zero = self.totensor(0)
self._construct_model()
self.print_parameters()
def totensor(self, x):
return torch.tensor(x).to(device=self.device)
def print_parameters(self):
amount = 0
for p in self.parameters():
amount += np.prod(p.size())
print("total number of parameters: %.2fM" % (amount/1e6))
parameters = filter(lambda p: p.requires_grad, self.parameters())
amount = 0
for p in parameters:
amount += np.prod(p.size())
print("number of trainable parameters: %.2fM" % (amount/1e6))
def _construct_model(self):
args = self.args
# input
if not self.args.use_bert:
# trainable positional embeddings
self.positional_emb = nn.Embedding(self.seq_len, args.hidden_dim)
# 0: pad, 1: vision, 2: language, 3: action history, 4: [CLS]
self.modality_emb = nn.Embedding(5, args.hidden_dim)
# the transformer encoder
self.encoder = Encoder(n_layers=args.enc_layer_num, n_head=args.head_num,
d_model=args.hidden_dim, d_inner=args.inner_dim, dropout=args.dropout)
# word embedding for natural language
self.word_emb = WordEmbedding(args.emb_dim, self.vocab, trainable=not args.emb_freeze,
init_method=args.emb_init, pretrain_path='data/glove840B300d_extracted.pkl')
# token embedding for action types and arguments (objects)
self.action_emb = SpecialEmbedding(args, self.word_emb,
self.vocab, self.action_vocab)
# self.action_emb = nn.Embedding(len(self.action_vocab), args.emb_dim)
# features -> transformer encoder input
self.vision_to_enc_input = nn.Sequential(
nn.Linear(args.emb_dim + 7, args.hidden_dim),
nn.LayerNorm(args.hidden_dim, eps=1e-6)
)
self.lang_to_enc_input = nn.Sequential(
self.word_emb,
nn.Linear(args.emb_dim, args.hidden_dim),
nn.LayerNorm(args.hidden_dim, eps=1e-6)
)
self.action_to_enc_input = nn.Sequential(
self.action_emb,
nn.Linear(args.emb_dim, args.hidden_dim),
nn.LayerNorm(args.hidden_dim, eps=1e-6)
)
# historical visual features as additional input
if self.args.enable_feat_vis_his:
self.vis_his_to_enc_input = nn.Sequential(
nn.Linear(args.emb_dim + 7, args.hidden_dim),
nn.LayerNorm(args.hidden_dim, eps=1e-6)
)
# agent posture features as additional input
if self.args.enable_feat_posture:
self.rotation_emb = nn.Embedding(4, args.hidden_dim)
self.horizon_emb = nn.Embedding(12, args.hidden_dim)
self.pos_ln = nn.LayerNorm(args.hidden_dim, eps=1e-6)
else:
self.encoder = get_bert_model(args)
self.word_emb = self.encoder.embeddings.word_embeddings
scale =torch.std(self.word_emb.weight[:1000]).item()
self.action_emb = SpecialEmbedding(args, self.word_emb,
self.vocab, self.action_vocab, self.bert_tknz)
self.vis_feat_to_emb = nn.Sequential(
nn.Linear(7, args.emb_dim),
# nn.LayerNorm(args.emb_dim, eps=1e-6)
)
nn.init.normal_(self.vis_feat_to_emb[0].weight, std=scale)
nn.init.constant_(self.vis_feat_to_emb[0].bias, 0)
self.arg_head = nn.Linear(args.hidden_dim, len(ACTION_ARGS))
self.high_head = nn.Linear(args.hidden_dim, len(HIGH_ACTIONS))
self.low_head = nn.Linear(args.hidden_dim, len(LOW_ACTIONS))
if self.args.enable_feat_vis_his:
self.vis_his_to_enc_input = nn.Linear(args.emb_dim + 7, args.emb_dim)
if self.args.enable_feat_posture:
self.rotation_emb = nn.Embedding(4, args.hidden_dim)
self.horizon_emb = nn.Embedding(12, args.hidden_dim)
self.pos_ln = nn.LayerNorm(args.hidden_dim, eps=1e-6)
nn.init.normal_(self.rotation_emb.weight, std=scale)
nn.init.normal_(self.horizon_emb.weight, std=scale)
# output
        # transformations before going into the classification heads
if args.hidden_dim != args.emb_dim:
self.fc1 = nn.Linear(args.hidden_dim, args.emb_dim)
self.fc2 = nn.Linear(args.hidden_dim, args.emb_dim)
self.fc3 = nn.Linear(args.hidden_dim, args.emb_dim)
else:
self.fc1 = self.fc2 = self.fc3 = nn.Identity()
# navigation status monitoring
if self.can_navi and self.args.auxiliary_loss_navi:
self.visible_monitor = nn.Linear(args.hidden_dim, 1)
self.reached_monitor = nn.Linear(args.hidden_dim, 1)
self.progress_monitor = nn.Linear(args.hidden_dim, 1)
self.dropout = nn.Dropout(args.dropout)
# self.softmax = nn.Softmax(dim=1)
def forward(self, batch, is_optimizing=True):
task_type = batch['batch_type'][0]
inputs, enc_masks, labels = self.process_batch(batch)
if not self.args.use_bert:
enc_output, attns_list = self.encoder(inputs, enc_masks)
# enc_output: batch x seq_len x hid
else:
outputs = self.encoder(inputs_embeds=inputs, attention_mask =enc_masks, output_attentions=True)
enc_output = outputs.last_hidden_state
attns_list = outputs.attentions
type_logits, arg_logits, mask_logits, navi_logits = self.outputs_to_logits(enc_output,
attns_list, labels['type_pos'], labels['arg_pos'], task_type) #_bert
if not is_optimizing:
type_preds, arg_preds, mask_preds, navi_preds = self.pred(type_logits, arg_logits, mask_logits, navi_logits)
return type_preds, arg_preds, mask_preds, navi_preds, labels
type_loss, arg_loss, mask_loss, navi_loss = self.get_loss(type_logits, arg_logits, mask_logits, navi_logits,
labels, task_type)
return type_loss, arg_loss, mask_loss, navi_loss
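    # Inference-time usage sketch: calling forward with is_optimizing=False
    # returns predictions instead of losses, e.g.
    #   type_preds, arg_preds, mask_preds, navi_preds, labels = model(batch, is_optimizing=False)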
def process_batch(self, batch):
"""convert batch data to tensor matrix
Args:
batch: list of from items from AlfredPyTorchDataset
Returns:
inputs: encoder input matrix (tensor of size [batch x seq_len x hid])
enc_masks: input sequence masks (tensor of size [batch x seq_len])
labels: dict storing
type: action type labels (tensor of size [batch])
arg: action argument labels (tensor of size [batch])
mask: interactive mask selection labels (list of tensor [1] or None)
interact: whether mask prediction is required (list of True/False)
                type_pos: list of positions to perform action type prediction (arg_pos - 1)
arg_pos: list of positions to perform argument prediction
vis_len: list of length of visual inputs (i.e. object number +1)
"""
for k,v in batch.items():
if isinstance(v, torch.Tensor):
batch[k] = v.to(device = self.device, non_blocking=True)
lang_feats = batch['lang_input']
vis_cls_col = batch['vision_cls']
if self.args.drop_input != 0:
lang_feats =drop_input(lang_feats, self.args.drop_input, self.training)
vis_cls_col = drop_input(vis_cls_col, self.args.drop_input, self.training)
his_feats = batch['action_history_input']
vis_feat1 = self.action_emb(vis_cls_col)
if not self.args.use_bert:
vis_feat2 = batch['vision_feats']
vis_feats = torch.cat([vis_feat1, vis_feat2], dim=2)
else:
vis_feat2 = self.vis_feat_to_emb(batch['vision_feats'])
vis_feats = vis_feat1 + vis_feat2
vis_his_feats = None
if 'his_vis_cls' in batch:
vis_his_feat1 = self.action_emb(batch['his_vis_cls']) # batch x vis_his_len x obj_num x emb_dim
vis_his_feat2 = batch['his_vis_feat'] # batch x vis_his_len x obj_num x 7
vis_his_feats = torch.cat([vis_his_feat1, vis_his_feat2], dim=3).sum(dim=2) # batch x vis_his_len x (emb_dim+7)
pos_feats = {'rotation': batch['rotation'], 'horizon': batch['horizon']} if 'rotation' in batch else None
bs = len(lang_feats)
feat2input = self.feats_to_inputs if not self.args.use_bert else self.feats_to_inputs_bert
inputs = feat2input(lang_feats, vis_feats, his_feats, vis_his_feats, pos_feats)
enc_masks = batch['seq_mask'] # batch x seq_len
# cls_ = torch.ones((bs,1)).to(device=self.device, dtype=torch.long)
# enc_masks = torch.cat([cls_, vis_cls_col, lang_feats, his_feats], dim=1)
# enc_masks = (enc_masks != 0)
# assert (enc_masks==batch['seq_mask']).sum() == enc_masks.shape[0]*enc_masks.shape[1]
# print('vision:')
# print(enc_masks[0][:32].data)
# print(inputs[0][:10][:30].data)
# print(vis_feat1[0][:10][:30].data)
# print(batch['vision_feats'][0][:10].data)
# print(vis_feat2[0][:10][:30].data)
# print('language:')
# print(lang_feats[0].data)
# print(enc_masks[0][32:96].data)
# print(inputs[0][32:66][:30].data)
# print('action:')
# print(enc_masks[0][96:].data)
# print(inputs[0][96:117][:30].data)
labels = {
'type': batch['actype_label'],
'arg': batch['arg_label'],
'mask': batch['mask_label'],
'interact': batch['interact'],
'type_pos': batch['type_pos'],
'arg_pos': batch['arg_pos'],
}
if 'visible_label' in batch:
labels.update({
'visible': batch['visible_label'],
'reached': batch['reached_label'],
'progress': batch['progress_label'],
})
return inputs, enc_masks, labels
def feats_to_inputs(self, lang_feats, vis_feats, his_feats, vis_his_feats=None, pos_feats=None):
"""convert features (indexes, vision features) to transformer inputs (embeddings)
Args:
lang_feats: word ids of size (batch, lang_len)
vis_feats: vision features of size (batch, vis_len, vis_feat_dim)
his_feats: action ids of size (batch, his_len)
Returns:
inputs: transformer inputs of size (batch, seq_len, hid)
"""
vis_in = self.vision_to_enc_input(vis_feats) #batch x vis_len x hid
lang_in = self.lang_to_enc_input(lang_feats) # batch x lang_len x hid
act_in = self.action_to_enc_input(his_feats) # batch x action_len x hid
if vis_his_feats is not None:
vis_his_in = self.vis_his_to_enc_input(vis_his_feats) # batch x (action_len/2-1) x hid
            act_in[:, 2::2, :] = vis_his_in  # one history-frame embedding per past action pair (note: this index slice is a reconstruction; the original expression was garbled)
if pos_feats is not None:
ro = self.rotation_emb(pos_feats['rotation'])
ho = self.horizon_emb(pos_feats['horizon'])
cls_in = self.pos_ln(ro+ho).unsqueeze(1)
else:
bs, hid = len(lang_feats), self.args.hidden_dim
cls_in = torch.zeros(bs, 1, hid).to(device=self.device) #batch x 1 x hid
token_emb = torch.cat([cls_in, vis_in, lang_in, act_in], dim=1) # batch x seq_len x hid
modal_emb = self.modality_emb(self.modal_indicator)
pos_emb = self.positional_emb(self.position_indexes)
inputs = self.dropout(token_emb + modal_emb + pos_emb) # batch x seq_len x hid
# inputs = token_emb + modal_emb + pos_emb # batch x seq_len x hid
return inputs
def feats_to_inputs_bert(self, lang_feats, vis_feats, his_feats, vis_his_feats=None, pos_feats=None):
"""convert features (indexes, vision features) to transformer inputs (embeddings)
Args:
lang_feats: word ids of size (batch, lang_len)
vis_feats: vision features of size (batch, vis_len, vis_feat_dim)
his_feats: action ids of size (batch, his_len)
Returns:
inputs: transformer inputs of size (batch, seq_len, hid)
"""
# vis_in = self.vision_to_emb_input(vis_feats) #batch x vis_len x emb
vis_in = vis_feats #batch x vis_len x emb
lang_in = self.word_emb(lang_feats) # batch x lang_len x emb
act_in = self.action_emb(his_feats) # batch x action_len x emb
if pos_feats is not None:
ro = self.rotation_emb(pos_feats['rotation'])
ho = self.horizon_emb(pos_feats['horizon'])
cls_in = self.pos_ln(ro+ho).unsqueeze(1)
else:
bs, emb = len(lang_feats), self.args.emb_dim
cls_in = torch.zeros(bs, 1, emb).to(device=self.device) #batch x 1 x emb
input_emb = torch.cat([cls_in, vis_in, lang_in, act_in], dim=1) # batch x seq_len x emb
# input_emb = self.input_emb(inputs_embeds=input_emb) # batch x seq_len x hid
return input_emb
def outputs_to_logits(self, enc_output, attns_list, type_pos, arg_pos, task_type):
bs = len(enc_output)
enum = list(range(bs))
if self.args.pred_head_pos == 'cls' and 'navi' in task_type:
type_output = enc_output[:, 0, :]
arg_output = enc_output[:, 0, :]
else:
type_output = enc_output[enum, type_pos] # batch x hid
arg_output = enc_output[enum, arg_pos] # batch x hid
weight_arg = self.action_emb(self.objs) # num_obj x emb_dim
arg_logits = self.fc3(arg_output).mm(weight_arg.t())
# arg_logits = self.out_to_obj(arg_output) # batch x num_obj
mask_logits, navi_logits = [], {}
if 'high' in task_type:
weight_high = self.action_emb(self.high_actions) # num_high x emb_dim
type_logits = self.fc1(type_output).mm(weight_high.t())
# type_logits = self.out_to_high(type_output) # batch x num_high
elif 'low' in task_type:
            weight_low = self.action_emb(self.low_actions) # num_low x emb_dim
type_logits = self.fc2(type_output).mm(weight_low.t())
# type_logits = self.out_to_low(type_output) # batch x num_low
# if not self.args.use_bert:
attns = attns_list[-1] # last layer attns: batch x num_head x q_len x v_len
attns = attns.sum(dim=1) # sum/select of heads: b x q_len x v_len
# attn of arg prediction token over each vision input
if self.args.pred_head_pos == 'cls' and 'navi' in task_type:
mask_logits = attns[:, 0, 1:(self.args.topk_objs+2)] # batch x vis_len
else:
mask_logits = attns[enum, arg_pos, 1:(self.args.topk_objs+2)] # batch x vis_len
# else:
# mask_out = enc_output[enum, arg_pos] #enc_output[:, 0, :]
# mask_logits = self.mask_prediction_head(mask_out)
if self.args.auxiliary_loss_navi and 'navi' in task_type:
cls_output = enc_output[:, 0, :] # batch x hid
navi_logits['visible'] = self.visible_monitor(cls_output)
navi_logits['reached'] = self.reached_monitor(cls_output)
navi_logits['progress'] = self.progress_monitor(cls_output)
return type_logits, arg_logits, mask_logits, navi_logits
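    # Note on mask_logits above: interactive object grounding reuses the last
    # encoder layer's attention. The attention from the argument-prediction
    # position (or [CLS] for navigation) over the vis_len visual slots
    # (sequence positions 1 .. topk_objs+1), summed over heads, serves as the
    # selection score; argmax over it (see pred() below) picks which detected
    # object's Mask R-CNN mask is used for the interaction.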
def get_loss(self, type_logits, arg_logits, mask_logits, navi_logits, labels, task_type):
mask_loss, navi_loss = self.zero, {}
if self.args.focal_loss:
bs = len(type_logits)
enum = list(range(bs))
type_probs = F.softmax(type_logits, dim=1)[enum, labels['type']]
type_loss = F.cross_entropy(type_logits, labels['type'], reduction='none')
type_loss = torch.mean(type_loss * (1-type_probs) ** self.args.focal_gamma)
arg_probs = F.softmax(arg_logits, dim=1)[enum, labels['arg']]
arg_loss = F.cross_entropy(arg_logits, labels['arg'], ignore_index=-1, reduction='none')
arg_loss = torch.mean(arg_loss * (1-arg_probs) ** self.args.focal_gamma)
if 'mani' in task_type:
mask_probs = F.softmax(mask_logits, dim=1)[enum, labels['mask']]
mask_loss = F.cross_entropy(mask_logits, labels['mask'], ignore_index=-1, reduction='none')
mask_loss = torch.mean(mask_loss * (1-mask_probs) ** self.args.focal_gamma)
else:
type_loss = F.cross_entropy(type_logits, labels['type'])
arg_loss = F.cross_entropy(arg_logits, labels['arg'], ignore_index=-1)
# mask_loss = self.totensor(0.0)
if 'mani' in task_type:
mask_loss = F.cross_entropy(mask_logits, labels['mask'], ignore_index=-1)
if self.args.auxiliary_loss_navi and 'navi' in task_type:
l_v = F.binary_cross_entropy_with_logits(navi_logits['visible'].view(-1), labels['visible'], reduction='none')
l_r = F.binary_cross_entropy_with_logits(navi_logits['reached'].view(-1), labels['reached'], reduction='none')
l_p = 0.5 * (torch.sigmoid(navi_logits['progress']).view(-1) - labels['progress']).square()
navi_loss['visible'] = l_v[labels['visible']!=-1].mean()
navi_loss['reached'] = l_r[labels['reached']!=-1].mean()
navi_loss['progress'] = l_p[labels['progress']!=-1].mean()
return type_loss, arg_loss, mask_loss, navi_loss
def pred(self, type_logits, arg_logits, mask_logits, navi_logits):
type_preds = torch.argmax(type_logits, dim=1)
arg_preds = torch.argmax(arg_logits, dim=1)
mask_preds = torch.argmax(mask_logits, dim=1) if mask_logits != [] else []
navi_preds = {}
if navi_logits != {}:
navi_preds['visible'] = (navi_logits['visible']>0.5).view(-1)
navi_preds['reached'] = (navi_logits['reached']>0.5).view(-1)
navi_preds['progress'] = torch.sigmoid(navi_logits['progress'].view(-1))
return type_preds, arg_preds, mask_preds, navi_preds
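    # step: single-example inference used at rollout time; it converts raw observations
    # into feature tensors, runs the (BERT or custom) encoder once, and decodes top-k
    # type/arg predictions, plus mask probabilities for low-level steps.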
def step(self, observations, task_type, topk=1):
# language
lang_feats = self.lang_obs_to_feats(observations['lang'])
# current vision
vis_feat_dim = self.emb_dim+7 if not self.args.use_bert else self.emb_dim
if observations['vis'] is None:
vis_feats = torch.zeros((self.vis_len, vis_feat_dim), device=self.device) # vis_len x (emb_dim+7)
vis_cls_col = torch.zeros((self.vis_len, ), dtype=torch.long, device=self.device) # vis_len
else:
vis_feats, vis_cls_col = self.vis_obs_to_feats(*observations['vis'])
# history actions
his_feats = self.action_history_to_feats(observations['act_his'])
# history visions and posture
if 'navi' not in task_type:
vis_his_feats = pos_feats = None
else:
vis_his_feats = self.vis_his_obs_to_feats(observations['vis_his'])
pos_feats = {k: self.totensor(v).unsqueeze(0) for k,v in observations['pos'].items()}
feats_input = (lang_feats.unsqueeze(0), vis_feats.unsqueeze(0), his_feats.unsqueeze(0),
vis_his_feats, pos_feats)
feat2input = self.feats_to_inputs if not self.args.use_bert else self.feats_to_inputs_bert
inputs = feat2input(*feats_input)
enc_masks = torch.cat([self.totensor([1]), vis_cls_col, lang_feats, his_feats], dim=0)
enc_masks = (enc_masks != 0)
# print('action_history', action_history)
# print('enc_masks', enc_masks)
if not self.args.use_bert:
enc_output, attns_list = self.encoder(inputs, enc_masks)
# enc_output: batch x seq_len x hid
else:
outputs = self.encoder(inputs_embeds=inputs, attention_mask=enc_masks.unsqueeze(0),
output_attentions=True)
enc_output = outputs.last_hidden_state
attns_list = outputs.attentions
arg_pos = enc_masks.nonzero()[-1]
type_pos = arg_pos - 1
# print('type_pos', type_pos, 'arg_pos:', arg_pos)
type_logits, arg_logits, mask_logits, _ = self.outputs_to_logits(enc_output, attns_list,
type_pos, arg_pos, task_type)
# type_pos.unsqueeze(0), arg_pos.unsqueeze(0), level)
level = task_type.split('_')[0]
type_probs, type_preds = self.topk_preds(type_logits, topk)
type_preds = self.dec_vocabs['type_%s'%level].seq_decode(type_preds)
arg_probs, arg_preds = self.topk_preds(arg_logits, topk)
arg_preds = self.dec_vocabs['arg'].seq_decode(arg_preds)
if level == 'low':
mask_probs, mask_preds = self.topk_preds(mask_logits, topk)
else:
mask_probs, mask_preds = [], []
preds = {'type': type_preds, 'arg': arg_preds, 'mask': mask_preds}
probs = {'type': type_probs, 'arg': arg_probs, 'mask': mask_probs}
return preds, probs
def topk_preds(self, logits, topk):
probs = F.softmax(logits.squeeze(), dim=0)
probs_sorted, idx_sorted = probs.topk(min(topk, len(probs)))
return probs_sorted.tolist(), idx_sorted.tolist()
def lang_obs_to_feats(self, lang_obs):
lang_widx = torch.zeros((self.lang_len,), dtype=torch.long, device=self.device) # lang_len
if lang_obs is None:
return lang_widx
if self.args.use_bert:
lang_obs = mmt_word_ids_to_bert_ids(lang_obs[1:-1], self.vocab, self.bert_tknz)
actual_len = min(self.lang_len, len(lang_obs))
lang_widx[:actual_len] = self.totensor(lang_obs[:actual_len])
if not self.args.use_bert:
lang_widx[lang_widx >= self.dataset.vocab.vocab_size] = 0
# logging.info(lang_widx)
return lang_widx # lang_len
def vis_obs_to_feats(self, boxes, classes, scores):
obj_num = min(self.vis_len - 1, len(scores))
vis_feat_col = torch.zeros((self.vis_len, 7), device=self.device) # vis_len x 7
vis_cls_col = torch.zeros((self.vis_len, ), dtype=torch.long, device=self.device) # vis_len
for obj_idx in range(obj_num):
bbox = self.totensor(boxes[obj_idx]).to(torch.float) / self.image_size
cls_idx = self.action_vocab.w2id(classes[obj_idx])
vis_feat_col[obj_idx+1][:4] = bbox
vis_feat_col[obj_idx+1][4] = bbox[2]-bbox[0]
vis_feat_col[obj_idx+1][5] = bbox[3]-bbox[1]
vis_feat_col[obj_idx+1][6] = float(scores[obj_idx]) # 1d
vis_cls_col[obj_idx + 1] = cls_idx
vis_cls_col[0] = 1
cls_feat = self.action_emb(vis_cls_col)
if not self.args.use_bert:
vis_feat = torch.cat([cls_feat, vis_feat_col], dim=1) #vis_len x (emb_dim+7)
else:
vis_feat = cls_feat + self.vis_feat_to_emb(vis_feat_col)
# print('vis_feat:', vis_feat.shape)
return vis_feat, vis_cls_col
def vis_his_obs_to_feats(self, vis_his):
if vis_his is None:
return None
# history visual input
vis_his_len = self.vis_his_len = self.args.history_max_length - 1
max_obj_num = self.max_obj_num = 10
his_vis_feat = torch.zeros((vis_his_len, max_obj_num, 7), device=self.device) # vis_his_len x max_obj_num x 7
his_vis_cls = torch.zeros((vis_his_len, max_obj_num), dtype=torch.long, device=self.device) # vis_his_len x max_obj_num
for his_idx, dets in enumerate(vis_his[-vis_his_len:]):
obj_num = min(max_obj_num, len(dets[0]))
for obj_idx in range(obj_num):
bbox = self.totensor(dets[0][obj_idx]).to(torch.float) / self.image_size
cls_idx = self.action_vocab.w2id(dets[1][obj_idx])
his_vis_feat[his_idx][obj_idx][:4] = bbox
his_vis_feat[his_idx][obj_idx][4] = bbox[2]-bbox[0]
his_vis_feat[his_idx][obj_idx][5] = bbox[3]-bbox[1]
his_vis_feat[his_idx][obj_idx][6] = float(dets[2][obj_idx])
his_vis_cls[his_idx][obj_idx] = cls_idx
his_cls_feat = self.action_emb(his_vis_cls) # vis_his_len x max_obj_num x emb_dim
his_vis_feat_all = torch.cat([his_cls_feat, his_vis_feat], dim=2) #vis_his_len x max_obj_num x (emb_dim+7)
return his_vis_feat_all.sum(dim=1).unsqueeze(0) #1 x vis_his_len x (emb_dim+7)
def action_history_to_feats(self, action_history_seq):
action_seq = torch.zeros((self.his_len,), dtype=torch.long, device=self.device)
if action_history_seq is None:
return action_seq
elif isinstance(action_history_seq[0], str):
action_history_seq = self.action_vocab.seq_encode(action_history_seq)
actual_len = min(self.his_len, len(action_history_seq))
action_seq[:actual_len] = self.totensor(action_history_seq[-actual_len:])
# logging.info(action_seq)
return action_seq # his_len
``` |
{
"source": "5966466/SIMLR-python",
"score": 3
} |
#### File: SIMLR/src/dist2.py
```python
import numpy as np
def dist2(*args):
if len(args) == 1:
x = args[0]
c = x
elif len(args) == 2:
x = args[0]
c = args[1]
else:
raise ValueError("The number of input is uncorrect")
if type(x) != np.ndarray or type(c) != np.ndarray:
raise TypeError("Please input numpy.ndarray variable")
ndata, dimx = x.shape
ncentres, dimc = c.shape
if dimx != dimc:
raise ValueError("Data dimension does not match dimension of centres")
n2 = np.dot(np.ones((ncentres, 1)), np.sum(x**2, axis=1).reshape(1, -1)).T + \
np.dot(np.ones((ndata, 1)), np.sum(c**2, axis=1).reshape(1, -1)) - \
2*(np.dot(x, c.T))
    if (n2 < 0).any():
        # clamp tiny negative values caused by floating-point round-off
        n2[n2 < 0] = 0
return n2
```
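A minimal usage sketch for `dist2` (not part of the original repository; the import path is an assumption based on the file header above). It returns the matrix of squared Euclidean distances between the rows of `x` and the rows of `c`, with small negative round-off values clipped to zero.
```python
# Hypothetical usage; assumes SIMLR/src is importable as a package named `src`.
import numpy as np
from scipy.spatial.distance import cdist
from src.dist2 import dist2

x = np.random.rand(5, 3)   # 5 points in 3 dimensions
c = np.random.rand(4, 3)   # 4 centres in the same space
n2 = dist2(x, c)           # 5 x 4 matrix of squared Euclidean distances
assert np.allclose(n2, cdist(x, c, 'sqeuclidean'))
```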
#### File: SIMLR/src/large_multipleK.py
```python
import numpy as np
from scipy.stats import norm
from .dist2 import dist2
import pdb
eps = np.finfo(np.double).eps
def large_multipleK(val,ind,KK):
# if type(x) != np.ndarray:
# raise TypeError("Please input 'numpy.ndarray' type variable into function multipleK")
val = val*val
sigma = np.arange(2, 1-0.25, -0.25)
allk = np.arange(np.ceil(KK/2), np.ceil(KK*1.5)+np.ceil(KK/10), np.ceil(KK/10))
D_Kernels = []
for allk_l in allk:
if allk_l < val.shape[1]:
temp = np.mean(val[:,0:int(allk_l)], axis=1, keepdims=True)
temp0 = 0.5*(np.tile(temp,(1,val.shape[1])) + temp[ind].squeeze())+ eps
for sigma_j in sigma:
temp = norm.pdf(val,0,sigma_j*temp0)
temptemp = temp[:,0]
                temp = 0.5*(np.tile(temptemp[:,np.newaxis],(1,val.shape[1])) + temptemp[ind]) - temp
D_Kernels.append(temp+eps)
D_Kernels = np.array(D_Kernels)
return D_Kernels
```
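A hedged sketch of how `large_multipleK` might be driven (not from the original repository). The assumption here is that `val` holds each row's sorted distances to its nearest neighbours and `ind` the matching neighbour indices, both of shape `(N, k)` with `k` somewhat larger than `1.5 * KK`; the import paths are assumptions as well.
```python
import numpy as np
from src.dist2 import dist2
from src.large_multipleK import large_multipleK

x = np.random.rand(30, 8)                   # 30 samples, 8 features
D = dist2(x)                                # 30 x 30 squared-distance matrix
k, KK = 20, 10
val = np.sqrt(np.sort(D, axis=1))[:, :k]    # distances to the k nearest rows
ind = np.argsort(D, axis=1)[:, :k]          # indices of those rows
D_Kernels = large_multipleK(val, ind, KK)   # (num_kernels, 30, k) stack
print(D_Kernels.shape)
```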
#### File: SIMLR/src/multipleK.py
```python
import numpy as np
from scipy.stats import norm
from .dist2 import dist2
eps = np.finfo(np.double).eps
def multipleK(x):
if type(x) != np.ndarray:
raise TypeError("Please input 'numpy.ndarray' type variable into function multipleK")
N = x.shape[0]
Kernels = []
sigma = np.arange(2, 1-0.25, -0.25)
Diff = dist2(x)
T = np.sort(Diff)
INDEX = np.argsort(Diff)
m, n = Diff.shape
allk = np.arange(10, 30+2, 2)
for allk_l in allk:
if allk_l < N-1:
            TT = np.mean(T[:, 1:allk_l+1], axis=1, keepdims=True) + eps  # skip the self-distance column (column 0 after sorting)
Sig = (np.tile(TT, n) + np.tile(TT.T, (n, 1))) / 2
Sig = Sig * (Sig>eps) + eps
for sigma_j in sigma:
W = norm.pdf(Diff, 0, sigma_j*Sig)
Kernels.append((W+W.T)/2)
Kernels = np.array(Kernels)
D_Kernels = []
for K in Kernels:
k = 1 / np.sqrt(np.diag(K)+1)
G = K
G_diag = np.diag(G).reshape(-1,1)
D_temp = (np.tile(G_diag, len(G)) + np.tile(G_diag.T, (len(G), 1)) - 2*G) / 2
D_temp = D_temp - np.diag(np.diag(D_temp))
D_Kernels.append(D_temp)
D_Kernels = np.array(D_Kernels)
return D_Kernels
```
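A short usage sketch for `multipleK` (an assumption, not code from the repository). It builds a family of Gaussian kernels over the pairwise distances of `x` at several neighbourhood sizes and bandwidths, then converts each kernel into a distance-like matrix; `N` needs to exceed 31 for every neighbourhood size to be used.
```python
import numpy as np
from src.multipleK import multipleK

x = np.random.rand(50, 10)    # 50 samples, 10 features
D_Kernels = multipleK(x)      # (num_kernels, 50, 50) stack of distance-like matrices
print(D_Kernels.shape)
```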
#### File: SIMLR/src/Network_Diffusion.py
```python
import numpy as np
from .dominateset import dominateset
from .TransitionFields import TransitionFields
from scipy.sparse import dia_matrix
def Network_Diffusion(A, K):
A = A - np.diag(np.diag(A))
P = dominateset(np.abs(A), min(K, len(A)-1)) * np.sign(A)
DD = np.sum(np.abs(P.T), axis=0)
P = P + np.eye(len(P)) + np.diag(DD)
P = TransitionFields(P)
D, U = np.linalg.eig(P)
d = np.real(D + np.finfo(np.double).eps)
alpha = 0.8
beta = 2
d = (1-alpha)*d / (1-alpha*(d**beta))
D = np.diag(np.real(d))
#U = np.real(U)
W = np.dot(U, np.dot(D, U.T))
W = (W*(1-np.eye(len(W)))) / np.tile(1-np.diag(W).reshape(-1,1), len(W))
D = dia_matrix(np.diag(DD))
W = D.dot(W)
W = (W + W.T) / 2
return W
```
#### File: SIMLR/src/TransitionFields.py
```python
import numpy as np
from .NE_dn import NE_dn
def TransitionFields(W):
zeroindex = np.nonzero(np.sum(W, axis=1) == 0)[0]
W = W * len(W)
W = NE_dn(W, 'ave')
w = np.sqrt(np.sum(np.abs(W), axis=0)+np.finfo(np.double).eps)
W = W / np.tile(w, (len(W),1))
W = np.dot(W, W.T)
Wnew = W
Wnew[zeroindex, :] = 0
Wnew[:, zeroindex] = 0
return Wnew
``` |
{
"source": "5966466/software-curriculum-design",
"score": 2
} |
#### File: managsys/data/views.py
```python
from django.shortcuts import render, redirect
from .models import App, FilesApp
from register.models import Acc
from django.http import HttpResponse
import shutil, os
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.utils import timezone
#from data.funcs import get_file, check_dir
from .funcs import *
from django.http import FileResponse
# Create your views here.
@login_required
def detail(request, account_id, prestatus='default'):
if request.user.acc.id != account_id:
return redirect('%s?next=%s' % (settings.LOGIN_URL, request.path))
account = Acc.objects.get(pk=account_id)
files = check_dir(account)
url = 'img/' + os.path.basename(account.img.url)
content = {
'account': account,
'files': files,
'url': url,
'prestatus': prestatus,
}
return render(request, 'registers/homepage.html', content)
@login_required
def before_add(request, account_id):
if request.user.acc.id != account_id:
return redirect('%s?next=%s' % (settings.LOGIN_URL, request.path))
prestatus = 'appspace'
content = {
'id': account_id,
'prestatus': prestatus,
}
return render(request, 'datas/add.html', content)
@login_required
def add(request, account_id):
if request.user.acc.id != account_id:
return redirect('%s?next=%s' % (settings.LOGIN_URL, request.path))
account = Acc.objects.get(pk=account_id)
appName = request.POST['appName']
st, msg_path = make_dir(account, appName)
if not st:
prestatus = 'appspace'
content = {
'id': account_id,
'prestatus': prestatus,
'error_message': msg_path
}
return render(request, 'datas/add.html', content)
names = ['dataPre', 'dataPro', 'dataSho']
files = request.FILES.getlist('data')
st, msg = uploadFile(files, names, msg_path)
if not st:
prestatus = 'appspace'
content = {
'id': account_id,
'prestatus': prestatus,
'error_message': msg
}
return render(request, 'datas/add.html', content)
otherFile = request.FILES.get('others')
if otherFile is not None:
st, msg = uploadZip(otherFile, msg_path)
if not st:
prestatus = 'appspace'
content = {
'id': account_id,
'prestatus': prestatus,
'error_message': msg
}
return render(request, 'datas/add.html', content)
account.app_set.create(
name=account,
appName=appName,
path=msg_path
)
prestatus = 'appspace'
content = {
'message': "Successfully added",
'account': account,
'prestatus': prestatus,
'type': 1,
}
return render(request, 'datas/message.html', content)
@login_required
def application(request, account_id, app_id, error_message):
if request.user.acc.id != account_id:
return redirect('%s?next=%s' % (settings.LOGIN_URL, request.path))
account = Acc.objects.get(pk=account_id)
app = App.objects.get(pk=app_id)
files = check_dir(account)
dates = []
for f in files:
f = account.filesacc_set.get(name = f)
dates.append(f.date)
f_d = list(zip(files, dates))
url = 'img/' + os.path.basename(account.img.url)
if error_message == 'NULL':
error_message = None
content = {
'id': account_id,
'error_message': error_message,
'app': app,
'f_d': f_d,
'url': url,
}
return render(request, 'datas/application.html', content)
@login_required
def manage_file(request, account_id, prestatus):
if request.user.acc.id != account_id:
return redirect('%s?next=%s' % (settings.LOGIN_URL, request.path))
account = Acc.objects.get(pk=account_id)
files = check_dir(account)
url = 'img/' + os.path.basename(account.img.url)
prestatus = 'filespace'
content = {
'account': account,
'files': files,
'url': url,
'prestatus': prestatus,
}
return render(request, 'registers/homepage.html', content)
@login_required
def delete_file(request, account_id, filename):
if request.user.acc.id != account_id:
return redirect('%s?next=%s' % (settings.LOGIN_URL, request.path))
account = Acc.objects.get(pk=account_id)
os.remove(os.path.join(account.path, 'downloadfiles', filename))
delete_all(account, filename)
files = check_dir(account)
url = 'img/' + os.path.basename(account.img.url)
prestatus = 'filespace'
content = {
'message': "Successfully deleted",
'account': account,
'files': files,
'url': url,
'prestatus': prestatus,
'type': 1,
}
return render(request, 'registers/homepage.html', content)
@login_required
def before_upLog(request, account_id):
if request.user.acc.id != account_id:
return redirect('%s?next=%s' % (settings.LOGIN_URL, request.path))
account = Acc.objects.get(pk=account_id)
prestatus = 'filespace'
content = {
'account': account,
'prestatus': prestatus,
}
return render(request, 'datas/uplog.html', content)
@login_required
def upLog(request, account_id):
if request.user.acc.id != account_id:
return redirect('%s?next=%s' % (settings.LOGIN_URL, request.path))
account = Acc.objects.get(pk=account_id)
file = request.FILES.get('data')
if file is None:
prestatus = 'filespace'
content = {
'account': account,
'prestatus': prestatus,
'error_message': "Please select a log file to upload"
}
return render(request, 'datas/uplog.html', content)
filename = file.name
st, msg = get_file(file, os.path.join(account.path, \
'downloadfiles', filename))
if not st:
prestatus = 'filespace'
content = {
'account': account,
'prestatus': prestatus,
'error_message': msg
}
return render(request, 'datas/uplog.html', content)
file = account.filesacc_set.filter(name=filename)
if list(file) == []:
account.filesacc_set.create(
name = filename,
date = timezone.now()
)
else:
file[0].date = timezone.now()
file[0].save()
files = check_dir(account)
url = 'img/' + os.path.basename(account.img.url)
prestatus = 'filespace'
content = {
'message': "Successfully upLoaded",
'account': account,
'files': files,
'url': url,
'prestatus': prestatus,
'type': 1,
}
return render(request, 'registers/homepage.html', content)
@login_required
def delete(request, account_id, app_id):
if request.user.acc.id != account_id:
return redirect('%s?next=%s' % (settings.LOGIN_URL, request.path))
account = Acc.objects.get(pk=account_id)
app = App.objects.get(pk=app_id)
app.delete()
shutil.rmtree(app.path)
prestatus = 'appspace'
content = {
'message': "Successfully deleted",
'account': account,
'prestatus': prestatus,
'type': 1,
}
return render(request, 'datas/message.html', content)
@login_required
def before_revise(request, account_id, app_id):
if request.user.acc.id != account_id:
return redirect('%s?next=%s' % (settings.LOGIN_URL, request.path))
app = App.objects.get(pk=app_id)
prestatus = 'appspace'
account = Acc.objects.get(pk=account_id)
content = {
'account': account,
'id': account_id,
'prestatus': prestatus,
'app': app,
}
return render(request, 'datas/revise.html', content)
@login_required
def revise(request, account_id, app_id):
if request.user.acc.id != account_id:
return redirect('%s?next=%s' % (settings.LOGIN_URL, request.path))
account = Acc.objects.get(pk=account_id)
app = App.objects.get(pk=app_id)
names = ['dataPre', 'dataPro', 'dataSho']
isModify = False
for i in range(3):
file = request.FILES.get('data%s' % (i+1))
path = os.path.join(app.path, names[i] + '.py')
if file is not None:
isModify = True
st, msg = modifyFile(file, path)
if not st:
prestatus = 'appspace'
content = {
'account': account,
'id': account_id,
'app': app,
'prestatus': prestatus,
'error_message': msg,
}
return render(request, 'datas/revise.html', content)
otherFile = request.FILES.get('others')
if otherFile is not None:
st, msg = uploadZip(otherFile, app.path)
if not st:
prestatus = 'appspace'
content = {
'id': account_id,
'prestatus': prestatus,
'error_message': msg
}
return render(request, 'datas/add.html', content)
new_name = request.POST['appName']
if new_name == '':
prestatus = 'appspace'
content = {
'account': account,
'id': account_id,
'app': app,
'prestatus': prestatus,
'error_message': "Please input app Name",
}
return render(request, 'datas/revise.html', content)
st, msg = modify_dir(app, new_name)
if not st:
prestatus = 'appspace'
content = {
'account': account,
'id': account_id,
'app': app,
'prestatus': prestatus,
'error_message': msg,
}
return render(request, 'datas/revise.html', content)
if isModify:
for f in app.filesapp_set.all():
f.isAnalysis = False
f.save()
prestatus = 'appspace'
content = {
'error_message': "Successfully revised",
'account': account,
'prestatus': prestatus,
'app': app,
'type': 2,
}
return render(request, 'datas/message.html', content)
@login_required
def show_loading(request, account_id, app_id, filename):
if request.user.acc.id != account_id:
return redirect('%s?next=%s' % (settings.LOGIN_URL, request.path))
#account = Acc.objects.get(pk=account_id)
#app = App.objects.get(pk=app_id)
content = {
'id': account_id,
'a_id': app_id,
'f': filename,
}
return render(request, 'datas/show_loading.html', content)
@login_required
def show_approx(request, account_id, app_id, filename):
if request.user.acc.id != account_id:
return redirect('%s?next=%s' % (settings.LOGIN_URL, request.path))
account = Acc.objects.get(pk=account_id)
app = App.objects.get(pk=app_id)
st, show = show_in_views(account, app, filename)
if not st:
content = {
'error_message': show,
'account': account,
'app': app,
'type': 2,
}
return render(request, 'datas/message.html', content)
para = show.show_appro()
para = showPara(*para)
content = {
'para': para,
'account': account,
'id': app_id,
'f': filename,
}
return render(request, 'datas/show_approx.html', content)
@login_required
def show_detail(request, account_id, app_id, filename, item):
if request.user.acc.id != account_id:
return redirect('%s?next=%s' % (settings.LOGIN_URL, request.path))
account = Acc.objects.get(pk=account_id)
app = App.objects.get(pk=app_id)
st, show = show_in_views(account, app, filename)
if not st:
content = {
'error_message': show,
'account': account,
'app': app,
'type': 2,
}
return render(request, 'datas/message.html', content)
if item == 'ALL':
items, *values = show.show_all()
else:
items, *values = show.show_det(item)
content = {
'items': items,
'values': list(zip(*values)),
'account': account,
'item': item,
'app': app,
'f': filename,
}
return render(request, 'datas/show_detail.html', content)
@login_required
def downloadLocal(request, account_id, filename):
if request.user.acc.id != account_id:
return redirect('%s?next=%s' % (settings.LOGIN_URL, request.path))
account = Acc.objects.get(pk=account_id)
path = os.path.join(account.path, 'downloadfiles', filename)
return FileResponse(open(path, 'rb'), as_attachment=True)
```
#### File: managsys/register/funcs.py
```python
from .models import Acc, IPaddr
import random
import os
from ftplib import FTP
import paramiko
import threading
from django.utils import timezone
"""
def sentence_name(name_id):
name_id = name_id.strip()
try:
if name_id.isdigit():
name_id = int(name_id)
account = Acc.objects.get(id=name_id)
else:
account = Acc.objects.get(account_name=name_id)
except Acc.DoesNotExist:
return (True, None)
else:
#passwd = account.account_passwd
return (False, account)
"""
def generate_id():
while 1:
ids = int(random.uniform(0.1, 1) * 1e10)
try:
account = Acc.objects.get(id=ids)
except Acc.DoesNotExist:
break
else:
continue
return ids
def make_dir(ids):
path = os.path.dirname(os.getcwd())
path = os.path.join(path, str(ids))
os.makedirs(path)
path2 = os.path.join(path, 'downloadfiles')
os.makedirs(path2)
return path
class Account_thread(threading.Thread):
def __init__(self, target, args=(), kwargs={}):
super().__init__()
self.target = target
self.args = args
self.kwargs = kwargs
self.result = (True, None)
def run(self):
self.result = self.target(*self.args, **self.kwargs)
def get_result(self):
return self.result
class Account():
def __init__(self, account):
self.account = account
def add_server(self, IPv4, user, passwd, path, port=21):
st, msg = self.check_ip(IPv4)
if not st:
return (st, msg)
st, msg = self.check_port(port)
if not st:
return (st, msg)
if user == '' or passwd == '' or path == '':
return (False, "Please input")
try:
self.account.ipaddr_set.create(
name=self.account,
ipAddr=IPv4,
userName=user,
                userPasswd=passwd,
port=port,
serverPath=path
)
except Exception as e:
return (False, "Failed to add: " + str(e))
return (True, None)
def add_multiserver(self, textPath):
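        # The text file is whitespace-separated; its header line must name exactly the
        # columns IPv4, port, user, passwd, path (in any order), and each following line
        # supplies one server, e.g. (hypothetical values):
        #   IPv4 port user passwd path
        #   192.168.0.10 21 alice secret /logs/access.log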
with open(textPath, 'r') as file:
first = file.readline().strip().split(' ')
while '' in first:
first.remove('')
second = first.copy()
items = ['IPv4', 'port', 'user', 'passwd', 'path']
for i in items:
if i not in second:
print("hello")
return (False, "Illegal text file format")
second.remove(i)
if second != []:
print("hi")
return (False, "Illegal text file format")
threads = []
for i in file.readlines():
i = i.strip().split(' ')
while '' in i:
i.remove('')
in_add = dict(zip(first, i))
thread = Account_thread(target=self.add_server, kwargs=in_add)
thread.daemon = True
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
st, msg = thread.get_result()
if not st:
return (False, msg)
return (True, None)
def change_fileState(self, ip_choice):
filename = os.path.basename(ip_choice.serverPath)
file = self.account.filesacc_set.filter(name=filename)
if list(file) == []:
self.account.filesacc_set.create(
name = os.path.basename(ip_choice.serverPath),
date = timezone.now()
)
else:
file[0].date = timezone.now()
file[0].save()
def fetch_file_ftp(self, ip_choice):
ftp=FTP()
try:
ftp.connect(ip_choice.ipAddr, ip_choice.port)
ftp.login(ip_choice.userName, ip_choice.userPasswd)
except Exception as e:
return (False, "Couldn't connect to the server: " + str(e))
path_a = os.path.basename(ip_choice.serverPath)
path_b = os.path.dirname(ip_choice.serverPath)
try:
ftp.cwd(path_b)
fd = open(os.path.join(self.account.path, 'downloadfiles', path_a), \
'wb')
ftp.retrbinary('RETR '+ path_a, fd.write)
except Exception as e:
return (False, "Couldn't access to the file: " + str(e))
fd.close()
self.change_fileState(ip_choice)
return (True, None)
def fetch_file_sftp(self, ip_choice):
try:
sf = paramiko.Transport(ip_choice.ipAddr, ip_choice.port)
sf.connect(username=ip_choice.userName, password=ip_choice.userPasswd)
sftp = paramiko.SFTPClient.from_transport(sf)
except Exception as e:
return (False, "Couldn't connect to the server: " + str(e))
path_a = os.path.basename(ip_choice.serverPath)
try:
sftp.get(ip_choice.serverPath.strip(), \
os.path.join(self.account.path, 'downloadfiles', path_a))
except Exception as e:
return (False, "Couldn't access to the file: " + str(e))
self.change_fileState(ip_choice)
return (True, None)
def fetch_multifile(self, choice_list, d_type):
if d_type.lower() == 'ftp':
func = self.fetch_file_ftp
elif d_type.lower() == 'sftp':
func = self.fetch_file_sftp
else:
return (False, "No type match")
threads = []
for choice_id in choice_list:
ip_choice = IPaddr.objects.get(pk=choice_id)
thread = Account_thread(target=func, args=(ip_choice,))
thread.daemon = True
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
st, msg = thread.get_result()
if not st:
return (False, msg)
return (True, None)
def modify_item(self, ip_choice, item, value):
if item == 'ipAddr':
st, msg = self.check_ip(value)
if not st:
return (st, msg)
elif item == 'port':
st, msg = self.check_port(value)
if not st:
return (st, msg)
command = "ip_choice.%s = '%s'" % (item, value)
try:
exec(command)
ip_choice.save()
except Exception as e:
return (False, "Failed to revise %s: %s" % (item, str(e)))
return (True, None)
def delete_item(self, ip_choice):
ip_choice.delete()
def delete_multifile(self, choice_list):
threads = []
for choice_id in choice_list:
ip_choice = IPaddr.objects.get(pk=choice_id)
thread = Account_thread(target=self.delete_item, args=(ip_choice,))
thread.daemon = True
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
thread.get_result()
return (True, None)
def check_ip(self, IPv4):
a = IPv4.split('.')
if len(a) != 4:
return (False, "Illegal IPv4 address")
for i in a:
if not i.isdigit():
return (False, "Illegal IPv4 address")
if int(i) > 255:
return (False, "Illegal IPv4 address")
return (True, None)
def check_port(self, port):
if type(port) == int:
return (True, None)
elif not port.isdigit():
return (False, "Illegal Port")
else:
return (True, None)
```
#### File: software-curriculum-design/supplement/dataPre2.py
```python
import torch
import torchvision
from torch import nn
import torch.nn.functional as F
import torchvision.transforms as transforms
import sqlite3
import os
import numpy as np
import threading, time
import zipfile, shutil
from xdb_test import XDB_test
import sys
class DataPre():
def __init__(self, filepath, setpath):
self.filepath = filepath.split('.')[0]
self.setpath = setpath
self.st_msg = (True, None)
setName = os.path.basename(filepath)
self.setName = setName.split('.')[0]
self.tableName = 'DataPre'
self.connection = sqlite3.connect(\
os.path.join(self.setpath, self.setName+'.db'))
self.cursor = self.connection.cursor()
path = os.path.join(self.setpath, self.setName)
if not os.path.exists(path):
os.mkdir(path)
self.states = np.array(['Wrong', 'Right'])
self.labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', \
'dog', 'frog', 'horse', 'ship', 'truck']
self.lock = threading.Lock()
#self.net = 0
#self.device = 0
#self.tups = []
def add_msg(self, msg):
if self.st_msg[0]:
self.st_msg = (False, msg)
def execute_set(self, demand):
self.cursor.execute(demand)
self.connection.commit()
data = self.cursor.fetchall()
return data
def create_table(self):
dm = '''CREATE TABLE %s
(
id integer NOT NULL PRIMARY KEY AUTOINCREMENT,
labelled_img varchar(50) NULL,
interfered_img varchar(50) NULL,
state varchar(10) NULL
);''' % self.tableName
try:
self.execute_set(dm)
except Exception as e:
self.add_msg(str(e))
def insert_table(self, labelled_img, interfered_img, state):
dm = '''INSERT INTO %s(labelled_img, interfered_img, state)
VALUES('%s', '%s', '%s');'''\
% (self.tableName, labelled_img, interfered_img, state)
try:
self.execute_set(dm)
except Exception as e:
self.add_msg(str(e))
def insert_table_all(self, tups):
for tup in tups:
dm = '''INSERT INTO %s(labelled_img, interfered_img, state)
VALUES('%s', '%s', '%s');'''\
% (self.tableName, tup[0], tup[1], tup[2])
try:
self.cursor.execute(dm)
except Exception as e:
self.add_msg(str(e))
try:
self.connection.commit()
except Exception as e:
self.add_msg(str(e))
def get_pics(self, test_data):
#net = torch.load(os.path.join(self.setpath, 'model'), map_location='cpu')['net']
#device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#net.to(device)
#net.eval()
tups = []
tol = len(test_data)
for i, (inputs, name) in enumerate(test_data):
if (i+1) % 10 != 0:
continue
inputs = inputs.to(self.device)
outputs = self.net(inputs)
pre_label = outputs.max(1)[1]
lbs = [self.labels[i] for i in pre_label]
sentc = list(np.array(lbs) == np.array(name))
sentc = [int(j) for j in sentc]
sentc = list(self.states[sentc])
tup = list(zip(name, lbs, sentc))
tups.extend(tup)
print('%s/%s' % (i, tol))
return tups
def initialize(self):
filepath = self.filepath + '.zip'
        z = None
        try:
            z = zipfile.ZipFile(filepath, 'r')
            z.extractall(path=os.path.dirname(filepath))
        except Exception as e:
            # clean up anything that was partially extracted, then record the error
            if z is not None:
                for i in z.namelist():
                    extracted = os.path.join(os.path.dirname(filepath), i)
                    if os.path.isdir(extracted):
                        shutil.rmtree(extracted, ignore_errors=True)
                    elif os.path.isfile(extracted):
                        os.remove(extracted)
            self.st_msg = (False, str(e))
        if z is not None:
            z.close()
sys.path.append(self.filepath)
import procdb
procdb.update(self.filepath)
sys.path.remove(self.filepath)
transform = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
]
)
path = os.path.join(self.filepath, 'test_list.txt')
test_set = XDB_test(path, transform = transform)
test_data = torch.utils.data.DataLoader(test_set, batch_size = 128, shuffle = False)
self.net = torch.load(os.path.join(self.setpath, 'model'))['net']
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.net.to(self.device)
self.net.eval()
return test_data
def estab_table(self):
self.create_table()
test_data = self.initialize()
print(time.ctime())
tups = self.get_pics(test_data)
print(time.ctime())
self.insert_table_all(tups)
print(time.ctime())
shutil.rmtree(self.filepath)
def get_one_pic(self, t_d):
for inputs, name in t_d:
inputs = inputs.to(self.device)
outputs = self.net(inputs)
pre_label = outputs.max(1)[1]
lbs = [self.labels[i] for i in pre_label]
sentc = list(np.array(lbs) == np.array(name))
sentc = [int(j) for j in sentc]
sentc = list(self.states[sentc])
tup = list(zip(name, lbs, sentc))
self.lock.acquire()
self.tups.extend(tup)
self.lock.release()
def estab_table2(self):
self.create_table()
test_data = self.initialize()
self.tups = []
t_d = []
for i, (inputs, name) in enumerate(test_data):
t_d.append((inputs, name))
if (i+1) % 10 == 0:
thread = threading.Thread(target=self.get_one_pic, args=(t_d,))
thread.daemon = True
thread.start()
print(i)
t_d = []
if t_d != []:
thread = threading.Thread(target=self.get_one_pic, args=(t_d,))
thread.daemon = True
thread.start()
print(i)
thread.join()
self.insert_table_all(self.tups)
shutil.rmtree(self.filepath)
def show(self):
dm = "SELECT DISTINCT(labelled_img) FROM %s" % (self.tableName)
data = self.execute_set(dm)
print(data)
print("hello")
#for i in data:
# print(i)
```
#### File: software-curriculum-design/supplement/dataPro.py
```python
import sqlite3
import os
import numpy as np
import pickle
class DataPro():
def __init__(self, pre):
self.pre = pre
self.path = os.path.join(pre.setpath, pre.setName)
dm = "SELECT COUNT(*) FROM %s" % self.pre.tableName
data = self.pre.execute_set(dm)
if data[0][0] == 0:
self.st_msg = (False, "There is no datum to analyze")
os.remove(os.path.join(pre.setpath, pre.setName+'.db'))
os.rmdir(self.path)
else:
self.st_msg = (True, None)
def acc_Dates(self):
dm = "SELECT DISTINCT(Dates) FROM %s" % self.pre.tableName
data = self.execute_set(dm)
date_dict = {i[0]:[] for i in data}
for i in date_dict.keys():
dm = "SELECT Timing, user, IP, state FROM %s WHERE Dates = '%s'" \
% (self.pre.tableName, i)
data = self.execute_set(dm)
date_dict[i] = np.array(data)
self.save(date_dict, 'Dates')
def acc_user(self):
dm = "SELECT DISTINCT user, IP FROM %s" % self.pre.tableName
data = self.execute_set(dm)
user_dict = {i:[] for i in data}
for i in user_dict.keys():
dm = "SELECT Dates, Timing, state FROM %s WHERE user = '%s' AND IP = '%s'" \
% (self.pre.tableName, i[0], i[1])
data = self.execute_set(dm)
user_dict[i] = np.array(data)
self.save(user_dict, 'user')
def acc_state(self):
state_dict = {'Success': 0, 'Failure': 0}
dm = "SELECT COUNT(*) FROM %s WHERE state = 'Success'" % self.pre.tableName
data = self.execute_set(dm)
state_dict['Success'] = data[0][0]
dm = "SELECT COUNT(*) FROM %s WHERE state = 'Failure'" % self.pre.tableName
data = self.execute_set(dm)
state_dict['Failure'] = data[0][0]
self.save(state_dict, 'state')
def execute_set(self, demand):
self.pre.cursor.execute(demand)
self.pre.connection.commit()
data = self.pre.cursor.fetchall()
return data
def save(self, data, name):
with open(os.path.join(self.path, name+'.pkl'), 'wb') as f:
pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
def add_msg(self, msg):
if self.st_msg[0]:
self.st_msg = (False, msg)
def acc_all(self):
try:
self.acc_Dates()
self.acc_user()
self.acc_state()
except Exception as e:
self.add_msg(str(e))
```
#### File: software-curriculum-design/supplement/dataSho.py
```python
import numpy as np
import pickle, os
class DataSho():
def __init__(self, pro):
self.pro = pro
def load(self, name):
with open(os.path.join(self.pro.path, name+'.pkl'), 'rb') as f:
return pickle.load(f)
def show_items(self):
return 'Dates', 'Timing', 'user', 'IP', 'state'
def show_appro(self):
chart = {'type': 'column'}
title = {'text': 'Analyzed log'}
date_dict = self.load('Dates')
xAxis = {'categories': list(date_dict.keys())}
yAxis = {'title': {'text': 'log10(Numbers)'}, 'type': 'logarithmic'}
success = []
failure = []
log = True
for i in date_dict.keys():
a = date_dict[i][:, 3]
#success.append(np.log10(np.sum(a == 'Success')+1))
#failure.append(np.log10(np.sum(a == 'Failure')+1))
success.append(np.sum(a == 'Success'))
failure.append(np.sum(a == 'Failure'))
series = {"Success": success, "Failure": failure}
#return series
return chart, title, xAxis, yAxis, series
#def show_det(self, return_value):
# return self.show_date(return_value)
def show_det(self, dates):
dating = self.load('Dates')[dates]
users = list(set(dating[:, 1]))
user_ips = []
for u in users:
rows = (dating[:, 1] == u)
ips = list(set(dating[rows, 2]))
user_ips.extend([(u, ip) for ip in ips])
success = []
failure = []
for user_ip in user_ips:
rows = (dating[:, 1] == user_ip[0])
a = dating[rows, :]
rows = (a[:, 2] == user_ip[1])
a = a[rows, :]
success.append(np.sum(a=='Success'))
failure.append(np.sum(a=='Failure'))
user_ips = np.array(user_ips)
users = list(user_ips[:, 0])
ips = list(user_ips[:, 1])
return ["user", "ip", "Success", "Failure"], users, ips, success, failure
def show_all(self):
user_ips = self.load('user')
users = []
ips = []
datings = []
success = []
failure = []
for user_ip, mat in user_ips.items():
dating = list(set(mat[:, 0]))
for d in dating:
users.append(user_ip[0])
ips.append(user_ip[1])
datings.append(d)
rows = (mat[:, 0] == d)
a = mat[rows, 2]
success.append(np.sum(a=='Success'))
                failure.append(np.sum(a=='Failure'))
namelist = ["user", "ip", "Date", "Success", "Failure"]
return namelist, users, ips, datings, success, failure
``` |
{
"source": "596acres/django-livinglots-friendlyowners",
"score": 2
} |
#### File: django-livinglots-friendlyowners/livinglots_friendlyowners/views.py
```python
from django.views.generic import CreateView, TemplateView
from django_monitor.views import MonitorMixin
class BaseAddFriendlyOwnerView(MonitorMixin, CreateView):
def get_template_names(self):
return ['livinglots/friendlyowners/add_friendlyowner.html',]
class BaseAddFriendlyOwnerSuccessView(TemplateView):
def get_template_names(self):
return ['livinglots/friendlyowners/add_friendlyowner_success.html',]
``` |
{
"source": "596acres/django-livinglots",
"score": 2
} |
#### File: django-livinglots/livinglots/models.py
```python
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
def get_model_name(name, optional=True):
try:
return settings.LIVING_LOTS['MODELS'][name]
except KeyError:
if not optional:
raise ImproperlyConfigured(('Could not find a %s model. Did you '
'set LIVING_LOTS.MODELS.%s in your '
'settings.py?') % (name, name))
return None
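# Example LIVING_LOTS setting these helpers expect (hypothetical app labels / model names):
# LIVING_LOTS = {
#     'MODELS': {
#         'lot': 'lots.Lot',
#         'owner': 'owners.Owner',
#     },
# }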
def get_model(name, optional=True):
try:
model_name = get_model_name(name, optional=optional)
return apps.get_model(*model_name.split('.'))
except Exception:
if not optional:
raise ImproperlyConfigured(('Could not find a %s model. Did you '
'set LIVING_LOTS.MODELS.%s in your '
'settings.py?') % (name, name))
return None
def get_lot_model():
return get_model('lot')
def get_lot_model_name():
return get_model_name('lot')
def get_lotgroup_model():
return get_model('lotgroup')
def get_lotlayer_model():
return get_model('lotlayer', optional=True)
def get_organizer_model():
return get_model('organizer')
def get_organizer_model_name():
return get_model_name('organizer')
def get_owner_model():
return get_model('owner')
def get_owner_model_name():
return get_model_name('owner')
def get_owner_contact_model():
return get_model('owner_contact')
def get_owner_contact_model_name():
return get_model_name('owner_contact')
def get_owner_group_model():
return get_model('owner_group')
def get_owner_group_model_name():
return get_model_name('owner_group')
def get_parcel_model():
return get_model('parcel')
def get_parcel_model_name():
return get_model_name('parcel')
def get_pathway_model():
return get_model('pathway')
def get_stewardproject_model():
return get_model('stewardproject')
def get_stewardproject_model_name():
return get_model_name('stewardproject')
def get_watcher_model():
return get_model('watcher')
def get_watcher_model_name():
return get_model_name('watcher')
``` |
{
"source": "596acres/django-livinglots-steward",
"score": 2
} |
#### File: django-livinglots-steward/livinglots_steward/models.py
```python
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.translation import ugettext_lazy as _
from livinglots_organize.models import BaseOrganizer
class OptedInStewardProjectManager(models.Manager):
"""
A manager that only returns StewardProject instances where the group asked
to be included on the map.
"""
def get_queryset(self):
return super(OptedInStewardProjectManager, self).get_queryset().filter(
include_on_map=True,
)
class StewardProjectMixin(models.Model):
objects = models.Manager()
opted_in = OptedInStewardProjectManager()
project_name = models.CharField(_('project name'),
max_length=256,
help_text=_('The name of the project using this lot.'),
)
use = models.ForeignKey('livinglots_lots.Use',
limit_choices_to={'visible': True},
help_text=_('How is the project using the land?'),
verbose_name=_('use'),
)
support_organization = models.CharField(_('support organization'),
max_length=300,
blank=True,
null=True,
help_text=_("What is your project's support organization, if any?"),
)
land_tenure_status = models.CharField(_('land tenure status'),
choices=(
('owned', _('project owns the land')),
('licensed', _('project has a license for the land')),
('lease', _('project has a lease for the land')),
('access', _('project has access to the land')),
('not sure', _("I'm not sure")),
),
default=_('not sure'),
max_length=50,
help_text=_('What is the land tenure status for the project? (This '
'will not be shared publicly.)'),
)
include_on_map = models.BooleanField(_('include on map'),
default=True,
help_text=_('Can we include the project on our map?'),
)
class Meta:
abstract = True
class BaseStewardProject(StewardProjectMixin):
started_here = models.BooleanField(default=False)
content_type = models.ForeignKey(ContentType, related_name='+')
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
class Meta:
abstract = True
class BaseStewardNotification(StewardProjectMixin, BaseOrganizer):
"""
A notification from someone who is part of a stewarding project letting us
know that they are stewards on a given lot.
"""
class Meta:
abstract = True
def __unicode__(self):
return self.name
``` |
{
"source": "5977862/pyspider",
"score": 2
} |
#### File: database/sqlalchemy/resultdb.py
```python
import re
import six
import time
import json
import sqlalchemy.exc
from sqlalchemy import (create_engine, MetaData, Table, Column,
String, Float, LargeBinary)
from sqlalchemy.engine.url import make_url
from pyspider.database.base.resultdb import ResultDB as BaseResultDB
from pyspider.libs import utils
from .sqlalchemybase import SplitTableMixin, result2dict
class ResultDB(SplitTableMixin, BaseResultDB):
__tablename__ = ''
def __init__(self, url):
self.table = Table('__tablename__', MetaData(),
Column('taskid', String(64), primary_key=True, nullable=False),
Column('url', String(1024)),
Column('result', LargeBinary),
Column('updatetime', Float(32)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
self.url = make_url(url)
if self.url.database:
database = self.url.database
self.url.database = None
try:
engine = create_engine(self.url, convert_unicode=True,
pool_recycle=3600)
engine.execute("CREATE DATABASE IF NOT EXISTS %s" % database)
except sqlalchemy.exc.SQLAlchemyError:
pass
self.url.database = database
self.engine = create_engine(url, convert_unicode=True,
pool_recycle=3600)
self._list_project()
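    # Results are split into one table per project: _tablename() (from SplitTableMixin)
    # derives the per-project table name, and self.table.name is re-pointed at it before
    # each create/insert/select below.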
def _create_project(self, project):
assert re.match(r'^\w+$', project) is not None
if project in self.projects:
return
self.table.name = self._tablename(project)
self.table.create(self.engine)
@staticmethod
def _parse(data):
for key, value in list(six.iteritems(data)):
if isinstance(value, six.binary_type):
data[key] = utils.text(value)
if 'result' in data:
if isinstance(data['result'], bytearray):
data['result'] = str(data['result'])
data['result'] = json.loads(data['result'])
return data
@staticmethod
def _stringify(data):
if 'result' in data:
data['result'] = utils.utf8(json.dumps(data['result']))
return data
def save(self, project, taskid, url, result):
if project not in self.projects:
self._create_project(project)
self._list_project()
self.table.name = self._tablename(project)
obj = {
'taskid': taskid,
'url': url,
'result': result,
'updatetime': time.time(),
}
if self.get(project, taskid, ('taskid', )):
del obj['taskid']
return self.engine.execute(self.table.update()
.where(self.table.c.taskid == taskid)
.values(**self._stringify(obj)))
else:
return self.engine.execute(self.table.insert()
.values(**self._stringify(obj)))
def select(self, project, fields=None, offset=0, limit=None):
if project not in self.projects:
self._list_project()
if project not in self.projects:
return
self.table.name = self._tablename(project)
columns = [getattr(self.table.c, f, f) for f in fields] if fields else self.table.c
for task in self.engine.execute(self.table.select()
.with_only_columns(columns=columns)
.order_by(self.table.c.updatetime.desc())
.offset(offset).limit(limit)
.execution_options(autocommit=True)):
yield self._parse(result2dict(columns, task))
def count(self, project):
if project not in self.projects:
self._list_project()
if project not in self.projects:
return 0
self.table.name = self._tablename(project)
for count, in self.engine.execute(self.table.count()):
return count
def get(self, project, taskid, fields=None):
if project not in self.projects:
self._list_project()
if project not in self.projects:
return
self.table.name = self._tablename(project)
columns = [getattr(self.table.c, f, f) for f in fields] if fields else self.table.c
for task in self.engine.execute(self.table.select()
.with_only_columns(columns=columns)
.where(self.table.c.taskid == taskid)
.limit(1)):
return self._parse(result2dict(columns, task))
```
#### File: pyspider/scheduler/task_queue.py
```python
import time
import heapq
import logging
import threading
try:
from UserDict import DictMixin
except ImportError:
from collections import Mapping as DictMixin
from .token_bucket import Bucket
from six.moves import queue as Queue
logger = logging.getLogger('scheduler')
try:
cmp
except NameError:
cmp = lambda x, y: (x > y) - (x < y)
class InQueueTask(DictMixin):
__slots__ = ('taskid', 'priority', 'exetime')
__getitem__ = lambda *x: getattr(*x)
__setitem__ = lambda *x: setattr(*x)
__iter__ = lambda self: iter(self.__slots__)
__len__ = lambda self: len(self.__slots__)
keys = lambda self: self.__slots__
def __init__(self, taskid, priority=0, exetime=0):
self.taskid = taskid
self.priority = priority
self.exetime = exetime
def __cmp__(self, other):
if self.exetime == 0 and other.exetime == 0:
return -cmp(self.priority, other.priority)
else:
return cmp(self.exetime, other.exetime)
def __lt__(self, other):
return self.__cmp__(other) < 0
class PriorityTaskQueue(Queue.Queue):
'''
TaskQueue
    Items with the same taskid will be merged
'''
def _init(self, maxsize):
self.queue = []
self.queue_dict = dict()
def _qsize(self, len=len):
return len(self.queue_dict)
def _put(self, item, heappush=heapq.heappush):
if item.taskid in self.queue_dict:
task = self.queue_dict[item.taskid]
changed = False
if item.priority > task.priority:
task.priority = item.priority
changed = True
if item.exetime < task.exetime:
task.exetime = item.exetime
changed = True
if changed:
self._resort()
else:
heappush(self.queue, item)
self.queue_dict[item.taskid] = item
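    # _get()/top skip entries whose taskid has been cleared by __delitem__ (lazy deletion),
    # so removing a task never requires rebuilding the heap.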
def _get(self, heappop=heapq.heappop):
while self.queue:
item = heappop(self.queue)
if item.taskid is None:
continue
self.queue_dict.pop(item.taskid, None)
return item
return None
@property
def top(self):
while self.queue and self.queue[0].taskid is None:
heapq.heappop(self.queue)
if self.queue:
return self.queue[0]
return None
def _resort(self):
heapq.heapify(self.queue)
def __contains__(self, taskid):
return taskid in self.queue_dict
def __getitem__(self, taskid):
return self.queue_dict[taskid]
def __setitem__(self, taskid, item):
assert item.taskid == taskid
self.put(item)
def __delitem__(self, taskid):
self.queue_dict.pop(taskid).taskid = None
class TaskQueue(object):
'''
    Task queue for the scheduler: a priority queue for ready tasks, a time queue for
    delayed tasks, and a processing queue for tasks handed out but not yet marked done.
'''
processing_timeout = 10 * 60
def __init__(self, rate=0, burst=0):
self.mutex = threading.RLock()
self.priority_queue = PriorityTaskQueue()
self.time_queue = PriorityTaskQueue()
self.processing = PriorityTaskQueue()
self.bucket = Bucket(rate=rate, burst=burst)
@property
def rate(self):
return self.bucket.rate
@rate.setter
def rate(self, value):
self.bucket.rate = value
@property
def burst(self):
return self.bucket.burst
@burst.setter
def burst(self, value):
self.bucket.burst = value
def check_update(self):
'''
        Check the time queue and the processing queue, and move tasks back into the
        priority queue when their execution time has arrived or processing has timed out.
'''
self._check_time_queue()
self._check_processing()
def _check_time_queue(self):
now = time.time()
self.mutex.acquire()
while self.time_queue.qsize() and self.time_queue.top and self.time_queue.top.exetime < now:
task = self.time_queue.get_nowait()
task.exetime = 0
self.priority_queue.put(task)
self.mutex.release()
def _check_processing(self):
now = time.time()
self.mutex.acquire()
while self.processing.qsize() and self.processing.top and self.processing.top.exetime < now:
task = self.processing.get_nowait()
if task.taskid is None:
continue
task.exetime = 0
self.priority_queue.put(task)
logger.info("processing: retry %s", task.taskid)
self.mutex.release()
def put(self, taskid, priority=0, exetime=0):
'''Put a task into task queue'''
now = time.time()
task = InQueueTask(taskid, priority, exetime)
self.mutex.acquire()
if taskid in self.priority_queue:
self.priority_queue.put(task)
elif taskid in self.time_queue:
self.time_queue.put(task)
elif taskid in self.processing and self.processing[taskid].taskid:
# force update a processing task is not allowed as there are so many
# problems may happen
pass
else:
if exetime and exetime > now:
self.time_queue.put(task)
else:
self.priority_queue.put(task)
self.mutex.release()
def get(self):
'''Get a task from queue when bucket available'''
if self.bucket.get() < 1:
return None
now = time.time()
self.mutex.acquire()
try:
task = self.priority_queue.get_nowait()
self.bucket.desc()
except Queue.Empty:
self.mutex.release()
return None
task.exetime = now + self.processing_timeout
self.processing.put(task)
self.mutex.release()
return task.taskid
def done(self, taskid):
'''Mark task done'''
if taskid in self.processing:
self.mutex.acquire()
if taskid in self.processing:
del self.processing[taskid]
self.mutex.release()
return True
return False
def delete(self, taskid):
if taskid not in self:
return False
if taskid in self.priority_queue:
self.mutex.acquire()
del self.priority_queue[taskid]
self.mutex.release()
elif taskid in self.time_queue:
self.mutex.acquire()
del self.time_queue[taskid]
self.mutex.release()
elif taskid in self.processing:
self.done(taskid)
return True
def size(self):
return self.priority_queue.qsize() + self.time_queue.qsize() + self.processing.qsize()
def is_processing(self, taskid):
'''
return True if taskid is in processing
'''
return taskid in self.processing and self.processing[taskid].taskid
def __len__(self):
return self.size()
def __contains__(self, taskid):
if taskid in self.priority_queue or taskid in self.time_queue:
return True
if taskid in self.processing and self.processing[taskid].taskid:
return True
return False
if __name__ == '__main__':
task_queue = TaskQueue()
task_queue.processing_timeout = 0.1
task_queue.put('a3', 3, time.time() + 0.1)
task_queue.put('a1', 1)
task_queue.put('a2', 2)
assert task_queue.get() == 'a2'
time.sleep(0.1)
task_queue._check_time_queue()
assert task_queue.get() == 'a3'
assert task_queue.get() == 'a1'
task_queue._check_processing()
assert task_queue.get() == 'a2'
assert len(task_queue) == 0
``` |
{
"source": "59de44955ebd/QMediaTool",
"score": 2
} |
#### File: 59de44955ebd/QMediaTool/myprocess.py
```python
from PyQt5.QtCore import QProcess
########################################
#
########################################
class MyProcess (QProcess):
def kill (self):
if 'ffmpeg' in self.program() or 'bash' in self.program():
# exit cleanly by sending 'q'
self.write(b'q')
self.waitForBytesWritten()
ok = self.waitForFinished(3000)
if not ok:
super().kill()
else:
super().kill()
#self._proc.terminate() -> only for GUI apps, sends WM_CLOSE
``` |
{
"source": "59-lmq/MyDataDealForYOLOv2",
"score": 3
} |
#### File: 59-lmq/MyDataDealForYOLOv2/tray.py
```python
# import os
# import shutil
# import xml.etree.ElementTree as ET
# import cv2
# from numpy import zeros
#
#
# def make_new_folder(folder_path):
# """
# 用来创建新文件夹
# :param folder_path: 文件夹的位置
# :return:
# """
# if os.path.exists(folder_path) is False:
# print(f"[INFO] 文件夹{folder_path} 不存在,正在创建……")
# os.mkdir(folder_path)
# print(f"[INFO] 文件夹{folder_path} 创建完毕。")
#
#
# def change_image_brightness(image, image_path, a=0.5, g=10):
# """
# 用来改变图片的亮度,并保存到新的文件地址
# :param image: 输入的图片,需要opencv格式
# :param image_path: 需要保存的文件位置
# :param a: 亮度控制,0-1,默认为 0.5
# :param g: 默认为 10
# :return:
# """
# h, w, ch = image.shape
# src2 = zeros([h, w, ch], image.dtype)
# dst = cv2.addWeighted(image, a, src2, 1 - a, g)
# cv2.imshow('dst', dst)
# cv2.waitKey(25)
# cv2.imwrite(image_path, dst)
#
#
# def copyh2t(file_path, new_file_path):
#
# shutil.copy(file_path, new_file_path)
#
#
# def deal_xml(xml_path, file_name, user_path):
# doc = ET.parse(xml_path)
# root = doc.getroot()
# sub1 = root.find('filename') # 找到filename标签,
# sub1.text = file_name
# sub2 = root.find('path')
# sub3 = root.find('folder')
# sub2.text = user_path + sub3.text + '\\' + file_name
# doc.write(xml_path) # 保存修改
#
#
# def find_file():
# # 从标签文件中找到对应的文件并放在新的文件夹中
# this_path = os.getcwd()
# xmls_path = '/new_x'
# images_path = '/new_i'
# new_xmls_path = '/new_xml'
# new_images_path = '/new_images'
# files_xml = os.listdir(this_path + xmls_path)
# if os.path.exists('.' + new_xmls_path) == False:
# os.makedirs('.' + new_xmls_path)
# if os.path.exists('.' + new_images_path) == False:
# os.makedirs('.' + new_images_path)
# print(this_path)
#
# for file_ in files_xml:
# print(file_)
# if os.path.exists('.' + new_xmls_path + '/' + file_) == False:
# os.makedirs('.' + new_xmls_path + '/' + file_)
# if os.path.exists('.' + new_images_path + '/' + file_) == False:
# os.makedirs('.' + new_images_path + '/' + file_)
# path_xml_label = this_path + xmls_path + '/' + file_
# files_ = os.listdir(path_xml_label)
# for file__ in files_:
# print(file__)
# xml_name = os.path.splitext(file__)
# xml_path = path_xml_label + '/' + xml_name[0] + xml_name[1]
# new_xml_path = this_path + new_xmls_path + '/' + file_ + '/' + xml_name[0] + xml_name[1]
# image_path = this_path + images_path + '/' + file_ + '/' + xml_name[0] + '.jpg'
# new_image_path = this_path + new_images_path + '/' + file_ + '/' + xml_name[0] + '.jpg'
# copyh2t(image_path, new_image_path)
# copyh2t(xml_path, new_xml_path)
#
#
# def get_new_image():
# # 改变图片亮度,并复制粘贴标签文件
# a = [1.3]
# this_path = os.getcwd()
# data_path = "\\test_data\\"
# img_path = 'Mr_Hai_2\\'
# xml_path = 'Mr_Hai_xmls_2\\'
# color_name = data_path + 'new_color_'
# x_color_name = data_path + 'new_color_xmls_'
# files = os.listdir(this_path + data_path + img_path)
#
# print(files)
# for i in range(len(a)):
# new_img_folder_path = this_path + color_name + str(i) + '_' + str(int(a[i]*10))
# new_xml_folder_path = this_path + x_color_name + str(i) + '_' + str(int(a[i]*10))
# make_new_folder(new_img_folder_path)
# make_new_folder(new_xml_folder_path)
# for step, file in enumerate(files):
# print(step)
# img_name_path = this_path + data_path + img_path + file
# new_img_name_path = new_img_folder_path + "/" + file
# # print(img_name_path, new_img_name_path)
# img = cv2.imread(img_name_path)
# img = cv2.resize(img, (224, 224))
# cv2.imshow('img', img)
# # cv2.waitKey(1)
# change_image_brightness(img, a[i], 10, new_img_name_path)
# now_xml_path = this_path + data_path + xml_path + file.split('.')[0] + ".xml"
# new_xml_path = new_xml_folder_path + "/" + file.split('.')[0] + ".xml"
# shutil.copy(now_xml_path, new_xml_path)
#
#
# # get_new_image()
#
#
# def mian():
# # this_path = os.getcwd()
# #
#
# # 将文件夹中的图片进行排序
# this_path = os.getcwd()
# xml = '/test_data/raw_half_xmls'
# images = '/test_data/raw_half_images'
# path = this_path + xml
# xml_path = path + '/'
# images_path = this_path + images + '/'
# user_path = "F:\\pythonProject\\TOOLS\\ForPictureAndVideo\\test_data\\raw_half_images"
# files = os.listdir(path)
# print(files)
# for _, file0 in enumerate(files):
#
# files_ = os.listdir(path + '/' + file0)
# for i, file in enumerate(files_):
# print(i)
# # 获得一级目录下的所有文件
# path0 = path + '/' + file
# # 得到该一级文件目录文件内的绝对路径
# ext = os.path.splitext(file)
# new_xml_file_name = file0 + '_' + str(i) + '.xml'
# new_img_file_name = file0 + '_' + str(i) + '.jpg'
# img_file_path = images_path + file0 + '/'
# img_file = ext[0] + '.jpg'
# xml_file = xml_path + file0 + '/' + file
# deal_xml(xml_path=xml_file, file_name=new_img_file_name, user_path=user_path)
#
# os.chdir(xml_path + '/' + file0 + '/')
# os.rename(file, new_xml_file_name)
#
# os.chdir(img_file_path)
# os.rename(img_file, new_img_file_name)
# print(i, '----------done--------')
#
#
# # mian()
#
#
# def main():
# # 将某文件夹中所有文件夹中的文件复制粘贴到某文件夹中
# this_path = os.getcwd()
# img_path = '/test_data/raw_half_images/'
# xml_path = '/test_data/raw_half_xmls'
# new_img_path = '/test_data/raw_3quarter_images/'
# new_xml_path = '/test_data/raw_3quarter_xmls/'
# files = os.listdir(this_path + xml_path)
# for file in files:
# files_ = os.listdir(this_path + xml_path + '/' + file)
# for i, file_ in enumerate(files_):
# file_name = os.path.splitext(file_)
# make_new_folder(this_path + new_xml_path)
# make_new_folder(this_path + new_img_path)
# old_xml_file_path = this_path + xml_path + '/' + file + '/' + file_name[0] + file_name[1]
# new_xml_file_path = this_path + new_xml_path + '/' + file_name[0] + file_name[1]
# copyh2t(old_xml_file_path, new_xml_file_path)
#
# old_img_file_path = this_path + img_path + '/' + file + '/' + file_name[0] + '.jpg'
# new_img_file_path = this_path + new_img_path + '/' + file_name[0] + '.jpg'
# copyh2t(old_img_file_path, new_img_file_path)
# print(i, '----------done--------')
#
#
# main()
#
#
# def renaming():
# this_path = os.getcwd()
# xml = '/xml'
# images = '/images'
# path = this_path + xml
# xml_path = path + '/'
# user_path = "E:\\Pycharm\\peixun\\2\\images\\"
# i = 0
# files_ = os.listdir(path)
# for file in files_:
# # Get the absolute paths of the files inside this first-level directory
# ext = os.path.splitext(file)
# new_xml_file_name = str(i) + '.xml'
# new_img_file_name = str(i) + '.jpg'
# img_file_path = this_path + images + '/'
# img_file = ext[0] + '.jpg'
# xml_file = xml_path + file
# doc = ET.parse(xml_file)
# root = doc.getroot()
# sub1 = root.find('filename') # find the filename tag
# sub1.text = new_img_file_name
# sub2 = root.find('path')
# sub3 = root.find('folder')
# sub2.text = user_path + sub3.text + '\\' + sub1.text
# doc.write(xml_file) # 保存修改
#
# os.chdir(xml_path + '/')
# os.rename(file, new_xml_file_name)
#
# os.chdir(img_file_path)
# os.rename(img_file, new_img_file_name)
# print(i, '----------done--------')
# i += 1
#
#
# # renaming()
``` |
{
"source": "5A4B48/bing-it-on",
"score": 3
} |
#### File: 5A4B48/bing-it-on/bingit.py
```python
import requests
import json
import webbrowser
import time
import random
terms = []
def getterms():
a = requests.get('https://corporatebs-generator.sameerkumar.website/')
b = json.loads(a.content.decode('latin-1'))
c = b.get('phrase').split()
return c
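# Added comment (not in the original): the loop below gathers roughly 45
# search terms from the corporate-BS phrase API above, then opens a Bing
# search for each term with a random 1-8 second pause between requests.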
while len(terms) < 45:
d = getterms()
for i in d:
terms.append(i)
for q in terms:
webbrowser.open("https://bing.com/search?q=" + q)
time.sleep(random.randint(1, 8))
``` |
{
"source": "5A4B48/protectwise-lib",
"score": 3
} |
#### File: protectwise-lib/protectwise/__init__.py
```python
import datetime
import getpass
import json
import os
import time
from configparser import ConfigParser
import requests
try:
input = raw_input
except:
pass
homedirectory = os.path.expanduser("~")
def get_times(daydiff):
    # Takes an integer number of days and returns a (start, end) tuple of
    # epoch-millisecond timestamps as strings
#//Todo make this more granular to support hours, times, ranges, etc.
daystotime = daydiff * 86400
et = int(datetime.datetime.utcnow().timestamp())
st = et - daystotime
endtime = et * 1000
starttime = st * 1000
return (str(starttime), str(endtime))
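# Added example (sketch): get_times(1) yields a (start, end) pair of
# epoch-millisecond strings covering the last 24 hours, e.g. roughly
# ('1600000000000', '1600086400000') depending on the current time.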
def initialize_token():
email = input("Email: ")
    password = getpass.getpass("Password: ")
try:
response = requests.post(
url="https://api.protectwise.com/api/v1/token",
headers={
"Content-Type": "application/json",
},
data=json.dumps({
"email": email,
"password": password
}))
token = json.loads(response.content)['token']
config = ConfigParser()
config.add_section('Token')
config.set('Token', 'token', token)
with open(os.path.join(homedirectory, '.config', 'protectwise.ini'),
"w") as configfile:
config.write(configfile)
except requests.exceptions.RequestException:
print('HTTP Request failed')
def generate_token():
# Get Token from protectwise
# POST https://api.protectwise.com/api/v1/token
if os.path.isdir(os.path.join(homedirectory, '.config')):
if os.path.isfile(os.path.join(homedirectory, '.config', 'protectwise.ini')):
dothis = input(
"Protectwise config file already exists, refresh token? [Y/N]: "
)
if str(dothis).upper().startswith('Y'):
initialize_token()
else:
print("[*] You selected to not refresh token, aborting")
else:
print("[*] creating protectwise configuration file as it does not exist")
initialize_token()
else:
        creatdir = input("Directory " + os.path.join(homedirectory, '.config') + " does not exist, create"
" it now? [Y/N]:")
if str(creatdir).upper().startswith("Y"):
os.mkdir(os.path.join(homedirectory, '.config'))
print("[+] created directory " + os.path.join(homedirectory, '.config'))
initialize_token()
def get_token():
config = ConfigParser()
config.read(os.path.join(homedirectory, '.config', 'protectwise.ini'))
token = config.get('Token', 'token')
return token
def get_domainReputation(domain):
# Domain Reputation
# GET https://api.protectwise.com/api/v1/reputations/domains/ucar.edu
token = get_token()
try:
response = requests.get(
url="https://api.protectwise.com/api/v1/reputations/domains/" +
domain,
params={
"details": "domain,geo",
},
headers={
"X-Access-Token": token,
}, )
return response.content
except requests.exceptions.RequestException:
print('HTTP Request failed')
def get_ipReputation(ip):
# IP Reputation
# GET https://api.protectwise.com/api/v1/reputations/ips/x.x.x.x
token = get_token()
try:
response = requests.get(
url="https://api.protectwise.com/api/v1/reputations/ips/" + ip,
params={
"details": "ip,geo",
},
headers={
"X-Access-Token": token,
}, )
return response.content
except requests.exceptions.RequestException:
print('HTTP Request failed')
def get_event_info(days):
# Event Collection
# GET https://api.protectwise.com/api/v1/events
    # Yields the events (each a dict) whose 'state' is None.
token = get_token()
start, end = get_times(days)
try:
response = requests.get(
url="https://api.protectwise.com/api/v1/events",
params={
"start": start,
"end": end,
},
headers={
"X-Access-Token": token,
}, )
events = json.loads(response.content)['events']
for e in events:
if e['state'] is None:
yield e
except requests.exceptions.RequestException:
print('HTTP Request failed')
def get_pcap(eventid, filename, basedir=os.getcwd()):
# Event Pcap Download
    # GET https://api.protectwise.com/api/v1/pcaps/events/{eventid}
token = get_token()
try:
response = requests.get(
url="https://api.protectwise.com/api/v1/pcaps/events/" + eventid,
params={
"filename": filename,
},
headers={
"X-Access-Token": token,
}, )
with open(os.path.join(basedir, filename) + '.pcap', 'wb') as f:
f.write(response.content)
except requests.exceptions.RequestException:
print('HTTP Request failed')
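# Added usage sketch (the event field names below are an assumption, not
# something this file confirms):
#   for ev in get_event_info(1):
#       get_pcap(ev['id'], str(ev['id']))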
``` |
{
"source": "5A59/Zvm",
"score": 3
} |
#### File: base/utils/common_utils.py
```python
import struct
# Mainly used by the jump/branch instructions
def get_short_from_bytes(byte1, byte2):
val = (byte1 << 8) | byte2
return struct.unpack('>h', val.to_bytes(2, byteorder='big'))[0]
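# Added example: get_short_from_bytes(0xFF, 0xFE) == -2, because 0xFFFE is
# reinterpreted as a signed big-endian 16-bit value.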
def get_int_from_bytes(data):
if len(data) == 2:
return struct.unpack('>h', data)[0]
if len(data) == 4:
return struct.unpack('>i', data)[0]
return 0
def get_float_from_bytes(data):
return struct.unpack('>f', data)[0]
def get_long_from_bytes(high, low):
return struct.unpack('>q', high + low)[0]
def get_double_from_bytes(high, low):
return struct.unpack('>d', high + low)[0]
def get_string_from_bytes(data):
return data.decode('utf-8')
```
#### File: base/utils/error_handler.py
```python
def rise_runtime_error(msg):
raise RuntimeError("jvm internal error: " + msg)
def rise_null_point_error():
raise RuntimeError("error: java.lang.NullPointerException")
def rise_class_cast_error():
raise RuntimeError("error: java.lang.ClassCastException")
def rise_error(error):
raise error
class InternalError(Exception):
pass
class OutOfMemoryError(Exception):
pass
class StackOverflowError(Exception):
pass
class UnknownError(Exception):
pass
```
#### File: Zvm/interpreter/interpreter.py
```python
from runtime.thread import Thread, Frame, JavaStack
from runtime.jclass import Method
from base.utils import print_utils, error_handler
from instruction import instruction
from interpreter.code_parser import CodeParser
from jgc.gc import GC
import threading
class Interpreter(object):
def __init__(self):
self.thread = None
def run(self, method):
        print_utils.print_jvm_status(threading.current_thread().name)
print_utils.print_jvm_status('\n=================== running status =====================\n')
self.thread = Thread.new_thread()
thread = self.thread
frame = Frame(thread, method)
thread.add_frame(frame)
code_parser = CodeParser(method.code)
while True:
if not thread.has_frame():
break
GC.check_gc()
frame = thread.top_frame()
method = frame.method
code_parser.reset(method.code, frame.pc)
ins_code = code_parser.read_code()
print_utils.print_jvm_status('ins_code: %x' % ins_code)
ins = instruction.get_instruction(ins_code)
ins.read_operands(code_parser)
            thread.pc = frame.pc  # remember the pc of the previous instruction
frame.pc = code_parser.pc
ins.execute_wrapper(frame)
print_utils.print_jvm_status('\n=================== output =====================')
print_utils.StreamPrinter.print_all(thread)
Thread.finish_thread(thread)
@staticmethod
def exec_method(method):
m_interpreter = Interpreter()
m_interpreter.run(method)
```
#### File: Zvm/runtime/jclass.py
```python
from base.utils import common_utils, print_utils
from java_class.class_file import *
from java_class.class_parser import ClassParser
from runtime.thread import Slot
from runtime.heap import Heap
from base.jvm_config import jdk_path
import os
# Corresponds to Class in Java
class JClass(object):
ACC_PUBLIC = 0x0001
ACC_PRIVATE = 0x0002
ACC_PROTECTED = 0x0004
ACC_STATIC = 0x0008
ACC_FINAL = 0x0010
ACC_VOLATILE = 0x0040
ACC_TRANSIENT = 0x0080
ACC_SYNTHETIC = 0x1000
ACC_ENUM = 0x4000
@staticmethod
def is_public(flag):
return flag & JClass.ACC_PUBLIC != 0
@staticmethod
def is_private(flag):
return flag & JClass.ACC_PRIVATE != 0
@staticmethod
def is_static(flag):
return flag & JClass.ACC_STATIC != 0
def __init__(self):
self.access_flag = None
self.name = None
self.super_class_name = None
self.super_class = None
self.interfaces = None
self.fields = None
self.methods = None
self.constant_pool = None
self.class_loader = None
self.static_fields = None # map{ name: Slot }
self.has_inited = False
def new_jclass(self, class_file):
Heap.new_jclass(self)
self.access_flag = common_utils.get_int_from_bytes(class_file.access_flag)
self.constant_pool = ConstantPool.new_constant_pool(class_file)
self.fields = Field.new_fields(class_file.fields, self.constant_pool.constants)
self.methods = Method.new_methods(self, class_file.methods, self.constant_pool.constants)
super_class = self.constant_pool.constants[common_utils.get_int_from_bytes(class_file.super_class)]
if super_class is not None:
            self.super_class_name = super_class.class_name  # taken from the method area
self.interfaces = None
self.static_fields = {}
for sf in self.__get_static_fields():
desc = sf.descriptor
slot = Slot()
if desc == 'B' or desc == 'I' or desc == 'J' or desc == 'S' or desc == 'Z':
slot.num = 0
elif desc == 'C':
slot.num = '0'
elif desc == 'F':
slot.num = 0.0
elif desc == 'D':
slot.num = 0.0
self.static_fields[sf.name] = slot
def get_instance_fields(self):
return [field for field in self.fields if not JClass.is_static(field.access_flag)]
# return Field[]
def __get_static_fields(self):
return [field for field in self.fields if JClass.is_static(field.access_flag)]
def get_main_method(self):
methods = self.methods
for method in methods:
if method.name == 'main' and method.descriptor == '([Ljava/lang/String;)V':
return method
return None
class ConstantPool(object):
def __init__(self):
self.constants = None
@staticmethod
def new_constant_pool(class_file):
r_cp = class_file.constant_pool
constants = []
for cp in r_cp:
if isinstance(cp, ClassInfo):
constants.append(ClassRef.new_class_ref(r_cp, cp))
elif isinstance(cp, FieldRefInfo):
constants.append(FieldRef.new_field_ref(r_cp, cp))
elif isinstance(cp, MethodRefInfo):
constants.append(MethodRef.new_method_ref(r_cp, cp))
elif isinstance(cp, InterfaceMethodRefInfo):
constants.append(None)
elif isinstance(cp, StringInfo):
st = r_cp[common_utils.get_int_from_bytes(cp.string_index)]
st = common_utils.get_string_from_bytes(st.bytes)
jstring = JString()
jstring.data = st
constants.append(jstring)
elif isinstance(cp, IntegerInfo):
jint = JInteger()
jint.data = common_utils.get_int_from_bytes(cp.bytes)
constants.append(jint)
elif isinstance(cp, FloatInfo):
jfloat = JFloat()
jfloat.data = common_utils.get_float_from_bytes(cp.bytes)
constants.append(jfloat)
elif isinstance(cp, LongInfo):
jlong = JLong()
jlong.data = common_utils.get_long_from_bytes(cp.high_bytes, cp.low_bytes)
constants.append(jlong)
elif isinstance(cp, DoubleInfo):
jdouble = JDouble()
jdouble.data = common_utils.get_double_from_bytes(cp.high_bytes, cp.low_bytes)
constants.append(jdouble)
elif isinstance(cp, NameAndTypeInfo):
constants.append(None)
elif isinstance(cp, Utf8Info):
constants.append(common_utils.get_string_from_bytes(cp.bytes))
elif isinstance(cp, MethodHandleInfo):
constants.append(None)
else:
constants.append(None)
constants_pool = ConstantPool()
constants_pool.constants = constants
return constants_pool
def get_attribute(attributes, constant_pool, name):
for attr in attributes:
index = common_utils.get_int_from_bytes(attr.attribute_name_index)
aname = constant_pool[index]
if aname == name:
return attr
return None
class Field(object):
def __init__(self):
self.access_flag = None
self.name = None
self.descriptor = None
self.descriptor_index = None
self.constant_value_index = None
        self.signature = None  # records generic type parameters
self.type = None # JClass
def is_public(self):
return MemberRef.check_state(self.access_flag, FieldRef.ACC_PUBLIC)
def is_protected(self):
return MemberRef.check_state(self.access_flag, FieldRef.ACC_PROTECTED)
def is_private(self):
return MemberRef.check_state(self.access_flag, FieldRef.ACC_PRIVATE)
def is_static(self):
return MemberRef.check_state(self.access_flag, FieldRef.ACC_STATIC)
def is_final(self):
return MemberRef.check_state(self.access_flag, FieldRef.ACC_FINAL)
def is_volatile(self):
return MemberRef.check_state(self.access_flag, FieldRef.ACC_VOLATILE)
def is_transient(self):
return MemberRef.check_state(self.access_flag, FieldRef.ACC_TRANSIENT)
@staticmethod
def new_fields(cf_fields, constant_pool):
fields = []
for f in cf_fields:
nf = Field()
nf.access_flag = common_utils.get_int_from_bytes(f.access_flags)
nf.name = constant_pool[common_utils.get_int_from_bytes(f.name_index)]
nf.descriptor_index = common_utils.get_int_from_bytes(f.descriptor_index)
nf.descriptor = constant_pool[nf.descriptor_index]
attr = get_attribute(f.attributes, constant_pool, 'ConstantValue')
if attr is not None:
nf.constant_value_index = common_utils.get_int_from_bytes(attr.constant_value_index)
fields.append(nf)
return fields
class Method(object):
def __init__(self):
self.access_flag = None
self.name = None
self.descriptor = None
self.max_stack = None
self.max_locals = None
self.code = None
self.exceptions = None # ExceptionTable[]
self.arg_desc = None
self.jclass = None
@staticmethod
def new_methods(jclass, cf_methods, constant_pool):
methods = []
for m in cf_methods:
nm = Method()
nm.jclass = jclass
nm.access_flag = common_utils.get_int_from_bytes(m.access_flags)
nm.name = constant_pool[common_utils.get_int_from_bytes(m.name_index)]
nm.descriptor = constant_pool[common_utils.get_int_from_bytes(m.descriptor_index)]
attr = get_attribute(m.attributes, constant_pool, 'Code')
nm.max_stack = common_utils.get_int_from_bytes(attr.max_stack)
nm.max_locals = common_utils.get_int_from_bytes(attr.max_locals)
nm.code = attr.code
nm.exceptions = []
for ex in attr.exception_table:
jex = JException()
jex.start_pc = common_utils.get_int_from_bytes(ex.start_pc)
jex.end_pc = common_utils.get_int_from_bytes(ex.end_pc)
jex.handler_pc = common_utils.get_int_from_bytes(ex.handler_pc)
jex.catch_type = common_utils.get_int_from_bytes(ex.catch_type)
nm.exceptions.append(jex)
nm.arg_desc = Method.get_arg_desc(nm.descriptor)
methods.append(nm)
return methods
@staticmethod
def get_arg_desc(descs):
arg_desc = []
desc = ''
for s in descs:
if s == ')':
break
if len(desc) == 0:
if s == 'B' or s == 'C' or s == 'D' or s == 'F' or s == 'I' or s == 'J' or s == 'S' or s == 'Z':
desc = s
arg_desc.append(desc)
desc = ''
elif s == 'L':
desc += s
else:
if desc[0] == 'L':
desc += s
if s == ';':
arg_desc.append(desc)
desc = ''
elif desc[0] == '[':
if 'L' in desc:
desc += s
if s == ';':
arg_desc.append(desc)
desc = ''
else:
desc += s
if s != '[':
arg_desc.append(desc)
desc = ''
return arg_desc
class JException(object):
def __init__(self):
self.start_pc = 0
self.end_pc = 0
self.handler_pc = 0
self.catch_type = 0
class Ref(object):
def __init__(self):
self.cp = None
self.class_name = None
self.cache_class = None # JClass
def resolve_class(self, class_loader, need_re_resolve=False, class_name=None):
if self.cache_class is not None and not need_re_resolve:
return self.cache_class
if class_loader is None:
class_loader = ClassLoader.default_class_loader()
if class_name is None:
class_name = self.class_name
self.cache_class = class_loader.load_class(class_name)
self.cache_class.class_loader = class_loader
return self.cache_class
class ClassRef(Ref):
def __init__(self):
super(ClassRef, self).__init__()
@staticmethod
def new_class_ref(cp, class_info): # ConstantPool ConstantClassInfo
cr = ClassRef()
cr.cp = cp
tmp = cp[common_utils.get_int_from_bytes(class_info.name_index)]
cr.class_name = common_utils.get_string_from_bytes(tmp.bytes)
return cr
class MemberRef(Ref):
def __init__(self):
super(MemberRef, self).__init__()
self.name = None
self.descriptor = None
self.access_flag = None
@staticmethod
def check_state(flag, state):
if flag is None:
return False
return (flag & state) != 0
@staticmethod
def get_string(cp, index_byte):
return common_utils.get_string_from_bytes(cp[common_utils.get_int_from_bytes(index_byte)].bytes)
@staticmethod
def get_obj(cp, index_byte):
return cp[common_utils.get_int_from_bytes(index_byte)]
class FieldRef(MemberRef):
ACC_PUBLIC = 0x0001
ACC_PRIVATE = 0x0002
ACC_PROTECTED = 0x0004
ACC_STATIC = 0x0008
ACC_FINAL = 0x0010
ACC_VOLATILE = 0x0040
ACC_TRANSIENT = 0x0080
ACC_SYNTHETIC = 0x1000
ACC_ENUM = 0x4000
def __init__(self):
super(FieldRef, self).__init__()
self.field = None
def is_public(self):
return MemberRef.check_state(self.access_flag, FieldRef.ACC_PUBLIC)
def is_protected(self):
return MemberRef.check_state(self.access_flag, FieldRef.ACC_PROTECTED)
def is_private(self):
return MemberRef.check_state(self.access_flag, FieldRef.ACC_PRIVATE)
def is_static(self):
return MemberRef.check_state(self.access_flag, FieldRef.ACC_STATIC)
def is_final(self):
return MemberRef.check_state(self.access_flag, FieldRef.ACC_FINAL)
def is_volatile(self):
return MemberRef.check_state(self.access_flag, FieldRef.ACC_VOLATILE)
def is_transient(self):
return MemberRef.check_state(self.access_flag, FieldRef.ACC_TRANSIENT)
@staticmethod
def new_field_ref(cp, field_ref_info):
fr = FieldRef()
cl = cp[common_utils.get_int_from_bytes(field_ref_info.class_index)]
fr.class_name = MemberRef.get_string(cp, cl.name_index)
fr.cp = cp
name_and_type = MemberRef.get_obj(cp, field_ref_info.name_and_type_index)
fr.name = MemberRef.get_string(cp, name_and_type.name_index)
fr.descriptor = MemberRef.get_string(cp, name_and_type.descriptor_index)
return fr
def resolve_field(self, class_loader):
if self.field is not None:
return self.field
if self.cache_class is None:
self.resolve_class(class_loader)
fields = self.cache_class.fields
for f in fields:
if f.name == self.name:
self.field = f
break
return self.field
class MethodRef(MemberRef):
def __init__(self):
super(MethodRef, self).__init__()
self.method = None
@staticmethod
def new_method_ref(cp, method_ref_info):
mr = MethodRef()
cl = cp[common_utils.get_int_from_bytes(method_ref_info.class_index)]
mr.class_name = MemberRef.get_string(cp, cl.name_index)
mr.cp = cp
name_and_type = MemberRef.get_obj(cp, method_ref_info.name_and_type_index)
mr.name = MemberRef.get_string(cp, name_and_type.name_index)
mr.descriptor = MemberRef.get_string(cp, name_and_type.descriptor_index)
return mr
    # TODO: handle method access permissions, etc.
def resolve_method(self, class_loader, need_re_resolve=False, class_name=None):
if self.method is not None and not need_re_resolve:
return self.method
if self.cache_class is None or need_re_resolve:
self.resolve_class(class_loader, need_re_resolve, class_name)
methods = self.cache_class.methods
for m in methods:
if m.name == self.name and m.descriptor == self.descriptor:
self.method = m
break
return self.method
def resolve_method_with_super(self, class_loader):
self.resolve_method(class_loader, True)
if self.method is None:
super_class = self.cache_class.super_class
while super_class is not None:
for m in super_class.methods:
if m.name == self.name:
self.method = m
break
if self.method is not None:
break
super_class = super_class.super_class
return self.method
def re_resolve_method_with_super_by_class_name(self, class_loader, class_name):
self.resolve_method(class_loader, True, class_name)
if self.method is None:
super_class = self.cache_class.super_class
while super_class is not None:
for m in super_class.methods:
if m.name == self.name:
self.method = m
break
if self.method is not None:
break
super_class = super_class.super_class
return self.method
class BaseType(object):
def __init__(self):
self.data = None
class JInteger(BaseType):
def __init__(self):
super(JInteger, self).__init__()
class JFloat(BaseType):
def __init__(self):
super(JFloat, self).__init__()
class JLong(BaseType):
def __init__(self):
super(JLong, self).__init__()
class JDouble(BaseType):
def __init__(self):
super(JDouble, self).__init__()
class JString(BaseType):
def __init__(self):
super(JString, self).__init__()
# TODO: this should probably be split out into its own package
class ClassLoader(object):
default_loader = None
def __init__(self):
self._loading_classes = []
self._loaded_classes = {}
self.pkg_path = jdk_path
self.hack()
def get_all_loaded_class(self):
return self._loading_classes
def hack(self):
        # preload it up front
self.load_class('java/lang/Object')
@staticmethod
def default_class_loader():
        # TODO: thread synchronization
if ClassLoader.default_loader is None:
ClassLoader.default_loader = ClassLoader()
return ClassLoader.default_loader
def add_path(self, path):
self.pkg_path.append(path)
    # TODO: handle jar/zip class paths
def load_class(self, class_name):
        # TODO: synchronize class loading between threads; busy-wait for now
if class_name in self._loading_classes:
while True:
if class_name not in self._loading_classes:
break
if class_name in self._loaded_classes:
return self._loaded_classes[class_name]
jclass = self.__load_class(class_name)
self._loading_classes.remove(class_name)
return jclass
def __load_class(self, class_name):
self._loading_classes.append(class_name)
if class_name[0] == '[':
return self.__load_array_class(class_name)
for path in self.pkg_path:
class_path = path + class_name.replace('.', '/') + '.class'
if not os.path.exists(class_path):
continue
print_utils.print_jvm_status('load class: ' + class_path)
jclass = self.define_class(class_name, class_path)
self._loaded_classes[class_name] = jclass
return jclass
return None
def __load_array_class(self, class_name):
jclass = JClass()
jclass.super_class_name = 'java/lang/Object'
jclass.class_loader = self
jclass.has_inited = True
jclass.name = class_name
        self._loaded_classes[class_name] = jclass
        return jclass
def define_class(self, class_name, path):
parser = ClassParser(path)
parser.parse()
jclass = JClass()
jclass.name = class_name
jclass.new_jclass(parser.class_file)
jclass.super_class = self.load_super_class(jclass)
return jclass
def load_super_class(self, jclass):
if jclass.super_class_name == 'java/lang/Object' or jclass.super_class_name is None:
return
return self.load_class(jclass.super_class_name)
```
#### File: Zvm/test/test.py
```python
from runtime.jclass import ClassLoader
from runtime.jclass import JClass
from interpreter import interpreter
def main():
loader = ClassLoader()
j_class = loader.load_class('Hello')
print(j_class)
method = j_class.get_main_method()
interpreter.Interpreter.exec_method(method)
if __name__ == '__main__':
main()
```
#### File: 5A59/Zvm/Zvm.py
```python
import path_import
import sys
from runtime.jclass import ClassLoader
from interpreter import interpreter
def parse_params():
args = sys.argv
if len(args) <= 1:
print('use: python Zvm.py xx[.class]')
print('eg: python Zvm.py main')
print('eg: python Zvm.py main.class')
return None
name = sys.argv[1]
if name.endswith('.class'):
name = name[:name.find('.class')]
return name
def main():
class_file = parse_params()
if class_file is None:
return
loader = ClassLoader()
j_class = loader.load_class(class_file)
print(j_class)
method = j_class.get_main_method()
interpreter.Interpreter.exec_method(method)
if __name__ == '__main__':
main()
``` |
{
"source": "5A5H/PyFEMP",
"score": 3
} |
#### File: PyFEMP/elements/Laplace_Q1.py
```python
import numpy as np
def Elmt_Init():
'''
The initialization function of a PyFEMP element.
'''
NoElementDim = 2 # Number of dimensions
    NoElementNodes = 4 # Number of nodes on the element (here: 4-Node Quadrilateral)
NoElementHistory = 0 # Number of history variables
ElementDofNames = ["T"] # Element degrees of freedom specified by a string for each scalar
ElementMaterialNames = ["alpha_q"] # Element material parameters specified by a string for each scalar
ElementPostNames = ["T"] # Element postprocessing parameters
return NoElementDim, NoElementNodes, ElementDofNames, NoElementHistory, ElementMaterialNames, ElementPostNames
def SH0_Q1(xi, eta):
'''
SH0_Q1(xi, eta) -> SH0
Return a two dimensional array containing shape functions and derivatives for Q1 element.
Usage: SH0( NodeNumber, SHPIndex)
with SHPIndex = {
0 -> shape function,
1 -> derived shape function w.r.t. xi,
2 -> derived shape function w.r.t. eta
}
'''
return 1/4 * np.array([
[(1.0-xi)*(1.0-eta), -(1.0-eta), -(1.0-xi)],
[(1.0+xi)*(1.0-eta), (1.0-eta), -(1.0+xi)],
[(1.0+xi)*(1.0+eta), (1.0+eta), (1.0+xi)],
[(1.0-xi)*(1.0+eta), -(1.0+eta), (1.0-xi)]
], dtype=np.float64)
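# Added note: at the element centre, SH0_Q1(0.0, 0.0)[:, 0] equals
# [0.25, 0.25, 0.25, 0.25]; the four shape functions sum to 1 everywhere
# (partition of unity), which is a quick sanity check.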
def Elmt_KS(XL, UL, Hn, Ht, Mat, dt):
'''
This function returns element vector and matrix.
Generally the element matrix is the straight derivative
    of the element vector with respect to the nodal degrees of freedom,
in the order of the UL field.
Both need to be returned as numpy arrays, with dimensions:
element vector r_e : number of nodal degrees of freedom times number of nodes
    element matrix k_e : (NoNodes * NoNodalDOF, NoNodes * NoNodalDOF)
'''
verbose = False # True;
if verbose: print('XI :',XL)
if verbose: print('UI :',UL)
if verbose: print('Hn :',Hn)
if verbose: print('Ht :',Ht)
if verbose: print('b :',Mat)
if verbose: print('dt :',dt)
# element parameters
NoElmtNodes = 4
NoNodalDOF = 1
NoDim = 2
# initialize element vector /matrix
r_e = np.zeros(NoElmtNodes*NoNodalDOF)
k_e = np.zeros((NoElmtNodes*NoNodalDOF, NoElmtNodes*NoNodalDOF))
# geometry and dofs
XI = XL.reshape(-1,NoDim) # reformat to a matrix field: XI = [[X1x, X1y] ,[X2x, X2y] ,[X3x, X3y] ,[X4x, X4y]]
TI = UL
# Material Parameters
alpha_q = Mat[0]
# Provide integration points
a = 1/np.sqrt(3)
EGP = np.array([[-a,-a,1],[a,-a,1],[a,a,1],[-a,a,1]])
NoInt = len(EGP)
# Start integration Loop
for GP in range(NoInt):
if verbose: print('GP: ',GP)
xi, eta, wgp = EGP[GP]
# compute current shape functions
SH0 = SH0_Q1(xi, eta)
# compute mapping
Jed = np.einsum('Ii,Ij->ij', XI ,SH0[:,1:3])
detJ = np.linalg.det(Jed)
if (detJ <= 0): raise NameError("Error unphysical mapping detected.")
if verbose: print('detJ: ',detJ)
Jed_inv = np.linalg.inv(Jed)
# map shape function derivative
SHP = np.copy(SH0)
SHP[:,1:3] = np.einsum('Ij,ji->Ii', SH0[:,1:3], Jed_inv)
# compute gradient of temperature / heat flux
gradT = np.einsum('Ii,I->i' , SHP[:,1:3], TI)
if verbose: print('gradT: ')
if verbose: print(gradT)
q = - alpha_q * gradT
# export element vector
for I in range(NoElmtNodes):
# integrate nodal right hand side and export
r_e[I*NoNodalDOF+0] += q.dot(SHP[I,1:3]) * wgp * detJ
for J in range(NoElmtNodes):
# compute nodal stiffness matrix
k_e[I*NoNodalDOF+0, J*NoNodalDOF+0] += - alpha_q * SHP[J,1:3].dot( SHP[I,1:3])* wgp * detJ
return r_e, k_e
def Elmt_Post(XL, UL, Hn, Ht, Mat, dt, PostName):
'''
'''
if (PostName=="T"):
r_post = np.array([UL[0], UL[1], UL[2], UL[3]])
return r_post
else:
print("Waring: PostName "+PostName+" not defined!")
return np.array([0.0, 0.0, 0.0, 0.0])
```
#### File: PyFEMP/elements/LE_T1.py
```python
import numpy as np
def Elmt_Init():
NoElementDim = 2
NoElementNodes = 3
NoElementHistory = 0
ElementDofNames = ["UX", "UY"]
ElementMaterialNames = ["E", "nu"]
ElementPostNames = ["UX", "UY", "SigMises"]
return NoElementDim, NoElementNodes, ElementDofNames, NoElementHistory, ElementMaterialNames, ElementPostNames
def SH0_T1(xi, eta):
'''
SH0_T1(xi, eta) -> SH0
Return a two dimensional array containing shape functions and derivatives for T1 element.
Usage: SH0( NodeNumber, SHPIndex)
with SHPIndex = {
0 -> shape function,
1 -> derived shape function w.r.t. xi,
2 -> derived shape function w.r.t. eta
}
'''
return np.array([
[xi , 1.0, 0.0],
[eta , 0.0, 1.0],
[1 -xi -eta, -1.0, -1.0],
], dtype=np.float64)
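# Added note: at the centroid, SH0_T1(1.0/3.0, 1.0/3.0)[:, 0] equals
# [1/3, 1/3, 1/3]; the three shape functions sum to 1 everywhere.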
def BmatVoigt2D(SHP):
'''
BmatVoigt(SHP) -> Bmat
Returns a B-Matrix (as dim:3) for computing the strain vector in voigt notation.
This B-Matrix assumes a 2D plane strain approximation.
    SHP is a shape function matrix with derived functions w.r.t. physical space.
Input:
SHP( NodeNumber, SHPIndex)
with SHPIndex = {
0 -> shape function,
1 -> derived shape function w.r.t. x,
2 -> derived shape function w.r.t. y
}
Output:
Bmat(NodeNumber, i, j)
for eps_i = B_Iij * u_Ij
with
eps_i = [eps_11, eps_22, eps_33, 2*eps_12, 2*eps_23, 2*eps_13]
u_Ij = [[u_11, u_12] ... [u_n1, u_n2]]
'''
return np.array([
[
[N[1], 0 ],
[0 , N[2]],
[0 , 0 ],
[N[2], N[1]],
[0 , 0 ],
[0 , 0 ]
]
for N in SHP
], dtype=np.float64)
def HookeMatVoigt(lam, mue):
'''
HookeMatVoigt(lam, mue) -> Cmat
Returns the constitutive Voigt MATRIX(6,6) for a Hooke material law.
    The inputs are the elastic Lamé constants.
sig_i = Cmat_ij * eps_j
with
sig_i = [sig_11, sig_22, sig_33, sig_12, sig_23, sig_13]
eps_i = [eps_11, eps_22, eps_33, 2*eps_12, 2*eps_23, 2*eps_13]
'''
return np.array([
[lam + 2* mue, lam , lam , 0 , 0 , 0 ],
[lam , lam + 2* mue, lam ,0 , 0 , 0 ],
[lam , lam , lam + 2* mue, 0 , 0 , 0 ],
[0 , 0 , 0 , mue, 0 , 0 ],
[0 , 0 , 0 , 0 , mue, 0 ],
[0 , 0 , 0 , 0 , 0 , mue]
], dtype=np.float64)
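# Added sketch: the Lamé constants are derived from E and nu exactly as in
# Elmt_KS below, e.g. for E = 210000 and nu = 0.3:
#   lam = E*nu/((1+nu)*(1-2*nu)) ≈ 121153.8, mue = E/(2*(1+nu)) ≈ 80769.2
#   Cmat = HookeMatVoigt(lam, mue)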
def Elmt_KS(XL, UL, Hn, Ht, Mat, dt):
'''
'''
verbose = False # True;
if verbose: print('XI :',XL)
if verbose: print('UI :',UL)
if verbose: print('Hn :',Hn)
if verbose: print('Ht :',Ht)
if verbose: print('b :',Mat)
if verbose: print('dt :',dt)
# element specific paremeters
NoElementNodes = 3
NoNodalDOF = 2
NoDimension = 2
# initialize element vector /matrix
r_e = np.zeros(NoElementNodes*NoNodalDOF)
k_e = np.zeros((NoElementNodes*NoNodalDOF, NoElementNodes*NoNodalDOF))
# geometry and dofs
XI = XL.reshape(-1,NoDimension)
uI = UL.reshape(-1,NoNodalDOF)
# Material Parameters
Emod, nu = Mat[0], Mat[1]
lam , mue = (Emod*nu)/((1.0+nu)*(1.0-2.0*nu)), Emod/(2.0*(1.0+nu))
# constitutive matrix (hooke)
Cmat = HookeMatVoigt(lam, mue)
# Provide integration points
EGP = 1.0/6.0 * np.array([[1.0, 1.0, 1.0], [4.0, 1.0, 1.0], [1.0, 4.0, 1.0]])
NoInt = len(EGP)
# Start integration Loop
for GP in range(NoInt):
if verbose: print('GP: ',GP)
xi, eta, wgp = EGP[GP]
# compute current shape functions
SH0 = SH0_T1(xi, eta)
# compute mapping
Jed = np.einsum('Ii,Ij->ij', XI ,SH0[:,1:3])
detJ = np.linalg.det(Jed)
if (detJ <= 0): raise NameError("Error unphysical mapping detected.")
if verbose: print('detJ: ',detJ)
Jed_inv = np.linalg.inv(Jed)
# map shape function derivative
SHP = np.copy(SH0)
SHP[:,1:3] = np.einsum('Ij,ji->Ii', SH0[:,1:3], Jed_inv)
Bmat = BmatVoigt2D(SHP)
# compute strains / stresses
eps = np.einsum('Iij,Ij->i', Bmat, uI)
if verbose: print('eps: ')
if verbose: print(np.array([eps[0], eps[3]/2, eps[4]/2, eps[3]/2, eps[1], eps[5]/2, eps[4]/2, eps[5]/2, eps[2]]))
sig = np.einsum('ij,j->i' , Cmat, eps)
if verbose: print('sig: ')
if verbose: print(np.array([[sig[0], sig[3], sig[4]],[sig[3], sig[1], sig[5]],[sig[4], sig[5], sig[2]]]))
# export right hand side | this element has 4 nodes with 2 dofs each
for I in range(NoElementNodes):
# compute nodal right hand side
nodal_rhs_vec = np.einsum('i,ij->j',sig, Bmat[I])
# integrate nodal right hand side and export
r_e[I*2+0] += nodal_rhs_vec[0] * wgp * detJ
r_e[I*2+1] += nodal_rhs_vec[1] * wgp * detJ
for J in range(NoElementNodes):
# compute nodal stiffness matrix
nodal_stiffness = np.einsum('ki,ko,oj->ij', Bmat[I], Cmat, Bmat[J])
k_e[I*2+0, J*2+0] += nodal_stiffness[0,0] * wgp * detJ
k_e[I*2+0, J*2+1] += nodal_stiffness[0,1] * wgp * detJ
k_e[I*2+1, J*2+0] += nodal_stiffness[1,0] * wgp * detJ
k_e[I*2+1, J*2+1] += nodal_stiffness[1,1] * wgp * detJ
return r_e, k_e
def Elmt_Post(XL, UL, Hn, Ht, Mat, dt, PostName):
'''
'''
## NEW post strategy is to return indexes
    # and contributions
# 3 nodes
r_post = np.zeros(3)
# geometry and dofs
XI = XL.reshape(-1,2)
uI = UL.reshape(-1,2)
# Material Parameters
Emod, nu = Mat[0], Mat[1]
lam , mue = (Emod*nu)/((1.0+nu)*(1.0-2.0*nu)), Emod/(2.0*(1.0+nu))
# constitutive matrix (hooke)
Cmat = HookeMatVoigt(lam, mue)
# Provide integration points
EGP = 1.0/6.0 * np.array([[1.0, 1.0, 1.0], [4.0, 1.0, 1.0], [1.0, 4.0, 1.0]])
NoInt = len(EGP)
# Start integration Loop
for GP in range(NoInt):
xi, eta, wgp = EGP[GP]
# compute current shape functions
SH0 = SH0_T1(xi, eta)
# compute mapping
Jed = np.einsum('Ii,Ij->ij', XI ,SH0[:,1:3])
detJ = np.linalg.det(Jed)
if (detJ <= 0): raise NameError("Error unphysical mapping detected.")
Jed_inv = np.linalg.inv(Jed)
# map shape function derivative
SHP = np.copy(SH0)
SHP[:,1:3] = np.einsum('Ij,ji->Ii', SH0[:,1:3], Jed_inv)
Bmat = BmatVoigt2D(SHP)
# compute strains / stresses
eps = np.einsum('Iij,Ij->i', Bmat, uI)
sig = np.einsum('ij,j->i' , Cmat, eps)
sig_vm = np.sqrt( \
sig[0]**2 + sig[1]**2 + sig[2]**2 \
-sig[0]*sig[1] -sig[0]*sig[2] -sig[1]*sig[2] \
+ 3* (sig[3]**2 + sig[4]**2 + sig[5]**2) \
)
if (PostName=="SigMises"):
r_post += sig_vm * SHP[:,0]
if (PostName=="UX"):
r_post = np.array([UL[0], UL[2], UL[4]])
return r_post
elif (PostName=="UY"):
r_post = np.array([UL[1], UL[3], UL[5]])
return r_post
elif (PostName=="SigMises"):
return r_post
else:
print("Waring: PostName "+PostName+" not defined!")
return np.array([0.0, 0.0, 0.0, 0.0])
```
#### File: PyFEMP/elements/T1_J2.py
```python
import numpy as np
def Elmt_Init():
NoElementDim = 2
NoElementNodes = 3
NoElementHistory = 7
ElementDofNames = ["UX", "UY"]
ElementMaterialNames = ["E", "nu", "y0", "kh"]
ElementPostNames = ["UX", "UY", "SigMises", "a"]
return NoElementDim, NoElementNodes, ElementDofNames, NoElementHistory, ElementMaterialNames, ElementPostNames
def SH0_T1(xi, eta):
'''
SH0_T1(xi, eta) -> SH0
Return a two dimensional array containing shape functions and derivatives for T1 element.
Usage: SH0( NodeNumber, SHPIndex)
with SHPIndex = {
0 -> shape function,
1 -> derived shape function w.r.t. xi,
2 -> derived shape function w.r.t. eta
}
'''
return np.array([
[xi , 1.0, 0.0],
[eta , 0.0, 1.0],
[1 -xi -eta, -1.0, -1.0],
], dtype=np.float64)
def BmatVoigt2D(SHP):
'''
BmatVoigt(SHP) -> Bmat
Returns a B-Matrix (as dim:3) for computing the strain vector in voigt notation.
This B-Matrix assumes a 2D plane strain approximation.
SHP is a shape function matrix with derived functions w.r.t. physical space.
Input:
SHP( NodeNumber, SHPIndex)
with SHPIndex = {
0 -> shape function,
1 -> derived shape function w.r.t. x,
2 -> derived shape function w.r.t. y
}
Output:
Bmat(NodeNumber, i, j)
for eps_i = B_Iij * u_Ij
with
eps_i = [eps_11, eps_22, eps_33, 2*eps_12, 2*eps_23, 2*eps_13]
u_Ij = [[u_11, u_12] ... [u_n1, u_n2]]
'''
return np.array([
[
[N[1], 0 ],
[0 , N[2]],
[0 , 0 ],
[N[2], N[1]],
[0 , 0 ],
[0 , 0 ]
]
for N in SHP
], dtype=np.float64)
def HookeMatVoigt(kappa, mue):
'''
HookeMatVoigt(kappa, mue) -> Cmat
Returns the constitutive Voigt MATRIX(6,6) for a Hooke material law.
The input are the kompression modulus kappa and the shear modulus mue.
sig_i = Cmat_ij * eps_j
with
sig_i = [sig_11, sig_22, sig_33, sig_12, sig_23, sig_13]
eps_i = [eps_11, eps_22, eps_33, 2*eps_12, 2*eps_23, 2*eps_13]
'''
return np.array([
[kappa+(4.0/3.0)*mue, kappa-(2.0/3.0)*mue, kappa-(2.0/3.0)*mue, 0 , 0 , 0 ],
[kappa-(2.0/3.0)*mue, kappa+(4.0/3.0)*mue, kappa-(2.0/3.0)*mue, 0 , 0 , 0 ],
[kappa-(2.0/3.0)*mue, kappa-(2.0/3.0)*mue, kappa+(4.0/3.0)*mue, 0 , 0 , 0 ],
[0 , 0 , 0 , mue, 0 , 0 ],
[0 , 0 , 0 , 0 , mue, 0 ],
[0 , 0 , 0 , 0 , 0 , mue]
], dtype=np.float64)
# definition of auxiliary matrix
pp1, pp2, pp3 = (2.0/3.0), -(1.0/3.0), (1.0/2.0)
PP = np.array( \
[[pp1, pp2, pp2, 0.0, 0.0, 0.0], \
[ pp2, pp1, pp2, 0.0, 0.0, 0.0], \
[ pp2, pp2, pp1, 0.0, 0.0, 0.0], \
[ 0.0, 0.0, 0.0, pp3, 0.0, 0.0], \
[ 0.0, 0.0, 0.0, 0.0, pp3, 0.0], \
[ 0.0, 0.0, 0.0, 0.0, 0.0, pp3]] \
,dtype=np.float64)
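# Added note: PP removes the hydrostatic part of a Voigt vector (the 1/2
# entries account for the engineering shear components); e.g.
# PP @ np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0]) is the zero vector.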
def Elmt_KS(XL, UL, Hn, Ht, Mat, dt):
'''
'''
verbose = False # True;
if verbose: print('XI :',XL) # XL = [x11, x12, x21, x22, x31, x32]
if verbose: print('UI :',UL) # UL = [u11, u12, u21, u22, u31, u32]
if verbose: print('Hn :',Hn) # Hn = [eps_pl_11, eps_pl_22, eps_pl_33, 2*eps_pl_12, 2*eps_pl_23, 2*eps_pl_13, a]
if verbose: print('Ht :',Ht) # Ht = [eps_pl_11, eps_pl_22, eps_pl_33, 2*eps_pl_12, 2*eps_pl_23, 2*eps_pl_13, a]
if verbose: print('b :',Mat) # Mat = [Emod, nue, y0, kh]
if verbose: print('dt :',dt) # dt = dt
# element specific paremeters
NoElementNodes = 3
NoNodalDOF = 2
NoDimension = 2
# initialize element vector /matrix
r_e = np.zeros(NoElementNodes*NoNodalDOF)
k_e = np.zeros((NoElementNodes*NoNodalDOF, NoElementNodes*NoNodalDOF))
# geometry and dofs
XI = XL.reshape(-1, NoDimension)
uI = UL.reshape(-1, NoNodalDOF)
# Material Parameters
Emod, nu, y0, kh = Mat[0], Mat[1], Mat[2], Mat[3]
kappa , mue = Emod/(3*(1.0-2.0*nu)), Emod/(2.0*(1.0+nu))
# constitutive matrix (hooke)
Cmat = HookeMatVoigt(kappa, mue)
# provide integration points
EGP = np.array([[(1.0/3.0), (1.0/3.0), (1.0/2.0)]])
NoInt = len(EGP)
# start integration Loop
for GP in range(NoInt):
if verbose: print('GP: ',GP)
xi, eta, wgp = EGP[GP]
# read gp history
NoHGP = 7 # number of history at each gp
eps_pl_n = Hn[ GP*NoHGP : GP*NoHGP+6]
a_n = Hn[ GP*NoHGP+6]
# compute current shape functions
SH0 = SH0_T1(xi, eta)
# compute mapping
Jed = np.einsum('Ii,Ij->ij', XI ,SH0[:,1:3])
detJ = np.linalg.det(Jed)
if (detJ <= 0): raise NameError("Error unphysical mapping detected.")
if verbose: print('detJ: ',detJ)
Jed_inv = np.linalg.inv(Jed)
# map shape function derivative
SHP = np.copy(SH0)
SHP[:,1:3] = np.einsum('Ij,ji->Ii', SH0[:,1:3], Jed_inv)
Bmat = BmatVoigt2D(SHP)
# compute strains / stresses
eps = np.einsum('Iij,Ij->i', Bmat, uI)
if verbose: print('eps: ')
if verbose: print(np.array([[eps[0], eps[3]/2, eps[4]/2], [eps[3]/2, eps[1], eps[5]/2], [eps[4]/2, eps[5]/2, eps[2]]]))
###############################################
############# begin plastic part ##############
###############################################
# compute elastic trail stresses
eps_el_tr = eps - eps_pl_n
if verbose: print('eps_el_tr: ')
if verbose: print(np.array([[eps_el_tr[0], eps_el_tr[3]/2, eps_el_tr[4]/2], [eps_el_tr[3]/2, eps_el_tr[1], eps_el_tr[5]/2], [eps_el_tr[4]/2, eps_el_tr[5]/2, eps_el_tr[2]]]))
# compute deviatoric trail stresses
sig_tr = np.einsum('ij,j->i' , Cmat, eps_el_tr)
tr_sig_tr = sig_tr[0] + sig_tr[1] + sig_tr[2]
dev_sig_tr = sig_tr - (1.0/3.0) * tr_sig_tr * np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0])
# compute norm of deviatoric trail stresses
norm_dev_sig_tr = np.sqrt(
dev_sig_tr[0]**2 + dev_sig_tr[1]**2 + dev_sig_tr[2]**2 + \
2.0 * dev_sig_tr[3]**2 + 2.0 * dev_sig_tr[4]**2 + 2.0 * dev_sig_tr[5]**2
)
# compute yield criterion
phi_tr = norm_dev_sig_tr - np.sqrt(2.0/3.0) * (y0 + (2.0/3.0)*kh*a_n)
if verbose: print('norm_dev_sig_tr: ', norm_dev_sig_tr)
if verbose: print('yield: ', np.sqrt(2.0/3.0) * (y0 + (2.0/3.0)*kh*a_n))
if verbose: print('phi_tr: ', phi_tr)
# check yield criterion
if (phi_tr > 1e-8): # elasto-plastic loading
# compute plastic strain increment
delta_a = phi_tr/(2.0*mue + (2.0/3.0*kh))
a = a_n + np.sqrt(2.0/3.0) * delta_a
if verbose: print('delta_a: ', delta_a)
# compute plastic flow director
n_tr = dev_sig_tr/norm_dev_sig_tr
if verbose: print('n_tr: ')
if verbose: print(np.array([[n_tr[0], n_tr[3], n_tr[4]], [n_tr[3], n_tr[1], n_tr[5]], [n_tr[4], n_tr[5], n_tr[2]]]))
# compute plastic strain - take care of voigt notation!
eps_pl = eps_pl_n + delta_a * n_tr * np.array([1.0, 1.0, 1.0, 2.0, 2.0, 2.0])
if verbose: print('eps_pl: ')
if verbose: print(np.array([[eps_pl[0], eps_pl[3]/2, eps_pl[4]/2], [eps_pl[3]/2, eps_pl[1], eps_pl[5]/2], [eps_pl[4]/2, eps_pl[5]/2, eps_pl[2]]]))
# compute new elastic strains
eps_el = eps - eps_pl
if verbose: print('eps_el: ')
if verbose: print(np.array([[eps_el[0], eps_el[3]/2, eps_el[4]/2], [eps_el[3]/2, eps_el[1], eps_el[5]/2], [eps_el[4]/2, eps_el[5]/2, eps_el[2]]]))
sig = np.einsum('ij,j->i', Cmat, eps_el)
# modification of material tangent
fact1 = 1.0 - (2.0*mue*delta_a/norm_dev_sig_tr)
fact2 = 2.0*mue/(2.0*mue + (2.0/3.0*kh)) - (2.0*mue*delta_a/norm_dev_sig_tr)
Cmat += -2.0*mue*PP \
+ 2.0*mue*PP*fact1 \
- 2.0*mue*np.einsum('i,j->ij', n_tr, n_tr)*fact2
# export history
Ht[ GP*NoHGP : GP*NoHGP+6] = eps_pl
Ht[ GP*NoHGP+6] = a
else: # elastic loading
# old plastic state is stil valid
eps_pl = eps_pl_n
a = a_n
# compute elastic strains
eps_el = eps-eps_pl
sig = np.einsum('ij,j->i', Cmat, eps_el)
###############################################
############## end plastic part ###############
###############################################
# export right hand side | this element has 3 nodes with 2 dofs each
for I in range(NoElementNodes):
# compute nodal right hand side
nodal_rhs_vec = np.einsum('i,ij->j',sig, Bmat[I])
# integrate nodal right hand side and export
r_e[I*2+0] += nodal_rhs_vec[0] * wgp * detJ
r_e[I*2+1] += nodal_rhs_vec[1] * wgp * detJ
for J in range(NoElementNodes):
# compute nodal stiffness matrix
nodal_stiffness = np.einsum('ki,ko,oj->ij', Bmat[I], Cmat, Bmat[J])
# integrate nodal stiffness matrix and export
k_e[I*2+0, J*2+0] += nodal_stiffness[0,0] * wgp * detJ
k_e[I*2+0, J*2+1] += nodal_stiffness[0,1] * wgp * detJ
k_e[I*2+1, J*2+0] += nodal_stiffness[1,0] * wgp * detJ
k_e[I*2+1, J*2+1] += nodal_stiffness[1,1] * wgp * detJ
return r_e, k_e
def Elmt_Post(XL, UL, Hn, Ht, Mat, dt, PostName):
'''
'''
## NEW post strategy is to return indexes
    # and contributions
# 3 nodes
r_post = np.zeros(3)
# geometry and dofs
XI = np.array([[XL[0], XL[1]], [XL[2], XL[3]], [XL[ 4], XL[ 5]]], dtype=np.float64)
uI = np.array([[UL[0], UL[1]], [UL[2], UL[3]], [UL[ 4], UL[ 5]]], dtype=np.float64)
# Material Parameters
Emod, nu, y0, kh = Mat[0], Mat[1], Mat[2], Mat[3]
lam , mue = (Emod*nu)/((1.0+nu)*(1.0-2.0*nu)), Emod/(2.0*(1.0+nu))
kappa = lam + (2.0/3.0)*mue
# constitutive matrix (hooke)
Cmat = HookeMatVoigt(kappa, mue)
# provide integration points
EGP = np.array([[(1.0/3.0), (1.0/3.0), (1.0/2.0)]])
NoInt = len(EGP)
# start integration Loop
for GP in range(NoInt):
xi, eta, wgp = EGP[GP]
# read gp history
NoHGP = 7 # number of history at each gp
eps_pl = Ht[ GP*NoHGP : GP*NoHGP+6]
a = Ht[ GP*NoHGP+6]
# compute current shape functions
SH0 = SH0_T1(xi, eta)
# compute mapping
Jed = np.einsum('Ii,Ij->ij', XI ,SH0[:,1:3])
detJ = np.linalg.det(Jed)
if (detJ <= 0): raise NameError("Error unphysical mapping detected.")
Jed_inv = np.linalg.inv(Jed)
# map shape function derivative
SHP = np.copy(SH0)
SHP[:,1:3] = np.einsum('Ij,ji->Ii', SH0[:,1:3], Jed_inv)
Bmat = BmatVoigt2D(SHP)
# compute strains
eps = np.einsum('Iij,Ij->i', Bmat, uI)
# compute elastic strain
eps_el = eps - eps_pl
# compute stresses
sig = np.einsum('ij,j->i' , Cmat, eps_el)
tr_sig = sig[0] + sig[1] + sig[2]
dev_sig = sig - (1.0/3.0) * tr_sig * np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0])
sig_vm = np.sqrt( \
sig[0]**2 + sig[1]**2 + sig[2]**2 \
-sig[0]*sig[1] -sig[0]*sig[2] -sig[1]*sig[2] \
+ 3* (sig[3]**2 + sig[4]**2 + sig[5]**2) \
)
if (PostName=="SigMises"):
r_post += sig_vm
if (PostName=="a"):
r_post += a
if (PostName=="UX"):
r_post = np.array([UL[0], UL[2], UL[4]])
return r_post
elif (PostName=="UY"):
r_post = np.array([UL[1], UL[3], UL[5]])
return r_post
elif (PostName=="SigMises"):
return r_post
elif (PostName=="a"):
return r_post
else:
print("Waring: PostName "+PostName+" not defined!")
return np.array([0.0, 0.0, 0.0])
``` |
{
"source": "5AF1/ERC-GUI",
"score": 2
} |
#### File: 5AF1/ERC-GUI/main.py
```python
from test1 import Ui_MainWindow
from PyQt5.QtCore import Qt
from PyQt5 import QtWidgets, QtCore
from PyQt5.QtWidgets import QWidget
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
def __init__(self, parent=None):
QtWidgets.QMainWindow.__init__(self, parent=parent)
self.api=str('')
self.state=0
self.setupUi(self)
self.pushButton_HandUp.clicked.connect(self.buttonExecute_HandUp)
self.pushButton_HandDown.clicked.connect(self.buttonExecute_HandDown)
self.pushButton_HandLeft.clicked.connect(self.buttonExecute_HandLeft)
self.pushButton_HandRight.clicked.connect(self.buttonExecute_HandRight)
self.pushButton_BodyUp.clicked.connect(self.buttonExecute_BodyUp)
self.pushButton_BodyDown.clicked.connect(self.buttonExecute_BodyDown)
self.pushButton_BodyLeft.clicked.connect(self.buttonExecute_BodyLeft)
self.pushButton_BodyRight.clicked.connect(self.buttonExecute_BodyRight)
self.pushButton_StateSwitch.clicked.connect(self.buttonExecute_StateSwitch)
self.pushButton_APIcall.clicked.connect(self.buttonExecute_APIcall)
self.setChildrenFocusPolicy(QtCore.Qt.StrongFocus)
def setChildrenFocusPolicy(self, policy):
def recursiveSetChildFocusPolicy(parentQWidget):
for childQWidget in parentQWidget.findChildren(QWidget):
childQWidget.setFocusPolicy(policy)
recursiveSetChildFocusPolicy(childQWidget)
recursiveSetChildFocusPolicy(self)
def keyPressEvent(self, e):
if e.key() == 96 :
            # 96 == Qt.Key_QuoteLeft (backtick), used here as the exit shortcut; Qt.Key_Escape is 0x01000000
print("Exit")
self.close()
elif e.key() == Qt.Key_W:
self.buttonExecute_HandUp()
elif e.key() == Qt.Key_S:
self.buttonExecute_HandDown()
elif e.key() == Qt.Key_A:
self.buttonExecute_HandLeft()
elif e.key() == Qt.Key_D:
self.buttonExecute_HandRight()
elif e.key() == Qt.Key_Up:
self.buttonExecute_BodyUp()
elif e.key() == Qt.Key_Down:
self.buttonExecute_BodyDown()
elif e.key() == Qt.Key_Left:
self.buttonExecute_BodyLeft()
elif e.key() == Qt.Key_Right:
self.buttonExecute_BodyRight()
else:
print(e.key())
self.showCurrentValues()
def func(self):
pass
def showCurrentValues(self):
print("State: %s \nIndex: %d" % (self.comboBox_State.itemText(self.state), self.state))
print("API: %s"%self.api)
def buttonExecute_StateSwitch(self):
self.state=self.comboBox_State.currentIndex()
print("State: %s \nIndex: %d" % (self.comboBox_State.itemText(self.state), self.state))
def buttonExecute_APIcall(self):
self.api =self.lineEdit_API.text()
print(self.api)
def buttonExecute_HandUp(self):
print("Key pressed: W\nKey code: " + str(Qt.Key_W))
def buttonExecute_HandDown(self):
print("Key pressed: S\nKey code: " + str(Qt.Key_S))
def buttonExecute_HandLeft(self):
print("Key pressed: A\nKey code: " + str(Qt.Key_A))
def buttonExecute_HandRight(self):
print("Key pressed: D\nKey code: " + str(Qt.Key_D))
def buttonExecute_BodyUp(self):
print("Key pressed: Up\nKey code: " + str(Qt.Key_Up))
def buttonExecute_BodyDown(self):
print("Key pressed: Down\nKey code: " + str(Qt.Key_Down))
def buttonExecute_BodyLeft(self):
print("Key pressed: Left\nKey code: " + str(Qt.Key_Left))
def buttonExecute_BodyRight(self):
print("Key pressed: Right\nKey code: " + str(Qt.Key_Right))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
w = MainWindow()
w.show()
sys.exit(app.exec_())
``` |
{
"source": "5AF1/LabWorksML",
"score": 3
} |
#### File: LabWorksML/ACC/CVP.py
```python
from prettytable import PrettyTable
#%%
class CVP:
def __init__(self, speakers, sales_pu, var_exp_pu, fix_exp, target_profit):
self.sales_pu = sales_pu
self.speakers = speakers
self.var_exp_pu = var_exp_pu
self.fix_exp = fix_exp
self.target_profit = target_profit
def sales(self):
return self.sales_pu * self.speakers
def var_exp(self):
return self.var_exp_pu * self.speakers
def contrib_margin_d(self):
return self.sales() - self.var_exp()
def contrib_margin_pu(self):
return self.contrib_margin_d() / self.speakers
def contrib_margin_rat(self):
return self.contrib_margin_d() / self.sales()
def var_exp_rat(self):
return self.var_exp() / self.sales()
def net_op_income(self):
return self.contrib_margin_d() - self.fix_exp
def break_even_pt_u(self):
return self.fix_exp / self.contrib_margin_pu()
def break_even_pt_d(self):
return self.fix_exp / self.contrib_margin_rat()
def u_sales_target_profit(self):
return (self.fix_exp + self.target_profit) / self.contrib_margin_pu()
def d_sales_target_profit(self):
return (self.fix_exp + self.target_profit) / self.contrib_margin_rat()
def marginal_safety(self):
return self.sales() - self.break_even_pt_d()
def marginal_safety_p(self):
return self.marginal_safety() / self.sales()
def degree_operating_leverage(self):
return self.contrib_margin_d() / self.net_op_income()
def expected_inc_in_net_op_inc(self, expected_inc_in_sale):
print(f"Expected increase in sales\t{expected_inc_in_sale}")
print(f"Degree of operating leverage\t{self.degree_operating_leverage()}")
print(
f"Expected increase in net operating income \t{self.degree_operating_leverage() * expected_inc_in_sale}"
)
return self.degree_operating_leverage() * expected_inc_in_sale
def table(self):
tab = PrettyTable()
tab.field_names = ["", "Total", "Per Unit"]
tab.align[""] = "l"
tab.add_row(
[f"Sales ({self.speakers} Units)", f"${self.sales()}", f"${self.sales_pu}"]
)
tab.add_row(
[f"Less: Variable Expenses", f"({self.var_exp()})", f"({self.var_exp_pu})"]
)
tab.add_row([f"", "-----", "-----"])
tab.add_row(
[
f"Contribution Margin",
f"{self.contrib_margin_d()}",
f"${self.contrib_margin_pu()}",
]
)
tab.add_row([f"Less: Fixed Expenses", f"({self.fix_exp})", "====="])
tab.add_row([f"", "-----", ""])
tab.add_row([f"Net Operating Income", f"${self.net_op_income()}", ""])
tab.add_row([f"", "=====", ""])
print(tab)
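#%%
# Added sketch: quick break-even check with the same numbers used in the
# examples below (price 250, variable cost 150, fixed expenses 35000).
# Contribution margin per unit is 100, so break-even is 350 units,
# i.e. 350 * 250 = $87,500 of sales.
_check = CVP(400, 250, 150, 35000, None)
print(_check.break_even_pt_u())  # expected: 350.0
print(_check.break_even_pt_d())  # expected: 87500.0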
#%%
#%%
a = CVP(400, 250, 150, 35000, None)
a.table()
#%%
a.degree_operating_leverage()
#%%
a.speakers = int(a.speakers * 1.1)
a.var_exp_pu += 10
a.fix_exp -= 5000
a.table()
#%%
a = CVP(20000, 60, 45, 240000, 90000)
a.table()
print(a.contrib_margin_rat(), a.var_exp_rat())
print(a.break_even_pt_d(), a.break_even_pt_u())
print(a.d_sales_target_profit(), a.u_sales_target_profit())
print(a.marginal_safety(), a.marginal_safety_p())
print(a.expected_inc_in_net_op_inc(8))
#%%
a = CVP(20000, 60, 45, 240000, 90000)
a.table()
a.var_exp_pu += 3
a.fix_exp -= 30000
a.speakers *= 1.2
a.table()
#%%
a = CVP(2000, 90, 63, 30000, None)
a.table()
a.sales_pu *= 1.05
a.var_exp_pu *= 1.05
a.fix_exp += 5000
a.table()
a = CVP(2000, 90, 63, 30000, None)
a.var_exp_pu += 2
a.speakers *= 1.1
a.table()
#%%
if __name__ == "__main__":
a = CVP(400, 250, 150, 35000, None)
a.table()
print(a.degree_operating_leverage())
#%%
a = CVP(30000, 50, 35, 300000, None)
a.table()
# %%
```
#### File: LabWorksML/ML/conv.py
```python
import numpy as np
#%%
def Custom_Convolve2D(image, kernel, padding=0, strides=1):
# Cross Correlation
# kernel = np.flipud(np.fliplr(kernel))
## Put your code here.
# Gather Shapes of Kernel + Image + Padding
xKernShape = kernel.shape[0]
yKernShape = kernel.shape[1]
xImgShape = image.shape[0]
yImgShape = image.shape[1]
# Shape of Output Convolution
xOutput = int(((xImgShape - xKernShape + 2 * padding) / strides) + 1)
yOutput = int(((yImgShape - yKernShape + 2 * padding) / strides) + 1)
output = np.zeros((xOutput, yOutput))
# Apply Equal Padding to All Sides
if padding != 0:
imagePadded = np.zeros((xImgShape + padding * 2, yImgShape + padding * 2))
imagePadded[
int(padding) : int(-1 * padding), int(padding) : int(-1 * padding)
] = image
else:
imagePadded = image
# Iterate through image
for y in range(imagePadded.shape[1]):
# Exit Convolution
if y > imagePadded.shape[1] - yKernShape:
break
# Only Convolve if y has gone down by the specified Strides
if y % strides == 0:
for x in range(imagePadded.shape[0]):
# Go to next row once kernel is out of bounds
if x > imagePadded.shape[0] - xKernShape:
break
# Only Convolve if x has gone down by the specified Strides
if x % strides == 0:
output[x // strides, y // strides] = (
kernel * imagePadded[x : x + xKernShape, y : y + yKernShape]
).sum()
return output
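# Added note: with a 6x6 input, a 3x3 kernel, padding=1 and stride=1 the
# output-size formula (n - k + 2p)//s + 1 gives 6, e.g.
#   Custom_Convolve2D(np.ones((6, 6)), np.ones((3, 3)), padding=1).shape == (6, 6)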
def Custom_MaxPlooling(image, pool_size=(2, 2), padding=0, strides=None):
## Put your code here.
# Gather Shapes of pool + Image + Padding
xPoolShape = pool_size[0]
yPoolShape = pool_size[1]
xImgShape = image.shape[0]
yImgShape = image.shape[1]
# Stride
if strides is None:
strides = max(yPoolShape, xPoolShape)
# Shape of Output Convolution
xOutput = int(((xImgShape - xPoolShape + 2 * padding) / strides) + 1)
yOutput = int(((yImgShape - yPoolShape + 2 * padding) / strides) + 1)
output = np.zeros((xOutput, yOutput))
# Apply Equal Padding to All Sides
if padding != 0:
imagePadded = np.zeros((xImgShape + padding * 2, yImgShape + padding * 2))
imagePadded[
int(padding) : int(-1 * padding), int(padding) : int(-1 * padding)
] = image
else:
imagePadded = image
# Iterate through image
for y in range(imagePadded.shape[1]):
# Exit Convolution
if y > imagePadded.shape[1] - yPoolShape:
break
# Only Convolve if y has gone down by the specified Strides
if y % strides == 0:
for x in range(imagePadded.shape[0]):
# Go to next row once kernel is out of bounds
if x > imagePadded.shape[0] - xPoolShape:
break
# Only Convolve if x has gone down by the specified Strides
if x % strides == 0:
output[x // strides, y // strides] = (
imagePadded[x : x + xPoolShape, y : y + yPoolShape]
).max()
return output
def Custom_Flatten(img):
img = (img - img.min()) / (img.max() - img.min())
return img.flatten()
def Custom_Dense(data, units=10):
n = data.shape[0]
data = np.reshape(data, (n, 1))
    # Setting the range from 0 to 0.04 to stop exponent explosion
w = np.random.uniform(0, 0.04, [units, n])
b = np.random.uniform(0, 0.04, [units, 1])
return 1 / (1 + np.exp(-1 * (w @ data + b)))
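# Added note: Custom_Dense(np.ones(4), units=3) returns a (3, 1) array of
# sigmoid activations; with weights drawn from [0, 0.04] the values stay
# just above 0.5.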
def Custom_softmax(data):
data = np.exp(data)
data = data / data.sum()
return data
#%%
im = np.array(
[
[4, 9, 2, 5, 8, 3],
[5, 6, 2, 4, 0, 3],
[2, 4, 5, 4, 5, 2],
[5, 6, 5, 4, 7, 8],
[5, 7, 7, 9, 2, 1],
[5, 8, 5, 3, 8, 4],
]
)
kernel = np.array([[1, 0, -1], [1, 0, -1], [1, 0, -1]])
ss = np.array([[3.1, 0.3, 1.2]])
# %%
print(Custom_Convolve2D(im, kernel, 1, 1))
print(Custom_softmax(ss))
# %%
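# Added sketch: exercise the max-pooling helper on the same 6x6 array `im`;
# with the default 2x2 window and stride 2 it returns the 3x3 block maxima
# [[9, 5, 8], [6, 5, 8], [8, 9, 8]] (as floats).
print(Custom_MaxPlooling(im, (2, 2)))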
```
#### File: LabWorksML/ML/tree.py
```python
import numpy as np
import math
#%%
X = [[1, 1, 1, 1, 1], [1, 1, 1, 2, 1], []]
#%%
def e(b):
n = sum(b)
ans = 0
for i in b:
ans += i * math.log(i)
ans -= n * math.log(n)
return ans / (-n * math.log(2))
def ent(a, b):
n = sum(a)
ans = 0
for i in range(len(a)):
ans += a[i] * e(b[i]) / n
return ans
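# Worked example (added): e([3, 2]) is the entropy of a 3/2 class split,
# -(3/5)*log2(3/5) - (2/5)*log2(2/5) ≈ 0.971, and
# ent([5, 4, 5], [[3, 2], [4], [2, 3]]) is the weighted average ≈ 0.694,
# which is what tree() computes for each attribute below.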
def tree(B):
for i in B:
V = [sum(j) for j in i]
a = ent(V, i)
print(a)
#%%
a = [5, 4, 5]
b = [[3, 2], [4], [2, 3]]
print(ent(a, b))
# %%
A = [[5, 4, 5], [4, 6, 4], [7, 7], [8, 6]]
B = [
[[3, 2], [4], [2, 3]],
[[2, 2], [4, 2], [3, 1]],
[[3, 4], [6, 1]],
[[6, 2], [3, 3]],
]
#%%
tree(B)
# %%
``` |
{
"source": "5AF1/travelhacks",
"score": 2
} |
#### File: travelhacks/users/views.py
```python
from django.shortcuts import render,redirect
#from django.contrib.auth.forms import UserCreationForm
from django.contrib import messages
from .forms import UserRegisterForm,UserUpdateForm,ProfileUpdateForm
from django.contrib.auth.decorators import login_required
# Create your views here.
def register(response):
if response.method == 'POST':
form = UserRegisterForm(response.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
messages.success(response,f'Account created for {username}!\nYou can now log in ☺')
return redirect('login')
else:
form = UserRegisterForm()
return render(response,'users/register.html',{'form':form})
@login_required
def profile(response):
if response.method == 'POST':
u_form = UserUpdateForm(response.POST, instance=response.user)
p_form = ProfileUpdateForm(response.POST, response.FILES, instance=response.user.profile)
if u_form.is_valid() and p_form.is_valid():
u_form.save()
p_form.save()
username = u_form.cleaned_data.get('username')
messages.success(response,f'Ok {username}!\nYour account has been updated ☺')
return redirect('profile')
else:
u_form = UserUpdateForm(instance=response.user)
p_form = ProfileUpdateForm(instance=response.user.profile)
context = {
'u_form': u_form,
'p_form': p_form,
}
return render(response,'users/profile.html',context)
``` |
{
"source": "5agado/conversation-analyzer",
"score": 2
} |
#### File: conversation-analyzer/src/main.py
```python
import argparse
import configparser
import sys
import os
import util.io as mio
from model.conversation import Conversation
from model.conversationDataframe import ConversationDataframe
from stats.iConvStats import IConvStats
def main(_):
parser = argparse.ArgumentParser(description='Conversation Analyzer')
parser.add_argument('-p', metavar='conversationFilepath', dest='filepath', required=True)
parser.add_argument('-n', metavar='numberOfMessages', type=int, dest='numMsgs', default=0,
help='number of conversation messages to be analyzed')
baseFolderPath = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
parser.add_argument('--conf', metavar='configFilepath', dest='configFilepath', default= baseFolderPath+'\\config.ini')
args = parser.parse_args()
filepath = args.filepath
numMsgs = args.numMsgs
CONFIG_FILEPATH = args.configFilepath
STATS_SECTION = "Stats"
config = configparser.ConfigParser()
config.read(CONFIG_FILEPATH)
P_BASIC_LENGTH_STATS = config.getboolean(STATS_SECTION, "P_BASIC_LENGTH_STATS")
P_INTERVAL_STATS = config.getboolean(STATS_SECTION, "P_INTERVAL_STATS")
P_WORDS_COUNT = config.getboolean(STATS_SECTION, "P_WORDS_COUNT")
WORDS_COUNT_LIMIT = config.getint(STATS_SECTION, "WORDS_COUNT_LIMIT")
P_WORDS_USEDJUSTBY = config.getboolean(STATS_SECTION, "P_WORDS_USEDJUSTBY")
P_DELAY_STATS = config.getboolean(STATS_SECTION, "P_DELAY_STATS")
P_EMOTICONS_STATS = config.getboolean(STATS_SECTION, "P_EMOTICONS_STATS")
P_LEXICAL_STATS = config.getboolean(STATS_SECTION, "P_LEXICAL_STATS")
#conv = Conversation(filepath)
conv = ConversationDataframe(filepath)
conv.loadMessages(numMsgs)
if P_BASIC_LENGTH_STATS:
stats = conv.stats.generateStats(IConvStats.STATS_NAME_BASICLENGTH)
mio.printBasicLengthStats(stats)
if P_INTERVAL_STATS:
start, end, interval = conv.stats.getIntervalStats()
days = conv.stats.getDaysWithoutMessages()
mio.printIntervalStatsFor(start, end, interval, days)
if P_LEXICAL_STATS:
stats = conv.stats.generateStats(IConvStats.STATS_NAME_LEXICAL)
mio.printLexicalStats(stats)
if P_WORDS_COUNT:
stats = conv.stats.generateStats(IConvStats.STATS_NAME_WORDCOUNT)
mio.saveDfToStatsFolder(conv, stats.wordsCount, 'wordCount.csv')
if P_EMOTICONS_STATS:
stats = conv.stats.generateStats(IConvStats.STATS_NAME_EMOTICONS)
mio.printEmoticonsStats(stats)
#Not tested
#if P_DELAY_STATS:
# mio.printDelayStatsFor(conv)
if __name__ == "__main__":
main(sys.argv[1:])
```
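The script above expects a config file with a [Stats] section holding the flags it reads via getboolean/getint. The sketch below writes a compatible file; the key names come straight from main.py, while the values are only an example.
```python
# Write a config.ini that main.py above can consume (values are illustrative)
import configparser

config = configparser.ConfigParser()
config["Stats"] = {
    "P_BASIC_LENGTH_STATS": "yes",
    "P_INTERVAL_STATS": "yes",
    "P_WORDS_COUNT": "no",
    "WORDS_COUNT_LIMIT": "20",
    "P_WORDS_USEDJUSTBY": "no",
    "P_DELAY_STATS": "no",
    "P_EMOTICONS_STATS": "yes",
    "P_LEXICAL_STATS": "yes",
}
with open("config.ini", "w") as f:
    config.write(f)

# Typical invocation (paths are placeholders):
#   python src/main.py -p path/to/conversation.txt -n 1000 --conf config.ini
```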
#### File: src/model/conversation.py
```python
import logging
import os
import time
import nltk
import util.io as mio
from stats.convStats import ConvStats
#----------------------------#
# DEPRECATED #
#----------------------------#
class Conversation:
def __init__(self, filepath):
self.sender1 = None
self.sender2 = None
self.filepath = filepath
self.statsFolder = os.path.dirname(filepath) + '\\stats'
if not os.path.exists(self.statsFolder):
os.makedirs(self.statsFolder)
self.messages = None
self.sender1Messages = None
self.sender2Messages = None
self.messagesBySender = {self.sender1:self.sender1Messages, self.sender2:self.sender2Messages}
self.stats = None
def loadMessages(self, limit=0, startDate=None, endDate=None):
logging.info("Start loading messages for conversation " + self.filepath)
start = time.time()
self.messages, [self.sender1, self.sender2] = \
mio.parseMessagesFromFile(self.filepath, limit, startDate, endDate)
if len(self.messages) == 0:
raise Exception("No messages found for conversation " + self.filepath)
self.sender1Messages = list(filter(lambda m: m.sender == self.sender1, self.messages))
self.sender2Messages = list(filter(lambda m: m.sender == self.sender2, self.messages))
self.messagesBySender[self.sender1] = self.sender1Messages
self.messagesBySender[self.sender2] = self.sender2Messages
end = time.time()
logging.info("Loading completed in {0:.2f}s".format(end-start))
self.stats = ConvStats
def getAsNLTKText(self, sender=None):
if sender:
return nltk.Text(self.getConvTextBySender(sender))
else:
return nltk.Text(self.getEntireConvText())
def getEntireConvText(self):
text = ''
for m in self.messages:
text += m.text + '\n'
return text
def getConvTextBySender(self, sender):
text = ''
for m in self.messages:
if m.sender == sender:
text += m.text + '\n'
return text
```
#### File: src/model/message.py
```python
from datetime import datetime
class Message:
DATE_FORMAT = '%Y.%m.%d'
TIME_FORMAT = '%H:%M:%S'
DATE_SEPARATOR = '.'
TIME_SEPARATOR = ':'
DATETIME_SEPARATOR = ' '
DATE_TIME_FORMAT = DATE_FORMAT + DATETIME_SEPARATOR + TIME_FORMAT
def __init__(self, date, time, sender, text):
self.date = date
self.time = time
self.datetime = date + Message.DATETIME_SEPARATOR + time
self.sender = sender
self.text = text
def getMessageLength(self):
return len(self.text)
def getHour(self):
return self.time.split(Message.TIME_SEPARATOR)[0]
def getWeekDay(self):
return datetime.strptime(self.datetime, Message.DATE_TIME_FORMAT).weekday()
def getMonth(self):
return self.date.split(Message.DATE_SEPARATOR)[1]
def getYear(self):
return self.date.split(Message.DATE_SEPARATOR)[0]
def __str__(self):
return "{} {} {}".format(self.datetime, self.sender, self.text)
```
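A small usage example for the Message class (values are illustrative; it assumes the class above is importable, e.g. as model.message.Message):
```python
m = Message("2014.01.30", "06:01:57", "Alice", "hey, are you around?")
print(m)                 # 2014.01.30 06:01:57 Alice hey, are you around?
print(m.getHour())       # '06'
print(m.getMonth())      # '01'
print(m.getWeekDay())    # 3 -> Thursday (datetime.weekday(), Monday == 0)
```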
#### File: src/stats/wordsCountStats.py
```python
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from util import statsUtil
class WordsCountStats:
def __init__(self, conversation):
self.conversation = conversation
self.msgs = self.conversation.messages
self.wordsCount = None
def loadWordsCount(self, groupByColumns=None, ngram_range=(1,1)):
"""
Generates dataframe with words count for each group-by entry and stores it internally.
Successive calls on this method will overwrite the stored data with the new results.
:param ngram_range: ngram to use for count vectorization
:param groupByColumns: names of features to use to group messages
:return: none. Results are stored internally
"""
if not groupByColumns:
groupByColumns = []
groupByColumns = groupByColumns + ['sender']
self.wordsCount = WordsCountStats._computeWordsCount(self.msgs, groupByColumns, ngram_range)
def loadWordsCountFromFile(self, filepath, indexCols=None):
"""
Load previously exported words count in this stats instance.
:param indexCols: columns to set as index
:param filepath: filepath of the csv words count file
:return: none. words-count data is stored in this stats instance
"""
self.wordsCount = pd.read_csv(filepath, index_col=indexCols)
def getWordsCount(self, words=None, sender=None, stopWords=None):
"""
Returns count for specified words.
:param stopWords: words to ignore
:param words: lookup words, if none return all present words
:param sender: if specified, just consider words occurrences for such sender, otherwise consider all senders (total)
:return: a wordsCount dataframe
"""
if not stopWords: stopWords = []
        # if no words have been specified, consider all present ones not in stopwords
if not words:
words = list(filter(lambda x : x not in stopWords, self.wordsCount.columns.values))
# Consider only words that are present and not in stopwords
else:
words = list(filter(lambda x : ((x not in stopWords)and(x in words)),
self.wordsCount.columns.values))
# If no word is present, return None
if not words:
return None
count = WordsCountStats._transformWordsCountBasedOnSender(self.wordsCount[words], sender)
return count
def getWordsUsedJustBy(self, originSender, referenceSender):
# collect words
        wordsCount = self.wordsCount.loc[:, (self.wordsCount.loc[referenceSender]==0)
                                            &(self.wordsCount.loc[originSender]!=0)]
#transpose to have sender count as columns, and words as rows
words = wordsCount.transpose().rename(columns={'sender':'word'})\
.sort_values(originSender, ascending=False)
return words
def getWordFirstAndLastOccurences(self, word, sender=None):
"""
Returns when word has been used (non-zero count) for the first and last time.
:param word: target word
:param sender: if specified, just consider words used by such sender, otherwise consider all senders
:return: two indexes, for which word firstly and lastly appears, or -1 if the word has never been used
"""
res = WordsCountStats._getFirstAndLastOccurences(self.wordsCount, word, sender)
if not res:
return -1, -1
else:
return res
#TODO add total option
def getLexicalStats(self, sender=None):
wordsCount = self.wordsCount
#wordsCount = WordsCountStats._transformWordsCountBasedOnSender(self.wordsCount, sender)
res = pd.DataFrame(wordsCount.apply(lambda x : WordsCountStats.lexicalStatsFun(x), axis=1))
if sender:
            if isinstance(res.index, pd.MultiIndex):
res = res.xs(sender, level='sender')
else:
res = res.loc[sender]
return res
@staticmethod
# TODO Add options of filter out stopwords
def _computeWordsCount(msgs, groupByColumns, ngram_range=(1,1)):
"""
Generates dataframe with words count for each group-by entry.
Grouping is done on passed columns plus the sender one.
"""
# Group messages by sender and specified feature, concatenating text field
grouped_msgs = msgs.groupby(groupByColumns).agg({'text': lambda x: " ".join(x)})
# Count-vectorize msgs, using own defined analyzer (tokenizer)
vectorizer = CountVectorizer(tokenizer=lambda x: statsUtil.getWords(x),
ngram_range=ngram_range)
X = vectorizer.fit_transform(grouped_msgs['text'].values)
# Create count matrix using words as columns
countMatrix = pd.DataFrame(X.toarray(), index=grouped_msgs.index,columns=vectorizer.get_feature_names())
# Join data while dropping text column
wordsCount = grouped_msgs.drop('text', axis=1).join(countMatrix)
return wordsCount
@staticmethod
def _getFirstAndLastOccurences(wordsCount, word, sender=None):
"""
Returns when word has been used (non-zero count) for the first and last time, based on the provided words counts.
:param wordsCount: dataframe with words count for each group-by entry.
:param word: target word
:param sender: if specified, just consider words used by such sender, otherwise consider all senders
:return: tuple with wordsCount indexes for which word firstly and lastly appears, or none if the word has never been used
"""
        # If word not present return None
if word not in wordsCount.columns:
return None
wordsCount = WordsCountStats._transformWordsCountBasedOnSender(wordsCount, sender)
# (notice word column will still be present, even if sender never used it)
count = wordsCount[word]
        # nonzero on the underlying 1-D array always returns a tuple with a single index array;
        # collect it and return df indexes using its first and last elements, or None if the array is empty
        nonZeroIdxs = count.values.nonzero()[0]
if len(nonZeroIdxs) == 0:
return None
else:
return wordsCount.index[nonZeroIdxs[0]], wordsCount.index[nonZeroIdxs[-1]]
@staticmethod
def _transformWordsCountBasedOnSender(wordsCount, sender=None):
"""
Transforms wordsCount. If sender extract data only for specified sender, if not sum over all senders.
    Sender is checked both on single and multi-index; if not present, None is returned
"""
# If sender, get words count for the specified sender
if sender:
try:
                if isinstance(wordsCount.index, pd.MultiIndex):
res = wordsCount.xs(sender, level='sender')
else:
res = wordsCount.loc[sender]
except KeyError:
print("Key error for " + sender)
return None
# If no sender is specified, regroup by index (ignoring sender) and aggregate by sum
else:
indexNames = list(wordsCount.index.names)
indexNames.remove('sender')
# If only index was sender, we simply sum
if not indexNames:
res = wordsCount.sum()
# Otherwise we sum by other indexes
else:
res = wordsCount.groupby(level=indexNames).sum()
return res
@staticmethod
def _normalizeWordsCount(wordsCount):
df = wordsCount.apply(lambda x : x/sum(x), axis=0)
return df
@staticmethod
def _computeWordsTrend(wordsCount, sender=None):
wordsCount = WordsCountStats._transformWordsCountBasedOnSender(wordsCount, sender)
wordsTrend = wordsCount.apply(lambda x: (x - x.shift(1))).dropna().astype(np.int8)
return wordsTrend
@staticmethod
def lexicalStatsFun(row):
tokensCount = sum(row)
vocabularyCount = len(row[row>0])
return pd.Series({'tokensCount':tokensCount,'vocabularyCount':vocabularyCount,
'lexicalRichness':vocabularyCount/tokensCount})
```
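A rough usage sketch for WordsCountStats follows. It assumes a conversation object exposing a `.messages` dataframe with at least 'sender' and 'text' columns (plus any extra group-by columns), as implied by `_computeWordsCount`, and that the repo's `util.statsUtil` tokenizer is importable; the data values are made up for illustration.
```python
import pandas as pd

class FakeConversation:
    # Minimal stand-in exposing the single attribute WordsCountStats reads
    def __init__(self, messages):
        self.messages = messages

msgs = pd.DataFrame({
    'sender': ['alice', 'bob', 'alice'],
    'year':   [2015, 2015, 2016],
    'text':   ['hello world', 'hello there', 'world peace'],
})

stats = WordsCountStats(FakeConversation(msgs))
stats.loadWordsCount(groupByColumns=['year'])            # grouped by year + sender
print(stats.getWordsCount(words=['hello', 'world']))     # summed over senders
print(stats.getWordsCount(words=['hello'], sender='alice'))
```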
#### File: src/test/basicTests.py
```python
import argparse
import logging
import os
import sys
import numpy as np
from matplotlib import pyplot as plt
from scipy.stats.stats import pearsonr
from model.conversationDataframe import ConversationDataframe
from util import conversationGenerator
from util import io as mio
from util import plotting as mplot
from util import statsUtil
from matplotlib import animation
from stats.iConvStats import IConvStats
import pandas as pd
import seaborn as sns
#from sklearn import datasets, linear_model
def initLogger():
logger = logging.getLogger()
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s %(levelname)s - %(message)s",
"%Y-%m-%d %H:%M:%S")
ch.setFormatter(formatter)
logger.addHandler(ch)
def init(_):
parser = argparse.ArgumentParser(description='Conversation Analyzer')
parser.add_argument('-p', metavar='conversationfilePath', dest='filepath')
parser.add_argument('-n', metavar='numberOfMessages', type=int,
dest='numMsgs', default=0)
parser.add_argument('-l', metavar='wordsCountLimit', type=int,
dest='wCountLimit', default=20)
args = parser.parse_args()
filepath = args.filepath
numMsgs = args.numMsgs
wCountLimit = args.wCountLimit
initLogger()
#conv = ConversationDataframe(mio.getResourcesPath() + "\\unittest\\test_plotting.txt")
conv = ConversationDataframe(filepath)
conv.loadMessages(numMsgs)
    # early return: the lines below are exploratory snippets kept for reference, not executed
    return
res = conv.messages.rename(columns={'text':'numMsgs'})
res['lenMsgs'] = res['numMsgs'].apply(lambda x: len(x))
x = pd.to_numeric(res['hour'])
testCorrelation(x, res['lenMsgs'])
#testAnimation(conv)
#sentences = mnlp.sentenceSegmentation(conv.getEntireConvText())
#sentences = mnlp.wordTokenization(conv.getEntireConvText())
#for s in sentences:
# print(s)
#rawText = conv.getEntireConvText()
#mio.displayDispersionPlot(conv, ['sender1', ':D', 'well'])
#mio.showConcordance(conv, "phone")
#tokens = nltk.word_tokenize(rawText)
#words = [w.lower() for w in tokens]
def testCorrelation(x, y):
print(np.corrcoef(x, y)[0, 1])
sns.regplot(x=x, y=y)
plt.show()
def saveBunchOfStatsDf(conv):
statsList = [IConvStats.STATS_NAME_BASICLENGTH, IConvStats.STATS_NAME_LEXICAL,
IConvStats.STATS_NAME_WORDCOUNT, IConvStats.STATS_NAME_EMOTICONS]
for stat in statsList:
filepath = conv.statsFolder + '\\' + stat + '.txt'
df = conv.stats.generateStats(stat)
mio.printDataFrameToFile(df, filepath)
filepath = conv.statsFolder + '\\' + stat + 'byHour.txt'
df = conv.stats.generateStatsByHour(stat)
mio.printDataFrameToFile(df, filepath)
filepath = conv.statsFolder + '\\' + stat + 'byYearAndHour.txt'
df = conv.stats.generateStatsByYearAndHour(stat)
mio.printDataFrameToFile(df, filepath)
def testAnimation(conv):
data = conv.stats.generateStatsByYearMonthHour(IConvStats.STATS_NAME_BASICLENGTH)
data = data.groupby('year').get_group('2014')
grouped = data.groupby('month')
keys = sorted(list(grouped.groups.keys()))
print(keys)
fig = plt.figure()
ax = plt.axes()
def animate(i):
df = grouped.get_group(keys[i]).sort_values("hour")
print(df.head())
ax.clear()
sns.barplot(x="hour", y="lenMsgs", hue="sender", data=df, ax=ax)
ax.set_title(i)
anim = animation.FuncAnimation(fig, animate,
frames=len(grouped), interval=2000)
plt.show()
def testZipfLaw(conv):
_, wCount, _ = conv.stats.getWordCountStats()
print(wCount)
(_, occFirst) = wCount[0]
for i, (word, count) in enumerate(wCount[:10], start=1):
print(word + " " + str(i) + " = " + str(occFirst/count))
print(word + " " + str(count) + " = " + str(occFirst/i))
words, count = zip(*wCount)
mplot.plotZipfLaw(words, count)
return
def plotDelayByLengthStats(conv):
    # NOTE: relies on an `mstats` module and on sklearn's `linear_model`
    # (import commented out at the top), so it cannot run as-is in this file
    delay, senderDelay = mstats.getDelayStatsByLength(conv)
x = np.array([v[0] for v in senderDelay[conv.sender1+ ':' +conv.sender2]])
y = np.array([v[1] for v in senderDelay[conv.sender1+ ':' +conv.sender2]])
print(pearsonr(x, y))
print(np.corrcoef(x, y)[0, 1])
print(x)
print(x[:,np.newaxis])
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(x[:,np.newaxis],y)
plt.scatter(x, y, color='red')
plt.plot(x, regr.predict(x[:,np.newaxis]), color='blue')
plt.show()
def testConversationGenerator():
conv = conversationGenerator.generateNewConversation(100, "2014.01.30 06:01:57", "2014.12.30 06:01:57", ["s1", "s2"], 3, 20)
mio.printListToFile(conv, os.path.join(mio.getResourcesPath(), "test.txt"))
init(sys.argv[1:])
```
#### File: src/util/io.py
```python
import os
import sys
from datetime import datetime
from pandas import read_csv
from model.message import Message
from os.path import dirname
sys.path.append(dirname(__file__)+"\\..")
from util import logger
def parseMessagesFromFile(filePath, limit=0, startDate=None, endDate=None):
messages = []
senders = set([])
if startDate:
startDate = datetime.strptime(startDate, Message.DATE_FORMAT)
if endDate:
endDate = datetime.strptime(endDate, Message.DATE_FORMAT)
try:
with open(filePath, 'r', encoding="utf8") as f:
for line in f:
date, time, sender, text = line.split(' ', 3)
if startDate or endDate:
thisDate = datetime.strptime(date, Message.DATE_FORMAT)
if (not startDate or thisDate>=startDate) and (not endDate or thisDate<=endDate):
messages.append(Message(date, time, sender, text.strip()))
else:
messages.append(Message(date, time, sender, text.strip()))
senders.add(sender)
if limit != 0 and len(messages) >= limit:
break
except IOError:
logger.warning("No such file: " + filePath)
return messages, senders
def loadDataFromFile(filepath):
data = read_csv(filepath)
return data
def getSetFromFile(filePath):
theSet = set([])
try:
with open(filePath, 'r') as f:
theSet = {line.strip() for line in f}
except IOError:
logger.warning("No such file " + filePath)
return theSet
def displayDispersionPlot(conv, words):
text = conv.getAsNLTKText()
text.dispersion_plot(words)
def showConcordance(conv, word):
text = conv.getAsNLTKText()
text.concordance(word)
def printBasicLengthStats(basicLengthStatsDf):
logger.info("##BASIC LENGTH STATS")
for sender, vals in basicLengthStatsDf.iterrows():
        totalNum, totalLength, avgLength = vals.tolist()
        logger.info("#" + sender)
        logger.info("Number of messages: {:.0f}".format(totalNum))
        logger.info("Total length: {:.0f}".format(totalLength))
        logger.info("Average length: {0:.2f}".format(avgLength))
logger.info('-'*10)
def printLexicalStats(lexicalStatsDf):
logger.info("##LEXICAL STATS")
for sender, vals in lexicalStatsDf.iterrows():
tokensCount, vocabularyCount, lexicalRichness = vals.tolist()
logger.info("#" + sender)
logger.info("Tokens count: {:.0f}".format(tokensCount))
logger.info("Distinct tokens count: {:.0f}".format(vocabularyCount))
logger.info("Lexical diversity: {0:.5f}".format(lexicalRichness))
logger.info('-'*10)
def printIntervalStatsFor(start, end, interval, days):
logger.info("##Conv Interval")
logger.info("Conversation started: {}".format(str(start)))
logger.info("Conversation ended: {}".format(str(end)))
logger.info("Conversation overall duration: {}".format(interval))
logger.info("{:.0f} days without messages".format(len(days)))
percentage = (len(days)/(interval.days+1))*100
logger.info("{0:.2f}% out of the conversation overall days-interval".format(percentage))
#logger.info(days)
logger.info('-'*10)
def printEmoticonsStats(emoticonsStatsDf):
logger.info("##EMOTICONS STATS")
for sender, vals in emoticonsStatsDf.iterrows():
numEmoticons, emoticonsRatio, lenMsgs = vals.tolist()
logger.info("#" + sender)
logger.info("Emoticons count: {:.0f}".format(numEmoticons))
logger.info("Messages total length: {:.0f}".format(lenMsgs))
logger.info("Ratio: {0:.5f}".format(emoticonsRatio))
logger.info('-'*10)
def saveDfToStatsFolder(conv, df, filename, saveIndex=True):
statsFolder = os.path.join(os.path.dirname(conv.filepath), 'stats')
if not os.path.exists(statsFolder):
os.makedirs(statsFolder)
df.to_csv(os.path.join(statsFolder, filename), index=saveIndex)
def printDelayStatsFor(conv):
delay = conv.stats.getDelayStats()
logger.info("##Reply Delay Stats")
logger.info("Reply delay by sender: ")
for s, d in delay.items():
msg = "Between {} and {}".format(s.split(':')[0], s.split(':')[1])
logger.info('{} : {}'.format(msg, d))
logger.info('-'*10)
def printDictToFile(d, title, filepath):
with open(filepath, "w+", encoding="utf8") as f:
if title:
f.write(title + "\n")
for k, v in d.items():
f.write('{} : {}\n'.format(k, v))
def printListToFile(l, filepath, title=None):
with open(filepath, "w+", encoding="utf8") as f:
if title:
f.write(title + "\n")
for e in l:
f.write(str(e)+"\n")
def getResourcesPath():
return os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, 'resources'))
```
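For reference, the on-disk format that parseMessagesFromFile understands (inferred from the `line.split(' ', 3)` call and the Message date/time formats) is one message per line: date, time, sender, then free text. A small round-trip sketch:
```python
sample = (
    "2014.01.30 06:01:57 Alice hey, are you around?\n"
    "2014.01.30 06:02:10 Bob yes, just woke up\n"
)
with open("conversation.txt", "w", encoding="utf8") as f:
    f.write(sample)

messages, senders = parseMessagesFromFile("conversation.txt",
                                          startDate="2014.01.01",
                                          endDate="2014.12.31")
print(len(messages), senders)   # 2 {'Alice', 'Bob'}
```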
#### File: src/util/plotting.py
```python
import os
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import util.io as mio
from util import statsUtil
from model.message import Message
SAVE_PLOT = False
def plotBasicLengthStatsByYearAndMonth(data, yearsToShow=None, targetStats=None,
targetSenders=None):
df = statsUtil.filter_stats(data, {'sender':targetSenders, 'year':yearsToShow,
'stat':targetStats})
g = sns.factorplot(x="month", y="val", row="stat", hue='sender', col='year', data=df,
kind="bar", size=3, aspect=2.5, legend_out=False)
g.fig.suptitle('Basic Length Stats')
sns.plt.show()
def plotBasicLengthStatsByHour(data, targetStats=None, targetSenders=None, kind='bar'):
df = statsUtil.filter_stats(data, {'sender':targetSenders, 'stat':targetStats})
g = sns.factorplot(x="hour", y="val", row="stat", hue='sender', data=df,
kind=kind, size=3, aspect=2.5, legend_out=False)
g.fig.suptitle('Basic Length Stats - Hour')
#sns.plt.show()
def plotRichnessVariation(data, targetLabel, yearsToShow=None, targetSenders=None):
df = data.reset_index()
df = statsUtil.filter_stats(df, {'year':yearsToShow, 'sender':targetSenders})
g = sns.factorplot(x=targetLabel, y="lexicalRichness", col="year", hue='sender',
data=df, kind="point", legend_out=False)
g.set(ylabel='lexical richness (%)')
g.fig.suptitle('Vocabulary Richness')
sns.plt.show()
def _genericFactorPlot(data, xTarget, yTarget, filters, title, yLabel, col=None, row=None,
kind='point'):
df = statsUtil.filter_stats(data, filters)
g = sns.factorplot(x=xTarget, y=yTarget, col=col, row=row, hue='sender',
data=df, kind=kind, legend_out=False)
g.set(ylabel=yLabel)
g.fig.suptitle(title)
sns.plt.show()
# does single year only. Use with animations or to see boxplots
def plotSingleBasicLengthStatByYearAndHour(data, stat, yearsToShow=None,
targetSenders=None, ax=None):
df = statsUtil.filter_stats(data, {'sender':targetSenders, 'year':yearsToShow,
'stat':[stat]})
ax = sns.barplot(x="hour", y='val', hue="sender", data=df, ax=ax)
ax.set(ylabel=stat)
#sns.plt.show()
def plotSingleBasicLengthStatHeatmap(data, stat, targetSender, yearsToShow=None):
df = data.xs(targetSender, level='sender')
df = df.reset_index()
def plot(ax, df, count):
df = df.pivot('month', 'day', stat)
# TODO share y. Fix it or try factorgrid
ax = sns.heatmap(df, mask=df.isnull(), ax=ax, vmin=0, vmax=30000)#cmap=ListedColormap(['red', 'blue'])
ax.set(ylabel='month' if count == 1 else '')
_plotByYear(df, "{} ({})".format(stat, targetSender), plot, yearsToShow)
def plotSentimentStatsByHour(sentimentStats, valueNames):
data = statsUtil.transformSentimentStats(sentimentStats, valueNames, ['sender', 'hour'])
ax = sns.factorplot(x="hour", y="val", col="emotion", hue='sender',
data=data, kind="point", sharey=False, legend_out=False)
ax.set(ylabel='mean(val)')
sns.plt.show()
def plotSentimentStatsByYearAndMonth(sentimentStats, valueNames):
data = statsUtil.transformSentimentStats(sentimentStats, valueNames, ['sender', 'year', 'month'])
sns.factorplot(x="month", y="val", row="emotion", hue='sender', col='year',
data=data, kind="point", sharey=False, legend_out=False)
sns.plt.show()
# TODO fill all possible values for the index (dates, month, year)
# Add sender or total labels
def plotWordsCount(wordsCountStats, words, sender=None, yearsToShow=None):
data = wordsCountStats.getWordsCount(words, sender)
if data is None:
return
def plot(ax, df, count):
df.reset_index(level='year').plot(ax=ax)
if 'year' in list(data.index.names):
_plotByYear(data, 'Word count', plot, yearsToShow)
else:
data.plot()
sns.plt.show()
def plotZipfLaw(words, count):
figureAesthetic()
    #plt.figure(1).suptitle("Zipf's Law", fontsize=20)
ax = plt.subplot(1,2,1)
plt.xlabel("word")
plt.ylabel("frequency")
numWords = 20
x = np.arange(numWords)
y = count[:numWords]
plt.xticks(np.arange(len(x)), words[:numWords])
plt.gcf().autofmt_xdate()
ax.plot(x, y, c='r', linewidth=2)
ax = plt.subplot(1,2,2)
plt.xlabel("rank (log scale)")
plt.ylabel("frequency (log scale)")
x = np.arange(len(words))
plt.xscale('log')
plt.yscale('log')
ax.plot(x,count, c='r', linewidth=2)
plt.show()
def figureAesthetic():
sns.set_context("poster")
sns.set_style("darkgrid")
sns.plt.grid(True)
def _plotByYear(data, title, plotFun, yearsToShow=None):
plt.title(title)
if 'year' in list(data.index.names):
grouped = data.groupby(level='year')
else:
grouped = data.groupby('year')
numberOfYears = len(grouped) if not yearsToShow else len(yearsToShow)
count = 1
fig = plt.figure(1)
ax = None
for year, group in grouped:
if yearsToShow and year not in yearsToShow:
continue
if ax:
ax = plt.subplot(1, numberOfYears, count, sharey=ax)
else:
ax = plt.subplot(1,numberOfYears,count)
ax.set_title(year)
plotFun(ax, group, count)
count += 1
fig.suptitle(title)
sns.plt.show()
def savePlotAsImage(plot, filename):
folderPath = os.path.join(mio.getResourcesPath(), 'imgs')
if not os.path.exists(folderPath):
os.makedirs(folderPath)
if SAVE_PLOT:
filepath = os.path.join(folderPath, filename)
plot.savefig(filepath)
else:
pass
```
#### File: src/util/watsonUtils.py
```python
import json
from watson_developer_cloud import ToneAnalyzerV3
def getToneAnalyzerService(username, password):
toneAnalyzer = ToneAnalyzerV3(
username=username,
password=password,
version=ToneAnalyzerV3.latest_version,
x_watson_learning_opt_out=True)
return toneAnalyzer
def extractToneFrom(text, toneService):
res = toneService.tone(text, tones='emotion')
tones = res['document_tone']['tone_categories'][0]['tones']
tones = dict([(t['tone_id'], t['score']) for t in tones])
return tones
``` |
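Illustrative usage of the two helpers above (credentials are placeholders; it requires the watson_developer_cloud package and a provisioned Tone Analyzer service):
```python
service = getToneAnalyzerService("my-username", "my-password")
tones = extractToneFrom("I am really happy with how this turned out!", service)
print(tones)   # e.g. {'anger': 0.03, 'joy': 0.81, ...} depending on the service response
```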
{
"source": "5agado/face-swap",
"score": 3
} |
#### File: face-swap/face_swap/autoencoder.py
```python
from keras.layers import Input, Dense, Flatten, Reshape
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import Conv2D
from keras.models import Model
from keras.optimizers import Adam
from ast import literal_eval
from face_swap.PixelShuffler import PixelShuffler
def conv(filters, kernel_size=5, strides=2):
def block(x):
x = Conv2D(filters, kernel_size=kernel_size,
strides=strides, padding='same')(x)
x = LeakyReLU(0.1)(x)
return x
return block
# deconvolution block used in the decoder
def upscale(filters, kernel_size=3):
def block(x):
x = Conv2D(filters * 4, kernel_size=kernel_size,
padding='same')(x)
x = LeakyReLU(0.1)(x)
x = PixelShuffler()(x)
return x
return block
def Encoder(input_shape, hidden_dim, init_filters=128, num_conv_blocks=4):
model_input = Input(shape=input_shape)
x = model_input
for i in range(num_conv_blocks):
x = conv(init_filters * (2 ** i))(x)
x = Dense(hidden_dim)(Flatten()(x))
x = Dense(4 * 4 * hidden_dim)(x)
x = Reshape((4, 4, hidden_dim))(x)
x = upscale(hidden_dim//2)(x)
return Model(model_input, x)
def Decoder(input_shape, init_filters=256, num_deconv_blocks=3):
model_input = Input(shape=input_shape)
x = model_input
for i in range(num_deconv_blocks):
x = upscale(init_filters // (2 ** i))(x)
x = Conv2D(3, kernel_size=5, padding='same', activation='sigmoid')(x)
return Model(model_input, x)
def get_autoencoders(cfg):
models_path = cfg.get('models_path', None)
IMAGE_SHAPE = literal_eval(cfg.get('img_shape'))
ENCODER_DIM = cfg.get('encoder_dim')
DECODER_INPUT_SHAPE = literal_eval(cfg.get('decoder_input_shape'))
encoder_init_filters = cfg.get('encoder_init_filters')
encoder_nb_conv_blocks = cfg.get('encoder_nb_conv_blocks')
decoder_init_filters = cfg.get('decoder_init_filters')
decoder_nb_conv_blocks = cfg.get('decoder_nb_conv_blocks')
optimizer = Adam(lr=5e-5, beta_1=0.5, beta_2=0.999)
encoder = Encoder(IMAGE_SHAPE, ENCODER_DIM,
init_filters=encoder_init_filters,
num_conv_blocks=encoder_nb_conv_blocks)
decoder_a = Decoder(DECODER_INPUT_SHAPE,
init_filters=decoder_init_filters,
num_deconv_blocks=decoder_nb_conv_blocks)
decoder_b = Decoder(DECODER_INPUT_SHAPE,
init_filters=decoder_init_filters,
num_deconv_blocks=decoder_nb_conv_blocks)
x = Input(shape=IMAGE_SHAPE)
autoencoder_a = Model(x, decoder_a(encoder(x)))
autoencoder_b = Model(x, decoder_b(encoder(x)))
autoencoder_a.compile(optimizer=optimizer, loss='mean_absolute_error')
autoencoder_b.compile(optimizer=optimizer, loss='mean_absolute_error')
if models_path:
print("Loading Autoencoder Models...")
encoder.load_weights(models_path + '/encoder.h5')
decoder_a.load_weights(models_path + '/decoder_A.h5')
decoder_b.load_weights(models_path + '/decoder_B.h5')
print("Autoencoder Models Loaded")
return autoencoder_a, autoencoder_b
```
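A hypothetical configuration for get_autoencoders is sketched below. The key names match what the function reads; the concrete numbers are my assumption for illustration (64x64 inputs, a 1024-dim code, and the 8x8x512 decoder input that the Encoder above produces for those settings), and the shape values are strings because the function passes them through literal_eval.
```python
cfg = {
    'models_path': None,              # set to a folder to load pre-trained weights
    'img_shape': '(64, 64, 3)',
    'encoder_dim': 1024,
    # Encoder output for a 64x64 input: four stride-2 convs -> 4x4, dense + reshape,
    # then one upscale block -> 8x8 with encoder_dim // 2 channels
    'decoder_input_shape': '(8, 8, 512)',
    'encoder_init_filters': 128,
    'encoder_nb_conv_blocks': 4,
    'decoder_init_filters': 256,
    'decoder_nb_conv_blocks': 3,
}

autoencoder_a, autoencoder_b = get_autoencoders(cfg)
autoencoder_a.summary()
```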
#### File: face-swap/face_swap/FaceGenerator.py
```python
import cv2
import numpy as np
from skimage.transform._geometric import _umeyama
from ast import literal_eval
import copy
import logging
from face_swap.Face import Face
import face_swap.faceswap_utils as utils
class FaceGenerator:
def __init__(self, generator_fun, input_size, config, resize_fun=None):
"""
        :param generator_fun: takes a face and returns an img plus an optional mask
:param input_size: target size for the seed face before being fed to the generator fun
"""
self.generator_fun = generator_fun
        # whether to rescale predictions from the tanh output range [-1, 1] back to [0, 1]
        # before converting to the 0-255 RGB range (needed when the model output uses a tanh activation)
self.tanh_fix = config['tanh_fix']
self.input_size = input_size
self.align = config['align']
self.border_expand = literal_eval(config['border_expand'])
self.resize_fun = resize_fun
# TODO fix alignment process
def generate(self, seed_face: Face=None, output_size=None):
"""
Operates pre and post processing around the generation function
:param seed_face:
:param output_size:
:return:
"""
# pre-process face
if self.align:
face_img, _ = utils.align_face(seed_face, boundary_resize_factor=self.border_expand)
_, reverse_matrix = utils.align_face(seed_face, boundary_resize_factor=self.border_expand, invert=True)
else:
face_img = seed_face.get_face_img()
#old_size = face_img.shape[:2][::-1]
face_img = cv2.resize(face_img, self.input_size)
face_img = face_img / 255 * 2 - 1 if self.tanh_fix else face_img / 255.
# generate face
gen_face, face_mask = self.generator_fun(face_img)
if self.align:
#aligned_face = copy.copy(seed_face)
#aligned_face.img = gen_face
#gen_face, align_matrix = utils.align_face(seed_face, boundary_resize_factor=self.border_expand,
# invert=True, img=cv2.resize(gen_face, old_size))
# Align back mask too
#gen_face = cv2.resize(gen_face, old_size)
#face_mask = cv2.resize(face_mask, old_size)
gen_face = cv2.warpAffine(gen_face, reverse_matrix, self.input_size,
borderMode=cv2.BORDER_REPLICATE)
face_mask = cv2.warpAffine(face_mask, reverse_matrix, self.input_size, borderMode=cv2.BORDER_REPLICATE)
#gen_face = cv2.resize(gen_face, self.input_size)
#face_mask = cv2.resize(face_mask, self.input_size)
# post-process face
gen_face = (gen_face + 1) * 255 / 2 if self.tanh_fix else gen_face * 255
gen_face = np.clip(gen_face, 0, 255).astype(np.uint8)
# if not specified we simply output with the same size of seed image
if not output_size:
output_size = seed_face.get_face_img().shape[:2][::-1]
# resizing logic
# if not specified just rely on basic resizing
if not self.resize_fun:
gen_face = cv2.resize(gen_face, output_size,
interpolation=cv2.INTER_CUBIC)
# otherwise rely on provided resize function
else:
gen_face = self.resize_fun(gen_face, output_size)
if face_mask is not None:
face_mask = cv2.resize(face_mask, output_size,
interpolation=cv2.INTER_CUBIC)
return gen_face, face_mask
def aue_generate_face(aue, face_img: np.array):
"""
:param aue: autoencoder to use for the generation (call predict on it)
:param face_img: img to feed to the generator
:return:
"""
gen_img = aue.predict(np.expand_dims(face_img, 0))[0]
return gen_img, None
def gan_masked_generate_face(generator_fun, face_img: np.array):
"""
    Generates a face from the seed one using a generator_fun that outputs an alpha mask concatenated with BGR channels
:param generator_fun: takes an image and returns alpha mask concatenated with bgr results
:param face_img: img to feed to the generator
:return:
"""
gen_res = generator_fun(face_img)
gen_mask = gen_res[:, :, 0]
gen_bgr = gen_res[:, :, 1:]
gen_mask = np.clip(gen_mask * 255, 0, 255).astype(np.uint8)
# stack mask such as we have three channels
gen_mask = np.stack([gen_mask, gen_mask, gen_mask], axis=2)
return gen_bgr, gen_mask
def random_channel_shift(x, intensity=None, channel_axis=2):
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
if intensity is None:
intensity = max_x/255*15.
channel_images = [np.clip(x_channel + np.random.uniform(-intensity, intensity), min_x, max_x) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def random_transform(img, rotation_range=90,
zoom_range=0.1, shift_range=0.1, random_flip=0.5,
channel_shift_intensity=0):
h, w = img.shape[:2]
# setup transformation factors
rotation = np.random.uniform(-rotation_range, rotation_range)
scale = np.random.uniform(1 - zoom_range, 1 + zoom_range)
tx = np.random.uniform(-shift_range, shift_range) * w
ty = np.random.uniform(-shift_range, shift_range) * h
# setup transformation matrix
mat = cv2.getRotationMatrix2D((w//2, h//2), rotation, scale)
mat[:, 2] += (tx, ty)
# warp affine
result = cv2.warpAffine(img, mat, (w, h), borderMode=cv2.BORDER_REPLICATE)
# optionally flip horizontally
if np.random.random() < random_flip:
result = result[:, ::-1]
# optionally apply random channel shift:
if channel_shift_intensity > 0:
result = random_channel_shift(result, intensity=channel_shift_intensity)
return result
def random_warp(img, mult_f=1):
"""
Get pair of random warped images from aligned face image.
For now assumes input image to be 256x256, and base returning size of 64x64
:param img:
:param mult_f: determines size of returned images (multiply each dimension, e.g. 2 for 128x128)
:return:
"""
assert img.shape == (256, 256, 3)
#range_ = np.linspace(128 - 80, 128 + 80, 5)
range_ = np.linspace(128 - 110, 128 + 110, 5)
mapx = np.broadcast_to(range_, (5, 5))
mapy = mapx.T
mapx = mapx + np.random.normal(size=(5, 5), scale=5)
mapy = mapy + np.random.normal(size=(5, 5), scale=5)
interp_mapx = cv2.resize(mapx, (80*mult_f, 80*mult_f))[8*mult_f:72*mult_f, 8*mult_f:72*mult_f].astype('float32')
interp_mapy = cv2.resize(mapy, (80*mult_f, 80*mult_f))[8*mult_f:72*mult_f, 8*mult_f:72*mult_f].astype('float32')
warped_image = cv2.remap(img, interp_mapx, interp_mapy, cv2.INTER_LINEAR)
src_points = np.stack([mapx.ravel(), mapy.ravel()], axis=-1)
dst_points = np.mgrid[0:65*mult_f:16*mult_f, 0:65*mult_f:16*mult_f].T.reshape(-1, 2)
mat = _umeyama(src_points, dst_points, True)[0:2]
target_image = cv2.warpAffine(img, mat, (64*mult_f, 64*mult_f))
target_image = cv2.resize(target_image, (64*mult_f, 64*mult_f))
warped_image = cv2.resize(warped_image, (64*mult_f, 64*mult_f))
return warped_image, target_image
def super_resolution_resizing(sr_model, lr_img, dest_size):
SIZE_DIFF_THRESHOLD = 50
sr_model_out_size = sr_model.output_shape[1:3]
lr_img_size = lr_img.shape[0:2]
img_to_resize = lr_img
# use sr only if necessary
# notice that for now sr_model_input_img_size should be the same as lr_img_size
if (sum(dest_size) > sum(lr_img_size) and
(sum(dest_size) - sum(sr_model_out_size)) > SIZE_DIFF_THRESHOLD):
hr_image = sr_model.predict(np.asarray([lr_img / 255.0]))[0]
hr_image = np.clip(hr_image * 255, 0, 255).astype(np.uint8)
img_to_resize = hr_image
else:
logging.debug("Not using super res. lr_img_size {} - dst_size {}".format(lr_img_size, dest_size))
res_img = cv2.resize(img_to_resize, dest_size,
interpolation=cv2.INTER_CUBIC)
return res_img
```
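A sketch of how random_transform and random_warp could be combined to build one (warped, target) training pair, as the random_warp docstring suggests; the random-noise image stands in for a real 256x256 aligned face crop.
```python
import numpy as np

aligned_face = np.random.randint(0, 256, size=(256, 256, 3), dtype=np.uint8)

augmented = random_transform(aligned_face, rotation_range=10,
                             zoom_range=0.05, shift_range=0.05,
                             random_flip=0.5)
warped, target = random_warp(augmented, mult_f=1)

print(warped.shape, target.shape)   # (64, 64, 3) (64, 64, 3)
```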
#### File: face-swap/face_swap/faceswap_utils.py
```python
from functools import reduce
import logging
import cv2
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
from face_swap.Face import Face
from skimage.transform._geometric import _umeyama
def get_face_mask(face: Face, mask_type,
erosion_size=None,
dilation_kernel=None,
blur_size: int = None):
"""
Return mask of mask_type for the given face.
:param face:
:param mask_type:
:param erosion_size:
:param dilation_kernel:
:param blur_size:
:return:
"""
if mask_type == 'hull':
# we can rotate the hull mask obtained from original image
# or re-detect face from aligned image, and get mask then
mask = get_hull_mask(face, 255)
elif mask_type == 'rect':
face_img = face.get_face_img()
mask = np.zeros(face_img.shape, dtype=face_img.dtype)+255
else:
logging.error("No such mask type: {}".format(mask_type))
raise Exception("No such mask type: {}".format(mask_type))
# apply mask modifiers
if erosion_size:
erosion_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, erosion_size)
mask = cv2.erode(mask, erosion_kernel, iterations=1)
if dilation_kernel:
mask = cv2.dilate(mask, dilation_kernel, iterations=1)
if blur_size:
mask = cv2.blur(mask, (blur_size, blur_size))
return mask
def get_hull_mask(from_face: Face, fill_val=1):
"""
:param from_face:
:param fill_val: generally 1 or 255
:return:
"""
mask = np.zeros(from_face.img.shape, dtype=from_face.img.dtype)
hull = cv2.convexHull(np.array(from_face.landmarks).reshape((-1, 2)).astype(int)).flatten().reshape((
-1, 2))
hull = [(p[0], p[1]) for p in hull]
cv2.fillConvexPoly(mask, np.int32(hull), (fill_val, fill_val, fill_val))
return mask
def seamless_cloning(hull_to, to_face, img_res):
# Calculate Mask
hull8U = [(p[0], p[1]) for p in hull_to]
mask = np.zeros(to_face.img.shape, dtype=to_face.img.dtype)
cv2.fillConvexPoly(mask, np.int32(hull8U), (255, 255, 255))
# Actual seamless cloning
r = cv2.boundingRect(np.float32([hull_to]))
center = (r[0] + int(r[2] / 2), r[1] + int(r[3] / 2))
# Clone seamlessly.
output = cv2.seamlessClone(np.uint8(img_res), to_face.img, mask, center, cv2.NORMAL_CLONE)
return output
def insert_image_in(src_img, target_image, center, mask):
"""
Insert/superpose source image into target image, matching source with the given center and applying the given mask
:param src_img:
:param target_image:
:param center:
:param mask:
:return:
"""
res = target_image.copy()
y1 = max(0, center[1] - src_img.shape[0] // 2)
y2 = y1 + src_img.shape[0]
x1 = max(0, center[0] - src_img.shape[1] // 2)
x2 = x1 + src_img.shape[1]
# need to check how much I can cover on the destination
# and make sure source is same size, otherwise throws
# exception
dest_shape = res[y1:y2, x1:x2, :].shape[:2]
alpha_s = mask[:dest_shape[0], :dest_shape[1], :] / 255.0
alpha_l = 1.0 - alpha_s
res[y1:y2, x1:x2, :] = (alpha_s * src_img[:dest_shape[0], :dest_shape[1], :] +
alpha_l * res[y1:y2, x1:x2, :])
return res
#################################
# ALIGNMENT #
#################################
mean_face_x = np.array([
0.000213256, 0.0752622, 0.18113, 0.29077, 0.393397, 0.586856, 0.689483, 0.799124,
0.904991, 0.98004, 0.490127, 0.490127, 0.490127, 0.490127, 0.36688, 0.426036,
0.490127, 0.554217, 0.613373, 0.121737, 0.187122, 0.265825, 0.334606, 0.260918,
0.182743, 0.645647, 0.714428, 0.793132, 0.858516, 0.79751, 0.719335, 0.254149,
0.340985, 0.428858, 0.490127, 0.551395, 0.639268, 0.726104, 0.642159, 0.556721,
0.490127, 0.423532, 0.338094, 0.290379, 0.428096, 0.490127, 0.552157, 0.689874,
0.553364, 0.490127, 0.42689])
mean_face_y = np.array([
0.106454, 0.038915, 0.0187482, 0.0344891, 0.0773906, 0.0773906, 0.0344891,
0.0187482, 0.038915, 0.106454, 0.203352, 0.307009, 0.409805, 0.515625, 0.587326,
0.609345, 0.628106, 0.609345, 0.587326, 0.216423, 0.178758, 0.179852, 0.231733,
0.245099, 0.244077, 0.231733, 0.179852, 0.178758, 0.216423, 0.244077, 0.245099,
0.780233, 0.745405, 0.727388, 0.742578, 0.727388, 0.745405, 0.780233, 0.864805,
0.902192, 0.909281, 0.902192, 0.864805, 0.784792, 0.778746, 0.785343, 0.778746,
0.784792, 0.824182, 0.831803, 0.824182])
default_landmarks_2D = np.stack([mean_face_x, mean_face_y], axis=1)
# other implementation option see
# https://matthewearl.github.io/2015/07/28/switching-eds-with-python/
def align_face(face, boundary_resize_factor=None, invert=False, img=None):
if img is None:
face_img = face.get_face_img(boundary_resize_factor=boundary_resize_factor)
else:
face_img = img
src_landmarks = np.array([(x - face.rect.left, y - face.rect.top) for (x, y) in face.landmarks])
# need to resize default ones to match given head size
(w, h) = face.get_face_size()
translation = None
if boundary_resize_factor:
img_w, img_h = face_img.shape[:2][::-1]
translation = (img_w - w, img_h - h)
#w += translation[0]
#h += translation[1]
# w/1.5 h/1.5
scaled_default_landmarks = np.array([(int(x * w), int(y * h)) for (x, y) in default_landmarks_2D])
# default aligned face has only 51 landmarks, so we remove
# first 17 from the given one in order to align
src_landmarks = src_landmarks[17:]
target_landmarks = scaled_default_landmarks
if invert:
align_matrix = get_align_matrix(target_landmarks, src_landmarks, translation)
else:
align_matrix = get_align_matrix(src_landmarks, target_landmarks, translation)
aligned_img = cv2.warpAffine(face_img,
align_matrix,
(w, h),
borderMode=cv2.BORDER_REPLICATE)
return aligned_img, align_matrix
def get_align_matrix(src_landmarks, target_landmarks, translation: tuple = None):
align_matrix = _umeyama(src_landmarks, target_landmarks, True)[:2]
if translation:
align_matrix[0, 2] -= translation[0]//2
align_matrix[1, 2] -= translation[1]//2
return align_matrix
#################################
# COLOR CORRECTION
#################################
def correct_colours(img_1, img_2, blur_amount):
blur_amount = int(blur_amount)
if blur_amount % 2 == 0:
blur_amount += 1
im1_blur = cv2.GaussianBlur(img_1, (blur_amount, blur_amount), 0)
im2_blur = cv2.GaussianBlur(img_2, (blur_amount, blur_amount), 0)
# Avoid divide-by-zero errors.
im2_blur += (128 * (im2_blur <= 1.0)).astype(im2_blur.dtype)
return (img_2.astype(np.float64) * im1_blur.astype(np.float64) /
im2_blur.astype(np.float64))
# See also http://vzaguskin.github.io/histmatching1/
def hist_match(source, template):
# Code borrow from:
# https://stackoverflow.com/questions/32655686/histogram-matching-of-two-images-in-python-2-x
oldshape = source.shape
source = source.ravel()
template = template.ravel()
s_values, bin_idx, s_counts = np.unique(source, return_inverse=True,
return_counts=True)
t_values, t_counts = np.unique(template, return_counts=True)
s_quantiles = np.cumsum(s_counts).astype(np.float64)
s_quantiles /= s_quantiles[-1]
t_quantiles = np.cumsum(t_counts).astype(np.float64)
t_quantiles /= t_quantiles[-1]
interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)
return interp_t_values[bin_idx].reshape(oldshape)
def color_hist_match(src_im, tar_im):
#src_im = cv2.cvtColor(src_im, cv2.COLOR_BGR2Lab)
#tar_im = cv2.cvtColor(tar_im, cv2.COLOR_BGR2Lab)
matched_R = hist_match(src_im[:,:,0], tar_im[:,:,0])
matched_G = hist_match(src_im[:,:,1], tar_im[:,:,1])
matched_B = hist_match(src_im[:,:,2], tar_im[:,:,2])
matched = np.stack((matched_R, matched_G, matched_B), axis=2).astype(np.float64)
return matched
def hist_eq(source, template, nbr_bins=256):
imres = source.copy()
for d in range(source.shape[2]):
        imhist, bins = np.histogram(source[:, :, d].flatten(), nbr_bins, density=True)
        tinthist, bins = np.histogram(template[:, :, d].flatten(), nbr_bins, density=True)
cdfsrc = imhist.cumsum() #cumulative distribution function
cdfsrc = (255 * cdfsrc / cdfsrc[-1]).astype(np.uint8) #normalize
cdftint = tinthist.cumsum() #cumulative distribution function
cdftint = (255 * cdftint / cdftint[-1]).astype(np.uint8) #normalize
im2 = np.interp(source[:, :, d].flatten(), bins[:-1], cdfsrc)
im3 = np.interp(im2, cdftint, bins[:-1])
imres[:, :, d] = im3.reshape((source.shape[0], source.shape[1]))
return imres
```
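A small sketch of the colour-correction helpers above, matching a generated face's colour distribution to the target region; the random arrays stand in for real face crops.
```python
import numpy as np

gen_face = np.random.randint(0, 256, size=(64, 64, 3)).astype(np.float64)
target_crop = np.random.randint(0, 256, size=(64, 64, 3)).astype(np.float64)

matched = color_hist_match(gen_face, target_crop)
matched = np.clip(matched, 0, 255).astype(np.uint8)
print(matched.shape)   # (64, 64, 3)
```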
#### File: face-swap/face_swap/gan_utils.py
```python
import keras.backend as K
from keras.layers import concatenate, Lambda
import tensorflow as tf
from tensorflow.contrib.distributions import Beta
from keras.models import Sequential, Model
def cycle_variables(gen):
"""
Return basic generator components (inputs, outputs and generation function)
:param gen: generator
:return:
"""
distorted_input = gen.inputs[0]
fake_output = gen.outputs[0]
fun_generate = K.function([distorted_input], [fake_output])
return distorted_input, fake_output, fun_generate
def cycle_variables_masked(gen):
"""
Return masked generator components (inputs, outputs and generation function)
:param gen: generator
:return:
"""
# input and output of the generator
distorted_input = gen.inputs[0]
fake_output = gen.outputs[0]
# in the generator we pre-append an alpha component to the output
# we here separate such alpha component from the actual bgr image
alpha = Lambda(lambda x: x[:, :, :, :1])(fake_output)
rgb = Lambda(lambda x: x[:, :, :, 1:])(fake_output)
masked_fake_output = alpha * rgb + (1 - alpha) * distorted_input
fun_generate = K.function([distorted_input], [masked_fake_output])
fun_mask = K.function([distorted_input], [concatenate([alpha, alpha, alpha])])
fun_abgr = K.function([distorted_input], [concatenate([alpha, rgb])])
return distorted_input, fake_output, alpha, fun_generate, fun_mask, fun_abgr
def define_loss(netD, real, fake, vggface_feat=None, mixup_alpha=None, use_lsgan=True):
loss_fn = get_loss_fun(use_lsgan)
if mixup_alpha:
dist = Beta(mixup_alpha, mixup_alpha)
lam = dist.sample()
mixup = lam * real + (1 - lam) * fake
output_mixup = netD(mixup)
loss_D = loss_fn(output_mixup, lam * K.ones_like(output_mixup))
output_fake = netD(fake) # dummy
loss_G = .5 * loss_fn(output_mixup, (1 - lam) * K.ones_like(output_mixup))
else:
output_real = netD(real) # positive sample
output_fake = netD(fake) # negative sample
loss_D_real = loss_fn(output_real, K.ones_like(output_real))
loss_D_fake = loss_fn(output_fake, K.zeros_like(output_fake))
loss_D = loss_D_real + loss_D_fake
loss_G = .5 * loss_fn(output_fake, K.ones_like(output_fake))
loss_G += K.mean(K.abs(fake - real))
    if vggface_feat is not None:
loss_G = add_perceptual_loss(loss_G, real=real, fake=fake, vggface_feat=vggface_feat)
return loss_D, loss_G
def define_loss_masked(netD, real, fake_argb, distorted, vggface_feat=None, mixup_alpha=None, use_lsgan=True):
# loss weights
w_D = 0.5 # Discriminator contribution to generator loss
w_recon = 1. # L1 reconstruction loss
w_edge = 1. # edge loss
loss_fn = get_loss_fun(use_lsgan)
alpha = Lambda(lambda x: x[:, :, :, :1])(fake_argb)
fake_rgb = Lambda(lambda x: x[:, :, :, 1:])(fake_argb)
fake = alpha * fake_rgb + (1 - alpha) * distorted
if mixup_alpha:
dist = Beta(mixup_alpha, mixup_alpha)
lam = dist.sample()
mixup = lam * concatenate([real, distorted]) + (1 - lam) * concatenate([fake, distorted])
output_mixup = netD(mixup)
loss_D = loss_fn(output_mixup, lam * K.ones_like(output_mixup))
output_fake = netD(concatenate([fake, distorted])) # dummy
loss_G = w_D * loss_fn(output_mixup, (1 - lam) * K.ones_like(output_mixup))
else:
output_real = netD(concatenate([real, distorted])) # positive sample
output_fake = netD(concatenate([fake, distorted])) # negative sample
loss_D_real = loss_fn(output_real, K.ones_like(output_real))
loss_D_fake = loss_fn(output_fake, K.zeros_like(output_fake))
loss_D = loss_D_real + loss_D_fake
loss_G = w_D * loss_fn(output_fake, K.ones_like(output_fake))
# Reconstruction loss
loss_G += w_recon * K.mean(K.abs(fake_rgb - real))
# Edge loss (similar with total variation loss)
loss_G += w_edge * K.mean(K.abs(first_order(fake_rgb, axis=1) - first_order(real, axis=1)))
loss_G += w_edge * K.mean(K.abs(first_order(fake_rgb, axis=2) - first_order(real, axis=2)))
# Perceptual Loss
    if vggface_feat is not None:
loss_G = add_perceptual_loss_masked(loss_G, real=real, fake=fake, vggface_feat=vggface_feat, fake_rgb=fake_rgb)
return loss_D, loss_G
# Build a perceptual-loss model from VGG pre-trained model
# Via Keras: vgg = VGG16(include_top=False, input_shape=HR_IMG_SHAPE)
# or, for VGGFace (where model can be 'resnet50' or 'vgg16'):
# vggface = VGGFace(include_top=False, model='resnet50', input_shape=(224, 224, 3))
def perceptual_loss_model(vgg_model=None):
if vgg_model:
vgg_model.trainable = False
out_size55 = vgg_model.layers[36].output
out_size28 = vgg_model.layers[78].output
out_size7 = vgg_model.layers[-2].output
vgg_feat = Model(vgg_model.input, [out_size55, out_size28, out_size7])
vgg_feat.trainable = False
else:
vgg_feat = None
return vgg_feat
def add_perceptual_loss(loss_G, real, fake, vggface_feat):
pl_params = (0.01, 0.1, 0.1)
real_sz224 = tf.image.resize_images(real, [224, 224])
fake_sz224 = tf.image.resize_images(fake, [224, 224])
real_feat55, real_feat28, real_feat7 = vggface_feat(real_sz224)
fake_feat55, fake_feat28, fake_feat7 = vggface_feat(fake_sz224)
loss_G += pl_params[0] * K.mean(K.abs(fake_feat7 - real_feat7))
loss_G += pl_params[1] * K.mean(K.abs(fake_feat28 - real_feat28))
loss_G += pl_params[2] * K.mean(K.abs(fake_feat55 - real_feat55))
return loss_G
def add_perceptual_loss_masked(loss_G, real, fake, vggface_feat, fake_rgb):
w_pl1 = (0.01, 0.1, 0.2, 0.02) # perceptual loss 1
w_pl2 = (0.005, 0.05, 0.1, 0.01) # perceptual loss 2
def preprocess_vggface(x):
x = (x + 1) / 2 * 255 # channel order: BGR
# x[..., 0] -= 93.5940
# x[..., 1] -= 104.7624
# x[..., 2] -= 129.
x -= [91.4953, 103.8827, 131.0912]
return x
pl_params = w_pl1
real_sz224 = tf.image.resize_images(real, [224, 224])
real_sz224 = Lambda(preprocess_vggface)(real_sz224)
# Perceptual loss for masked output
fake_sz224 = tf.image.resize_images(fake, [224, 224])
fake_sz224 = Lambda(preprocess_vggface)(fake_sz224)
real_feat112, real_feat55, real_feat28, real_feat7 = vggface_feat(real_sz224)
fake_feat112, fake_feat55, fake_feat28, fake_feat7 = vggface_feat(fake_sz224)
loss_G += pl_params[0] * K.mean(K.abs(fake_feat7 - real_feat7))
loss_G += pl_params[1] * K.mean(K.abs(fake_feat28 - real_feat28))
loss_G += pl_params[2] * K.mean(K.abs(fake_feat55 - real_feat55))
loss_G += pl_params[3] * K.mean(K.abs(fake_feat112 - real_feat112))
# Perceptual loss for raw output
pl_params = w_pl2
fake_sz224 = tf.image.resize_images(fake_rgb, [224, 224])
fake_sz224 = Lambda(preprocess_vggface)(fake_sz224)
fake_feat112, fake_feat55, fake_feat28, fake_feat7 = vggface_feat(fake_sz224)
loss_G += pl_params[0] * K.mean(K.abs(fake_feat7 - real_feat7))
loss_G += pl_params[1] * K.mean(K.abs(fake_feat28 - real_feat28))
loss_G += pl_params[2] * K.mean(K.abs(fake_feat55 - real_feat55))
loss_G += pl_params[3] * K.mean(K.abs(fake_feat112 - real_feat112))
return loss_G
def first_order(x, axis=1):
img_nrows = x.shape[1]
img_ncols = x.shape[2]
if axis == 1:
return K.abs(x[:, :img_nrows - 1, :img_ncols - 1, :] - x[:, 1:, :img_ncols - 1, :])
elif axis == 2:
return K.abs(x[:, :img_nrows - 1, :img_ncols - 1, :] - x[:, :img_nrows - 1, 1:, :])
else:
return None
def get_loss_fun(use_lsgan=True):
# least square loss
if use_lsgan:
loss_fn = lambda output, target: K.mean(K.abs(K.square(output - target)))
else:
loss_fn = lambda output, target: -K.mean(
K.log(output + 1e-12) * target + K.log(1 - output + 1e-12) * (1 - target))
return loss_fn
``` |
{
"source": "5agado/fitbit-analyzer",
"score": 3
} |
#### File: src/stats/stepsStats.py
```python
import pandas as pd
import numpy as np
NAME_VAL_COL = "value"
NAME_DT_COL = "datetime"
def groupAndSumByDate(stepsData):
data = stepsData
if isinstance(stepsData, list):
data = pd.concat(stepsData, ignore_index=True)
res = data.groupby(data[NAME_DT_COL].dt.date)[NAME_VAL_COL].sum()
return res
```
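A minimal example of groupAndSumByDate, using the 'datetime' and 'value' column names the module expects:
```python
import pandas as pd

steps = pd.DataFrame({
    'datetime': pd.to_datetime(['2016-03-01 08:00', '2016-03-01 09:00',
                                '2016-03-02 08:00']),
    'value': [500, 700, 300],
})
print(groupAndSumByDate(steps))
# 2016-03-01    1200
# 2016-03-02     300
```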
#### File: src/util/plotting.py
```python
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from scipy import stats as scipystats
from src.stats import sleepStats, hbStats
NAMES={'sleep_inefficiency':'Sleep Inefficiency (%)',
'sleep_efficiency':'Sleep Efficiency (%)',
'restless':'Restless (minutes)',
'awake':'Awake (minutes)',
'total_minutes':'Total Minutes',
'sleep_hours':'Hours of Sleep',
'first_min_asleep':'First Minute Asleep'}
dayOfWeek = {0: 'Mon', 1: 'Tue', 2: 'Wed', 3: 'Thur', 4: 'Fri', 5: 'Sat', 6: 'Sun'}
dayOfWeekOrder = ['Mon', 'Tue', 'Wed', 'Thur', 'Fri', 'Sat', 'Sun']
months={1:'Jan', 2:'Feb', 3:'Mar', 4:'Apr', 5:'May', 6:'Jun', 7:'Jul', 8:'Aug',
9:'Sep', 10:'Oct', 11:'Nov', 12:'Dec'}
monthsOrder = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
def plotPreliminaryStats(stats):
"""
Plot measures distribution using histograms
:param stats: data to plot
"""
columns = ['sleep_efficiency', 'restless', 'awake', 'total_minutes', 'sleep_hours',
'first_min_asleep']
plotStats = stats[columns]
plotStats = plotStats.rename(columns=NAMES)
return plotStats.hist()
#sns.plt.show()
def plotWeekdayStatsSleep(stats):
columns = ['sleep_efficiency', 'restless', 'sleep_hours',
'first_min_asleep', 'awake', 'total_minutes']
return _plotWeekdayStats(stats, columns, groupBy=True)
def plotWeekdayStatsHb(stats):
columns = ['count', 'mean', 'std']
return _plotWeekdayStats(stats, columns, groupBy=False)
def _plotWeekdayStats(stats, columns, groupBy=True):
dataToPlot = stats.copy()
# Group by weekday and rename date column
if groupBy:
dataToPlot = dataToPlot.groupby(stats['date'].dt.weekday).mean()
dataToPlot = dataToPlot.reset_index().rename(columns={'date':'weekday'})
# change stats from columns to row attribute
dataToPlot = pd.melt(dataToPlot, id_vars=['weekday'], value_vars=columns,
var_name='stats', value_name='val')
# Rename stats and weekdays
dataToPlot['stats'].replace(NAMES, inplace=True)
dataToPlot['weekday'].replace(dayOfWeek, inplace=True)
# Plot
g = sns.factorplot(data=dataToPlot, x="weekday", y="val", col="stats",
order=dayOfWeekOrder, kind="point", sharey=False, col_wrap=3)
g.set_xticklabels(rotation=45)
g.set(xlabel='')
return g
#sns.plt.show()
def plotWeekdayStatsByMonthSleep(stats):
stat_name = 'sleep_efficiency'
return _plotWeekdayByMonthStats(stats, stat_name)
def _plotWeekdayByMonthStats(stats, stat_name):
dataToPlot = _prepareWeekdayByMonthStats(stats)
# Plot
g = sns.pointplot(data=dataToPlot, x="day", y=stat_name, hue="month", order=dayOfWeekOrder)
g.set(xlabel='')
g.set_ylabel(NAMES[stat_name])
return g
#sns.plt.show()
def _prepareWeekdayByMonthStats(stats):
# Add day and month columns, and groupby
stats = stats.copy()
stats['day'] = stats['date'].dt.weekday
stats['month'] = stats['date'].dt.month
dataToPlot = stats.groupby(['day', 'month']).mean()
dataToPlot = dataToPlot.reset_index()
dataToPlot['day'].replace(dayOfWeek, inplace=True)
dataToPlot['month'].replace(months, inplace=True)
return dataToPlot
# def plotWeekdayStats(stats, columns):
# """
# Plot aggregated (mean) stats by dayOfWeek
# :param stats: data to plot
# :param columns: columns from stats to plot
# """
# MEASURE_NAME = 'weekday'
# dayOfWeek={0:'Mon', 1:'Tue', 2:'Wed', 3:'Thur', 4:'Fri', 5:'Sat', 6:'Sun'}
# order = ['Mon','Tue','Wed','Thur','Fri','Sat','Sun']
# stats[MEASURE_NAME] = stats[MEASURE_NAME].map(dayOfWeek)
#
# f, axes = getAxes(2,2)
# for i, c in enumerate(columns):
# if c in NAMES:
# c = NAMES[c]
# g = sns.barplot(x=MEASURE_NAME, y=c, data=stats, order=order, ax=axes[i])
# g.set_xlabel('')
# sns.plt.show()
# #plot(stats, columns, MEASURE_NAME, 2, 3, order=order)
def plotMonthlyStatsSleep(stats, columns=None):
if not columns:
columns = ['sleep_inefficiency', 'restless', 'sleep_hours',
'first_min_asleep']
return _plotMonthlyStats(stats, columns, groupBy=True)
def plotMonthlyStatsHb(stats):
columns = ['count', 'mean', 'max', 'min', 'std']
return _plotMonthlyStats(stats, columns, groupBy=False)
def _plotMonthlyStats(stats, columns, groupBy=True):
dataToPlot = stats.copy()
# Group by month and rename date column
if groupBy:
dataToPlot = dataToPlot.groupby(stats['date'].dt.month).mean()
dataToPlot = dataToPlot.reset_index().rename(columns={'date': 'month'})
# change stats from columns to row attribute
dataToPlot = pd.melt(dataToPlot, id_vars=['month'], value_vars=columns,
var_name='stats', value_name='val')
# Rename stats and weekdays
dataToPlot['stats'].replace(NAMES, inplace=True)
dataToPlot['month'].replace(months, inplace=True)
order = [m for m in monthsOrder if m in dataToPlot['month'].unique()]
# Plot
g = sns.factorplot(data=dataToPlot, x="month", y="val", col="stats", order=order, kind="bar", sharey=False)
g.set_xticklabels(rotation=45)
g.set(xlabel='')
return g
#sns.plt.show()
# def _plotMonthlyStats(stats, columns):
# """
# Plot aggregated (mean) stats by month
# :param stats: data to plot
# :param columns: columns from stats to plot
# """
# MEASURE_NAME = 'month'
# months={1:'Jan', 2:'Feb', 3:'Mar', 4:'Apr', 5:'May', 6:'Jun', 7:'Jul', 8:'Aug',
# 9:'Sep', 10:'Oct', 11:'Nov', 12:'Dec'}
# order = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
# stats[MEASURE_NAME] = stats[MEASURE_NAME].map(months)
#
# order = [m for m in order if m in stats[MEASURE_NAME].unique()]
#
# f, axes = getAxes(2,2)
# for i, c in enumerate(columns):
# if c in NAMES:
# c = NAMES[c]
# g = sns.barplot(x=MEASURE_NAME, y=c, data=stats, order=order, ax=axes[i])
# g.set_xlabel('')
# sns.plt.show()
def _prepareYearAndMonthStats(stats, columns):
# Group by month and change stats from columns to row attribute
dataToPlot = stats.groupby(stats['date'].dt.to_period("M")).mean()
dataToPlot = pd.melt(dataToPlot.reset_index(), id_vars=['date'], value_vars=columns,
var_name='stats', value_name='val')
# Rename stats
dataToPlot['stats'].replace(NAMES, inplace=True)
return dataToPlot
def plotYearAndMonthStatsSleep(stats, columns=None):
"""
Plot aggregated (mean) stats by year and month.
:param stats: data to plot
"""
if not columns:
columns = ['sleep_efficiency', 'sleep_hours']
dataToPlot = _prepareYearAndMonthStats(stats, columns)
# Plot
g = sns.factorplot(data=dataToPlot, x="date", y="val", row="stats", kind="point", sharey=False)
g.set_xticklabels(rotation=45)
for ax in g.axes.flat:
ax.grid(b=True)
return g
#sns.plt.show()
def _prepareDailyStats(stats, columns):
dataToPlot = stats.rename(columns=NAMES)
dates = pd.date_range(start=dataToPlot.date.iloc[0].date(), end=dataToPlot.date.iloc[-1].date())
dataToPlot.set_index(['date'], inplace=True)
dataToPlot = dataToPlot.reindex(dates)
dataToPlot.reset_index(inplace=True)
dataToPlot.rename(columns={'index': 'date'}, inplace=True)
return dataToPlot
def plotDailyStatsSleep(stats, columns=None):
"""
    Plot daily stats. Fill the whole date range and insert NaN for days without measures.
    :param stats: data to plot
"""
MEASURE_NAME = 'date'
if not columns:
columns = ['sleep_inefficiency', 'sleep_hours']
dataToPlot = _prepareDailyStats(stats, columns)
f, axes = getAxes(2,1)
xTicksDiv = min(10, len(dataToPlot))
#xticks = [(x-pd.DateOffset(years=1, day=2)).date() for x in stats.date]
xticks = [x.date() for x in dataToPlot.date]
keptticks = xticks[::int(len(xticks)/xTicksDiv)]
xticks = ['' for _ in xticks]
xticks[::int(len(xticks)/xTicksDiv)] = keptticks
for i, c in enumerate(columns):
g =sns.pointplot(x=MEASURE_NAME, y=NAMES[c], data=dataToPlot, ax=axes[i])
g.set_xticklabels([])
g.set_xlabel('')
g.set_xticklabels(xticks, rotation=45)
    plt.show()
def plotDailyStatsHb(data):
ax = data.groupby(data[hbStats.NAME_DT_COL].dt.date).mean().plot()
#data.groupby(data[hbStats.NAME_DT_COL].dt.date).mean().rolling(30).mean().plot(ax=ax)
    plt.show()
def plotYearMonthStatsHb(data):
#pd.groupby(b,by=[b.index.month,b.index.year])
    data.groupby(pd.Grouper(freq='M')).mean().plot()
    plt.show()
def plotSleepValueHeatmap(intradayStats, sleepValue=1):
sns.set_context("poster")
sns.set_style("darkgrid")
xTicksDiv = 20
#stepSize = int(len(xticks)/xTicksDiv)
stepSize = 60
xticks = [x for x in intradayStats.columns.values]
keptticks = xticks[::stepSize]
xticks = ['' for _ in xticks]
xticks[::stepSize] = keptticks
plt.figure(figsize=(16, 4.2))
    g = sns.heatmap(intradayStats.loc[sleepValue].values.reshape(1, -1))
g.set_xticklabels(xticks, rotation=45)
g.set_yticklabels([])
g.set_ylabel(sleepStats.SLEEP_VALUES[sleepValue])
plt.tight_layout()
    plt.show()
def plotCorrelation(stats):
#columnsToDrop = ['sleep_interval_max_len', 'sleep_interval_min_len',
# 'sleep_interval_avg_len', 'sleep_inefficiency',
# 'sleep_hours', 'total_hours']
#stats = stats.drop(columnsToDrop, axis=1)
g = sns.PairGrid(stats)
def corrfunc(x, y, **kws):
r, p = scipystats.pearsonr(x, y)
ax = plt.gca()
ax.annotate("r = {:.2f}".format(r),xy=(.1, .9), xycoords=ax.transAxes)
ax.annotate("p = {:.2f}".format(p),xy=(.2, .8), xycoords=ax.transAxes)
if p>0.04:
ax.patch.set_alpha(0.1)
g.map_upper(plt.scatter)
g.map_diag(plt.hist)
g.map_lower(sns.kdeplot, cmap="Blues_d")
g.map_upper(corrfunc)
    plt.show()
def getAxes(nrows, ncols):
f, axes = plt.subplots(nrows=nrows, ncols=ncols)
axes = axes.reshape(-1)
return f, axes
def plot(data, columns, measureName, nrows, ncols, order=None):
f, axes = plt.subplots(nrows=nrows, ncols=ncols)
axes = axes.reshape(-1)
for i, c in enumerate(columns):
sns.barplot(x=measureName, y=c, data=data, order=order, ax=axes[i])
    plt.show()
```
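The monthly and weekday plots above all rely on the same reshape step: per-stat columns are melted into long form before being handed to seaborn's faceting. A minimal, self-contained sketch of that `pd.melt` call on an invented two-stat frame (column names and values are illustrative, not taken from the real dataset):
```python
import pandas as pd

# Toy frame: one row per month, one column per stat (hypothetical values).
df = pd.DataFrame({
    "month": [1, 2, 3],
    "sleep_hours": [7.1, 6.8, 7.4],
    "restless": [12, 15, 9],
})

# Reshape to long form, one row per (month, stat) pair, as done in _plotMonthlyStats.
long_df = pd.melt(df, id_vars=["month"], value_vars=["sleep_hours", "restless"],
                  var_name="stats", value_name="val")
print(long_df)
```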
#### File: src/util/scraper.py
```python
import datetime
import json
import sys
import argparse
from pathlib import Path
import fitbit
from src.util import logger
from src.util import gather_keys_oauth2 as Oauth2
def dumpToFile(data_type, dumpDir: Path, date, data):
directory = dumpDir / str(date.year) / str(date)
directory.mkdir(parents=True, exist_ok=True)
with (directory / "{}.json".format(data_type)).open(mode='w') as f:
f.write(json.dumps(data, indent=True))
def previouslyDumped(dumpDir: Path, date):
return (dumpDir / str(date.year) / str(date)).is_dir()
def dumpDay(client, dumpDir: Path, date):
steps_data = client.intraday_time_series('activities/steps', date)
intradayData = steps_data['activities-steps-intraday']['dataset']
if not intradayData:
logger.info("No {} measures for {}. Skipping the rest too".format('steps', date.split('\\')[-1]))
return None
dumpToFile("steps", dumpDir, date, steps_data)
dumpToFile("sleep", dumpDir, date, client.get_sleep(date))
dumpToFile("calories", dumpDir, date, client.intraday_time_series('activities/calories', date))
dumpToFile("distance", dumpDir, date, client.intraday_time_series('activities/distance', date))
dumpToFile("floors", dumpDir, date, client.intraday_time_series('activities/floors', date))
dumpToFile("elevation", dumpDir, date, client.intraday_time_series('activities/elevation', date))
dumpToFile("heartbeat", dumpDir, date, client.intraday_time_series('activities/heart', date))
def scrapeFromDateOnward(startDate, dumpDir: Path, client):
date = datetime.datetime.strptime(startDate, "%Y-%m-%d").date()
todayDate = datetime.date.today()
while previouslyDumped(dumpDir, date):
logger.info("Already scraped {}".format(datetime.datetime.strftime(date, "%Y-%m-%d")))
date += datetime.timedelta(days=1)
date -= datetime.timedelta(days=1)
logger.info("Will RE-Scrape data for {}".format(datetime.datetime.strftime(date, "%Y-%m-%d")))
while date < todayDate:
logger.info("Scraping data for {}".format(datetime.datetime.strftime(date, "%Y-%m-%d")))
dumpDay(client, dumpDir, date)
date += datetime.timedelta(days=1)
def scrapeFromTodayAndBackward(dumpDir: Path, client, limit, stop_if_already_dumped=True):
# dumping
count = 1
date = datetime.date.today()
while count < limit:
if previouslyDumped(dumpDir, date):
logger.info("Already scraped {}".format(date.isoformat()))
if stop_if_already_dumped:
print("Stopping the scraping")
break
date -= datetime.timedelta(days=1)
continue
logger.info("Scraping data for {}".format(date.isoformat()))
dumpDay(client, dumpDir, date)
date -= datetime.timedelta(days=1)
count += 1
dumpDay(client, dumpDir, date)
def main(_=None):
parser = argparse.ArgumentParser(description='Fitbit Scraper')
parser.add_argument('--id', metavar='clientId', dest='clientId', required=True,
help="client-id of your Fitbit app")
parser.add_argument('--secret', metavar='clientSecret', dest='clientSecret', required=True,
help="client-secret of your Fitbit app")
parser.add_argument('--out', metavar='outDir', dest='outDir', required=True,
help="output data destination folder")
parser.add_argument('--start', dest='startDate', default='2016-01-01',
help="Date from which to start the forward scraping. Defaults to 2016-01-01")
#parser.add_argument('--limit', type=int, dest='limit', default=400,
# help="maximum number of days to scrape")
args = parser.parse_args()
clientId = args.clientId
clientSecret = args.clientSecret
dumpDir = Path(args.outDir)
startDate = args.startDate
#limit = args.limit
server = Oauth2.OAuth2Server(clientId, clientSecret)
server.browser_authorize()
ACCESS_TOKEN = server.oauth.session.token['access_token']
REFRESH_TOKEN = server.oauth.session.token['refresh_token']
client = fitbit.Fitbit(clientId, clientSecret, oauth2=True,
access_token=ACCESS_TOKEN, refresh_token=REFRESH_TOKEN)
scrapeFromDateOnward(startDate, dumpDir, client)
if __name__ == "__main__":
main(sys.argv[1:])
``` |
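A rough, standalone sketch of the date-walking logic behind `scrapeFromDateOnward`, with the Fitbit client stripped out so it runs on its own. The dump directory layout `<out>/<year>/<date>` mirrors `previouslyDumped`; the folder name below is hypothetical.
```python
import datetime
from pathlib import Path

def already_dumped(dump_dir: Path, date: datetime.date) -> bool:
    # A day counts as scraped if its <year>/<date> folder exists.
    return (dump_dir / str(date.year) / str(date)).is_dir()

dump_dir = Path("fitbit_dumps")  # hypothetical output folder
date = datetime.date(2016, 1, 1)
today = datetime.date.today()

# Skip days already scraped, then step back one day so the last dump is re-scraped.
while already_dumped(dump_dir, date):
    date += datetime.timedelta(days=1)
date -= datetime.timedelta(days=1)

while date < today:
    # dumpDay(client, dump_dir, date) would run here
    date += datetime.timedelta(days=1)
```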
{
"source": "5agado/intro-ai",
"score": 3
} |
#### File: src/test/basicTest.py
```python
from neural_network.perceptron import Perceptron
from neural_network.neural_net import NeuralNet
from util import utils
import os
from bitstring import BitArray
import random
from genetic_algorithm.population import Population
def testPerceptron():
p = Perceptron(2)
#p.useStepFunction = True
p.t_sessions = 2000
t_model = utils.readTrainModel(os.path.join(utils.getResourcesPath(), 'logic_gates/NAND.txt'))
p.learn(t_model)
print(p.getOutput([0, 0]))
print(p.getOutput([0, 1]))
print(p.getOutput([1, 0]))
print(p.getOutput([1, 1]))
def testNeuralNet():
net = NeuralNet(2, 2, 1)
net.t_sessions = 20000
t_model = utils.readTrainModel(os.path.join(utils.getResourcesPath(), 'logic_gates/XOR.txt'))
net.learn(t_model)
print(net.getOutputs([0, 0]))
print(net.getOutputs([0, 1]))
print(net.getOutputs([1, 0]))
print(net.getOutputs([1, 1]))
def numberRecognition():
net = NeuralNet(15, 2, 1)
t_model = utils.readTrainModel(os.path.join(utils.getResourcesPath(), 'number_grids.txt'))
net.learn(t_model)
print(net.getOutputs([0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0]))
print(net.getOutputs([1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1]))
print(net.getOutputs([1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1]))
print(net.getOutputs([1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1]))
print(net.getOutputs([1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1]))
print(net.getOutputs([1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0]))
print(net.getOutputs([1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]))
def initPopulation(p):
for i in range(len(p.chromos)):
for j in range(p.chromoSize):
p.chromos[i].genes[j] = (
BitArray(int=random.randint(-200000, 200000), length=21))
def evolve(p, net, t_model):
for _ in range(300):
for chromo in p.chromos:
genes = [(a.int)/100000.0 for a in chromo.genes]
#print(genes)
net.setWeights(genes)
perf = net.getError(t_model)
chromo.fitness = 1/perf
#print(perf)
#print(p.getTotalFitness())
p.newGeneration()
def testNeuralNetWithGA():
net = NeuralNet(2, 2, 1)
t_model = utils.readTrainModel(os.path.join(utils.getResourcesPath(), 'logic_gates/NAND.txt'))
Population.initPopulation = initPopulation
Population.evolve = evolve
p = Population(70, 9)
p.initPopulation()
p.evolve(net, t_model)
print(net.getOutputs([0, 0]))
print(net.getOutputs([0, 1]))
print(net.getOutputs([1, 0]))
print(net.getOutputs([1, 1]))
print(net.getError(t_model))
#testPerceptron()
#testNeuralNet()
#testNeuralNetWithGA()
#numberRecognition()
```
#### File: src/test/circleFitting.py
```python
import pygame
import random
import sys
import math
from genetic_algorithm.population import Population
from bitstring import BitArray
'''
Idea from http://www.ai-junkie.com/:
given an area that has a number of non-overlapping circles scattered about its surface,
find the disk of largest radius which may be placed amongst these disks without
overlapping any of them.
'''
d_width = 640
d_height = 480
min_r = 5 #minimal radius
max_r = 50
def intersect(c1, c2):
x = math.pow(c1[0][0] - c2[0][0], 2)
y = math.pow(c1[0][1] - c2[0][1], 2)
res = math.sqrt(x+y)
    return res < (c1[1] + c2[1]) or res < math.fabs(c1[1] - c2[1])
def isValid(c):
if c[1] < min_r:
return False
if (c[0][0] + c[1] > d_width) or (c[0][0] - c[1] < 0):
return False
if (c[0][1] + c[1] > d_height) or (c[0][1] - c[1] < 0):
return False
    return True
def getCircles():
numCircles = 10
circles = []
fail = False
while (len(circles) < numCircles):
x = random.randint(min_r, d_width - min_r)
y = random.randint(min_r, d_height - min_r)
if fail:
r = random.randint(min_r, max_r//2)
else:
r = random.randint(min_r, max_r)
new = [(x, y), r]
valid = True
for circle in circles:
if intersect(circle, new):
valid = False
break
if valid and isValid(new):
circles.append(new)
fail = False
else:
fail = True
return circles
def initPopulation(p):
uint_l = 10 #uint bitarray representation length
for i in range(len(p.chromos)):
p.chromos[i].genes[0] = (BitArray(uint=random.randint(min_r, d_width - min_r), length=uint_l))
p.chromos[i].genes[1] = (BitArray(uint=random.randint(min_r, d_width - min_r), length=uint_l))
p.chromos[i].genes[2] = (BitArray(uint=random.randint(min_r, max_r), length=uint_l))
def evolve(p, window, circles):
for _ in range(200):
for i in range(len(p.chromos)):
c = [(p.chromos[i].genes[0].uint, p.chromos[i].genes[1].uint), p.chromos[i].genes[2].uint]
valid = True
for circle in circles:
if intersect(circle, c):
valid = False
break
if valid and isValid(c):
p.chromos[i].fitness = p.chromos[i].genes[2].uint
else:
p.chromos[i].fitness = 0
#print(p.chromos[i].fitness)
best = p.getBestIndividuals()[0]
c = [(best.genes[0].uint, best.genes[1].uint), best.genes[2].uint]
#print(c)
window.fill((0, 0, 0))
for circle in circles:
pygame.draw.circle(window, (255, 255, 255), circle[0], circle[1], 1)
pygame.draw.circle(window, (255, 0, 0), c[0], c[1])
p.newGeneration()
pygame.display.update()
pygame.time.delay(10)
def main():
pygame.init()
window = pygame.display.set_mode((d_width, d_height))
circles = getCircles()
for circle in circles:
pygame.draw.circle(window, (255, 255, 255), circle[0], circle[1], 1)
Population.initPopulation = initPopulation
Population.evolve = evolve
p = Population(80, 3)
p.initPopulation()
p.evolve(window, circles)
#pygame.display.flip()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit(0)
main()
```
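The GA fitness test above reduces to plain circle geometry: two circles overlap when the distance between their centres is less than the sum of their radii, or when one lies inside the other. A standalone check using the same `[(x, y), radius]` representation as the script:
```python
import math

def circles_overlap(c1, c2):
    # c1 and c2 are [(x, y), radius]
    dist = math.hypot(c1[0][0] - c2[0][0], c1[0][1] - c2[0][1])
    return dist < c1[1] + c2[1] or dist < abs(c1[1] - c2[1])

print(circles_overlap([(0, 0), 10], [(15, 0), 10]))  # True: centres 15 apart, radii sum to 20
print(circles_overlap([(0, 0), 10], [(30, 0), 10]))  # False: too far apart
```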
#### File: src/test/documentClassification.py
```python
from util import utils
import os
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import HashingVectorizer, TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
N = None #the number of docs to be categorized.
T = None #the number of training examples.
def readData(filename):
filePath = os.path.join(utils.getResourcesPath(), filename)
f = open(filePath, 'r')
global N, T
N = int(f.readline().strip())
docs = []
for _ in range(0, N):
docs.append(f.readline().strip())
#training data
filePath = os.path.join(utils.getResourcesPath(), 'hackerrank/trainingdata.txt')
f = open(filePath, 'r')
T = int(f.readline().strip())
t_docs = []
t_docsCategories = []
for _ in range(0, T):
cat, doc = f.readline().strip().split(' ', 1)
t_docsCategories.append(cat)
t_docs.append(doc)
return docs, t_docs, t_docsCategories
data = readData('hackerrank/documentClassification.txt')
X_train = np.array(data[1])
y_train = np.array(data[2])
X_test = np.array(data[0])
print("Extracting features from the training dataset using a sparse vectorizer")
#vectorizer = HashingVectorizer(stop_words='english', non_negative=True)
vectorizer = TfidfVectorizer(min_df=2,
ngram_range=(1, 2),
stop_words='english',
strip_accents='unicode',
norm='l2')
X_train = vectorizer.fit_transform(X_train)
#vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
# stop_words='english')
#X2_train = vectorizer.fit_transform(data_train.data)
X_test = vectorizer.transform(X_test)
nb_classifier = MultinomialNB().fit(X_train, y_train)
svm_classifier = LinearSVC().fit(X_train, y_train)
maxent_classifier = LogisticRegression().fit(X_train, y_train)
y_nb_predicted = nb_classifier.predict(X_test)
print(y_nb_predicted)
y_nb_predicted = svm_classifier.predict(X_test)
print(y_nb_predicted)
y_nb_predicted = maxent_classifier.predict(X_test)
print(y_nb_predicted)
```
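A minimal, self-contained version of the same vectorize-then-classify pipeline on invented toy data (documents and labels are made up), which may help when the HackerRank input files are not at hand:
```python
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB

train_docs = ["cheap flights and hotel deals",
              "election results and parliament vote",
              "hotel booking discount",
              "government passes new law"]
train_labels = ["travel", "politics", "travel", "politics"]

vectorizer = TfidfVectorizer(stop_words="english")
X_train = vectorizer.fit_transform(train_docs)

clf = MultinomialNB().fit(X_train, train_labels)
print(clf.predict(vectorizer.transform(["flight and hotel package"])))  # ['travel']
```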
#### File: src/test/mazeOfPacman.py
```python
import os
import random
import pygame
import sys
from util import utils
from time import sleep
square_l = 10
pacman_r, pacman_c = None, None
food_r, food_c = None, None
r,c = None, None
visited = []
fPath = []
def dfs(grid, start, end, path = []):
#print(start)
global visited
global fPath
fPath.append(start)
stack = list()
path = path + [start]
if start == end:
#path.append(len(path))
return path
up = (start[0]-1, start[1])
left = (start[0], start[1]-1)
right = (start[0], start[1]+1)
down = (start[0]+1, start[1])
if (grid[up[0]][up[1]] != '%') and (not up in visited):
stack.append(up)
visited.append(up)
if (grid[left[0]][left[1]] != '%') and (not left in visited):
stack.append(left)
visited.append(left)
if (grid[right[0]][right[1]] != '%') and (not right in visited):
stack.append(right)
visited.append(right)
if (grid[down[0]][down[1]] != '%') and (not down in visited):
stack.append(down)
visited.append(down)
while len(stack) > 0:
node = stack.pop()
newpath = dfs(grid, node, end, path)
if newpath:
return newpath
return None
def bfs(grid, start, end, path = []):
#print(start)
global visited
global fPath
stack = list()
stack.append(start)
while len(stack) > 0:
node = stack.pop(0)
fPath.append((node[0], node[1]))
if (node[0], node[1]) == end:
while True:
path.insert(0, node)
node = node[2]
#print(node)
if ((node[0], node[1]) == start):
path.insert(0, start)
return path
up = (node[0]-1, node[1])
left = (node[0], node[1]-1)
right = (node[0], node[1]+1)
down = (node[0]+1, node[1])
if (grid[up[0]][up[1]] != '%') and (not up in visited):
stack.append((up[0], up[1], node))
visited.append(up)
if (grid[left[0]][left[1]] != '%') and (not left in visited):
stack.append((left[0], left[1], node))
visited.append(left)
if (grid[right[0]][right[1]] != '%') and (not right in visited):
stack.append((right[0], right[1], node))
visited.append(right)
if (grid[down[0]][down[1]] != '%') and (not down in visited):
stack.append((down[0], down[1], node))
visited.append(down)
return None
def astar(grid, start, end):
frontier = list()
costs = {}
explored = list()
path = {}
frontier.append(start)
costs[start] = manDistance(start, end)
while len(frontier) > 0:
#take cheapest one. Implement with priority queue
index = 0
minv = costs[frontier[index]]
for i in range(len(frontier)):
if costs[frontier[i]] < minv:
minv = costs[frontier[i]]
index = i
node = frontier.pop(index)
if node == end:
respath = [node]
while True:
respath.insert(0, path[node])
node = path[node]
if node == start:
return respath
explored.append(node)
stack = []
up = (node[0]-1, node[1])
left = (node[0], node[1]-1)
right = (node[0], node[1]+1)
down = (node[0]+1, node[1])
if (grid[up[0]][up[1]] != '%'):
cost = 1 if (grid[up[0]][up[1]] == '-') else 0
stack.append(((up[0], up[1]), cost))
if (grid[left[0]][left[1]] != '%'):
cost = 1 if (grid[left[0]][left[1]] == '-') else 0
stack.append(((left[0], left[1]), cost))
if (grid[right[0]][right[1]] != '%'):
cost = 1 if (grid[right[0]][right[1]] == '-') else 0
stack.append(((right[0], right[1]), cost))
if (grid[down[0]][down[1]] != '%'):
cost = 1 if (grid[down[0]][down[1]] == '-') else 0
stack.append(((down[0], down[1]), cost))
for child in stack:
if not child[0] in explored or not child[0] in frontier:
path[child[0]] = node
frontier.append(child[0])
costs[child[0]] = (costs[node] + child[1] +
abs(manDistance(child[0], end) - manDistance(node, end)))
elif costs[child[0]] > (costs[node] + child[1] + abs(manDistance(child[0], end) - manDistance(node, end))):
path[child[0]] = node
costs[child[0]] = costs[node] + child[1] + abs(manDistance(child[0], end) - manDistance(node, end))
return None
def manDistance(node, goal):
x = abs(node[0] - goal[0])
y = abs(node[1] - goal[1])
return x+y
def readMaze(filename):
global pacman_r, pacman_c
global food_r, food_c
global r,c
filePath = os.path.join(utils.getResourcesPath(), filename)
f = open(filePath, 'r')
pacman_r, pacman_c = [ int(i) for i in f.readline().strip().split() ]
food_r, food_c = [ int(i) for i in f.readline().strip().split() ]
r,c = [ int(i) for i in f.readline().strip().split() ]
grid = []
for i in range(0, r):
grid.append(f.readline().strip())
return grid
def renderMaze(window, m, pos):
window.fill((255, 255, 255))
global r, c
for y in range(r):
for x in range(c):
if m[y][x] == '%':
box = pygame.Rect(x*square_l, y*square_l, square_l, square_l)
pygame.draw.rect(window, (0, 0, 0), box, 0)
box = pygame.Rect(pos[1]*square_l, pos[0]*square_l, square_l, square_l)
pygame.draw.rect(window, (255, 0, 0), box, 0)
pygame.display.update()
pygame.time.delay(10)
def main():
#pygame.init()
    maze = readMaze('hackerrank/pacman.txt')
#window = pygame.display.set_mode((c * square_l, r * square_l))
#visited.append((pacman_r, pacman_c))
res = dfs(maze, (pacman_r, pacman_c), (food_r, food_c))
print(len(res))
#renderMaze(window, maze, (pacman_c, pacman_r))
for line in res:
#renderMaze(window, maze, line)
print(line[0], line[1])
#sleep(0.5)
# print(len(fPath))
# for line in fPath:
# #renderMaze(window, maze, line)
# print(line[0], line[1])
# #sleep(0.5)
# while True:
# for event in pygame.event.get():
# if event.type == pygame.QUIT:
# sys.exit(0)
main()
```
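The A* variant above orders its frontier by `manDistance`, the Manhattan distance between grid cells. A tiny standalone check of that heuristic, with (row, col) coordinates as in the maze code and made-up positions:
```python
def man_distance(node, goal):
    # Sum of absolute row and column differences.
    return abs(node[0] - goal[0]) + abs(node[1] - goal[1])

# Pacman at row 3, col 9; food at row 5, col 1 -> 2 + 8 = 10 steps at minimum.
print(man_distance((3, 9), (5, 1)))  # 10
```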
#### File: src/test/nPuzzle.py
```python
from util import utils
import os
from queue import PriorityQueue
k = None
def readGrid(filename):
filePath = os.path.join(utils.getResourcesPath(), filename)
f = open(filePath, 'r')
global k
k = int(f.readline().strip())
grid = []
for i in range(0, k):
grid.append([])
for _ in range(0, k):
grid[i].append(f.readline().strip())
return grid
def getValidNextConfigs(grid):
k = len(grid)
validNextConfigs = []
moves = []
for y in range(0, k):
if '0' in grid[y]:
x = grid[y].index('0')
if y != 0:
validNextConfigs.append(getResConfig(grid, 'UP'))
moves.append('UP')
if y != k-1:
validNextConfigs.append(getResConfig(grid, 'DOWN'))
moves.append('DOWN')
if x != 0:
validNextConfigs.append(getResConfig(grid, 'LEFT'))
moves.append('LEFT')
if x != k-1:
validNextConfigs.append(getResConfig(grid, 'RIGHT'))
moves.append('RIGHT')
return validNextConfigs, moves
def getResConfig(grid, move = 'UP'):
k = len(grid)
x = None
y = None
resConfig = []
for i in range(0, k):
resConfig.append([])
if '0' in grid[i]:
y = i
x = grid[y].index('0')
for j in range(0, k):
resConfig[i].append(grid[i][j])
if move == 'UP':
resConfig[y][x] = resConfig[y-1][x]
resConfig[y-1][x] = '0'
elif move == 'DOWN':
resConfig[y][x] = resConfig[y+1][x]
resConfig[y+1][x] = '0'
elif move == 'LEFT':
resConfig[y][x] = resConfig[y][x-1]
resConfig[y][x-1] = '0'
elif move == 'RIGHT':
resConfig[y][x] = resConfig[y][x+1]
resConfig[y][x+1] = '0'
return resConfig
#hFunction = ['misplaced', 'manhattan']
def getHeuristicCost(grid, hFunction = 'manhattan'):
k = len(grid)
cost = 0
for i in range(0, k):
for j in range(0, k):
if grid[i][j] != '0' and grid[i][j] != str(j + (k*i)):
if (hFunction == 'misplaced'):
cost += 1
elif (hFunction == 'manhattan'):
value = int(grid[i][j])
cost += abs(value//k - i) + abs(value%k - j)
return cost
def ucs(grid, start, end):
frontier = PriorityQueue()
costs = {}
explored = list()
path = {}
moves = {}
costs[start] = getHeuristicCost(grid)
frontier.put((costs[start], start))
while not frontier.empty():
conf = frontier.get()[1]
#found solution. Building path via backward visit
gridConf = stringToGrid(conf, k)
if getHeuristicCost(gridConf) == 0:
resmove = []
respath = [conf]
while True:
resmove.insert(0, moves[conf])
respath.insert(0, path[conf])
conf = path[conf]
if conf == start:
return resmove
explored.append(conf)
validNextConfigs, nextMoves = getValidNextConfigs(gridConf)
for i in range(len(validNextConfigs)):
nextConf = validNextConfigs[i]
move = gridToString(nextConf)
if not move in explored: #or not move in frontier:
path[move] = conf
moves[move] = nextMoves[i]
costs[move] = costs[conf] + (getHeuristicCost(nextConf) - getHeuristicCost(gridConf)) + 1
frontier.put((costs[move], move))
elif ((costs[move] > (costs[conf] + (getHeuristicCost(nextConf) - getHeuristicCost(gridConf)) + 1))):
path[move] = conf
moves[move] = nextMoves[i]
costs[move] = (costs[conf] + (getHeuristicCost(nextConf) - getHeuristicCost(gridConf)) + 1)
return None
def stringToGrid(value, k):
grid = []
for i in range(0, k):
grid.append([])
for j in range(0, k):
grid[i].append(value[i*k + j])
return grid
def gridToString(grid):
k = len(grid)
value = ''
for i in range(0, k):
for j in range(0, k):
value += grid[i][j]
return value
grid = readGrid("hackerrank/8Puzzle.txt")
end = readGrid("hackerrank/8PuzzleEnd.txt")
print('-'*10)
path = ucs(grid, gridToString(grid), gridToString(end))
print(len(path))
# for res in path:
# grid = stringToGrid(res, k)
# for row in grid:
# print(row)
# print('-'*10)
```
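The two heuristics handled by `getHeuristicCost` can be checked by hand on a small board. In the goal layout assumed by the script, tile `j + k*i` sits at row `i`, column `j`, with `'0'` as the blank. A standalone sketch for a 3x3 grid that is one move away from the goal:
```python
def heuristic(grid, kind="manhattan"):
    k = len(grid)
    cost = 0
    for i in range(k):
        for j in range(k):
            v = grid[i][j]
            if v != '0' and v != str(j + k * i):
                if kind == "misplaced":
                    cost += 1
                else:  # manhattan
                    value = int(v)
                    cost += abs(value // k - i) + abs(value % k - j)
    return cost

# Goal is ['0','1','2'], ['3','4','5'], ['6','7','8']; here tiles '0' and '1' are swapped.
grid = [['1', '0', '2'], ['3', '4', '5'], ['6', '7', '8']]
print(heuristic(grid, "misplaced"))  # 1: only tile '1' is out of place (the blank is ignored)
print(heuristic(grid, "manhattan"))  # 1: tile '1' is one column from its goal cell
```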
#### File: src/util/utils.py
```python
import os
import math
def dotProduct(v1, v2):
return sum(x * y for x, y in zip(v1, v2))
def sigmoid(x):
return 1.0 / (1.0 + math.exp(-x))
def getResourcesPath():
return os.path.abspath(os.path.join(os.path.dirname( __file__ ), os.pardir, 'resources'))
def readTrainModel(filePath, numOutputs = 1):
f = open(filePath, 'r')
res = []
for line in f:
sLine = list(map(float, line.strip().split(" ")))
res.append(((sLine[:-numOutputs]), sLine[-numOutputs:]))
return res
def readMatrix(filePath):
f = open(filePath, 'r')
res = []
for line in f:
res.append(list(map(float, line.strip().split(" "))))
return res
``` |
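A quick sanity check of the two numeric helpers above, re-declared here so the snippet runs on its own (inputs chosen by hand):
```python
import math

def dot_product(v1, v2):
    return sum(x * y for x, y in zip(v1, v2))

def sigmoid(x):
    return 1.0 / (1.0 + math.exp(-x))

print(dot_product([1, 2, 3], [4, 5, 6]))  # 4 + 10 + 18 = 32
print(sigmoid(0))                         # 0.5
```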
{
"source": "5AGE-zhang/TocoDecoy",
"score": 2
} |
#### File: 0_molecular_generation/ddc_pub/ddc_v3_unbiased.py
```python
import os
os.environ[
"TF_CPP_MIN_LOG_LEVEL"
] = "3" # Suppress UserWarning of TensorFlow while loading the model
import numpy as np
from datetime import datetime
from functools import wraps
import shutil, zipfile, tempfile, pickle
from tensorflow.keras.layers import (
Input,
Concatenate,
Dense,
TimeDistributed,
BatchNormalization,
)
from tensorflow.compat.v1.keras.layers import (
CuDNNLSTM as LSTM,
)
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ReduceLROnPlateau, LearningRateScheduler
from tensorflow.keras.utils import multi_gpu_model, plot_model
# Custom dependencies
from molvecgen import SmilesVectorizer
from ddc_pub.generators import SmilesGenerator2
from ddc_pub.custom_callbacks import ModelAndHistoryCheckpoint, LearningRateSchedule
def timed(func):
"""
Timer decorator to benchmark functions.
"""
@wraps(func)
def wrapper(*args, **kwargs):
tstart = datetime.now()
result = func(*args, **kwargs)
elapsed = (datetime.now() - tstart).microseconds / 1e6
print("Elapsed time: %.3f seconds." % elapsed)
return result
return wrapper
class DDC:
def __init__(self, **kwargs):
"""
# Arguments
kwargs:
x : model input - np.ndarray of np.bytes_ or np.float64
y : model output - np.ndarray of np.bytes_
model_name : model filename to load - string
dataset_info : dataset information including name, maxlen and charset - hdf5
noise_std : standard deviation of the noise layer in the latent space - float
lstm_dim : size of LSTM RNN layers - int
dec_layers : number of decoder layers - int
td_dense_dim : size of TD Dense layers inbetween the LSTM ones
to suppress network size - int
batch_size : the network's batch size - int
codelayer_dim: dimensionality of the latent space or number of descriptors - int
# Examples of __init__ usage
To *train* a blank model with encoder (autoencoder):
model = ddc.DDC(x = mols,
y = mols,
dataset_info = info,
noise_std = 0.1,
lstm_dim = 256,
dec_layers = 3,
td_dense_dim = 0,
batch_size = 128,
codelayer_dim = 128)
To *train* a blank model without encoder:
model = ddc.DDC(x = descriptors,
y = mols,
dataset_info = info,
noise_std = 0.1,
lstm_dim = 256,
dec_layers = 3,
td_dense_dim = 0,
batch_size = 128)
To *re-train* a saved model with encoder (autoencoder):
model = ddc.DDC(x = mols,
y = mols,
model_name = saved_model_name)
To *re-train* a saved model without encoder:
model = ddc.DDC(x = descriptors,
y = mols,
model_name = saved_model_name)
To *test* a saved model:
model = ddc.DDC(model_name = saved_model_name)
"""
# Identify the mode to start the model in
if "x" in kwargs:
x = kwargs.get("x")
y = kwargs.get("y")
if "model_name" not in kwargs:
self.__mode = "train"
else:
self.__mode = "retrain"
elif "model_name" in kwargs:
self.__mode = "test"
else:
raise NameError("Cannot infer mode from arguments.")
print("Initializing model in %s mode." % self.__mode)
if self.mode == "train":
# Infer input type from type(x)
if type(x[0]) == np.bytes_:
print("Input type is 'binary mols'.")
self.__input_type = "mols" # binary RDKit mols
else:
print("Check input type.")
self.__input_type = "other" # other molecular descriptors
self.__maxlen = (
kwargs.get("dataset_info")["maxlen"] + 10
) # Extend maxlen to avoid breaks in training
self.__charset = kwargs.get("dataset_info")["charset"]
self.__dataset_name = kwargs.get("dataset_info")["name"]
self.__lstm_dim = kwargs.get("lstm_dim", 256)
self.__h_activation = kwargs.get("h_activation", "relu")
self.__bn = kwargs.get("bn", True)
self.__bn_momentum = kwargs.get("bn_momentum", 0.9)
self.__noise_std = kwargs.get("noise_std", 0.01)
self.__td_dense_dim = kwargs.get(
"td_dense_dim", 0
) # >0 squeezes RNN connections with Dense sandwiches
self.__batch_size = kwargs.get("batch_size", 256)
self.__dec_layers = kwargs.get("dec_layers", 2)
self.__codelayer_dim = kwargs.get("codelayer_dim", 128)
# Create the left/right-padding vectorizers
self.__smilesvec1 = SmilesVectorizer(
canonical=False,
augment=True,
maxlength=self.maxlen,
charset=self.charset,
binary=True,
)
self.__smilesvec2 = SmilesVectorizer(
canonical=False,
augment=True,
maxlength=self.maxlen,
charset=self.charset,
binary=True,
leftpad=False,
)
# self.train_gen.next() #This line is needed to set train_gen.dims (to be fixed in HetSmilesGenerator)
self.__input_shape = self.smilesvec1.dims
self.__dec_dims = list(self.smilesvec1.dims)
self.__dec_dims[0] = self.dec_dims[0] - 1
self.__dec_input_shape = self.dec_dims
self.__output_len = self.smilesvec1.dims[0] - 1
self.__output_dims = self.smilesvec1.dims[-1]
# Build data generators
self.__build_generators(x)
# Build full model out of the sub-models
self.__build_model()
# Retrain or Test mode
else:
self.__model_name = kwargs.get("model_name")
# Load the model
self.__load(self.model_name)
if self.mode == "retrain":
# Build data generators
self.__build_generators(x)
# Show the resulting full model
print(self.model.summary())
"""
Architecture properties.
"""
@property
def lstm_dim(self):
return self.__lstm_dim
@property
def h_activation(self):
return self.__h_activation
@property
def bn(self):
return self.__bn
@property
def bn_momentum(self):
return self.__bn_momentum
@property
def noise_std(self):
return self.__noise_std
@property
def td_dense_dim(self):
return self.__td_dense_dim
@property
def batch_size(self):
return self.__batch_size
@property
def dec_layers(self):
return self.__dec_layers
@property
def codelayer_dim(self):
return self.__codelayer_dim
@property
def steps_per_epoch(self):
return self.__steps_per_epoch
@property
def validation_steps(self):
return self.__validation_steps
@property
def input_shape(self):
return self.__input_shape
@property
def dec_dims(self):
return self.__dec_dims
@property
def dec_input_shape(self):
return self.__dec_input_shape
@property
def output_len(self):
return self.__output_len
@property
def output_dims(self):
return self.__output_dims
@property
def batch_input_length(self):
return self.__batch_input_length
#<EMAIL>
#def batch_input_length(self, value):
# self.__batch_input_length = value
# self.__build_sample_model(batch_input_length=value)
"""
Models.
"""
@property
def sample_model(self):
return self.__sample_model
@property
def multi_sample_model(self):
return self.__multi_sample_model
@property
def model(self):
return self.__model
"""
Train properties.
"""
@property
def epochs(self):
return self.__epochs
@property
def clipvalue(self):
return self.__clipvalue
@property
def lr(self):
return self.__lr
@property
def h(self):
return self.__h
"""
Other properties.
"""
@property
def mode(self):
return self.__mode
@property
def dataset_name(self):
return self.__dataset_name
@property
def model_name(self):
return self.__model_name
@property
def input_type(self):
return self.__input_type
@property
def maxlen(self):
return self.__maxlen
@property
def charset(self):
return self.__charset
@property
def smilesvec1(self):
return self.__smilesvec1
@property
def smilesvec2(self):
return self.__smilesvec2
@property
def train_gen(self):
return self.__train_gen
@property
def valid_gen(self):
return self.__valid_gen
"""
Private methods.
"""
def __build_generators(self, x, split=0.81050343):
"""
Build data generators to be used in (re)training.
"""
# Split dataset into train and validation sets
cut = int(split * len(x))
x_train = x[:cut]
x_valid = x[cut:]
self.__train_gen = SmilesGenerator2(
x_train,
None,
self.smilesvec1,
self.smilesvec2,
batch_size=self.batch_size,
shuffle=True,
)
self.__valid_gen = SmilesGenerator2(
x_valid,
None,
self.smilesvec1,
self.smilesvec2,
batch_size=self.batch_size,
shuffle=True,
)
# Calculate number of batches per training/validation epoch
train_samples = len(x_train)
valid_samples = len(x_valid)
self.__steps_per_epoch = train_samples // self.batch_size
self.__validation_steps = valid_samples // self.batch_size
print(
"Model received %d train samples and %d validation samples."
% (train_samples, valid_samples)
)
def __build_model(self):
"""
RNN that generates random SMILES strings.
"""
# This is the start character padded OHE smiles for teacher forcing
decoder_inputs = Input(shape=self.dec_input_shape, name="Decoder_Inputs")
# I/O tensor of the LSTM layers
x = decoder_inputs
for dec_layer in range(self.dec_layers):
# RNN layer
decoder_lstm = LSTM(
self.lstm_dim,
return_sequences=True,
name="Decoder_LSTM_" + str(dec_layer),
)
x = decoder_lstm(x)
if self.bn:
x = BatchNormalization(
momentum=self.bn_momentum, name="BN_Decoder_" + str(dec_layer)
)(x)
# Squeeze LSTM interconnections using Dense layers
if self.td_dense_dim > 0:
x = TimeDistributed(
Dense(self.td_dense_dim), name="Time_Distributed_" + str(dec_layer)
)(x)
# Final Dense layer to return soft labels (probabilities)
outputs = Dense(self.output_dims, activation="softmax", name="Dense_Decoder")(x)
# Define the batch_model
self.__model = Model(inputs=[decoder_inputs], outputs=[outputs])
# Name it!
self.__model._name = "model"
def __build_sample_model(self, batch_input_length) -> dict:
"""
Model that predicts a single OHE character.
This model is generated from the modified config file of the self.batch_model.
Returns:
The dictionary of the configuration.
"""
self.__batch_input_length = batch_input_length
# Get the configuration of the batch_model
config = self.model.get_config()
# Keep only the "Decoder_Inputs" as single input to the sample_model
config["input_layers"] = [config["input_layers"][0]]
# Find decoder states that are used as inputs in batch_model and remove them
idx_list = []
for idx, layer in enumerate(config["layers"]):
if "Decoder_State_" in layer["name"]:
idx_list.append(idx)
# Pop the layer from the layer list
# Revert indices to avoid re-arranging after deleting elements
for idx in sorted(idx_list, reverse=True):
config["layers"].pop(idx)
# Remove inbound_nodes dependencies of remaining layers on deleted ones
for layer in config["layers"]:
idx_list = []
try:
for idx, inbound_node in enumerate(layer["inbound_nodes"][0]):
if "Decoder_State_" in inbound_node[0]:
idx_list.append(idx)
# Catch the exception for first layer (Decoder_Inputs) that has empty list of inbound_nodes[0]
except:
pass
# Pop the inbound_nodes from the list
# Revert indices to avoid re-arranging
for idx in sorted(idx_list, reverse=True):
layer["inbound_nodes"][0].pop(idx)
# Change the batch_shape of input layer
config["layers"][0]["config"]["batch_input_shape"] = (
batch_input_length,
1,
self.dec_input_shape[-1],
)
# Finally, change the statefulness of the RNN layers
for layer in config["layers"]:
if "Decoder_LSTM_" in layer["name"]:
layer["config"]["stateful"] = True
# layer["config"]["return_sequences"] = True
# Define the sample_model using the modified config file
sample_model = Model.from_config(config)
# Copy the trained weights from the trained batch_model to the untrained sample_model
for layer in sample_model.layers:
# Get weights from the batch_model
weights = self.model.get_layer(layer.name).get_weights()
# Set the weights to the sample_model
sample_model.get_layer(layer.name).set_weights(weights)
if batch_input_length == 1:
self.__sample_model = sample_model
elif batch_input_length > 1:
self.__multi_sample_model = sample_model
return config
def __load(self, model_name):
"""
Load complete model from a zip file.
To be called within __init__.
"""
print("Loading model.")
tstart = datetime.now()
# Temporary directory to extract the zipped information
with tempfile.TemporaryDirectory() as dirpath:
# Unzip the directory that contains the saved model(s)
with zipfile.ZipFile(model_name + ".zip", "r") as zip_ref:
zip_ref.extractall(dirpath)
# Load metadata
metadata = pickle.load(open(dirpath + "/metadata.pickle", "rb"))
# Re-load metadata
self.__dict__.update(metadata)
# Load the model
self.__model = load_model(dirpath + "/model.h5")
# Build sample_model out of the trained batch_model
self.__build_sample_model(batch_input_length=1) # Single-output model
self.__build_sample_model(
batch_input_length=256
) # Multi-output model
print("Loading finished in %i seconds." % ((datetime.now() - tstart).seconds))
"""
Public methods.
"""
def fit(
self,
model_name,
epochs,
lr,
mini_epochs,
patience,
gpus=1,
workers=1,
use_multiprocessing=False,
verbose=2,
max_queue_size=10,
clipvalue=0,
save_period=5,
checkpoint_dir="/",
lr_decay=False,
lr_warmup=False,
sch_epoch_to_start=500,
sch_last_epoch=999,
sch_lr_init=1e-3,
sch_lr_final=1e-6,
):
"""
Fit the full model to the training data.
Supports multi-gpu training if gpus set to >1.
# Arguments
kwargs:
model_name : base name for the checkpoints - string
epochs : number of epochs to train in total - int
lr : initial learning rate of the training - float
mini_epochs : number of dividends of an epoch (==1 means no mini_epochs) - int
patience : minimum consecutive mini_epochs of stagnated learning rate to consider
before lowering it - int
gpus : number of gpus to use for multi-gpu training (==1 means single gpu) - int
workers : number of CPU workers - int
use_multiprocessing: flag for Keras multiprocessing - boolean
verbose : verbosity of the training - int
max_queue_size : max size of the generator queue - int
clipvalue : value of gradient clipping - float
save_period : mini_epochs every which to checkpoint the model - int
checkpoint_dir : directory to store the checkpoints - string
lr_decay : flag to use exponential decay of learning rate - boolean
lr_warmup : flag to use warmup for transfer learning - boolean
"""
# Get parameter values if specified
self.__epochs = epochs
self.__lr = lr
self.__clipvalue = clipvalue
# Optimizer
if clipvalue > 0:
print("Using gradient clipping %.2f." % clipvalue)
opt = Adam(lr=self.lr, clipvalue=self.clipvalue)
else:
opt = Adam(lr=self.lr)
checkpoint_file = (
checkpoint_dir + "%s--{epoch:02d}--{val_loss:.4f}--{lr:.7f}" % model_name
)
# If model is untrained, history is blank
try:
history = self.h
# Else, append the history
except:
history = {}
mhcp = ModelAndHistoryCheckpoint(
filepath=checkpoint_file,
model_dict=self.__dict__,
monitor="val_loss",
verbose=1,
mode="min",
period=save_period,
history=history
)
# Training history
self.__h = mhcp.history
if lr_decay:
lr_schedule = LearningRateSchedule(
epoch_to_start=sch_epoch_to_start,
last_epoch=sch_last_epoch,
lr_init=sch_lr_init,
lr_final=sch_lr_final,
)
lr_scheduler = LearningRateScheduler(
schedule=lr_schedule.exp_decay, verbose=1
)
callbacks = [lr_scheduler, mhcp]
elif lr_warmup:
lr_schedule = LearningRateSchedule(
epoch_to_start=sch_epoch_to_start,
last_epoch=sch_last_epoch,
lr_init=sch_lr_init,
lr_final=sch_lr_final,
)
lr_scheduler = LearningRateScheduler(
schedule=lr_schedule.warmup, verbose=1
)
callbacks = [lr_scheduler, mhcp]
else:
rlr = ReduceLROnPlateau(
monitor="val_loss",
factor=0.5,
patience=patience,
min_lr=1e-6,
verbose=1,
min_delta=1e-4,
)
callbacks = [rlr, mhcp]
# Inspect training parameters at the start of the training
self.summary()
# Parallel training on multiple GPUs
if gpus > 1:
parallel_model = multi_gpu_model(self.model, gpus=gpus)
parallel_model.compile(loss="categorical_crossentropy", optimizer=opt)
# This `fit` call will be distributed on all GPUs.
# Each GPU will process (batch_size/gpus) samples per batch.
parallel_model.fit_generator(
self.train_gen,
steps_per_epoch=self.steps_per_epoch / mini_epochs,
epochs=mini_epochs * self.epochs,
validation_data=self.valid_gen,
validation_steps=self.validation_steps / mini_epochs,
callbacks=callbacks,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
verbose=verbose,
) # 1 to show progress bar
elif gpus == 1:
self.model.compile(loss="categorical_crossentropy", optimizer=opt)
self.model.fit_generator(
self.train_gen,
steps_per_epoch=self.steps_per_epoch / mini_epochs,
epochs=mini_epochs * self.epochs,
validation_data=self.valid_gen,
validation_steps=self.validation_steps / mini_epochs,
callbacks=callbacks,
max_queue_size=10,
workers=workers,
use_multiprocessing=use_multiprocessing,
verbose=verbose,
) # 1 to show progress bar
# Build sample_model out of the trained batch_model
self.__build_sample_model(batch_input_length=1) # Single-output model
self.__build_sample_model(
batch_input_length=self.batch_size
) # Multi-output model
# @timed
def predict(self, temp=1, rng_seed=None):
"""
Generate a single SMILES string.
The states of the RNN are set based on the latent input.
Careful, "latent" must be: the output of self.transform()
or
an array of molecular descriptors.
If temp>0, multinomial sampling is used instead of selecting
the single most probable character at each step.
If temp==1, multinomial sampling without temperature scaling is used.
Returns:
A single SMILES string and its NLL.
"""
# Pass rng_seed for repeatable sampling
if rng_seed is not None:
np.random.seed(rng_seed)
# Reset the states between predictions because RNN is stateful!
self.sample_model.reset_states()
# Prepare the input char
startidx = self.smilesvec1._char_to_int[self.smilesvec1.startchar]
samplevec = np.zeros((1, 1, self.smilesvec1.dims[-1]))
samplevec[0, 0, startidx] = 1
smiles = ""
# Initialize Negative Log-Likelihood (NLL)
NLL = 0
# Loop and predict next char
for i in range(1000):
o = self.sample_model.predict(samplevec)
# Multinomial sampling with temperature scaling
if temp:
temp = abs(temp) # Handle negative values
nextCharProbs = np.log(o) / temp
nextCharProbs = np.exp(nextCharProbs)
nextCharProbs = (
nextCharProbs / nextCharProbs.sum() - 1e-8
) # Re-normalize for float64 to make exactly 1.0 for np.random.multinomial
sampleidx = np.random.multinomial(
1, nextCharProbs.squeeze(), 1
).argmax()
# Else, select the most probable character
else:
sampleidx = np.argmax(o)
samplechar = self.smilesvec1._int_to_char[sampleidx]
if samplechar != self.smilesvec1.endchar:
# Append the new character
smiles += samplechar
samplevec = np.zeros((1, 1, self.smilesvec1.dims[-1]))
samplevec[0, 0, sampleidx] = 1
# Calculate negative log likelihood for the selected character given the sequence so far
NLL -= np.log(o[0][0][sampleidx])
else:
return smiles, NLL
# @timed
def predict_batch(self, temp=1, rng_seed=None):
"""
Generate multiple random SMILES strings.
If temp>0, multinomial sampling is used instead of selecting
the single most probable character at each step.
If temp==1, multinomial sampling without temperature scaling is used.
Low temp leads to elimination of characters with low conditional probabilities.
"""
# Pass rng_seed for repeatable sampling
if rng_seed is not None:
np.random.seed(rng_seed)
# Reset the states between predictions because RNN is stateful!
self.multi_sample_model.reset_states()
# Index of input char "^"
startidx = self.smilesvec1._char_to_int[self.smilesvec1.startchar]
# Vectorize the input char for all SMILES
samplevec = np.zeros((self.batch_input_length, 1, self.smilesvec1.dims[-1]))
samplevec[:, 0, startidx] = 1
# Initialize arrays to store SMILES, their NLLs and their status
smiles = np.array([""] * self.batch_input_length, dtype=object)
NLL = np.zeros((self.batch_input_length,))
finished = np.array([False] * self.batch_input_length)
# Loop and predict next char
for i in range(1000):
o = self.multi_sample_model.predict(
samplevec, batch_size=self.batch_input_length
).squeeze()
# Multinomial sampling with temperature scaling
if temp:
temp = abs(temp) # No negative values
nextCharProbs = np.log(o) / temp
nextCharProbs = np.exp(nextCharProbs) # .squeeze()
# Normalize probabilities
nextCharProbs = (nextCharProbs.T / nextCharProbs.sum(axis=1) - 1e-8).T
sampleidc = np.asarray(
[
np.random.multinomial(1, nextCharProb, 1).argmax()
for nextCharProb in nextCharProbs
]
)
else:
sampleidc = np.argmax(o, axis=1)
samplechars = [self.smilesvec1._int_to_char[idx] for idx in sampleidc]
for idx, samplechar in enumerate(samplechars):
if not finished[idx]:
if samplechar != self.smilesvec1.endchar:
# Append the SMILES with the next character
smiles[idx] += self.smilesvec1._int_to_char[sampleidc[idx]]
samplevec = np.zeros(
(self.batch_input_length, 1, self.smilesvec1.dims[-1])
)
# One-Hot Encode the character
# samplevec[:,0,sampleidc] = 1
for count, sampleidx in enumerate(sampleidc):
samplevec[count, 0, sampleidx] = 1
# Calculate negative log likelihood for the selected character given the sequence so far
NLL[idx] -= np.log(o[idx][sampleidc[idx]])
else:
finished[idx] = True
# print("SMILES has finished at %i" %i)
# If all SMILES are finished, i.e. the endchar "$" has been generated, stop the generation
if finished.sum() == len(finished):
return smiles, NLL
@timed
def get_smiles_nll(self, smiles_ref) -> float:
"""
Calculate the NLL of a given SMILES string if its descriptors are used as RNN states.
Returns:
The NLL of sampling a given SMILES string.
"""
# Reset the states between predictions because RNN is stateful!
self.sample_model.reset_states()
# Prepare the input char
startidx = self.smilesvec1._char_to_int[self.smilesvec1.startchar]
samplevec = np.zeros((1, 1, self.smilesvec1.dims[-1]))
samplevec[0, 0, startidx] = 1
# Initialize Negative Log-Likelihood (NLL)
NLL = 0
# Loop and predict next char
for i in range(1000):
o = self.sample_model.predict(samplevec)
samplechar = smiles_ref[i]
sampleidx = self.smilesvec1._char_to_int[samplechar]
if i != len(smiles_ref) - 1:
samplevec = np.zeros((1, 1, self.smilesvec1.dims[-1]))
samplevec[0, 0, sampleidx] = 1
# Calculate negative log likelihood for the selected character given the sequence so far
NLL -= np.log(o[0][0][sampleidx])
else:
return NLL
@timed
def get_smiles_nll_batch(self, smiles_ref) -> list:
"""
Calculate the individual NLL for a batch of known SMILES strings.
Batch size is equal to self.batch_input_length so reset it if needed.
Returns:
NLL of sampling all listed SMILES.
"""
# Reset the states between predictions because RNN is stateful!
self.multi_sample_model.reset_states()
# Index of input char "^"
startidx = self.smilesvec1._char_to_int[self.smilesvec1.startchar]
# Vectorize the input char for all SMILES
samplevec = np.zeros((self.batch_input_length, 1, self.smilesvec1.dims[-1]))
samplevec[:, 0, startidx] = 1
# Initialize arrays to store NLLs and flag if a SMILES is finished
NLL = np.zeros((self.batch_input_length,))
finished = np.array([False] * self.batch_input_length)
# Loop and predict next char
for i in range(1000):
o = self.multi_sample_model.predict(
samplevec, batch_size=self.batch_input_length
).squeeze()
samplechars = []
for smiles in smiles_ref:
try:
samplechars.append(smiles[i])
except:
# This is a finished SMILES, so "i" exceeds dimensions
samplechars.append("$")
sampleidc = np.asarray(
[self.smilesvec1._char_to_int[char] for char in samplechars]
)
for idx, samplechar in enumerate(samplechars):
if not finished[idx]:
if i != len(smiles_ref[idx]) - 1:
samplevec = np.zeros(
(self.batch_input_length, 1, self.smilesvec1.dims[-1])
)
# One-Hot Encode the character
for count, sampleidx in enumerate(sampleidc):
samplevec[count, 0, sampleidx] = 1
# Calculate negative log likelihood for the selected character given the sequence so far
NLL[idx] -= np.log(o[idx][sampleidc[idx]])
else:
finished[idx] = True
# If all SMILES are finished, i.e. the endchar "$" has been generated, stop the generation
if finished.sum() == len(finished):
return NLL
def summary(self):
"""
Echo the training configuration for inspection.
"""
print(
"\nModel trained with dataset %s that has maxlen=%d and charset=%s for %d epochs."
% (self.dataset_name, self.maxlen, self.charset, self.epochs)
)
print(
"noise_std: %.6f, lstm_dim: %d, dec_layers: %d, td_dense_dim: %d, batch_size: %d, codelayer_dim: %d, lr: %.6f."
% (
self.noise_std,
self.lstm_dim,
self.dec_layers,
self.td_dense_dim,
self.batch_size,
self.codelayer_dim,
self.lr,
)
)
def get_graphs(self):
"""
Export the graphs of the model and its submodels to png files.
Requires "pydot" and "graphviz" to be installed (pip install graphviz && pip install pydot).
"""
try:
from keras.utils import plot_model
from keras.utils.vis_utils import model_to_dot
# from IPython.display import SVG
plot_model(self.model, to_file="model.png")
print("Model exported to png.")
except:
print("Check pydot and graphviz installation.")
@timed
def save(self, model_name):
"""
Save model in a zip file.
"""
with tempfile.TemporaryDirectory() as dirpath:
# Save the Keras model
self.model.save(dirpath + "/model.h5")
# Exclude unpicklable and unwanted attributes
excl_attr = [
"_DDC__mode", # excluded because it is always identified within self.__init__()
"_DDC__train_gen", # unpicklable
"_DDC__valid_gen", # unpicklable
"_DDC__sample_model", # unpicklable
"_DDC__multi_sample_model", # unpicklable
"_DDC__model",
] # unpicklable
# Cannot deepcopy self.__dict__ because of Keras' thread lock so this is
# bypassed by popping and re-inserting the unpicklable attributes
to_add = {}
# Remove unpicklable attributes
for attr in excl_attr:
to_add[attr] = self.__dict__.pop(attr, None)
# Pickle metadata, i.e. almost everything but the Keras models and generators
pickle.dump(self.__dict__, open(dirpath + "/metadata.pickle", "wb"))
# Zip directory with its contents
shutil.make_archive(model_name, "zip", dirpath)
# Finally, re-load the popped elements for the model to be usable
for attr in excl_attr:
self.__dict__[attr] = to_add[attr]
print("Model saved.")
```
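The temperature-scaled multinomial sampling used inside `predict` and `predict_batch` can be illustrated in isolation. The sketch below replaces the RNN output `o` with a made-up probability vector; the class additionally subtracts a tiny epsilon before calling `np.random.multinomial` to keep the float64 sum at exactly 1.0, which is omitted here for brevity:
```python
import numpy as np

o = np.array([0.70, 0.20, 0.05, 0.05])  # hypothetical softmax output over 4 characters

def sample_index(probs, temp=1.0):
    temp = abs(temp)                     # negative temperatures are treated as positive
    scaled = np.exp(np.log(probs) / temp)
    scaled = scaled / scaled.sum()       # re-normalize after temperature scaling
    return np.random.multinomial(1, scaled, 1).argmax()

np.random.seed(0)
print(sample_index(o, temp=1.0))  # plain multinomial sampling
print(sample_index(o, temp=0.2))  # low temperature sharpens the distribution towards index 0
```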
#### File: dataset_generation/0_molecular_generation/my_utils.py
```python
import csv
import os
import numpy as np
import h5py
from rdkit import Chem
from rdkit import DataStructs
from rdkit import RDLogger
from rdkit.Chem import AllChem, Descriptors, Fragments
RDLogger.DisableLog('rdApp.*')
class mol_controller():
def __init__(self, smiles=None, mols=None):
'''
:param smiles: list of smile
:param mols: list of rdkit mol
'''
# init
if mols and smiles:
self.mols = mols
self.smiles = smiles
self.checkFrommols()
elif mols:
self.mols = mols
self.checkFrommols()
elif smiles:
self.smiles = smiles
self.checkFromsmiles()
else:
            print('initializing failed: both smiles and mols are None')
raise SyntaxError
def checkFrommols(self):
smiles = []
self.unparsable_smiles_idx = []
for idx, mol in enumerate(self.mols):
            # converting from SMILES to mol does not raise here, but converting mol back to SMILES may raise rdkit.Chem.rdmolfiles.MolToSmiles(NoneType)
try:
smiles.append(Chem.MolToSmiles(mol))
except:
self.unparsable_smiles_idx.append(idx)
print(f'unparsable: {len(self.unparsable_smiles_idx)}')
print(f'valid smiles: {len(smiles)}')
# remove duplicate smiles
        self.smiles = list(set(smiles))  # remove duplicate smiles
        self.smiles.sort(key=smiles.index)  # sort back to the original order
self.mols = [Chem.MolFromSmiles(smile) for smile in self.smiles]
self.unique_num = len(self.smiles)
print(f'unique smiles: {len(self.smiles)}')
def checkFromsmiles(self):
self.mols = []
self.unparsable_smiles_idx = []
for idx, smile in enumerate(self.smiles):
try:
self.mols.append(Chem.MolFromSmiles(smile))
except:
self.unparsable_smiles_idx.append(idx)
print(f'unparsable: {len(self.unparsable_smiles_idx)}')
self.checkFrommols()
def cal_similarity(self, seed_mol):
# init
smis = []
# cal ecfp
radius = 2
seed_fp = AllChem.GetMorganFingerprint(seed_mol, radius)
# cal smis
print('start cal similarity....')
# for mol in tqdm(self.mols):
self.unparsable_smiles_idx = []
for i, mol in enumerate(self.mols):
try:
# cal fp
fp = AllChem.GetMorganFingerprint(mol, radius)
# cal smi
smi = DataStructs.DiceSimilarity(seed_fp, fp)
                # append similarity
smis.append(smi)
except:
self.unparsable_smiles_idx.append(i)
        # remove bad smiles
for i in reversed(self.unparsable_smiles_idx):
self.mols.pop(i)
self.smiles.pop(i)
# to numpy
smis = np.array(smis)
# print
print(f'similarity: mean {smis.mean():.3f} || min {smis.min():.3f} || max {smis.max():.3f}')
return smis
def cal_MW(self):
# cal molecule weight
return [Descriptors.MolWt(mol) for mol in self.mols]
def cal_logp(self):
# cal molecule logp
return [Descriptors.MolLogP(mol) for mol in self.mols]
def cal_HB_acceptor(self):
# acceptor of hydrogen bond
return [Descriptors.NumHAcceptors(mol) for mol in self.mols]
def cal_HB_donor(self):
# donor of hydrogen bond
return [Descriptors.NumHDonors(mol) for mol in self.mols]
def cal_halogen(self):
# count num of halogen atoms
return [Fragments.fr_halogen(mol) for mol in self.mols]
def cal_rotable_bonds(self):
# count num of rotable bonds
return [Descriptors.NumRotatableBonds(mol) for mol in self.mols]
def cal_sulfi(self):
# count num of S
return [Fragments.fr_sulfide(mol) for mol in self.mols]
def cal_heavy_atoms(self):
# count num of heavy atoms
return [Descriptors.HeavyAtomCount(mol) for mol in self.mols]
def cal_rings(self):
# count ring nums
return [Descriptors.RingCount(mol) for mol in self.mols]
def cal_properties(self, labels=None):
'''
cal properties for mols (MW, logp, rotable_bonds, HBacceptor, HBdonor, halogen)
if labels are given, merge labels and mols
:param labels: (batch_size, 1)
:return: (batch_size, 7)
'''
try:
if labels is not None:
return np.array(list(zip(self.cal_MW(), self.cal_logp(), self.cal_rotable_bonds(), self.cal_HB_acceptor(),
self.cal_HB_donor(), self.cal_halogen(), labels)))
# return np.concatenate(
# [np.array(list(zip(self.cal_MW(), self.cal_logp(), self.cal_rotable_bonds(), self.cal_HB_acceptor(),
# self.cal_HB_donor(), self.cal_halogen()))), labels], axis=-1)
else:
return np.array(list(zip(self.cal_MW(), self.cal_logp(), self.cal_rotable_bonds(), self.cal_HB_acceptor(),
self.cal_HB_donor(), self.cal_halogen())))
except:
return np.ones((len(self.mols), 7))
class Smiles():
def __init__(self, smile_lis, names=None, save_path='./'):
self.smiles = smile_lis
self.mols = [Chem.MolFromSmiles(smile) for smile in self.smiles]
self.names = names if names else list(range(len(self.smiles)))
self.path = save_path
self.file_label = {
'sdf': '$$$$',
'mol2': '@<TRIPOS>MOLECULE'
}
def to3D(self, mol):
# add H
mol = Chem.AddHs(mol)
# to 3D
AllChem.EmbedMolecule(mol)
# delete H
# mol = Chem.RemoveHs(mol)
return mol
def save_to_file(self, mol, file_name, format='sdf'):
# file
sdf_file = f'{self.path}/{file_name}.sdf'
# write file to sdf
Chem.MolToMolFile(mol, sdf_file)
if format == 'mol2':
mol2_file = f'{self.path}/{file_name}.mol2'
# trans2 mol2
cmd = 'module load openbabel && obabel {0} -O {1}'.format(sdf_file, mol2_file)
os.system(cmd)
def merge_file(self, src_files, dst_file, format='sdf'):
# init
content = ''
# for-loop
for src_file in src_files:
src_file_full = f'{self.path}/{src_file}.{format}'
with open(src_file_full, 'r') as f:
content += f'{src_file}{f.read()}\n$$$$\n'
# output file
dst_file = f'{dst_file}.{format}'
with open(dst_file, 'w') as f:
f.write(content)
def split_file(self, src_file, dst_files, dst_path, format='sdf'):
'''
:param src_file: src_path/file.sdf
:param dst_files: [file_name, file_name1...]
:param dst_path:
:param format:
:return:
'''
        # read the file content into con
with open(src_file, 'r') as f:
con = f.read()
        # split the string on the file label (e.g. @<TRIPOS>MOLECULE)
con = con.split(f'{self.file_label[format]}\n')
for i in range(0, len(con)):
if con[i] != '':
                lig_name = con[i].split('\n')[0].strip()  # get the ligand name
                lig_file = '{}/{}.{}'.format(dst_path, lig_name, format)  # build the output molecule path
                # recursively check for duplicate file names
def same_file(lig_file, n=0):
if os.path.exists(lig_file):
n += 1
lig_file = '{}/{}_{}.{}'.format(dst_path, lig_name, n, format)
return same_file(lig_file, n)
else:
return lig_file
                lig_file = same_file(lig_file)  # check whether the name already exists
                # write the output file
with open(lig_file, 'w') as f:
if format == 'sdf':
f.write(con[i] + f'{self.file_label[format]}\n')
else:
# format == 'mol2'
f.write(f'{self.file_label[format]}\n' + con[i])
def transform(self, src_file, dst_file):
cmd = f'module load schrodinger && structconvert {src_file} {dst_file}'
os.system(cmd)
class Pharmacophore():
def __init__(self, soft='openeye'):
if soft == 'openeye':
self.load_module = 'module load openeye/applications-2018.11.3 &&'
self.phase_screen = 'rocs -dbase {0} -query {1} -cutoff 1.0 -nostructs -prefix {2}' # dbase unscreened file # result file = prefix_1.rpt prefix_hits_1.sdf
elif soft =='schrodinger':
self.load_module = 'module load schrodinger/2017-4 &&'
self.generate_hypo = 'create_hypoFiles {0} {1}' # 0 infile {reference file} 1 hypoID{output file name}
self.generate_hypo_mul = 'phase_hypothesis {0} -HOST "localhost:28" ' # phase_hypothesis phase_pharm_8.inp -HOST "localhost:16"
self.phase_screen = 'phase_screen {0} {1} {2} -distinct -nosort -report 1 -WAIT -HOST "localhost:28" -TMPLAUNCHDIR -ATTACHED' # 0 unscreened file 1 hypo_file 2 job name Hits will be returned in <jobName>-hits.maegz. -distinct -nosort
self.phase_screen_min = 'phase_screen {0} {1} {2} -refine -force_field OPLS3 -nosort -report 1 -match 6 -WAIT -HOST "localhost:28" -TMPLAUNCHDIR -ATTACHED' # Generate conformers on-the-fly for the highest scoring match and search for additional matches. Not subject to the above restrictions on -flex, but not valid in combination with -flex.
self.default_def = './phase_pharm.def'
self.phase_pharm_inp = '''INPUT_STRUCTURE_FILE {0} # phase_pharm_8.maegz
USE_PREALIGNED_LIGANDS False
USE_FAST_SCORING False
USE_LIGAND_GROUPING True
LIGAND_PERCEPTION stereo
GENERATE_CONFORMERS True
MAX_NUMBER_CONFORMERS 50
USE_CONFORMER_MINIMIZATION True
REQUIRED_MATCH_FRACTION {1} # 0.5
NUMBER_FEATURES_PER_HYPOTHESIS {2},{3} # 4, 5
PREFERRED_MIN_SITES {4} # 5
HYPOTHESIS_DIFFERENCE_CRITERIA 0.5
HYPOTHESES_KEPT_PER_FEATURE_SIZE 10
FEATURE_FREQUENCY_A 0, 3
FEATURE_FREQUENCY_D 0, 3
FEATURE_FREQUENCY_H 0, 3
FEATURE_FREQUENCY_R 0, 3
FEATURE_FREQUENCY_P 0, 3
FEATURE_FREQUENCY_N 0, 3
FEATURE_FREQUENCY_X 0, 3
FEATURE_FREQUENCY_Y 0, 3
FEATURE_FREQUENCY_Z 0, 3
FEATURE_DEFINITION_FILE {5} # phase_pharm.def
SCORE_WEIGHT_VECTOR 1.0
SCORE_WEIGHT_SITE 1.0
SCORE_WEIGHT_VOLUME 1.0
SCORE_WEIGHT_SELECTIVITY 1.0
SCORE_WEIGHT_LOG_MATCH 1.0
SCORE_WEIGHT_INACTIVE 1.0
SCORE_WEIGHT_SURVIVAL 0.06
SCORE_WEIGHT_BEDROC 1.0
FEATURE_TOLERANCE_A 2.0
FEATURE_TOLERANCE_D 2.0
FEATURE_TOLERANCE_H 2.0
FEATURE_TOLERANCE_R 2.0
FEATURE_TOLERANCE_P 2.0
FEATURE_TOLERANCE_N 2.0
FEATURE_TOLERANCE_X 2.0
FEATURE_TOLERANCE_Y 2.0
FEATURE_TOLERANCE_Z 2.0
APPEND_EXCLUDED_VOLUMES False
'''
def generate_hypo_file(self, path, seed_file, hypo_prefix):
cmd = f'cd {path} && {self.load_module + self.generate_hypo.format(seed_file, hypo_prefix)}'
# print(cmd)
os.system(cmd)
def phase_screen_file(self, path, unscreened_file, hypo_file, hypo_prefix):
cmd = f'cd {path} && {self.load_module + self.phase_screen.format(unscreened_file, hypo_file, hypo_prefix)}'
# print(cmd)
os.system(cmd)
def append2csv(csv_file, new_lis):
'''
:param csv_file: csv file
:param new_lis: list waited to be added to the end of the csv file
:return:
'''
csv_ = open(csv_file, 'a')
csv_writer = csv.writer(csv_)
csv_writer.writerow(new_lis)
csv_.close()
def append2txt(txt_file, src_file):
with open(src_file, 'r') as f:
content = f.read()
with open(txt_file, 'a') as f:
f.write(content)
def load_dataset(data_file):
dataset = h5py.File(data_file, "r")
binary_mols = dataset["mols"][:]
dataset.close()
mols = [Chem.Mol(binary_mol) for binary_mol in binary_mols]
return mols
```
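The utilities above lean on a handful of RDKit calls (`MolFromSmiles`, `AddHs`, `EmbedMolecule`, `MolToMolFile`) plus simple descriptor functions. The snippet below is a minimal, self-contained sketch of that workflow for a single molecule; the SMILES string and output filename are illustrative, not taken from the project.

```python
from rdkit import Chem
from rdkit.Chem import AllChem, Descriptors, Lipinski

smiles = 'CCO'                      # illustrative molecule (ethanol)
mol = Chem.MolFromSmiles(smiles)

# descriptors analogous to cal_MW / cal_logp / cal_HB_* above
props = {
    'MW': Descriptors.MolWt(mol),
    'logP': Descriptors.MolLogP(mol),
    'rotatable_bonds': Lipinski.NumRotatableBonds(mol),
    'HB_acceptors': Lipinski.NumHAcceptors(mol),
    'HB_donors': Lipinski.NumHDonors(mol),
}
print(props)

# 3D embedding as in Smiles.to3D, then write an SDF as in save_to_file
mol3d = Chem.AddHs(mol)
AllChem.EmbedMolecule(mol3d)
Chem.MolToMolFile(mol3d, 'example_ligand.sdf')
```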
#### File: TocoDecoy/utilities/cal_nn.py
```python
import os, sys, glob
import pandas as pd
import shutil
from multiprocessing import Pool
from functools import partial
from NNScore2module import PDB, binana, command_line_parameters
# def nn_score(lig_name, similarity, label, ligand_path, csv_file):
def nn_score(zip_lis, ligand_path, csv_file):
lig_name, similarity, label = zip_lis[0], zip_lis[1], zip_lis[2]
import time
ligand_pdbqt = '%s/%s.pdbqt' % (ligand_path, lig_name)
ligand_pdbqt_pred = '%s/%s_pre.pdbqt' % (ligand_path, lig_name)
    # prepare the ligand
    # log_file = '%s/%s_nn.txt' % (log_dir, lig_name)  # score file
    if not os.path.exists(ligand_pdbqt):
        return None
    # extra clean-up of the pdbqt atom records
    if not os.path.exists(ligand_pdbqt_pred):
with open(ligand_pdbqt, 'r')as f:
lines = f.readlines()
new_lines = []
for line in lines:
if line.startswith('ATOM'):
new_lines.append(line[:23] + ' ' + line[26:])
else:
new_lines.append(line)
new_lig = ''.join(new_lines)
with open(ligand_pdbqt_pred, 'w')as f:
f.write(new_lig)
    # run the scoring
cmd = "/home/xujun/Soft/Score_Function/NNscore/NNScore2module.py -receptor %s -ligand %s" % (
protein_pred, ligand_pdbqt_pred)
try:
params_list = cmd.split()
cmd_params = command_line_parameters(params_list)
receptor = PDB()
receptor.LoadPDB_from_file(protein_pred)
receptor.OrigFileName = protein_pred
d = binana(ligand_pdbqt_pred, receptor, cmd_params, "", "", "")
result = [
lig_name] + d.vina_output + d.ligand_receptor_atom_type_pairs_less_than_two_half.values() + d.ligand_receptor_atom_type_pairs_less_than_four.values() \
+ d.ligand_atom_types.values() + d.ligand_receptor_atom_type_pairs_electrostatic.values() + d.rotateable_bonds_count.values() \
+ d.active_site_flexibility.values() + d.hbonds.values() + d.hydrophobics.values() + d.stacking.values() + d.pi_cation.values() \
+ d.t_shaped.values() + d.salt_bridges.values() + [similarity, label]
except:
result = [lig_name]
# with open(log_file, 'w')as f:
# f.write(str(result))
    # # collect the results
# with open(log_file, 'r')as f:
# lines = f.readlines()
# result = eval(lines[0].strip())
pd.DataFrame(result).T.to_csv(csv_file, index=None, header=None, mode='a')
    # # remove the score file
# if len(result) != 1:
# os.remove(ligand_pdbqt_pred)
if __name__ == '__main__':
    # define the different atom-type descriptor names
vina_output_list = ['vina_affinity', 'vina_gauss_1', 'vina_gauss_2', 'vina_repulsion', 'vina_hydrophobic',
'vina_hydrogen']
ligand_receptor_atom_type_pairs_less_than_two_half_list = ['A_MN', 'OA_SA', 'HD_N', 'N_ZN', 'A_MG', 'HD_NA', 'A_CL',
'MG_OA', 'FE_HD', 'A_OA', 'NA_ZN', 'A_N', 'C_OA', 'F_HD',
'C_HD', 'NA_SA', 'A_ZN', 'C_NA', 'N_N', 'MN_N', 'F_N',
'FE_OA', 'HD_I', 'BR_C', 'MG_NA', 'C_ZN', 'CL_MG',
'BR_OA',
'A_FE', 'CL_OA', 'CL_N', 'NA_OA', 'F_ZN', 'HD_P',
'CL_ZN',
'C_C', 'C_CL', 'FE_N', 'HD_S', 'HD_MG', 'C_F', 'A_NA',
'BR_HD', 'HD_OA', 'HD_MN', 'A_SA', 'A_F', 'HD_SA', 'A_C',
'A_A', 'F_SA', 'C_N', 'HD_ZN', 'OA_OA', 'N_SA', 'CL_FE',
'C_MN', 'CL_HD', 'OA_ZN', 'MN_OA', 'C_MG', 'F_OA',
'CD_OA',
'S_ZN', 'N_OA', 'C_SA', 'N_NA', 'A_HD', 'HD_HD', 'SA_ZN']
ligand_receptor_atom_type_pairs_less_than_four_list = ['I_N', 'OA_SA', 'FE_NA', 'HD_NA', 'A_CL', 'MG_SA', 'A_CU',
'P_SA', 'C_NA', 'MN_NA', 'F_N', 'HD_N', 'HD_I', 'CL_MG',
'HD_S',
'CL_MN', 'F_OA', 'HD_OA', 'F_HD', 'A_SA', 'A_BR', 'BR_HD',
'SA_SA', 'A_MN', 'N_ZN', 'A_MG', 'I_OA', 'C_C', 'N_S', 'N_N',
'FE_N', 'NA_SA', 'BR_N', 'MN_N', 'A_P', 'BR_C', 'A_FE',
'MN_P',
'CL_OA', 'CU_HD', 'MN_S', 'A_S', 'FE_OA', 'NA_ZN', 'P_ZN',
'A_F',
'A_C', 'A_A', 'A_N', 'HD_MN', 'A_I', 'N_SA', 'C_OA', 'MG_P',
'BR_SA', 'CU_N', 'MN_OA', 'MG_N', 'HD_HD', 'C_FE', 'CL_NA',
'MG_OA', 'A_OA', 'CL_ZN', 'BR_OA', 'HD_ZN', 'HD_P', 'OA_P',
'OA_S', 'N_P', 'A_NA', 'CL_FE', 'HD_SA', 'C_MN', 'CL_HD',
'C_MG',
'FE_HD', 'MG_S', 'NA_S', 'NA_P', 'FE_SA', 'P_S', 'C_HD',
'A_ZN',
'CL_P', 'S_SA', 'CL_S', 'OA_ZN', 'N_NA', 'MN_SA', 'CL_N',
'NA_OA', 'C_ZN', 'C_CD', 'HD_MG', 'C_F', 'C_I', 'C_CL',
'C_N',
'C_P', 'C_S', 'A_HD', 'F_SA', 'MG_NA', 'OA_OA', 'CL_SA',
'S_ZN',
'N_OA', 'C_SA', 'SA_ZN']
ligand_atom_types_list = ['A', 'C', 'CL', 'I', 'N', 'P', 'S', 'BR', 'HD', 'NA', 'F', 'OA', 'SA']
ligand_receptor_atom_type_pairs_electrostatic_list = ['I_N', 'OA_SA', 'FE_NA', 'HD_NA', 'A_CL', 'MG_SA', 'P_SA',
'C_NA',
'MN_NA', 'F_N', 'HD_N', 'HD_I', 'CL_MG', 'HD_S', 'CL_MN',
'F_OA',
'HD_OA', 'F_HD', 'A_SA', 'A_BR', 'BR_HD', 'SA_SA', 'A_MN',
'N_ZN',
'A_MG', 'I_OA', 'C_C', 'N_S', 'N_N', 'FE_N', 'NA_SA', 'BR_N',
'MN_N', 'A_P', 'BR_C', 'A_FE', 'MN_P', 'CL_OA', 'CU_HD',
'MN_S',
'A_S', 'FE_OA', 'NA_ZN', 'P_ZN', 'A_F', 'A_C', 'A_A', 'A_N',
'HD_MN', 'A_I', 'N_SA', 'C_OA', 'MG_P', 'BR_SA', 'CU_N',
'MN_OA',
'MG_N', 'HD_HD', 'C_FE', 'CL_NA', 'MG_OA', 'A_OA', 'CL_ZN',
'BR_OA', 'HD_ZN', 'HD_P', 'OA_P', 'OA_S', 'N_P', 'A_NA',
'CL_FE',
'HD_SA', 'C_MN', 'CL_HD', 'C_MG', 'FE_HD', 'MG_S', 'NA_S',
'NA_P',
'FE_SA', 'P_S', 'C_HD', 'A_ZN', 'CL_P', 'S_SA', 'CL_S',
'OA_ZN',
'N_NA', 'MN_SA', 'CL_N', 'NA_OA', 'F_ZN', 'C_ZN', 'HD_MG',
'C_F',
'C_I', 'C_CL', 'C_N', 'C_P', 'C_S', 'A_HD', 'F_SA', 'MG_NA',
'OA_OA', 'CL_SA', 'S_ZN', 'N_OA', 'C_SA', 'SA_ZN']
rotateable_bonds_count_list = ['rot_bonds']
active_site_flexibility_list = ['SIDECHAIN_OTHER', 'SIDECHAIN_ALPHA', 'BACKBONE_ALPHA', 'SIDECHAIN_BETA',
'BACKBONE_BETA', 'BACKBONE_OTHER']
hbonds_list = ['HDONOR-LIGAND_SIDECHAIN_BETA', 'HDONOR-LIGAND_BACKBONE_OTHER', 'HDONOR-LIGAND_SIDECHAIN_ALPHA',
'HDONOR-RECEPTOR_SIDECHAIN_OTHER', 'HDONOR-RECEPTOR_BACKBONE_ALPHA',
'HDONOR-RECEPTOR_SIDECHAIN_BETA',
'HDONOR-RECEPTOR_SIDECHAIN_ALPHA', 'HDONOR-LIGAND_SIDECHAIN_OTHER', 'HDONOR-LIGAND_BACKBONE_BETA',
'HDONOR-RECEPTOR_BACKBONE_BETA', 'HDONOR-RECEPTOR_BACKBONE_OTHER', 'HDONOR-LIGAND_BACKBONE_ALPHA']
hydrophobics_list = ['SIDECHAIN_OTHER', 'SIDECHAIN_ALPHA', 'BACKBONE_ALPHA', 'SIDECHAIN_BETA', 'BACKBONE_BETA',
'BACKBONE_OTHER']
stacking_list = ['ALPHA', 'BETA', 'OTHER']
pi_cation_list = ['LIGAND-CHARGED_BETA', 'LIGAND-CHARGED_ALPHA', 'RECEPTOR-CHARGED_BETA', 'RECEPTOR-CHARGED_OTHER',
'RECEPTOR-CHARGED_ALPHA', 'LIGAND-CHARGED_OTHER']
t_shaped_list = ['ALPHA', 'BETA', 'OTHER']
salt_bridges_list = ['ALPHA', 'BETA', 'OTHER']
    # combine the atom types into CSV column names
header_list = ['name'] + vina_output_list + ['atp2_%s' % it for it in ligand_receptor_atom_type_pairs_less_than_two_half_list] \
+ ['atp4_%s' % it for it in ligand_receptor_atom_type_pairs_less_than_four_list] + ['lat_%s' % it for
it in
ligand_atom_types_list] \
+ ['ele_%s' % it for it in
ligand_receptor_atom_type_pairs_electrostatic_list] + rotateable_bonds_count_list + [
'siteflex_%s' % it for it in active_site_flexibility_list] \
+ ['hbond_%s' % it for it in hbonds_list] + ['hydrophobic_%s' % it for it in hydrophobics_list] + [
'stacking_%s' % it for it in stacking_list] \
+ ['pi_cation_%s' % it for it in pi_cation_list] + ['t_shaped_%s' % it for it in t_shaped_list] + [
'salt_bridges_%s' % it for it in salt_bridges_list] + ['similarity', 'label']
    # define paths and files
job_type = 'aldh1' # fpb pcb
path = '/home/xujun/Project_5'
# protein and ligand path
src_path = '{0}/mix/{1}'.format(path, job_type)
src_pro_path = '{0}/docked'.format(src_path)
protein_pred = '{0}/5l2n_smina_p.pdbqt'.format(src_pro_path)
crystal_pdbqt = '{0}/5l2n_crystal_ligand.pdbqt'.format(src_pro_path)
lig_path = '{0}/ligands_mol2'.format(src_path)
    log_dir = '{0}/nn_log'.format(src_path)  # score log directory
# descriptors dir
dst_path = '/home/xujun/Project_5/cal_descriptors/v_1/{}'.format(job_type)
    pdbqt_dir = '{0}/pdbqt'.format(src_path)  # pdbqt directory
csv_file = '{0}/nn.csv'.format(dst_path)
# get lig_names
src_csv = '{0}/{1}_filtered.csv'.format(dst_path, job_type)
docked_csv = '{0}/SP.csv'.format(dst_path)
df_sp = pd.read_csv(docked_csv, encoding='utf-8').dropna()
names = df_sp.iloc[:, 0].values
df = pd.read_csv(src_csv, encoding='utf-8')
df.index = df.iloc[:, 0].values
similarities = df.loc[names, 'train'].values # similarity
labels = df.loc[names, 'label'].values
# mkdir
if not os.path.exists(log_dir):
os.mkdir(log_dir)
# write csv
if not os.path.exists(csv_file):
pd.DataFrame(header_list).T.to_csv(csv_file, index=False, header=False)
# partial
nn_ = partial(nn_score, ligand_path=pdbqt_dir, csv_file=csv_file)
# multiprocessing
pool = Pool(28)
pool.map(nn_, zip(names, similarities, labels))
pool.close()
pool.join()
```
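The per-ligand scoring above is parallelised by binding the fixed arguments with `functools.partial` and mapping over zipped `(name, similarity, label)` tuples. Below is a stripped-down, hypothetical sketch of that pattern with a dummy worker; as in the original, each worker appends its own row to a shared CSV, which assumes each write is a single small call.

```python
import csv
from functools import partial
from multiprocessing import Pool

def score_one(item, out_csv):
    # item mirrors the (lig_name, similarity, label) tuples used above
    lig_name, similarity, label = item
    row = [lig_name, len(lig_name), similarity, label]   # dummy "score"
    with open(out_csv, 'a') as f:
        csv.writer(f).writerow(row)

if __name__ == '__main__':
    names = ['lig_a', 'lig_b', 'lig_c']
    sims = [0.9, 0.5, 0.1]
    labels = [1, 0, 0]
    worker = partial(score_one, out_csv='scores.csv')
    pool = Pool(4)
    pool.map(worker, zip(names, sims, labels))
    pool.close()
    pool.join()
```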
#### File: utilities/interaction_frequency/interaction_base.py
```python
import os
import pandas as pd
from oddt.interactions import hbond_acceptor_donor, halogenbond_acceptor_halogen, salt_bridges, hydrophobic_contacts, \
acceptor_metal
from oddt.toolkits.rdk import readfile
class oddt_interaction():
def __init__(self, protein_file, path_for_lig, path_for_complex, interaction_csv, data_collect_csv):
self.protein = protein_file
self.path_for_lig = path_for_lig
self.path_for_complex = path_for_complex
self.interaction_csv = interaction_csv
self.data_collect_csv = data_collect_csv
        # protein mol object
        self.protein_mol = readfile(format='pdb', filename=self.protein).__next__()
        # protein file content (list of lines)
        self.protein_data = self.get_protein_data()
        # metal element types
        self.metal_atoms = ['FE', 'MG', 'MN', 'ZN', 'CA', 'NA']
        # the different interaction types
        self.interactions = {
            'hb': hbond_acceptor_donor,  # has a cutoff parameter to tune the detection threshold
            'qq': salt_bridges,
            'clb': halogenbond_acceptor_halogen,
            'lipo': hydrophobic_contacts,
            'metal': acceptor_metal
        }
        # mapping from interaction type to column index in the interaction csv
        self.interactions2col = {
            'hb': 1,
            'clb': 2,
            'qq': 3,
            'lipo': 4,
            'metal': 5
        }
    # get the ligand mol object
    def get_mol(self, ligand_file):
        return readfile(format='mol', filename=ligand_file).__next__()
    # read the protein pdb file as a list of lines
    def get_protein_data(self):
        # open the pdb file
        with open(self.protein, 'r') as f:
            protein_data = f.readlines()
        # return
        return protein_data
    # get residue information for a protein atom number
    def get_rec(self, atm_num):
        # scan the protein line by line
        for i in range(0, len(self.protein_data)):
            # find the line for this atom number
            if self.protein_data[i].startswith(f'ATOM{atm_num:>7d}'):
                # split the line on whitespace
                rec_data = self.protein_data[i].split()
                # residue element
                rec_element = rec_data[-1]
                # residue name
                rec_name = rec_data[3]
                # residue number
                rec_id = rec_data[5]
                return str([rec_name, rec_id, rec_element])
return None
def has_metal(self, ligandorprotein):
        # read the file
        with open(ligandorprotein, 'r') as f:
            content = f.read()
        # check each metal type
        for metal in self.metal_atoms:
            if metal in content:  # return True if a metal element is present
                return True
        # otherwise return False
        return False
    # compute a protein-ligand interaction and collect the protein atoms that take part in it
    def interaction2recNum(self, ligand, interaction_type, out_lis):
        # empty dict to hold the residue info
        rec_infos = {}
        # build the (protein, ligand) and (ligand, protein) mol pairs
        mols = [[self.protein_mol, self.get_mol(ligand)], [self.get_mol(ligand), self.protein_mol]]
        # whether only a single pass is needed
        cal_once = False
        # compute the residues with the protein acting as acceptor and as donor
        for i in [0, 1]:
            # metal interactions
            if interaction_type == 'metal':
                # both the protein and the ligand contain metals
                if self.has_metal(ligand) and self.has_metal(self.protein):
                    data_array = self.interactions[interaction_type](mols[i][0], mols[i][1])  # alternate the order
                # only the ligand contains metals
                elif self.has_metal(ligand):
                    data_array = self.interactions[interaction_type](mols[0][0], mols[0][1])  # ligand second
                    cal_once = True
                # the protein contains metals, or neither does
                else:
                    data_array = self.interactions[interaction_type](mols[1][0], mols[1][1])  # protein second
                    cal_once = True
            # all interactions other than metal
            else:
                data_array = self.interactions[interaction_type](mols[i][0], mols[i][1])
            protein_infos = data_array[i]  # donor/acceptor list on the protein side
            # check whether this interaction is present
            if len(protein_infos) == 0:  # no interaction
                pass
            else:  # interaction present
                protein_infos = set([i[0] for i in protein_infos])
                # collect the residue info
                for atm_num in protein_infos:  # atom number
                    atm_num = self.get_rec(atm_num)  # residue info, e.g. [MET, 86, O]
                    rec_infos[atm_num] = rec_infos.get(atm_num, 0) + 1  # count occurrences
            # stop after one pass if that is all that is needed
            if cal_once:
                break
        # the data looks like {"['MET', '86', 'O']": 1, "['MET', '86', 'N']": 1}
        # optionally reshape the result into a list
        if out_lis:
            # temporary list
            tmp_lis = []
            # loop over the residues
            for rec_info, rec_fre in rec_infos.items():
                # convert the residue info string back to a list
                rec_info = eval(rec_info)
                # and append the frequency
                rec_info.append(rec_fre)
                # collect into the new list
                tmp_lis.append(rec_info)
            # check whether any residues were found at all
            if len(tmp_lis) != 0:
                tmp_lis.sort(key=lambda x: x[-1], reverse=True)  # sort by frequency
            # replace the dict with the list form
            rec_infos = tmp_lis
        # return the data
return rec_infos
def get_rec_frequence(self, interaction_type):
        # dict to accumulate the frequencies
        frequencies = {}
        # read the data
        df = pd.read_csv(self.interaction_csv, encoding='utf-8').dropna()
        # get the column for this interaction type
        interactions = df.iloc[:, self.interactions2col[interaction_type]].values
        # go through the interactions of every molecule
        for lig_interaction in interactions:
            lig_interaction = eval(lig_interaction)  # convert the string back to a dict
            # go through each residue in the dict
            for rec in lig_interaction:
                frequencies[rec] = lig_interaction.get(rec, 0) + frequencies.get(rec, 0)  # accumulate the counts
        # turn the dict into a list
        # of residue info plus frequency
        frequency_lis = []
        for rec_info, rec_fre in frequencies.items():
            # convert the residue info string back to a list
            rec_info = eval(rec_info)
            # and append the frequency
            rec_info.append(rec_fre)
            # collect into the new list
            frequency_lis.append(rec_info)
        # check whether any residues were found for this interaction
        if len(frequency_lis) != 0:
            frequency_lis.sort(key=lambda x: x[-1], reverse=True)  # sort by frequency
            # write to csv
            pd.DataFrame([interaction_type, frequency_lis]).T.to_csv(self.data_collect_csv, header=False, index=False,
                                                                     mode='a')
            # con = '{}\n{}\n'.format('hb', frequency_lis)  # define the text content
            # with open(self.data_collect_csv, 'a') as f:  # write to file
            #     f.write(con)
        else:  # if nothing was found, write an empty list
            pd.DataFrame([interaction_type, []]).T.to_csv(self.data_collect_csv, index=False, header=False,
                                                          mode='a')  # write to CSV
def openbabel_transform(self, src_file, dst_file):
cmd = f'module load openbabel && obabel {src_file} -O {dst_file}'
os.system(cmd)
    # find the index just past the last line of a block containing a keyword; used when building the complex
    def get_final_index(self, data, key_word='ATOM'):
        for final_index in range(len(data)):
            # if this line lacks the keyword but the previous line has it, return this index
            if key_word not in data[final_index]:
                if key_word in data[final_index - 1]:
                    return final_index
        # if no such index is found, return the total number of lines
        return len(data)
    # build the protein-ligand complex
    def generate_complex(self, active_ligand):
        # define the molecules
        active_file = '{}/{}.sdf'.format(self.path_for_lig, active_ligand)  # active ligand (sdf)
        ligand_file = '{}/{}.pdb'.format(self.path_for_complex, active_ligand)  # ligand converted to pdb
        complex_file = '{}/{}.pdb'.format(self.path_for_complex, active_ligand)  # complex pdb (reuses the same path)
        # convert the ligand to pdb
        self.openbabel_transform(src_file=active_file, dst_file=ligand_file)
        # open the protein file
        with open(self.protein, 'r') as f:
            protein = f.readlines()
        # keep only the ATOM records
        protein = [line for line in protein if line.startswith('ATOM')]
        # open the ligand file
        with open(ligand_file, 'r') as f:
            ligand = f.readlines()
        # keep only the HETATM records
        ligand = [line for line in ligand if line.startswith('HETATM')]
        # replace 'ATOM  ' with 'HETATM'; the two extra spaces keep the columns aligned, otherwise the atom
        # serial numbers shift two positions to the right, the PDB becomes non-standard and 3Dmol.js cannot parse it
ligand = [i.replace('ATOM ', 'HETATM') if i.startswith('ATOM') else i for i in ligand]
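        # Illustration (hypothetical record, fixed-width PDB columns): swapping the 4-character tag plus two
        # spaces for the 6-character 'HETATM' leaves every later column in place, e.g.
        #   'ATOM      1  C1  LIG A   1 ...'  ->  'HETATM    1  C1  LIG A   1 ...'
        # whereas replacing only 'ATOM' would push the serial number two columns to the right.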
        # merge the strings
complex_content = f'HEADER {active_ligand}\nMODEL 1\n' + ''.join(protein) + f'COMPND {active_ligand}\n'\
+ ''.join(ligand) + 'ENDMDL\nEND\n'
        # write the complex
with open(complex_file, 'w') as f:
f.write(complex_content)
``` |
{
"source": "5akusei/test-project-django",
"score": 3
} |
#### File: test-project-django/animal_size/models.py
```python
from django.db import models
class AnimalSize(models.Model):
name = models.CharField(max_length=7, null=False, blank=False, unique=True)
description = models.CharField(max_length=250, null=False, blank=False)
class Meta:
db_table = 'animal_size'
def __str__(self):
return self.name
```
#### File: test-project-django/zoo/forms.py
```python
from django import forms
from zoo.models import Animal
class AnimalForm(forms.ModelForm):
class Meta:
model = Animal
exclude = ['type_of_bird']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['name'].widget.attrs.update({'class': 'form-control'})
self.fields['description'].widget.attrs.update({'class': 'form-control'})
self.fields['gender'].widget.attrs.update({'class': 'form-control'})
self.fields['size'].widget.attrs.update({'class': "form-control form-select"})
self.fields['size'].empty_label = 'Escoja un tamaño'
self.fields['life_span'].widget.attrs.update({'class': 'form-control', 'max':'150', 'min':'1'})
self.fields['family'].widget.attrs.update({'class': 'form-control'})
``` |
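For context, a `ModelForm` like the one above is normally driven from a view. The sketch below is a generic, hypothetical Django view; the URL name and template are assumptions, not taken from this repository, and it simply shows the usual bind/validate/save cycle.

```python
from django.shortcuts import render, redirect
from zoo.forms import AnimalForm

def animal_create(request):
    # bind POST data when present, otherwise render an empty form
    form = AnimalForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        form.save()                       # persists a new Animal row
        return redirect('animal_create')  # illustrative URL name
    return render(request, 'animal_form.html', {'form': form})
```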
{
"source": "5aledBos/python-for-android",
"score": 2
} |
#### File: recipes/jpeg/__init__.py
```python
from pythonforandroid.recipe import NDKRecipe
from pythonforandroid.logger import shprint
from pythonforandroid.util import current_directory
from os.path import join, exists
import sh
class JpegRecipe(NDKRecipe):
name = 'jpeg'
version = 'linaro-android'
url = 'git://git.linaro.org/people/tomgall/libjpeg-turbo/libjpeg-turbo.git'
patches = ['build-static.patch']
generated_libraries = ['libjpeg.a']
def prebuild_arch(self, arch):
super(JpegRecipe, self).prebuild_arch(arch)
build_dir = self.get_build_dir(arch.arch)
app_mk = join(build_dir, 'Application.mk')
if not exists(app_mk):
shprint(sh.cp, join(self.get_recipe_dir(), 'Application.mk'), app_mk)
jni_ln = join(build_dir, 'jni')
if not exists(jni_ln):
shprint(sh.ln, '-s', build_dir, jni_ln)
def build_arch(self, arch):
super(JpegRecipe, self).build_arch(arch)
with current_directory(self.get_lib_dir(arch)):
shprint(sh.mv, 'libjpeg.a', 'libjpeg-orig.a')
shprint(sh.ar, '-rcT', 'libjpeg.a', 'libjpeg-orig.a', 'libsimd.a')
recipe = JpegRecipe()
```
#### File: recipes/libgeos/__init__.py
```python
from pythonforandroid.toolchain import Recipe, shprint, shutil, current_directory
from os.path import exists, join
import sh
from multiprocessing import cpu_count
class LibgeosRecipe(Recipe):
version = '3.5'
#url = 'http://download.osgeo.org/geos/geos-{version}.tar.bz2'
url = 'https://github.com/libgeos/libgeos/archive/svn-{version}.zip'
depends = ['python2']
def should_build(self, arch):
super(LibgeosRecipe, self).should_build(arch)
return not exists(join(self.ctx.get_libs_dir(arch.arch), 'libgeos_c.so'))
def build_arch(self, arch):
super(LibgeosRecipe, self).build_arch(arch)
env = self.get_recipe_env(arch)
with current_directory(self.get_build_dir(arch.arch)):
dst_dir = join(self.get_build_dir(arch.arch), 'dist')
bash = sh.Command('bash')
print("If this fails make sure you have autoconf and libtool installed")
shprint(bash, 'autogen.sh') # Requires autoconf and libtool
shprint(bash, 'configure', '--host=arm-linux-androideabi', '--enable-shared', '--prefix={}'.format(dst_dir), _env=env)
shprint(sh.make, '-j', str(cpu_count()), _env=env)
shprint(sh.make, 'install', _env=env)
shutil.copyfile('{}/lib/libgeos_c.so'.format(dst_dir), join(self.ctx.get_libs_dir(arch.arch), 'libgeos_c.so'))
def get_recipe_env(self, arch):
env = super(LibgeosRecipe, self).get_recipe_env(arch)
env['CXXFLAGS'] += ' -I{}/sources/cxx-stl/gnu-libstdc++/4.8/include'.format(self.ctx.ndk_dir)
env['CXXFLAGS'] += ' -I{}/sources/cxx-stl/gnu-libstdc++/4.8/libs/{}/include'.format(
self.ctx.ndk_dir, arch)
env['CXXFLAGS'] += ' -L{}/sources/cxx-stl/gnu-libstdc++/4.8/libs/{}'.format(
self.ctx.ndk_dir, arch)
env['CXXFLAGS'] += ' -lgnustl_shared'
env['LDFLAGS'] += ' -L{}/sources/cxx-stl/gnu-libstdc++/4.8/libs/{}'.format(
self.ctx.ndk_dir, arch)
return env
recipe = LibgeosRecipe()
``` |
{
"source": "5aledmaged/The-Mutex-Hash-Code-2018-Sol.",
"score": 3
} |
#### File: 5aledmaged/The-Mutex-Hash-Code-2018-Sol./main.py
```python
import heapq as heap
filename = 'e_high_bonus'
class Ride:
def __init__(self, start_row, start_col, finish_row, finish_col, early_start, late_finish, bonus, i):
self.start_row = start_row
self.start_col = start_col
self.finish_row = finish_row
self.finish_col = finish_col
self.early_start = early_start
self.late_finish = late_finish
self.id = i
self.score = 0
self.distance = abs(finish_row - start_row) + abs(finish_col - start_col)
self.init_dist = 0
self.bonus = bonus
self.calculate_score()
def calculate_score(self, next_start=0, row=0, col=0):
self.init_dist = abs(self.start_col - col) + abs(self.start_row - row)
if self.init_dist + next_start <= self.early_start:
self.score = self.bonus + self.distance
elif self.init_dist + next_start + self.distance <= self.late_finish:
self.score = self.distance
def __repr__(self):
return '(' + str(self.id) + ', ' + str(self.score) + ')'
def __lt__(self, other):
return self.score > other.score
class Vehicle:
def __init__(self):
self.rides = []
self.row = 0
self.col = 0
self.busy = False
self.finished_rides = 0
self.rides_order = []
self.next = 0
def update_rides(self):
this_ride = heap.heappop(self.rides)
heap.heappush(self.rides, this_ride)
def move(self, row, col):
row_dist = abs(row - self.row)
col_dist = abs(col - self.col)
self.row = row
self.col = col
return [row_dist, col_dist]
def assign_ride(self, ride):
self.finished_rides += 1
self.rides_order.append(ride.id)
self.next += ride.init_dist + ride.distance
self.row = ride.finish_row
self.col = ride.finish_col
return self
def __lt__(self, other):
return self.next < other.next
def start_process(ride, vehicle, grid_rows, grid_cols, bonus, sim_steps):
pass
def init_tasks(vehicles, ride_queue):
for i in range(len(vehicles)):
next_vehicle = heap.heappop(vehicles)
next_ride = heap.heappop(ride_queue)
next_vehicle = next_vehicle.assign_ride(next_ride)
heap.heappush(vehicles, next_vehicle)
def simulate(vehicles, ride_queue, t):
time = 0
while True:
if (len(ride_queue) == 0) or time > t:
break
print(time)
next_vehicle = heap.heappop(vehicles)
time = next_vehicle.next
new_scores = []
for ride in ride_queue:
ride.calculate_score(next_vehicle.next, next_vehicle.row, next_vehicle.col)
heap.heappush(new_scores, ride)
ride_queue = new_scores
next_ride = heap.heappop(new_scores)
next_vehicle = next_vehicle.assign_ride(next_ride)
heap.heappush(vehicles, next_vehicle)
def main():
with open(filename + '.in') as input_file:
input_data = input_file.read()
input_lines = input_data.splitlines()
grid_rows, grid_cols, vehicle_no, rides_no, bonus, sim_steps = [int(i) for i in input_lines[0].split(' ')]
ride_data = input_lines[1:]
ride_queue = []
for i in range(rides_no):
ride = Ride(*[int(i) for i in ride_data[i].split(' ')], bonus, i)
heap.heappush(ride_queue, ride)
vehicle = []
for i in range(vehicle_no):
vehicle.append(Vehicle())
# print(vehicle)
# print(ride_queue)
init_tasks(vehicle, ride_queue)
simulate(vehicle, ride_queue, sim_steps)
# start_process(ride, vehicle, grid_rows, grid_cols, bonus, sim_steps)
write_file(vehicle)
def write_file(vehicle):
with open(filename + '.out', 'w') as f:
for car in vehicle:
# rides = ' '.joincar.rides_order
f.write(str(car.finished_rides) + ' ' + ' '.join([str(i) for i in car.rides_order]) + '\n')
main()
``` |
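Both `Ride` and `Vehicle` rely on the same trick: `heapq` only provides a min-heap, so `Ride.__lt__` inverts the comparison (`self.score > other.score`) to make `heappop` return the highest-scoring ride first. A tiny self-contained illustration of that pattern:

```python
import heapq

class Item:
    def __init__(self, name, score):
        self.name = name
        self.score = score

    def __lt__(self, other):
        # inverted comparison: the "smallest" item is the one with the highest score
        return self.score > other.score

heap = []
for name, score in [('a', 3), ('b', 10), ('c', 7)]:
    heapq.heappush(heap, Item(name, score))

print(heapq.heappop(heap).name)  # 'b' -- highest score comes out first
```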
{
"source": "5alt/VulHint",
"score": 2
} |
#### File: 5alt/VulHint/VulHint.py
```python
import sublime
import sublime_plugin
import re
g_regions = []
g_region_lines = []
g_jump_index = 0
g_line_regions = {}
class VulHint(sublime_plugin.EventListener):
lang = None
data = {}
def on_load_async(self, view):
if not sublime.load_settings("plugin.sublime-settings").get("enable", 1):
return
self.init(view)
self.mark_vul(view)
get_lines(view)
def on_post_save_async(self, view):
if not sublime.load_settings("plugin.sublime-settings").get("enable", 1):
return
global g_regions
self.init(view)
self.mark_vul(view)
get_lines(view)
def on_hover(self, view, point, hover_zone):
if not sublime.load_settings("plugin.sublime-settings").get("enable", 1):
return
global g_regions
global g_region_lines
global g_jump_index
global g_line_regions
if not self.lang or not self.data:
return
#self.init(view)
        # look up the word (or whole line) under the cursor and show the matching hint popup
# hovered_line_text = view.substr(view.word(point)).strip()
#hovered_line_text = view.substr(view.line(point)).strip()
if (hover_zone == sublime.HOVER_TEXT):
word = view.substr(view.word(point)).strip()
for key in g_regions:
val = self.data[key]
if word in val["keyword"]:
hovered_text = '<p>%s</p>'%(val["discription"])
view.show_popup(hovered_text,
flags=sublime.HIDE_ON_MOUSE_MOVE_AWAY,
location=point)
g_jump_index = g_region_lines.index(view.rowcol(point)[0])
return
line = view.rowcol(point)[0]
if g_line_regions.get(line):
hovered_text = ''
for key in g_line_regions.get(line):
val = self.data[key]
hovered_text += '<p>%s</p><br>'%(val["discription"])
view.show_popup(hovered_text, flags=sublime.HIDE_ON_MOUSE_MOVE_AWAY, location=point)
g_jump_index = g_region_lines.index(view.rowcol(point)[0])
return
def init(self, view):
global g_regions
clear_mark(view)
g_regions = []
self.lang = self.guess_lang(view)
if self.lang in ['html', 'htm']:
self.lang = 'js'
self.data = sublime.load_settings("VulData.sublime-settings").get(self.lang, {})
def mark_vul(self, view):
global g_regions
#print([self.data[i]["discription"] for i in self.data])
if not self.lang or not self.data:
return
for key,val in self.data.items():
if not val['enable']: continue
vul = view.find_all(val['pattern'])
if not vul: continue
for i in vul:
i.a += val["abais"]
i.b += val["bbais"]
view.add_regions(key, vul, "string", "cross", sublime.DRAW_OUTLINED|sublime.DRAW_STIPPLED_UNDERLINE)
g_regions.append(key)
def guess_lang(self, view=None, path=None, sublime_scope=None):
if not view:
return None
filename = view.file_name()
return filename.split('.')[-1].lower()
def clear_mark(view):
global g_regions
if not g_regions: return
for i in g_regions:
view.erase_regions(i)
def get_lines(view):
global g_regions
global g_region_lines
global g_line_regions
g_line_regions = {}
g_region_lines = set()
for region in g_regions:
for i in view.get_regions(region):
line = view.rowcol(i.a)[0]
g_region_lines.add(line)
if g_line_regions.get(line, None):
g_line_regions[view.rowcol(i.a)[0]].add(region)
else:
g_line_regions[view.rowcol(i.a)[0]] = set([region])
g_region_lines = sorted(g_region_lines)
class GotoNextCommand(sublime_plugin.TextCommand):
def run(self, edit):
global g_jump_index, g_region_lines
# Convert from 1 based to a 0 based line number
line = g_region_lines[g_jump_index]
g_jump_index = (g_jump_index + 1)%len(g_region_lines)
# Negative line numbers count from the end of the buffer
if line < 0:
lines, _ = self.view.rowcol(self.view.size())
line = lines + line + 1
pt = self.view.text_point(line, 0)
self.view.sel().clear()
self.view.sel().add(sublime.Region(pt))
self.view.show(pt)
class EnableCommand(sublime_plugin.TextCommand):
def run(self, edit):
sublime.load_settings("plugin.sublime-settings").set("enable", 1)
sublime.save_settings("plugin.sublime-settings")
class DisableCommand(sublime_plugin.TextCommand):
def run(self, edit):
sublime.load_settings("plugin.sublime-settings").set("enable", 0)
sublime.save_settings("plugin.sublime-settings")
class ClearCommand(sublime_plugin.TextCommand):
def run(self, edit):
clear_mark(self.view)
``` |
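Outside Sublime Text, the core idea of the plugin — matching each rule's regex against the source and remembering where the hits are so a description can be shown later — can be sketched with plain `re`. The rule table below is a made-up stand-in for `VulData.sublime-settings` (the misspelt `discription` key mirrors the plugin's own settings key).

```python
import re

# hypothetical stand-in for the per-language rules in VulData.sublime-settings
RULES = {
    'eval_call': {'pattern': r'\beval\s*\(', 'discription': 'eval() on user input can lead to code execution'},
    'os_system': {'pattern': r'os\.system\s*\(', 'discription': 'shell command built from variables may allow injection'},
}

def mark_vul(source):
    hits = []
    for key, rule in RULES.items():
        for m in re.finditer(rule['pattern'], source):
            line = source.count('\n', 0, m.start()) + 1
            hits.append((line, key, rule['discription']))
    return sorted(hits)

code = "import os\nos.system('ls ' + user_input)\n"
for line, key, desc in mark_vul(code):
    print('line {}: [{}] {}'.format(line, key, desc))
```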
{
"source": "5alt/ZeroExploit",
"score": 2
} |
#### File: ZeroExploit/exploits/PUT.py
```python
import requests
import os
import config
author = 'md5_salt' # 可选
timeout = 5 # 建议
vtype = 'all' #value type: all/int/float/str/url/json/special
stype = 'java|asp|aspx'
# https://paper.seebug.org/403/
# https://mp.weixin.qq.com/s/uTiWDsPKEjTkN6z9QNLtSA
# https://pivotal.io/security/cve-2017-8046
def exploit(request, response, method, key, is_array=False):
if config.dbconn().fetch_rows('result', condition="exploit='%s' and result != 'continue' and `host`='%s'" % (os.path.basename(__file__)[:-3], request['host']), order="id asc", limit="1", fetchone=True): return
allow = requests.options(request['uri']).headers.get('Allow', '')
if allow.find('PUT') != -1 or allow.find('PATCH') != -1:
return {'result': 'vul', 'info': "Server support put/patch method", 'hash': None, 'level': "middle"}
else:
return {'result': 'safe', 'info': "Server does not support put/patch method", 'hash': None, 'level': "middle"}
if __name__ == '__main__':
pass
```
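The check above boils down to one OPTIONS request and a look at the `Allow` header; stripped of the framework plumbing it is roughly the following sketch (the URL is a placeholder).

```python
import requests

def supports_put_or_patch(url):
    # servers advertise permitted verbs in the Allow header of an OPTIONS response
    allow = requests.options(url, timeout=5).headers.get('Allow', '')
    return 'PUT' in allow or 'PATCH' in allow

if __name__ == '__main__':
    print(supports_put_or_patch('http://example.com/'))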
#### File: 5alt/ZeroExploit/zoneresolver.py
```python
import os
import copy
import re
from dnslib import RR, QTYPE, RCODE
from dnslib.server import DNSServer, DNSHandler, BaseResolver, DNSLogger
import config
class MysqlLogger():
def log_data(self, dnsobj):
pass
def log_error(self, handler, e):
pass
def log_pass(self, *args):
pass
def log_prefix(self, handler):
pass
def log_recv(self, handler, data):
pass
def log_reply(self, handler, reply):
pass
def log_request(self, handler, request):
domain = request.q.qname.__str__()
h = domain.split('.')[0]
if domain.endswith(config.DNS_DOMAIN + '.'):
if h and re.match(config.hash_pattern, h):
#update database
config.dbconn().update('result', {'result': "vul"}, {'hash':h})
config.dbconn().insert('hashlog', {'hash':h})
def log_send(self, handler, data):
pass
def log_truncated(self, handler, reply):
pass
class ZoneResolver(BaseResolver):
"""
Simple fixed zone file resolver.
"""
def __init__(self, zone, glob=False):
"""
Initialise resolver from zone file.
Stores RRs as a list of (label,type,rr) tuples
If 'glob' is True use glob match against zone file
"""
self.zone = [(rr.rname, QTYPE[rr.rtype], rr) for rr in RR.fromZone(zone)]
self.glob = glob
self.eq = 'matchGlob' if glob else '__eq__'
def resolve(self, request, handler):
"""
Respond to DNS request - parameters are request packet & handler.
Method is expected to return DNS response
"""
reply = request.reply()
qname = request.q.qname
qtype = QTYPE[request.q.qtype]
for name, rtype, rr in self.zone:
# Check if label & type match
if getattr(qname, self.eq)(name) and (
qtype == rtype or qtype == 'ANY' or rtype == 'CNAME'):
# If we have a glob match fix reply label
if self.glob:
a = copy.copy(rr)
a.rname = qname
reply.add_answer(a)
else:
reply.add_answer(rr)
# Check for A/AAAA records associated with reply and
# add in additional section
if rtype in ['CNAME', 'NS', 'MX', 'PTR']:
for a_name, a_rtype, a_rr in self.zone:
if a_name == rr.rdata.label and a_rtype in ['A', 'AAAA']:
reply.add_ar(a_rr)
if not reply.rr:
reply.header.rcode = RCODE.NXDOMAIN
return reply
def main():
zone = '''
*.{dnsdomain}. IN A {serverip}
{dnsdomain}. IN A {serverip}
'''.format(
dnsdomain=config.DNS_DOMAIN, serverip=config.SERVER_IP)
resolver = ZoneResolver(zone, True)
logger = MysqlLogger()
print("Starting Zone Resolver (%s:%d) [%s]" % ("*", 53, "UDP"))
udp_server = DNSServer(resolver,
port=53,
address='',
logger=logger)
udp_server.start()
if __name__ == '__main__':
main()
``` |
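The interesting part of `MysqlLogger.log_request` is how an out-of-band DNS callback is recognised: the payload hash is the first label of the queried name and must match `config.hash_pattern` under the configured domain. A self-contained sketch of just that check (the domain and pattern are illustrative stand-ins for the config values):

```python
import re

DNS_DOMAIN = 'oob.example.com'          # stand-in for config.DNS_DOMAIN
HASH_PATTERN = r'^[0-9a-f]{16}$'        # stand-in for config.hash_pattern

def extract_hash(qname):
    # qname arrives with a trailing dot, e.g. 'deadbeefdeadbeef.oob.example.com.'
    if not qname.endswith(DNS_DOMAIN + '.'):
        return None
    label = qname.split('.')[0]
    return label if re.match(HASH_PATTERN, label) else None

print(extract_hash('deadbeefdeadbeef.oob.example.com.'))  # 'deadbeefdeadbeef'
print(extract_hash('www.other.com.'))                     # None
```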
{
"source": "5alt/ZeroScan",
"score": 3
} |
#### File: 5alt/ZeroScan/helper.py
```python
import config
import sqlite3 as db
import os
import requests
def load_domain_from_file():
with open(config.INPUT_DOMAIN_FILE, 'r') as f:
data = f.read().strip()
return set(data.split('\n'))
def load_alldomains_from_file():
with open(config.INPUT_ALL_DOMAINS_FILE, 'r') as f:
data = f.read().strip()
return set(data.split('\n'))
def load_ips_from_file():
with open(os.path.join(config.OUTPUT_DIR,config.IPS), 'r') as f:
data = f.read().strip()
return set(data.split('\n'))
def parse_domains_brute(domain, extip=None):
'''
如果域名泛解析,则通过HTTP请求的Host来判断是否真的绑定在webserver上
在检查响应的时候,一般同一个错误页面的响应长度是一样的,除非响应中包含 host,所以需要在替换掉host之后再比较长度
'''
def get_error_page(extip, fhost):
error_page = ''
try:
error_page = requests.get('https://%s' % extip, headers={'host': fhost}, verify=True).text.replace(fhost, "")
except Exception as e:
pass
if not error_page:
try:
fhost = 'salt66666666.'+domain
error_page = requests.get('http://%s' % extip, headers={'host': fhost}).text.replace(fhost, "")
except Exception as e:
pass
return len(error_page)
with open(os.path.join(config.OUTPUT_DIR, '%s.txt'%domain), 'r') as f:
data = f.read().strip()
ret = {}
if extip:
fhost = 'salt66666666.'+domain
error_page = get_error_page(extip, fhost)
for line in data.split('\n'):
if not line.strip():
continue
line = line.replace(' ', '').replace('\t', '')
parts = line.split(domain)
if extip and extip in line:
if not error_page:
continue
else:
page = get_error_page(extip, parts[0]+domain)
if page == error_page:
continue
ret[parts[0]+domain] = parts[1]
return ret
def get_domains_conn():
sqlitepath = os.path.join(config.OUTPUT_DIR, "domains.db")
conn = db.connect(sqlitepath)
conn.text_factory = str
return conn
def get_ports_conn():
sqlitepath = os.path.join(config.OUTPUT_DIR, "ports.db")
conn = db.connect(sqlitepath)
conn.text_factory = str
return conn
def insert_port(ip, port, service=None):
conn = get_ports_conn()
cursor = conn.cursor()
sql = "INSERT INTO open(ip, port, service) VALUES(?, ?, ?)"
try:
status = cursor.execute(sql, (ip, port, service))
conn.commit()
except Exception as e:
print e
conn.close()
def check_port_scanned(ip, port):
conn = get_ports_conn()
cursor = conn.cursor()
sql = "SELECT * FROM open WHERE ip=? and port=?"
cursor.execute(sql, (ip, port))
rows = cursor.fetchall()
if rows:
return True
else:
return False
def install_ports():
sqlitepath = os.path.join(config.OUTPUT_DIR, "ports.db")
install = ''
if not os.path.exists(sqlitepath):
install = '''
CREATE TABLE open(
`ip` VARCHAR(64) NOT NULL,
`port` INTEGER,
`service` varchar(64),
`comment` TEXT,
PRIMARY KEY(`ip`, `port`)
);
'''
if install:
conn = conn = get_ports_conn()
cursor = conn.cursor()
cursor.execute(install)
conn.commit()
conn.close()
def install_domains():
sqlitepath = os.path.join(config.OUTPUT_DIR, "domains.db")
install = ''
if not os.path.exists(sqlitepath):
install = '''
CREATE TABLE `domains`(
`domain` varchar(255) NOT NULL,
`ip` TEXT NOT NULL,
`cname` varchar(255),
`cdn` INTEGER,
`internal` INTEGER,
PRIMARY KEY(`domain`, `ip`)
);
'''
if install:
conn = get_domains_conn()
cursor = conn.cursor()
cursor.execute(install)
conn.commit()
conn.close()
if __name__ == '__main__':
conn = get_domains_conn()
cur = conn.cursor()
cur.execute("SELECT * FROM domains")
rows = cur.fetchall()
for row in rows:
print row
```
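`parse_domains_brute` filters wildcard-DNS false positives by requesting the server with a Host that clearly should not exist, remembering the length of that error page (with the echoed host stripped), and discarding candidates whose pages look identical. A reduced sketch of that comparison, with placeholder IP and domain:

```python
import requests

def looks_like_wildcard_page(ip, domain, candidate, timeout=5):
    bogus = 'salt66666666.' + domain   # a host that should not exist
    try:
        baseline = requests.get('http://%s' % ip, headers={'host': bogus}, timeout=timeout).text.replace(bogus, '')
        page = requests.get('http://%s' % ip, headers={'host': candidate}, timeout=timeout).text.replace(candidate, '')
    except requests.RequestException:
        return False
    # identical lengths (after stripping the echoed host) suggest the same catch-all error page
    return len(page) == len(baseline)

# usage sketch
# if not looks_like_wildcard_page('192.0.2.1', 'example.com', 'dev.example.com'):
#     print('dev.example.com appears to be genuinely configured')
```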
#### File: ZeroScan/passive/crtsh.py
```python
import requests
import json
def certsh_api(domain):
data_set = set()
url = "https://crt.sh/?q=%25.{0}&output=json".format(domain)
resp = requests.get(url)
if resp.status_code != 200:
return []
#fixed_raw = '[%s]' % str(resp.text).replace('}{', '},{')
for cert in json.loads(resp.text):
data_set.update([cert.get('name_value')])
return data_set
def passive_search(domain):
return certsh_api(domain)
if __name__ == '__main__':
print passive_search('5alt.me')
```
#### File: ZeroScan/passive/fofa.py
```python
import os
import requests
import base64
import re
# from urllib.parse import urlparse
from urlparse import urlparse
def fofa_search(domain):
if not os.environ.get('fofa_username') or not os.environ.get('fofa_password'):
return []
data_set = set()
    # GET the login page to grab token, session and lt; these values are generated per page load and must be sent with the login request
loginurl='https://i.nosec.org/login'
getlogin=requests.get(loginurl)
token0=re.findall('<input type="hidden" name="authenticity_token" value="(.*)" />',getlogin.text)
session0=re.findall('(_nosec_cas_session=.*); path=/',getlogin.headers['Set-Cookie'])
It0=re.findall('<input type="hidden" name="lt" id="lt" value="(.*)" />',getlogin.text)
token=token0[0]
session1=session0[0]
It=It0[0]
    # build the POST data and headers, filling in the values we just scraped
datas={
'utf8':'%E2%9C%93',
'authenticity_token': token,
'lt': It,
'username': os.environ.get('fofa_username'),
        'password': os.environ.get('fofa_password'),
'rememberMe':'1',
'button': ''
}
headers={
'Host': 'i.nosec.org',
'Connection': 'close',
'Content-Length': '302',
'Cache-Control': 'max-age=0',
'Origin': 'https://i.nosec.org',
'Upgrade-Insecure-Requests': '1',
'Content-Type': 'application/x-www-form-urlencoded',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Referer': 'https://i.nosec.org/login',
'Accept-Encoding': 'gzip, deflate, br',
'Cookie': '__lnkrntdmcvrd=-1; '+session1,
'Accept-Language': 'zh-CN,zh;q=0.9'
}
    # log in with a session so the login state is kept for later requests
session=requests.Session()
postlogin=session.post(loginurl,headers=headers,data=datas)
sess_headers={
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
'X-Requested-With': 'XMLHttpRequest',
'Accept': 'text/javascript'
}
    # fofa uses nosec as a third-party login provider: after logging in to nosec you are still logged out on fofa,
    # because only the nosec cookie exists; visiting this sign-in URL is what creates the fofa cookie
signlogin=session.get('https://fofa.so/users/sign_in',headers=sess_headers)
search='domain="%s"' % domain
#searchbs64=(str(base64.b64encode(search.encode('utf-8')),'utf-8'))
searchbs64=str(base64.b64encode(search.encode('utf-8')))
pageurl=session.get('https://fofa.so/result?full=true&qbase64='+searchbs64)
pagenum=re.findall('>(\d*)</a> <a class="next_page" rel="next"',pageurl.text)
pagenum=int(pagenum[0]) if pagenum else 1
session.headers.update(sess_headers)
for i in range(1, pagenum+1):
finurl=session.get('https://fofa.so/result?full=true&page='+str(i)+'&qbase64='+searchbs64)
finurl=re.findall(r'<a target=\\\"_blank\\\" href=\\\"(.*?)\\\">.*?<i class=\\\"fa fa-link\\\"><\\/i>',finurl.text)
for j in finurl:
data_set.add(urlparse(j).hostname)
return data_set
def passive_search(domain):
return fofa_search(domain)
if __name__ == '__main__':
print(fofa_search('5alt.me'))
```
#### File: 5alt/ZeroScan/recv.py
```python
from scapy.all import *
import config
import helper
from tools import check_port_service
# http://biot.com/capstats/bpf.html
# http://www.freebuf.com/sectool/94507.html
helper.install_ports()
whitelist = helper.load_ips_from_file()
f="tcp[tcpflags] & tcp-syn != 0 and tcp[tcpflags] & tcp-ack != 0"
def callback(pkt):
#pkt.show()
if pkt[IP].src in whitelist:
print "%s:%s"%(pkt[IP].src, pkt[TCP].sport)
if helper.check_port_scanned(pkt[IP].src, pkt[TCP].sport):
return
service = check_port_service(pkt[IP].src, pkt[TCP].sport)
helper.insert_port(pkt[IP].src, pkt[TCP].sport, service)
sniff(prn=callback, filter=f, store=0)
```
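`recv.py` is only the listening half: the BPF filter keeps packets whose TCP flags include both SYN and ACK, i.e. the replies to a SYN scan sent elsewhere. A minimal, hypothetical sender for one ip:port pair using the same scapy primitives could look like this (requires root; the target is a placeholder).

```python
from scapy.all import IP, TCP, send

def probe(ip, port):
    # a bare SYN; any SYN/ACK that comes back is picked up by the sniffer in recv.py
    send(IP(dst=ip) / TCP(dport=port, flags='S'), verbose=0)

# probe('192.0.2.10', 443)
```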
#### File: ZeroScan/takeover/github_pages.py
```python
import requests
APEX_VALUES = ['172.16.31.10', '192.168.3.11', '192.168.3.11', '192.168.127.12', '192.168.3.11', '172.16.17.32']
CNAME_VALUE = [".github.io"]
RESPONSE_FINGERPRINT = "There isn't a GitHub Pages site here."
def detector(domain, ip, cname):
if APEX_VALUES:
if ip in APEX_VALUES:
return True
if filter(lambda x: x in cname, CNAME_VALUE):
return True
try:
if RESPONSE_FINGERPRINT in requests.get('http://%s' % domain).text:
return True
except Exception as e:
pass
return False
```
#### File: ZeroScan/takeover/instapage.py
```python
import requests
APEX_VALUES = None
CNAME_VALUE = ["pageserve.co", "secure.pageserve.co"]
RESPONSE_FINGERPRINT = "You've Discovered A Missing Link. Our Apologies!"
def detector(domain, ip, cname):
if APEX_VALUES:
if ip in APEX_VALUES:
return True
if filter(lambda x: x in cname, CNAME_VALUE):
return True
try:
if RESPONSE_FINGERPRINT in requests.get('http://%s' % domain).text:
return True
except Exception as e:
pass
return False
```
#### File: ZeroScan/takeover/tumblr.py
```python
import requests
APEX_VALUES = ['172.16.31.10']
CNAME_VALUE = ["domains.tumblr.com"]
RESPONSE_FINGERPRINT = "Whatever you were looking for doesn't currently exist at this address."
def detector(domain, ip, cname):
if APEX_VALUES:
if ip in APEX_VALUES:
return True
if filter(lambda x: x in cname, CNAME_VALUE):
return True
try:
if RESPONSE_FINGERPRINT in requests.get('http://%s' % domain).text:
return True
except Exception as e:
pass
return False
``` |
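The three takeover modules above (github_pages, instapage, tumblr) expose the same `detector(domain, ip, cname)` signature, which makes it natural to loop over them. A hypothetical driver is sketched below; the import path and the sample inputs are assumptions, not taken from the repository.

```python
from takeover import github_pages, instapage, tumblr

DETECTORS = [github_pages, instapage, tumblr]

def check_takeover(domain, ip, cname):
    # return the names of every service whose fingerprint matches
    return [mod.__name__ for mod in DETECTORS if mod.detector(domain, ip, cname)]

# usage sketch
# print(check_takeover('blog.example.com', '203.0.113.5', 'domains.tumblr.com.'))
```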
{
"source": "5amessi/camel_tools",
"score": 2
} |
#### File: camel_tools/cli/camel_morphology.py
```python
from __future__ import absolute_import
import collections
import sys
import re
from docopt import docopt
import six
import camel_tools as camelt
from camel_tools.utils.charsets import AR_DIAC_CHARSET
from camel_tools.utils.stringutils import force_unicode, force_encoding
from camel_tools.morphology.database import MorphologyDB
from camel_tools.morphology.analyzer import Analyzer
from camel_tools.morphology.generator import Generator
from camel_tools.morphology.reinflector import Reinflector
from camel_tools.morphology.errors import DatabaseError, AnalyzerError
from camel_tools.morphology.errors import GeneratorError, MorphologyError
__version__ = camelt.__version__
_ANALYSIS_BACKOFFS = frozenset(('NONE', 'NOAN_ALL', 'NOAN_PROP', 'ADD_ALL',
'ADD_PROP'))
_GENARATION_BACKOFFS = frozenset(('NONE', 'REINFLECT'))
_BUILTIN_DBS = frozenset([db.name for db in MorphologyDB.list_builtin_dbs()])
_DEFAULT_DB = 'calima-msa-r13'
_DIAC_RE = re.compile(r'[' + re.escape(u''.join(AR_DIAC_CHARSET)) + r']')
def _tokenize(s):
return s.split()
def _dediac(word):
return _DIAC_RE.sub('', word)
def _to_int(s):
s = str(s)
try:
if not s.isdigit():
return None
return int(s)
except Exception:
return None
def _open_files(finpath, foutpath):
if finpath is None:
fin = sys.stdin
else:
try:
fin = open(finpath, 'r', encoding='utf-8')
except IOError:
sys.stderr.write('Error: Couldn\'t open input file {}.'
'\n'.format(repr(finpath)))
sys.exit(1)
if foutpath is None:
fout = sys.stdout
else:
try:
fout = open(foutpath, 'w', encoding='utf-8')
except IOError:
sys.stderr.write('Error: Couldn\'t open output file {}.'
'\n'.format(repr(foutpath)))
if finpath is not None:
fin.close()
sys.exit(1)
return fin, fout
def _list_dbs():
for db in sorted(MorphologyDB.list_builtin_dbs()):
sys.stdout.write('{}\t{}\n'.format(db.name, db.version))
def _serialize_analyses(fout, word, analyses, order, generation=False):
buff = collections.deque()
buff.append(u'#{}: {}'.format(u'LEMMA' if generation else u'WORD',
force_unicode(word)))
if len(analyses) == 0:
buff.append(u'NO_ANALYSIS')
else:
sub_buff = collections.OrderedDict()
for a in analyses:
output = u' '.join([u'{}:{}'.format(force_unicode(f),
force_unicode(str(a[f])))
for f in order if f in a])
if output not in sub_buff:
sub_buff[output] = True
buff.extend(sub_buff.keys())
return u'\n'.join(buff)
def _parse_generator_line(line):
lemma = None
feats = {}
tokens = line.strip().split()
if len(tokens) < 1:
return None
lemma = tokens[0]
for token in tokens[1:]:
subtokens = token.split(':')
if len(subtokens) < 2:
return None
else:
feat = subtokens[0]
val = ':'.join(subtokens[1:])
feats[feat] = val
return (lemma, feats)
def _parse_reinflector_line(line):
word = None
feats = {}
tokens = line.strip().split()
if len(tokens) < 1:
return None
word = tokens[0]
for token in tokens[1:]:
subtokens = token.split(':')
if len(subtokens) < 2:
return None
else:
feat = subtokens[0]
val = ':'.join(subtokens[1:])
feats[feat] = val
return (word, feats)
def _analyze(db, fin, fout, backoff, cache):
if cache:
analyzer = Analyzer(db, backoff, cache_size=1024)
else:
analyzer = Analyzer(db, backoff)
line = force_unicode(fin.readline())
while line:
if len(line) == 0:
line = force_unicode(fin.readline())
continue
line = line.strip()
tokens = _tokenize(line)
for token in tokens:
analyses = analyzer.analyze(token)
serialized = _serialize_analyses(fout, token, analyses, db.order)
if six.PY3:
fout.write(serialized)
else:
fout.write(force_encoding(serialized))
fout.write('\n\n')
line = force_unicode(fin.readline())
def _generate(db, fin, fout, backoff):
generator = Generator(db)
reinflector = Reinflector(db) if backoff == 'REINFLECT' else None
line = force_unicode(fin.readline())
line_num = 1
while line:
line = line.strip()
if len(line) == 0:
line = force_unicode(fin.readline())
line_num += 1
continue
parsed = _parse_generator_line(line)
if parsed is None:
if fin is sys.stdin:
sys.stderr.write('Error: Invalid input line.\n')
else:
sys.stderr.write(
'Error: Invalid input line ({}).\n'.format(line_num))
else:
lemma = parsed[0]
feats = parsed[1]
# Make sure lemma and pos are specified first
if lemma is None:
if fin is sys.stdin:
sys.stderr.write('Error: Missing lex/lemma feature.\n')
else:
sys.stderr.write(
'Error: Missing lex/lemma feature. [{}].\n'.format(
line_num))
elif 'pos' not in feats:
if fin is sys.stdin:
sys.stderr.write('Error: Missing pos feature.\n')
else:
sys.stderr.write(
'Error: Missing pos feature. [{}]\n'.format(
line_num))
else:
try:
analyses = generator.generate(lemma, feats)
if len(analyses) == 0 and backoff == 'REINFLECT':
word = _dediac(lemma)
analyses = reinflector.reinflect(word, feats)
serialized = _serialize_analyses(fout, lemma, analyses,
db.order, True)
if six.PY3:
fout.write(serialized)
else:
fout.write(force_encoding(serialized))
fout.write('\n\n')
except GeneratorError as error:
if fin is sys.stdin:
sys.stderr.write('Error: {}.\n'.format(error.msg))
else:
sys.stderr.write('Error: {}. [{}]\n'.format(error.msg,
line_num))
        line = force_unicode(fin.readline())
line_num += 1
def _reinflect(db, fin, fout):
reinflector = Reinflector(db)
line = force_unicode(fin.readline())
line_num = 1
while line:
line = line.strip()
if len(line) == 0:
line = force_unicode(fin.readline())
line_num += 1
continue
parsed = _parse_reinflector_line(line)
if parsed is None:
if fin is sys.stdin:
sys.stderr.write('Error: Invalid input line.\n')
else:
sys.stderr.write(
'Error: Invalid input line. [{}]\n'.format(line_num))
else:
word = parsed[0]
feats = parsed[1]
try:
analyses = reinflector.reinflect(word, feats)
serialized = _serialize_analyses(fout, word, analyses,
db.order)
if six.PY3:
fout.write(serialized)
else:
fout.write(force_encoding(serialized))
fout.write('\n\n')
except MorphologyError as error:
# This could be thrown by the analyzer, generator, or
# reinflector.
if fin is sys.stdin:
sys.stderr.write('Error: {}.\n'.format(error.msg))
else:
sys.stderr.write('Error: {}. [{}]\n'.format(error.msg,
line_num))
line = force_unicode(fin.readline())
line_num += 1
def main(): # pragma: no cover
try:
version = ('CAMeL Tools v{}'.format(__version__))
arguments = docopt(__doc__, version=version)
if arguments.get('--list', False):
_list_dbs()
sys.exit(1)
analyze = arguments.get('analyze', False)
generate = arguments.get('generate', False)
reinflect = arguments.get('reinflect', False)
cache = arguments.get('--cache', False)
backoff = arguments.get('--backoff', 'NONE')
# Make sure we have a valid backoff mode
if backoff is None:
backoff = 'NONE'
if analyze and backoff not in _ANALYSIS_BACKOFFS:
sys.stderr.write('Error: invalid backoff mode.\n')
sys.exit(1)
if generate and backoff not in _GENARATION_BACKOFFS:
sys.stderr.write('Error: invalid backoff mode.\n')
sys.exit(1)
# Open files (or just use stdin and stdout)
fin, fout = _open_files(arguments['FILE'], arguments['--output'])
# Determine required DB flags
if analyze:
dbflags = 'a'
elif generate and backoff == 'NONE':
dbflags = 'g'
else:
dbflags = 'r'
# Load DB
try:
dbname = arguments.get('--db', _DEFAULT_DB)
if dbname in _BUILTIN_DBS:
db = MorphologyDB.builtin_db(dbname, dbflags)
else:
db = MorphologyDB(dbname, dbflags)
except DatabaseError:
sys.stderr.write('Error: Couldn\'t parse database.\n')
sys.exit(1)
except IOError:
sys.stderr.write('Error: Database file could not be read.\n')
sys.exit(1)
# Continue execution in requested mode
if analyze:
try:
_analyze(db, fin, fout, backoff, cache)
except AnalyzerError as error:
sys.stderr.write('Error: {}\n'.format(error.msg))
sys.exit(1)
except IOError:
sys.stderr.write('Error: An IO error occurred.\n')
sys.exit(1)
elif generate:
try:
_generate(db, fin, fout, backoff)
except IOError:
sys.stderr.write('Error: An IO error occurred.\n')
sys.exit(1)
elif reinflect:
try:
_reinflect(db, fin, fout)
except IOError:
sys.stderr.write('Error: An IO error occurred.\n')
sys.exit(1)
sys.exit(0)
except KeyboardInterrupt:
sys.stderr.write('Exiting...\n')
sys.exit(1)
except Exception:
sys.stderr.write('Error: An unknown error occurred.\n')
sys.exit(1)
if __name__ == '__main__':
main()
```
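For the generate and reinflect modes, each input line is a lemma (or word) followed by whitespace-separated `feat:val` pairs, which `_parse_generator_line` turns into a `(lemma, dict)` tuple. A quick standalone illustration of that parsing; the lemma and feature values below are made up.

```python
def parse_line(line):
    tokens = line.strip().split()
    if not tokens:
        return None
    lemma, feats = tokens[0], {}
    for token in tokens[1:]:
        parts = token.split(':')
        if len(parts) < 2:
            return None
        feats[parts[0]] = ':'.join(parts[1:])
    return lemma, feats

print(parse_line('kitAb pos:noun num:p'))
# ('kitAb', {'pos': 'noun', 'num': 'p'})
```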
#### File: camel_tools/utils/transliterate.py
```python
from __future__ import absolute_import
from collections import deque
import re
import six
from camel_tools.utils.charmap import CharMapper
_WHITESPACE_RE = re.compile(r'\s')
class Transliterator(object):
"""A class for transliterating text using a
:obj:`~camel_tools.utils.charmap.CharMapper`. This class adds the extra
utility of marking individual tokens to not be transliterated. It assumes
that tokens are whitespace seperated.
Args:
mapper (:obj:`~camel_tools.utils.charmap.CharMapper`): The
:obj:`~camel_tools.utils.charmap.CharMapper` instance to be used
for transliteration.
marker (:obj:`str`, optional): A string that is prefixed to all
tokens that shouldn't be transliterated. Should not contain any
whitespace characters. Defaults to '@@IGNORE@@'.
Raises:
:obj:`TypeError`: If mapper is not a
:obj:`~camel_tools.utils.charmap.CharMapper` instance or marker is
not a string.
:obj:`ValueError`: If marker contains whitespace or is an empty string.
"""
def __init__(self, mapper, marker='@@IGNORE@@'):
self._mapper = mapper
if not isinstance(mapper, CharMapper):
raise TypeError('Mapper is not a CharMapper instance.')
if not isinstance(marker, six.string_types):
raise TypeError('Marker is not a string.')
if not marker:
raise ValueError('Marker is empty.')
elif _WHITESPACE_RE.search(marker) is None:
self._marker = marker
else:
raise ValueError('Marker contains whitespace.')
self._markerre = re.compile(
r'({}\S+)'.format(re.escape(marker)),
re.UNICODE | re.MULTILINE
)
def transliterate(self, s, strip_markers=False, ignore_markers=False):
"""Transliterate a given string.
Args:
s (:obj:`str`): The string to transliterate.
strip_markers (:obj:`bool`, optional): Output is stripped of
markers if `True`, otherwise markers are kept in the output.
Defaults to `False`.
ignore_markers (:obj:`bool`, optional): If set to `True`, all text,
including marked tokens are transliterated as well excluding
the markers. If you would like to transliterate the markers as
well, use :obj:`~camel_tools.utils.charmap.CharMapper`
directly instead. Defaults to `False`.
Returns:
:obj:`str`: The transliteration of **s** with the exception of
marked words.
"""
buff = deque()
splits = self._markerre.split(s)
for spl in splits:
if spl.startswith(self._marker):
if ignore_markers:
if not strip_markers:
buff.append(self._marker)
buff.append(
self._mapper.map_string(spl[len(self._marker):])
)
else:
if strip_markers:
buff.append(spl[len(self._marker):])
else:
buff.append(spl)
else:
buff.append(self._mapper.map_string(spl))
return u''.join(buff)
``` |
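A small usage sketch of `Transliterator`: the `CharMapper` here is built from a toy character-to-string dict (an assumption about the constructor made only to keep the example self-contained; in practice one of camel_tools' builtin mappers would be used), and unmapped characters are assumed to pass through unchanged when no default is given. The marked token is left untouched, as described in the docstring above.

```python
from camel_tools.utils.charmap import CharMapper
from camel_tools.utils.transliterate import Transliterator

# toy mapping, only for illustration: lowercase a/b become A/B
mapper = CharMapper({'a': 'A', 'b': 'B'})
translit = Transliterator(mapper, marker='@@IGNORE@@')

print(translit.transliterate('abc @@IGNORE@@abc'))
# -> 'ABc @@IGNORE@@abc'  (the marked token is left as-is)
print(translit.transliterate('abc @@IGNORE@@abc', strip_markers=True))
# -> 'ABc abc'
```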
{
"source": "5amron/pre-ml",
"score": 2
} |
#### File: preml/feature_select/baco.py
```python
import numpy as np
import random
from sklearn.neighbors import KNeighborsClassifier
import timeit
from . import heuristics
from ..io import portie
import sys
def baco(x_data, y_data, t_percent=40, heu_meth="method_1", ml_alg="knn1", iter_num=10):
(my_bool, msg_err) = check_baco_args(t_percent, heu_meth, ml_alg, iter_num)
if(not my_bool):
print("problem with arguments for abaco()!!!")
print(msg_err)
exit() #############
check = portie.CheckDataset(x_data, y_data)
(state, msg) = check.get_state()
if(not state): # data had problems
print("+++ " + msg + " +++")
exit() #############
train_percentage = 100 - int(t_percent)
time_temp = 0
start = timeit.default_timer()
(best_fitnesses_each_iter, average_fitnesses_each_iter, num_of_features_selected_by_best_ant_each_iter, best_fit_so_far, best_ant_road) = run_feature_selection(generations = iter_num, alpha = 1, beta = 0.5, T0 = 0.1, Min_T = 0.1, Max_T = 6, q = 0.95, Q = 0.3, heu_meth = heu_meth, ant_num = 50, feature_num = len(x_data[1]), dataset=x_data, targets=y_data, train_percentage=train_percentage)
end = timeit.default_timer()
time_temp = time_temp + (end - start)
# making new dataset :
new_dataset = make_new_dataset(best_ant_road, x_data)
acc_before_run = get_single_fit(x_data, y_data, train_percentage)
total_feature_num = len(x_data[1])
sample_num = len(x_data[:,1])
best_selected_features_num = np.sum(best_ant_road)
return (new_dataset, best_ant_road, acc_before_run, best_fit_so_far, total_feature_num, best_selected_features_num, best_fitnesses_each_iter, average_fitnesses_each_iter ,num_of_features_selected_by_best_ant_each_iter, time_temp, sample_num)
def check_baco_args(t_percent, heu_meth, ml_alg, iter_num):
msg_err = ""
try:
int(t_percent)
except Exception as e:
msg_err = "t_percent should be integer!"
return (False, msg_err)
try:
int(iter_num)
except Exception as e:
msg_err = "iter_num should be integer!"
return (False, msg_err)
if(iter_num > 100):
msg_err = "iter_num should be less than 100!"
return (False, msg_err)
if(iter_num < 5):
msg_err = "iter_num should be more than 5!"
return (False, msg_err)
# if(type(heu_meth) != "str" or "str" != type(ml_alg)):
if(heu_meth != "method_1" and heu_meth != "method_2" and heu_meth != "method_3" and heu_meth != "method_4" and heu_meth != "method_5"):
msg_err = "heu_meth isn't write, please check the docs!"
return (False, msg_err)
# should check the ml_alg tooooooooo
return (True, msg_err)
def run_feature_selection(generations, alpha, beta , T0, Min_T, Max_T, q, Q, heu_meth, ant_num, feature_num, dataset, targets, train_percentage):
best_fitnesses_each_iter = []
average_fitnesses_each_iter = []
num_of_features_selected_by_best_ant_each_iter = []
road_map = np.random.randint(2, size=ant_num*feature_num).reshape((ant_num, feature_num))
road_maps = np.zeros(ant_num*feature_num*generations, dtype="int64").reshape(generations, ant_num, feature_num)
best_roads_list = []
best_fit_so_far = 0
best_road_so_far = np.zeros(feature_num, dtype="int64")
np.set_printoptions(suppress=True, threshold=1000)
roads_T = np.zeros(feature_num*feature_num*4, dtype="float64").reshape(4, feature_num, feature_num) + T0
for i in range(0, generations):
# print("+++++++++ run : ("+ str(heu_meth) +") Iteration : (" + str(i+1) + ")+++++++++")
if(heu_meth == "method_1"):
roads_E = heuristics.hueristic_value_fscore(feature_num, dataset, targets)
elif(heu_meth == "method_2"):
roads_E = heuristics.heuristic_value_min_redundency(feature_num, dataset)
elif(heu_meth == "method_3"):
roads_E = heuristics.heuristic_value_min_redundency_max_relevence(feature_num, dataset)
elif(heu_meth == "method_4"):
roads_E = heuristics.heuristic_value_method_4(feature_num, dataset)
elif(heu_meth == "method_5"):
roads_E = heuristics.heuristic_value_mutual_info(feature_num, dataset)
(road_map, pointer) = baco_road_selection(roads_T, roads_E, alpha, beta, ant_num, feature_num)
(iter_best_fit, best_road_so_far, best_fit_so_far, iter_best_road, fitnesses, iter_average_fit, ants_num_of_features_selected) = do_calculations(road_map, dataset, targets, best_fit_so_far, best_road_so_far, train_percentage)
roads_T = trial_update(fitnesses, roads_T, Min_T, Max_T, Q, q, iter_best_road, feature_num)
road_maps[i] = road_map
best_fitnesses_each_iter.append(iter_best_fit)
average_fitnesses_each_iter.append(iter_average_fit)
num_of_features_selected_by_best_ant_each_iter.append(sum(best_road_so_far))
best_roads_list.append(best_road_so_far)
ccc = 0
maxx = max(best_fitnesses_each_iter)
for each in best_fitnesses_each_iter:
if(each == maxx):
my_indx = ccc
ccc = ccc + 1
return (best_fitnesses_each_iter, average_fitnesses_each_iter, num_of_features_selected_by_best_ant_each_iter, best_fit_so_far, best_roads_list[my_indx])
def get_accuracy_for_this_solution(train_dataset, train_targets, test_dataset, test_targets):
K = 1
knn = KNeighborsClassifier(n_neighbors=K)
knn.fit(train_dataset, train_targets) # X, Y
# evaluating our trained model
predicted_targets = knn.predict(test_dataset)
l = len(test_targets)
num_of_correct = 0
for i in range(l):
if(test_targets[i] == predicted_targets[i]):
num_of_correct = num_of_correct + 1
return num_of_correct/l
def separate_datasets(dataset, targets, train_percentage):
# in case you wanted the data to be random every single time you wanted get fitnesses
leng = len(dataset[:, 0])
s = int(leng*(train_percentage/100))
samples_list = random.sample(range(0, leng), s)
mask = np.zeros((leng), dtype=bool)
mask[samples_list] = True
train_dataset = dataset[mask, :]
test_dataset = dataset[~mask, :]
train_targets = targets[mask]
test_targets = targets[~mask]
return (train_dataset, test_dataset, train_targets, test_targets)
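# example with hypothetical shapes: for a 100-row dataset and train_percentage = 40, forty randomly
# chosen rows (and their targets) become the training split and the remaining sixty the test split.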
def get_fitnesses(road_map, dataset, targets, train_percentage):
total_feature_num = len(road_map[1])
total_sample_num = len(dataset[:,0])
num_of_features_selected = list()
fitnesses = list()
count = 0
for ant_solution in road_map:
count = count + 1
if np.sum(ant_solution) == 0:
print("all of row "+ str(count) +" was 0!!!")
fitnesses.append(0)
# print(np.sum(ant_solution)) ##### problemmmmmm
else:
new_dataset = np.zeros(total_sample_num, dtype="float64").reshape(total_sample_num, 1)
for i in range(0, len(ant_solution)):
if(ant_solution[i] == 1):
new_dataset = np.append(new_dataset, dataset[:, i].reshape(total_sample_num, 1), axis=1)
new_dataset = np.delete(new_dataset, 0, axis=1) # removing first column
num_of_features_selected.append(new_dataset.shape[1])
(train_dataset, test_dataset, train_targets, test_targets) = separate_datasets(new_dataset, targets, train_percentage)
fitnesses.append(get_accuracy_for_this_solution(train_dataset, train_targets, test_dataset, test_targets))
return num_of_features_selected, fitnesses
def make_new_dataset(solution_road, dataset):
total_sample_num = len(dataset[:,0])
new_dataset = np.zeros(total_sample_num, dtype="float64").reshape(total_sample_num, 1)
if np.sum(solution_road) == 0:
print("allll of it was 0!!!!")
return new_dataset
else:
for i in range(0, len(solution_road)):
if(solution_road[i] == 1):
new_dataset = np.append(new_dataset, dataset[:, i].reshape(total_sample_num, 1), axis=1)
new_dataset = np.delete(new_dataset, 0, axis=1) # removing first column
return new_dataset
def get_single_fit(dataset, targets, train_percentage):
(train_dataset, test_dataset, train_targets, test_targets) = separate_datasets(dataset, targets, train_percentage)
return get_accuracy_for_this_solution(train_dataset, train_targets, test_dataset, test_targets)
def roulette_wheel(probs, feature_num): # picks one index at random, weighted by the given probabilities.
    index = len(probs) - 1  # fallback in case floating point round-off leaves r above the cumulative sum
    zero_or_one = 1
    r = np.random.random_sample()
    cum_prob = 0
    for x in range(len(probs)):
        cum_prob = cum_prob + probs[x]
        if(r < cum_prob):
            index = x
            break
    # probs is (feature_num + feature_num) long: the first half means "deselect", the second half "select",
    # so map the index back onto the original feature range :
    if(index >= feature_num):
        index = index - feature_num
        zero_or_one = 1
    else:
        zero_or_one = 0
    return (index, zero_or_one)
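# example with hypothetical numbers: for feature_num = 2 and probs = [0.1, 0.2, 0.3, 0.4], a draw of
# r = 0.55 falls in the third slot (cumulative sums 0.1, 0.3, 0.6, 1.0), i.e. index 2; since 2 >= feature_num
# the wheel returns (index=0, zero_or_one=1), meaning "set feature 0's bit to 1".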
def baco_road_selection(roads_T, roads_E, alpha, beta, ant_num, feature_num):
road_map = np.zeros(ant_num*feature_num, dtype="int64").reshape(ant_num, feature_num)
pointer = np.zeros(ant_num*feature_num, dtype="int64").reshape(ant_num, feature_num)
for k in range(0, ant_num):
indx = np.multiply(np.power(roads_T, alpha), np.power(roads_E, beta))
for j in range(0, feature_num):
# for the first feature :
if(j == 0):
cur_feature = np.random.randint(0, feature_num, 1)[0]
pointer[k,j] = cur_feature
# this is just for selection of 0 or 1 for the first feature (if it's more interesting the likelihood is higher)
temp = np.sum(roads_T[0, :, cur_feature] + roads_T[2, :, cur_feature]) / np.sum(roads_T[0, :, cur_feature] + roads_T[1, :, cur_feature] + roads_T[2, :, cur_feature] + roads_T[3, :, cur_feature])
rand = np.random.random_sample()
if (rand < temp):
road_map[k, cur_feature] = 0
else:
road_map[k, cur_feature] = 1
else:
if(road_map[k, pointer[k,j-1]] == 1):
nominator = np.hstack((indx[2, pointer[k,j-1], :], indx[3, pointer[k,j-1], :]))
                    denominator = sum(nominator)
                    probability = np.divide(nominator, denominator)  # normalise so the transition weights sum to 1
(selected_feature_indx, zero_or_one) = roulette_wheel(probability, feature_num)
pointer[k,j] = selected_feature_indx
if(zero_or_one == 0):
road_map[k, pointer[k,j]] = 0
else:
road_map[k, pointer[k,j]] = 1
else: # == 0
nominator = np.hstack((indx[0, pointer[k,j-1], :], indx[1, pointer[k,j-1], :]))
denominator = sum(nominator)
probability = np.divide(nominator, denominator)
(selected_feature_indx, zero_or_one) = roulette_wheel(probability, feature_num)
pointer[k,j] = selected_feature_indx
if(zero_or_one == 0):
road_map[k, pointer[k,j]] = 0
else:
road_map[k, pointer[k,j]] = 1
# update indx (so by doing this, the probability of selection in roulette wheel for this feature, is gonna be zero!)
indx[:, :, pointer[k, j]] = 0
return (road_map, pointer)
def do_calculations(road_map, dataset, targets, best_fit_so_far, best_road_so_far, train_percentage):
ants_num_of_features_selected, fitnesses = get_fitnesses(road_map, dataset, targets, train_percentage)
iter_average_fit = np.mean(fitnesses, axis=0)
iter_best_fit = max(fitnesses)
iter_best_ant = fitnesses.index(iter_best_fit)
iter_best_road = road_map[iter_best_ant, :]
if(iter_best_fit > best_fit_so_far):
best_fit_so_far = iter_best_fit
best_road_so_far = iter_best_road
return (iter_best_fit, best_road_so_far, best_fit_so_far, iter_best_road, fitnesses, iter_average_fit, ants_num_of_features_selected)
def trial_update(fitnesses, roads_T, Min_T, Max_T, Q, q, iter_best_road, feature_num):
    roads_T = roads_T * q  # pheromone evaporation: decay every trail by the factor q
    # fitnesses is a plain Python list, so "1 - fitnesses" would not broadcast; convert element-wise instead
    class_err = np.array([1-i for i in fitnesses])
min_err = min(class_err)
min_err_indx = np.where(class_err == min_err)[0][0]
roads_T_temp = np.zeros(feature_num*feature_num*4, dtype="float64").reshape(4, feature_num, feature_num)
# here we assign one to best road edges in roads_T_temp.
for i in range(0, len(iter_best_road)):
if(iter_best_road[i] == 0):
roads_T_temp[0, :, i] = 1
roads_T_temp[2, :, i] = 1
else:
roads_T_temp[1, :, i] = 1
roads_T_temp[3, :, i] = 1
if(class_err[min_err_indx] == 0):
roads_T_temp = (Q/(class_err[min_err_indx] + 0.001)) * roads_T_temp
else:
roads_T_temp = (Q/(class_err[min_err_indx])) * roads_T_temp
roads_T = roads_T + roads_T_temp
# now we make sure all of them are in interval :
for each in np.nditer(roads_T, op_flags=['readwrite']):
if(each > Max_T):
each[...] = Max_T
else:
if(each < Min_T):
each[...] = Min_T
# print(roads_T)
# roads_T = np.add(roads_T[roads_T < Min_T] * Min_T , roads_T * roads_T[roads_T > Min_T]) +++++++++++!
# roads_T = np.add(roads_T[roads_T > Max_T] * Max_T , roads_T * roads_T[roads_T < Max_T]) +++++++++++!
return roads_T
def get_Fr(feature_num, selected):
return (feature_num-selected)/feature_num
```
#### File: preml/feature_select/heuristics.py
```python
import numpy as np
import numpy.matlib  # needed for np.matlib.repmat below
import sklearn.metrics  # needed for normalized_mutual_info_score
def hueristic_value_fscore(feature_num, dataset, targets):
roads_E = np.zeros(feature_num*feature_num*4, dtype="float64").reshape(4, feature_num, feature_num)
# arr = np.corrcoef(dataset)
# R = abs(arr)
## F-score :
classes = np.unique(targets)
class_num = len(classes)
total_mean_f = dataset.mean(0)
nominator = 0
denominator = 0
# nominator = np.zeros(feature_num, dtype="int64")
# denominator = np.zeros(feature_num, dtype="int64")
sample_num_of_this_tag = np.zeros(class_num, dtype="int64")
for i in range(0, class_num):
tags = np.zeros((len(targets)), dtype="int64")
bool_arr = np.equal(targets, classes[i])
tags[bool_arr] = 1
sample_num_of_this_tag[i] = np.sum(tags)
dataset_only_class = dataset[bool_arr, :]
class_mean_f = dataset_only_class.mean(0)
class_mean_f = np.round(class_mean_f, decimals=4)
nominator = nominator + np.power(np.subtract(class_mean_f, total_mean_f), 2)
denominator = denominator + sum(np.power(np.subtract(dataset_only_class, np.matlib.repmat(total_mean_f, dataset_only_class.shape[0],1)), 2)) / (sample_num_of_this_tag[i]-1)
F_score = np.divide(nominator, denominator)
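    # F_score is computed per feature: the between-class term sum_c (class mean - overall mean)^2
    # divided by a within-class spread term built from squared deviations (an F-score / Fisher-style ratio)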
roads_E[0, :, :] = (0.5/feature_num) * sum(F_score)
roads_E[1, :, :] = np.matlib.repmat(F_score, feature_num, 1)
roads_E[2, :, :] = (0.5/feature_num) * sum(F_score)
roads_E[3, :, :] = np.matlib.repmat(F_score, feature_num, 1)
return roads_E
def heuristic_value_min_redundency(feature_num, dataset):
roads_E = np.zeros(feature_num*feature_num*4, dtype="float64").reshape(4, feature_num, feature_num)
arr = np.corrcoef(dataset.T)
R = abs(arr)
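    # R is the absolute feature-feature correlation, so the "select" slices (1 and 3) are weighted by
    # 1 - R (low redundancy is attractive) and the "deselect" slices (0 and 2) by R itself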
roads_E[0, :, :] = R
roads_E[1, :, :] = 1 - R
roads_E[2, :, :] = R
roads_E[3, :, :] = 1 - R
return roads_E
def heuristic_value_min_redundency_max_relevence(feature_num, dataset, targets):
roads_E = np.zeros(feature_num*feature_num*4, dtype="float64").reshape(4, feature_num, feature_num)
class_corrs = np.zeros(feature_num, dtype="float64")
arr = np.corrcoef(dataset.T)
R = abs(arr)
for i in range(0, feature_num):
class_corrs[i] = abs(np.corrcoef(dataset.T[i, :], targets)[0, 1])
class_corr_sel = np.matlib.repmat(class_corrs, feature_num, 1)
class_corr_desel = np.matlib.repmat(1-class_corrs, feature_num, 1)
roads_E[0, :, :] = np.sqrt(np.multiply(R, class_corr_desel))
roads_E[1, :, :] = np.sqrt(np.multiply(1-R, class_corr_sel))
roads_E[2, :, :] = np.sqrt(np.multiply(R, class_corr_desel))
roads_E[3, :, :] = np.sqrt(np.multiply(1-R, class_corr_sel))
return roads_E
def heuristic_value_method_4(feature_num, dataset, targets):
roads_E = np.zeros(feature_num*feature_num*4, dtype="float64").reshape(4, feature_num, feature_num)
class_corrs = np.zeros(feature_num, dtype="float64")
arr = np.corrcoef(dataset.T)
R = abs(arr)
    # known issue: this heuristic is not normalised and can yield values greater than 1
for i in range(0, feature_num):
class_corrs[i] = abs(np.corrcoef(dataset.T[i, :], targets)[0, 1])
class_corr_sel = np.matlib.repmat(class_corrs, feature_num, 1)
class_corr_desel = np.matlib.repmat(1-class_corrs, feature_num, 1)
roads_E[0, :, :] = class_corr_desel
roads_E[1, :, :] = class_corr_sel
roads_E[2, :, :] = R
roads_E[3, :, :] = 1 - R
return roads_E
def heuristic_value_mutual_info(feature_num, dataset):
roads_E = np.zeros(feature_num*feature_num*4, dtype="float64").reshape(4, feature_num, feature_num)
mutual_f_f = np.zeros(feature_num*feature_num, dtype="float64").reshape(feature_num, feature_num)
for i in range(0, feature_num):
for j in range(0, feature_num):
if(i == j):
mutual_f_f[i, j] = 1
else:
mutual_f_f[i, j] = sklearn.metrics.normalized_mutual_info_score(dataset[:, i], dataset[:, j])
# print(mutual_f_f)
roads_E[0, :, :] = mutual_f_f
roads_E[1, :, :] = 1 - mutual_f_f
roads_E[2, :, :] = mutual_f_f
roads_E[3, :, :] = 1 - mutual_f_f
return roads_E
```
#### File: preml/showtime/showie.py
```python
from . import baco_show
# solution === (new_dataset, best_ant_road, acc_before_run, best_fit_so_far, total_feature_num, best_selected_features_num, best_fitnesses_each_iter, average_fitnesses_each_iter ,num_of_features_selected_by_best_ant_each_iter, time_temp, sample_num)
def draw_baco(solution):
if(len(solution) != 11):
print("+++ can't draw the solution due to problem with it! +++")
return
(new_dataset, best_ant_road, acc_before_run, best_fit_so_far, total_feature_num, best_selected_features_num, best_fitnesses_each_iter, average_fitnesses_each_iter ,num_of_features_selected_by_best_ant_each_iter, time_temp, sample_num) = solution
baco_show.show_res_for_this_run(best_fitnesses_each_iter, average_fitnesses_each_iter, num_of_features_selected_by_best_ant_each_iter, total_feature_num)
```
#### File: 5amron/pre-ml/test.py
```python
import pre_ml
import numpy as np
import re
def import_data(data_name):
with open("samples/datasets/"+data_name+"_data.csv","r", encoding="utf-8") as f_data_in: #waveform_data
lines = f_data_in.readlines()
# print(type(lines))
dataset = list()
for line in lines:
line = re.sub("\s+", ",", line.strip())
parts = line.split(",")
dataset.append(parts)
dataset = np.array(dataset, dtype=np.float64)
# print("dataset :")
# dataset = dataset.T
# print(dataset.shape)
# print(dataset)
with open("samples/datasets/"+data_name+"_targets.csv","r", encoding="utf-8") as f_target_in:
lines = f_target_in.readlines()
targets = list()
for line in lines:
targets.append(line)
targets = np.array(targets, dtype=np.int64)
# print("targets :")
# targets = targets.reshape(len(targets), 1) # to make it vertical if it is needed
# print(targets.shape)
# print(targets)
return (dataset, targets)
def run_sample():
(x, y) = import_data("waveform")
solution = pre_ml.baco(x, y, t_percent=40, heu_meth="method_1", ml_alg="knn1", iter_num=10)
pre_ml.draw_baco(solution)
if __name__ == "__main__":
# execute only if run as a script
run_sample()
``` |
{
"source": "5atouristspot/sql_audit",
"score": 3
} |
#### File: botasky/api_0_1/register_verify_user.py
```python
from users_model import User, Init
from . import api
from flask import request, jsonify
import ConfigParser
from botasky.utils.MyCONN import MySQL
from botasky.utils.MyFILE import project_abdir, recursiveSearchFile
from botasky.utils.MyLOG import MyLog
logConfig = recursiveSearchFile(project_abdir, '*logConfig.ini')[0]
mylog = MyLog(logConfig, 'register_verify_user.py')
logger = mylog.outputLog()
__all__ = ['register_user', 'verify_user']
__author__ = 'zhihao'
@api.route('/register', methods=['GET', 'POST'])
def register_user():
'''API users register'''
username = request.args.get('username', type=str, default=None)
password = request.args.get('password', type=str, default=None)
config = ConfigParser.ConfigParser()
metaConfig = recursiveSearchFile(project_abdir, '*metaConfig.ini')[0]
config.read(metaConfig)
engine = Init.Engine(config.get('META', 'user'), config.get('META', 'pwd'),
config.get('META', 'host'), config.get('META', 'port'),
config.get('META', 'db'))
session = Init.Session(engine)
try:
Init.Insert_User(session, username, password)
exec_info = "[action]:register user" \
"[status]:OK" \
"[username]:{username}".format(username=username)
logger.info(exec_info)
except Exception, e:
error_msg = "[action]:register user" \
"[status]:FAIL" \
"[username]:{username}" \
"[Errorcode]:{e}".format(username=username, e=e)
logger.error(error_msg)
return jsonify({'status': '[FAIL]',
'msg': 'register fail, may be repeated because of username or password',
'data': {'username': username, 'password': password}})
return jsonify({'status': '[OK]',
'msg': 'register OK',
'data': {'username': username, 'password': password}})
from flask_httpauth import HTTPBasicAuth
auth = HTTPBasicAuth()
@auth.verify_password
def verify_user(username, password):
'''API users verify decorator'''
config = ConfigParser.ConfigParser()
metaConfig = recursiveSearchFile(project_abdir, '*metaConfig.ini')[0]
config.read(metaConfig)
dbconfig = {'host': config.get('META', 'host'),
'port': int(config.get('META', 'port')),
'user': config.get('META', 'user'),
'passwd': config.get('META', 'pwd'),
'db': config.get('META', 'db'),
'charset': 'utf8'}
db = MySQL(dbconfig)
sql = "select id,name,password_hash from users where name = '{username}'".format(username=username)
db.query(sql)
info = db.fetchOneRow()
    db.close()
    if not info:  # unknown username
        return False
    check_user = User(id=info[0], name=info[1], password_hash=info[2])
if not check_user or not check_user.verify_password(password):
error_msg = "[action]:verify user" \
"[status]:FAIL" \
"[username]:{username}" \
"[verify status]:{status}".format(username=check_user.name,
status=check_user.verify_password(password))
logger.error(error_msg)
return False
exec_info = "[action]:verify user" \
"[status]:OK" \
"[username]:{username}".format(username=username)
logger.info(exec_info)
return True
'''
@auth.verify_password
def verify_user(username, password):
#API users verify decorator
config = ConfigParser.ConfigParser()
metaConfig = recursiveSearchFile(project_abdir, '*metaConfig.ini')[0]
config.read(metaConfig)
engine = Init.Engine(config.get('META', 'user'), config.get('META', 'pwd'),
config.get('META', 'host'), config.get('META', 'port'),
config.get('META', 'db'))
session = Init.Session(engine)
info = session.execute("select id,name,password_hash from users where name = '{username}'".format(username=username)).first()
session.close()
check_user = User(id=info[0], name=info[1], password_hash=info[2])
if not check_user or not check_user.verify_password(password):
error_msg = "[action]:verify user" \
"[status]:FAIL" \
"[username]:{username}" \
"[verify status]:{status}".format(username=check_user.name,
status=check_user.verify_password(password))
logger.error(error_msg)
return False
exec_info = "[action]:verify user" \
"[status]:OK" \
"[username]:{username}".format(username=username)
logger.info(exec_info)
return True
'''
@api.route('/resource')
@auth.login_required
def get_resource():
'''verify example'''
return jsonify({'data': 'Hello'})
"""
@api.route('/verify', methods=['GET', 'POST'])
def verify_user():
'''API users verify'''
username = request.args.get('username', type=str, default=None)
password = request.args.get('password', type=str, default=None)
engine = Init.Engine('admin', 'tfkj705', '192.168.41.40', 3306, 'zhihao_test')
session = Init.Session(engine)
info = session.execute("select * from users where name = '{username}'".format(username=username)).first()
check_user = User(id=info[0], name=info[1], password_hash=info[2])
verify_status = check_user.verify_password(password)
return jsonify({'username': username, 'password': password, 'verify_status': verify_status})
"""
```
#### File: botasky/api_0_1/users_model.py
```python
from sqlalchemy import String, Integer, UniqueConstraint, Index, create_engine
from sqlalchemy import Column
from sqlalchemy.orm import sessionmaker
import pymysql
from werkzeug.security import generate_password_hash, check_password_hash
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
__all__ = ['User', 'Init']
__author__ = 'zhihao'
class User(Base):
'''users model'''
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
name = Column(String(20))
password_hash = Column(String(128))
__table_args__ = (
UniqueConstraint('name', 'password_hash', name='uix_name_pwd'),
Index('ix_name', 'name'),
)
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
class Init():
'''init users model'''
@staticmethod
def Engine(usr, pwd, host, port, db):
dbconn = "mysql+pymysql://{usr}:{pwd}@{host}:{port}/{db}".format(usr=usr, pwd=pwd, host=host,
port=port, db=db)
engine = create_engine(dbconn, echo=True)
return engine
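    # e.g. Init.Engine('user', 'pwd', '127.0.0.1', 3306, 'audit') (placeholder values) yields the
    # connection URL "mysql+pymysql://user:pwd@127.0.0.1:3306/audit"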
@staticmethod
def Session(engine):
# init session
DBSession = sessionmaker(bind=engine)
session = DBSession()
return session
@staticmethod
def Insert_User(session, username, password):
u = User()
u.password = password
new_user = User(name=username, password_hash=u.password_hash)
session.add(new_user)
session.commit()
session.close()
```
#### File: sql_audit/botasky/run.py
```python
import os
from flask import Flask
from gevent import monkey
from gevent.pywsgi import WSGIServer
monkey.patch_all()
from botasky.utils.MyDAEMON import daemonize
__all__ = ['create_app', 'main']
__author__ = 'zhihao'
# the module-level app below is used when running under gunicorn
# the banner needs the "figlet" package (apt-get install figlet)
#os.system('figlet botasky')
app = Flask(__name__)
from api_0_1 import api as api_1_0_blueprint
app.register_blueprint(api_1_0_blueprint, url_prefix='/api/v1000')
from api_0_1.audit import api as api_1_0_audit_blueprint
app.register_blueprint(api_1_0_audit_blueprint, url_prefix='/api/v1000/audit')
from api_0_1.execute import api as api_1_0_execute_blueprint
app.register_blueprint(api_1_0_execute_blueprint, url_prefix='/api/v1000/execute')
from api_0_1.add import api as api_1_0_add_blueprint
app.register_blueprint(api_1_0_add_blueprint, url_prefix='/api/v1000/add')
def create_app():
app = Flask(__name__)
from api_0_1.audit import api as api_1_0_audit_blueprint
app.register_blueprint(api_1_0_audit_blueprint, url_prefix='/api/v1000/audit')
from api_0_1.execute import api as api_1_0_execute_blueprint
app.register_blueprint(api_1_0_execute_blueprint, url_prefix='/api/v1000/execute')
return app
def main():
#apt-get install figlet
os.system('figlet botasky')
daemonize('/dev/null', '/tmp/botasky_stdout.log', '/tmp/botasky_stdout.log')
app = create_app()
server = WSGIServer(('10.20.4.47', 3621), app)
server.serve_forever()
if __name__ == '__main__':
#curl -u da:xinxin -i -X GET http://192.168.41.12:3621/api/v1000/
app.run(debug=False, host='192.168.74.95', port=3621)
```
#### File: botasky/utils/MyLOG.py
```python
from logging.handlers import RotatingFileHandler
import time
import logging
import threading
import ConfigParser
import sys
reload(sys)
__all__ = ['MyLog']
__author__ = 'zhihao'
class MyLog:
file_handler = ''
def __init__(self, log_config, name):
"""
used : init config and get value
:param name : name of local file
:param log_config : name of log config file
"""
self.name = name
self.logger = logging.getLogger(self.name)
config = ConfigParser.ConfigParser()
config.read(log_config)
mythread = threading.Lock()
mythread.acquire() # thread lock
self.log_file_path = config.get('LOGGING', 'log_file_path')
self.maxBytes = config.get('LOGGING', 'maxBytes')
self.backupCount = int(config.get('LOGGING', 'backupCount'))
self.outputConsole_level = int(config.get('LOGGING', 'outputConsole_level'))
self.outputFile_level = int(config.get('LOGGING', 'outputFile_level'))
self.outputConsole = int(config.get('LOGGING', 'outputConsole'))
self.outputFile = int(config.get('LOGGING', 'outputFile'))
self.formatter = logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')
self.console_handler = ''
self.file_handler = ''
mythread.release() # thread lock relax
def outputLog(self):
"""
used : output log to console and file
"""
if self.outputConsole == 1:
# if true ,it should output log in console
self.console_handler = logging.StreamHandler()
self.console_handler.setFormatter(self.formatter)
self.logger.setLevel(self.outputConsole_level)
self.logger.addHandler(self.console_handler)
else:
pass
if self.outputFile == 1:
            self.file_handler = RotatingFileHandler(self.log_file_path, maxBytes=int(self.maxBytes), backupCount=self.backupCount)  # use the limits read from the config
# define RotatingFileHandler, file output path, one file max byte, max backup number
self.file_handler.setFormatter(self.formatter)
self.logger.setLevel(self.outputFile_level)
self.logger.addHandler(self.file_handler)
else:
pass
return self.logger
if __name__ == '__main__':
'''
mylog = MyLog('logConfig.ini','jjjjj')
logger = mylog.outputLog()
logger.error("jjjjjjjjjjjjjjj")
'''
import MyLOG
help(MyLOG)
```
#### File: botasky/utils/MyMAIL.py
```python
import smtplib
import mimetypes
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.image import MIMEImage
from MyLOG import MyLog
from botasky.utils.MyFILE import project_abdir, recursiveSearchFile
logConfig = recursiveSearchFile(project_abdir, '*logConfig.ini')[0]
mylog = MyLog(logConfig, 'MyMAIL.py')
logger = mylog.outputLog()
__all__ = ['MyMail']
__author__ = 'zhihao'
mail_info = {'mail_host': 'smtp.163.com',
'mail_user': '15895890858',
'mail_pass': '<PASSWORD>',
'mail_postfix': '163.com'}
class MyMail():
'''
used : send mail
'''
def __init__(self, mail_info):
'''
used : init mail
:param mail_info: smtp server config
'''
self.mail_info = mail_info
def send_mail(self, to_list, mail_type, subject, content, attachment_list, img_list):
'''
used : send mail
:param to_list: target mail adresses
:param mail_type: plain or html
:param subject: title
:param content: main body
:param attachment_list: attachment
:param img_list: picture
:return:
'''
my_adress = "0905zhihao" + "<" + self.mail_info['mail_user'] + "@" + self.mail_info['mail_postfix'] + ">"
msg = MIMEMultipart()
msg['Subject'] = subject
msg['From'] = my_adress
msg['To'] = ";".join(to_list)
#main text
if mail_type == 'plain' or mail_type == 'html':
try:
body_msg = MIMEText(content, _subtype=mail_type, _charset='gb2312')
msg.attach(body_msg)
exec_info = "[action]:init msg" \
"[status]:OK" \
"[Subject]:{Subject}" \
"[From]:{From}" \
"[To]:{To}".format(Subject=msg['Subject'], From=msg['From'], To=msg['To'])
logger.info(exec_info)
except Exception, e:
print Exception, ":", e
error_msg = "[action]:init msg" \
"[status]:FAIL" \
"[Errorcode]:{e}" \
"[Subject]:{Subject}" \
"[From]:{From}" \
"[To]:{To}".format(Subject=msg['Subject'], From=msg['From'], To=msg['To'], e=e)
logger.error(error_msg)
else:
error_msg = "[action]:send mail_type" \
"[status]:FAIL" \
"[Errorcode]mail_type is not format" \
"[Subject]:{Subject}" \
"[From]:{From}" \
"[To]:{To}".format(Subject=msg['Subject'], From=msg['From'], To=msg['To'])
print error_msg
logger.info(error_msg)
#attachment
if attachment_list == '' or len(attachment_list) == 0:
pass
else:
for attachment in attachment_list:
try:
att = MIMEText(open(attachment, 'rb').read(), 'base64', 'gb2312')
att["Content-Type"] = 'application/octet-stream'
#display name
att["Content-Disposition"] = 'attachment; filename="'+attachment+'\"\''
msg.attach(att)
exec_info = "[action]:add attachment" \
"[status]:OK" \
"[attachment]:{attachment}" \
"[Subject]:{Subject}" \
"[From]:{From}" \
"[To]:{To}".format(attachment=attachment, Subject=msg['Subject'],
From=msg['From'], To=msg['To'])
logger.info(exec_info)
except Exception, e:
print Exception, ":", e
error_msg = "[action]:add attachment" \
"[status]:FAIL" \
"[Errorcode]:{e}" \
"[attachment]={attachment}" \
"[Subject]:{Subject}" \
"[From]:{From}" \
"[To]:{To}".format(Subject=msg['Subject'], From=msg['From'],
attachment=attachment, To=msg['To'], e=e)
logger.error(error_msg)
#img
if img_list == '' or len(img_list) == 0:
pass
else:
for image_adress in img_list:
try:
image = MIMEImage(open(image_adress, 'rb').read())
image.add_header('Content-ID', '<image1>')
msg.attach(image)
exec_info = "[action]:add image" \
"[status]:OK" \
"[image]:{image}" \
"[Subject]:{Subject}" \
"[From]:{From}" \
"[To]:{To}".format(image=image_adress, Subject=msg['Subject'],
From=msg['From'], To=msg['To'])
logger.info(exec_info)
except Exception, e:
print Exception, ":", e
error_msg = "[action]:add image" \
"[status]:FAIL" \
"[Errorcode]:{e}" \
"[image]:{image}" \
"[Subject]:{Subject}" \
"[From]:{From}" \
"[To]:{To}".format(Subject=msg['Subject'], From=msg['From'],
image=image_adress, To=msg['To'], e=e)
logger.error(error_msg)
#send mail
try:
server = smtplib.SMTP()
server.connect(self.mail_info['mail_host'])
server.login(self.mail_info['mail_user'], self.mail_info['mail_pass'])
            server.sendmail(msg['From'], to_list, msg.as_string())  # pass the recipient list so every address in to_list receives the mail
server.quit()
exec_info = "[action]:send mail" \
"[status]:OK" \
"[Subject]:{Subject}" \
"[From]:{From}" \
"[To]:{To}".format(Subject=msg['Subject'], From=msg['From'],To=msg['To'])
logger.info(exec_info)
except Exception, e:
print Exception, ":", e
error_msg = "[action]:send mail" \
"[status]:FAIL" \
"[Errorcode]:{e}" \
"[Subject]:{Subject}" \
"[From]:{From}" \
"[To]:{To}".format(Subject=msg['Subject'], From=msg['From'], To=msg['To'], e=e)
logger.error(error_msg)
if __name__ == '__main__':
'''
mail_info = {'mail_host': 'smtp.163.com',
'mail_user': '15002283621',
'mail_pass': '<PASSWORD>',
'mail_postfix': '163.com'}
#to_list = ['<EMAIL>']
to_list = ['<EMAIL>']
subject = 'xxxxxxxxxxxxx'
content = 'xxxxxxxxxxxxx'
#attachment_list = ['F:\img\img.rar', 'F:\img\img2.rar']
attachment_list = []
#img_list = ['F:\img\\1025.jpg', 'F:\img\\1041.jpg']
img_list = []
mail = MyMail(mail_info)
mail.send_mail(to_list, 'plain', subject, content, attachment_list, img_list)
'''
import MyMAIL
help(MyMAIL)
```
#### File: botasky/utils/MyPROC.py
```python
from multiprocessing import Pool
from MyLOG import MyLog
from botasky.utils.MyFILE import project_abdir, recursiveSearchFile
logConfig = recursiveSearchFile(project_abdir, '*logConfig.ini')[0]
mylog = MyLog(logConfig, 'MyPROC.py')
logger = mylog.outputLog()
__all__ = ['MyProc']
__author__ = 'zhihao'
'''
from time import sleep
def f(x):
for i in range(10):
print '%s --- %s ' % (i, x)
sleep(1)
return "tttttttt"
def k():
print "wwwwwwwww"
sleep(1)
return "eeeeeeee"
'''
class MyProc():
"""
used : multiprocessing
"""
def __init__(self):
pass
@staticmethod
def run(pitch_num, func_name, *args):
"""
used : multiprocessing
:param pitch_num : the number of max processing
:param func_name : function name
:param *args : function parameter
"""
try:
# set the processes max number
pool = Pool(pitch_num)
#have no args
if len(*args) == 0:
result = pool.apply_async(func_name)
exec_info = "[action]:mutliprocessing task distribution" \
"[status]:OK" \
"[funtion]:{funtion}" \
"[args]:NULL".format(funtion=func_name)
logger.info(exec_info)
pool.close()
pool.join()
exec_tasks_status = result.successful()
# task join
if exec_tasks_status:
exec_info = "[action]:mutliprocessing task join" \
"[status]:OK" \
"[exec_tasks_status]:{exec_tasks_status}".format(exec_tasks_status=exec_tasks_status)
logger.info(exec_info)
# or not
else:
error_msg = "[action]:mutliprocessing task join" \
"[status]:FAIL" \
"[exec_tasks_status]:{exec_tasks_status}".format(exec_tasks_status=exec_tasks_status)
logger.error(error_msg)
return result.get()
#have args
else:
result = []
for i in range(len(*args)):
# print args[0][i]
# get each element of tuple
result.append(pool.apply_async(func_name, args[0][i]))
exec_info = "[action]:mutliprocessing task distribution" \
"[status]:OK" \
"[funtion]:{funtion}" \
"[args]:{args}".format(funtion=func_name, args=args[0][i])
logger.info(exec_info)
pool.close()
pool.join()
ret_info = []
for j in range(len(result)):
# task join
if result[j].successful():
exec_info = "[action]:mutliprocessing task join" \
"[status]:OK" \
"[funtion]:{funtion}" \
"[args]:{args}" \
"[tasks_done_status]:{exec_tasks_status}".format(funtion=func_name,
args=args[0][j],
exec_tasks_status=result[j].successful() )
logger.info(exec_info)
ret_info.append(result[j].get())
#or not
else:
exec_info = "[action]:mutliprocessing task join" \
"[status]:FAIL" \
"[funtion]:{funtion}" \
"[args]:{args}" \
"[tasks_done_status]:{exec_tasks_status}".format(funtion=func_name,
args=args[0][j],
exec_tasks_status=result[j].successful())
logger.error(exec_info)
ret_info.append("join FAIL")
return ret_info
except Exception, e:
print Exception, ":", e
error_msg = "[action]:mutliprocessing task" \
"[status]:FAIL" \
"[Errorcode]:{e}".format(e=e)
logger.error(error_msg)
if __name__ == '__main__':
import MyPROC
help(MyPROC)
"""
'''aaa is args tuple of function f'''
myproc = MyProc()
aaa = ((1,), (2,), (3,), (4,), (5,))
ss = myproc.run(16, f, aaa)
print ss
"""
"""
aaa = ()
ss = myproc.run(16, k, aaa)
print ss
"""
```
#### File: botasky/utils/MyTHREAD.py
```python
from time import sleep
import threading
import threadpool
from MyLOG import MyLog
from botasky.utils.MyFILE import project_abdir, recursiveSearchFile
logConfig = recursiveSearchFile(project_abdir, '*logConfig.ini')[0]
mylog = MyLog(logConfig, 'MyTHREAD.py')
logger = mylog.outputLog()
__all__ = ['MyThread','MyThread_ns', 'run', 'call', 'results']
__author__ = 'zhihao'
#thread safe
class MyThread():
def __init__(self):
'''
used: init args : func_list ,threads
'''
self.func_list = None
self.threads = []
def set_thread_func_list(self, func_list):
'''
:param func_list:
used: get function
'''
self.func_list = func_list
def start(self):
'''
used: start treading
'''
#init treading
for func_dict in self.func_list:
try:
if func_dict["args"]:
t = threading.Thread(target=func_dict["func"], args=func_dict["args"])
t.setDaemon(True)
exec_info = "[action]:init mutlithreading task" \
"[status]:OK" \
"[thread name]:{thread_name}" \
"[funtion]:{funtion}" \
"[args]:{args}".format(thread_name=t.name, funtion=func_dict["func"],
args=func_dict["args"])
logger.info(exec_info)
else:
t = threading.Thread(target=func_dict["func"])
t.setDaemon(True)
exec_info = "[action]:init mutlithreading task" \
"[status]:OK" \
"[thread name]:{thread_name}" \
"[funtion]:{funtion}" \
"[args]:NULL".format(thread_name=t.name, funtion=func_dict["func"])
logger.info(exec_info)
self.threads.append(t)
except Exception, e:
print Exception, ":", e
error_msg = "[action]:init mutlithreading task" \
"[status]:FAIL" \
"[funtion]:{funtion}" \
"[Errorcode]:{e}".format(funtion=func_dict["func"],e=e)
logger.error(error_msg)
for thread_obj in self.threads:
try:
thread_obj.start()
exec_info = "[action]:mutlithreading task distribution" \
"[status]:OK" \
"[thread name]:{thread_name}" .format(thread_name=thread_obj.name)
logger.info(exec_info)
except Exception, e:
print Exception, ":", e
error_msg = "[action]:mutlithreading task distribution" \
"[status]:FAIL" \
"[thread name]:{thread_name}" \
"[Errorcode]:{e}".format(thread_name=thread_obj.name,e=e)
logger.error(error_msg)
for thread_obj in self.threads:
try:
thread_obj.join()
if thread_obj.isAlive() == False:
exec_info = "[action]:mutlithreading task join" \
"[status]:OK" \
"[thread name]:{thread_name}" .format(thread_name=thread_obj.name)
logger.info(exec_info)
except Exception, e:
print Exception, ":", e
error_msg = "[action]:mutlithreading task join" \
"[status]:FAIL" \
"[thread name]:{thread_name}" \
"[Errorcode]:{e}".format(thread_name=thread_obj.name,e=e)
logger.error(error_msg)
'''
def run(func_name, *args):
# init a mutex
#mutex = threading.RLock()
try:
# have no args
if len(*args) == 0:
#mutex.acquire()
t = threading.Thread(target=func_name)
t.setDaemon(True)
t.start()
try:
if t.isAlive() == True:
exec_info = "[action]:mutlithreading task distribution" \
"[status]:OK" \
"[thread name]:{thread_name}" \
"[funtion]:{funtion}" \
"[args]:NULL".format(thread_name=t.name,funtion=func_name)
logger.info(exec_info)
except Exception, e:
print Exception, ":", e
error_msg = "[action]:mutlithreading task distribution" \
"[status]:FAIL" \
"[thread name]:{thread_name}" \
"[funtion]:{funtion}" \
"[args]:NULL" \
"[Errorcode]:{e}".format(thread_name=t.name,funtion=func_name,e=e)
logger.error(error_msg)
t.join()
try:
if t.isAlive() == False:
exec_info = "[action]:mutlithreading task join" \
"[status]:OK" \
"[thread name]:{thread_name}" \
"[funtion]:{funtion}" \
"[args]:NULL".format(thread_name=t.name,funtion=func_name)
logger.info(exec_info)
except Exception, e:
print Exception, ":", e
error_msg = "[action]:mutlithreading task join" \
"[status]:FAIL" \
"[thread name]:{thread_name}" \
"[funtion]:{funtion}" \
"[args]:NULL" \
"[Errorcode]:{e}".format(thread_name=t.name,funtion=func_name,e=e)
logger.error(error_msg)
#mutex.release()
# have args
else:
threads = []
for i in range(len(*args)):
# print args[0][i]
# get each element of tuple
threads.append(threading.Thread(target=func_name, args=args[0][i]))
for t in threads:
t.setDaemon(True)
try:
t.start()
if t.isAlive() == True:
exec_info = "[action]:mutlithreading task distribution" \
"[status]:OK" \
"[thread name]:{thread_name}" \
"[funtion]:{funtion}" \
"[args]:{args}".format(thread_name=t.name, funtion=func_name,
args=args[0][i])
logger.info(exec_info)
except Exception, e:
print Exception, ":", e
error_msg = "[action]:mutlithreading task distribution" \
"[status]:FAIL" \
"[thread name]:{thread_name}" \
"[funtion]:{funtion}" \
"[args]:{args}" \
"[Errorcode]:{e}".format(thread_name=t.name, funtion=func_name,
args=args[0][i],e=e)
logger.error(error_msg)
for t in threads:
t.join()
try:
if t.isAlive() == False:
exec_info = "[action]:mutlithreading task join" \
"[status]:OK" \
"[thread name]:{thread_name}" \
"[funtion]:{funtion}" \
"[args]:{args}".format(thread_name=t.name,funtion=func_name,
args=args[0][i])
logger.info(exec_info)
except Exception, e:
print Exception, ":", e
error_msg = "[action]:mutlithreading task join" \
"[status]:FAIL" \
"[thread name]:{thread_name}" \
"[funtion]:{funtion}" \
"[args]:{args}" \
"[Errorcode]:{e}".format(thread_name=t.name,funtion=func_name,
args=args[0][i],e=e)
logger.error(error_msg)
except Exception, e:
print Exception, ":", e
error_msg = "[action]:mutlithreading task" \
"[status]:FAIL" \
"[Errorcode]:{e}".format(e=e)
logger.error(error_msg)
'''
class MyThread_ns():
#used: not thread safe
def __init__(self):
pass
tt = []
@staticmethod
def fff(request,result):
global tt
tt.append(result)
@staticmethod
def run(pitch_num, func_name, *args):
pool = threadpool.ThreadPool(pitch_num)
requests = threadpool.makeRequests(func_name, *args)
[pool.putRequest(req) for req in requests]
pool.wait()
'''
results = []
from botasky.utils.MyQUEUE import MyQueue
myqueque = MyQueue(200, 'FIFO')
myq,myloc = myqueque.build_queue_lock()
def call(request, result):
#results.append(result)
MyQueue.write(myq, myloc, result)
results = MyQueue.read(myq, 3)
def run(pitch_num, func_name, func_var):
pool = threadpool.ThreadPool(pitch_num)
requests = threadpool.makeRequests(func_name, func_var, call)
[pool.putRequest(req) for req in requests]
pool.wait()
'''
'''
def bb(x):
sleep(2)
print x
return x
def aa():
print 'aaaaaaa'
def func1(ret_num):
sleep(2)
print "func1 ret:%d" % ret_num
return ret_num
def func2(ret_num):
sleep(8)
print "func2 ret:%d" % ret_num
return ret_num
def func3():
sleep(2)
print "func3 ret:100"
return 100
'''
if __name__ == '__main__':
#import MyTHREAD
#help(MyTHREAD)
#myth = MyThread()
#a = [[1,],[2,],[3,],[4,],[5,],[6,],[7,],[8,],[9,],[10,],[11,],[12,],[13,],[14,],[15,],[16,],[17,],[18,]]
#myth.run(bb,a)
'''
#MyThread()
mt = MyThread()
g_func_list = []
g_func_list.append({"func": func1, "args": (1,)})
g_func_list.append({"func": func2, "args": (2,)})
g_func_list.append({"func": func3, "args": None})
mt.set_thread_func_list(g_func_list)
mt.start()
'''
'''
#MyThread_ns()
a=['1','2','3','4','5']
mythns = MyThread_ns()
mythns.run(2,bb,a)
'''
paramikoconfig = {'username': 'root',
'password': '<PASSWORD>',
'key_file': ''}
host = '192.168.71.142'
port = 22
from MyGO import MyMiko
miko = MyMiko(host, port, paramikoconfig)
#mfsmaster_exist_info = miko.exec_cmd(miko.go(), 'killall -0 mfsmaster && echo $?')
lst_vars_1 = [miko.go(), 'killall -0 mfsmaster && echo $?']
lst_vars_2 = [miko.go(), 'killall -0 mfsmaster && echo $?']
lst_vars_3 = [miko.go(), 'killall -0 mfsmaster && echo $?']
lst_vars_4 = [miko.go(), 'killall -0 mfsmaster && echo $?']
lst_vars_5 = [miko.go(), 'killall -0 mfsmaster && echo $?']
lst_vars_6 = [miko.go(), 'killall -0 mfsmaster && echo $?']
lst_vars_7 = [miko.go(), 'killall -0 mfsmaster && echo $?']
func_var = [(lst_vars_1, None), (lst_vars_2, None), (lst_vars_3, None), (lst_vars_4, None), (lst_vars_5, None), (lst_vars_6, None)]
#run(8, miko.exec_cmd, func_var)
#print results
``` |
{
"source": "5aumy4/greenBerry",
"score": 3
} |
#### File: greenberry/gb_utils/greenberry_plot.py
```python
from collections import OrderedDict
import inspect
from symbols import *
from debug_cp import *
from gb_utils.greenberry_search import GreenBerrySearch
L_USER = "<NAME>"
# an alternative lex pass would identify blocks first; this is a side effect
MATH_OPS = ["+", "-", "*", "/"]
BOOLS = [S.TRUE, S.FALSE]
BOOL_OPS = [S.GREATER, S.LESS]
EOS = [S.NL, S.EOF]
class GreenBerryPlot(object):
def __init__(self):
print(self, "does not have an initialiser")
def linear_plot(self, dataX, labelX, dataY, labelY):
"""simple line plot"""
try:
import matplotlib.pyplot as plt
plt.plot(dataX, dataY)
plt.xlabel(labelX)
plt.ylabel(labelY)
plt.show()
except ImportError:
print("matplotlib unimported")
def plot_handling(self, i, words, line):
"""
handles plotting of points
"""
try:
comma_i = GreenBerrySearch.search_symbol(i, 1, words, S.COMMA)[1]
if words[i + 1][0] != "'" and words[i + 1][0] != '"':
dataX = list(map(float, words[i + 1].split("-")))
else:
file_name = words[i + 1][1:-1]
with open(file_name, "r") as file:
dataX = list(map(float, file.read().split("-")))
if words[comma_i + 1][0] != "'" and words[comma_i + 1][0] != '"':
dataY = list(map(float, words[comma_i + 1].split("-")))
else:
file_name = words[comma_i + 1][1:-1]
with open(file_name, "r") as file:
dataY = list(map(float, file.read().split("-")))
labelX = GreenBerrySearch.search(i, 1, words, S.COMMA)
labelY = GreenBerrySearch.search(comma_i, 1, words, [S.NL, S.EOF])
self.linear_plot(dataX, labelX, dataY, labelY)
except:
print(E.PLOT, line)
```
#### File: greenBerry/greenberry/greenBerry.py
```python
from collections import OrderedDict
import inspect
from symbols import *
from debug_cp import *
from gb_utils.greenberry_print import GreenBerryPrint
from gb_utils.greenberry_lex import GreenBerryLex
from gb_utils.greenberry_parse import GreenBerryParse
from gb_utils.greenberry_plot import GreenBerryPlot
from gb_utils.greenberry_search import GreenBerrySearch
from gb_utils.greenberry_var_type import GreenBerryVarType
L_USER = "<NAME>"
# an alternative lex pass would identify blocks first; this is a side effect
MATH_OPS = ["+", "-", "*", "/"]
BOOLS = [S.TRUE, S.FALSE]
BOOL_OPS = [S.GREATER, S.LESS]
EOS = [S.NL, S.EOF]
"""
Function with nested functions with different core
greenberry functionalities
"""
def greenBerry_eval(x):
global L_USER
###
# program starts here
###
M.g_vars = {}
M.g_fs = {}
M.g_cls = {}
F.bStart = 100
F.bEnd = 0
F.isDebugOn = 0 # this is a reset needed for gb_ide
KWDs = [
getattr(S, i)
for i in [
b[0]
for b in [
a
for a in inspect.getmembers(S, lambda a: not inspect.isroutine(a))
if not (a[0].startswith("__") and a[0].endswith("__"))
]
]
]
g_vars = M.g_vars
g_fs = M.g_fs
g_cls = M.g_cls
words = GreenBerryLex.lex(x, KWDs, add_eof=1)
GreenBerryPrint.printd(words)
line = 1
"""
if elem.value == S.NL
error : elem.line
"""
for i, elem in enumerate(words): # mainloop for complex parsing
# printd(elem)
#
# newline
#
if elem == S.NL:
line += 1
#
# minified for loop
#
elif elem == S.FOR:
try:
F.bStart = i
times_by = int(words[i + 1])
string = GreenBerrySearch.search(i, 3, words, [S.NL, S.EOF])
wds = GreenBerryLex.lex(string, KWDs)
GreenBerryPrint.printd(wds)
for d in range(times_by):
GreenBerryParse.simple_parse(g_vars, wds, line)
# colon_i = search_symbol(i, 1, words, [S.COLON])[1]
F.bEnd = GreenBerrySearch.search_symbol(i, 1, words, [S.NL, S.EOF])[1]
except:
print(E.FOR, line)
#
# if statement
#
elif elem == S.IF: # to be rededefined
try:
F.bStart = i
L, R = 0, 0
raw = GreenBerrySearch.search_symbol(
i,
1,
words,
[S.EQUAL, S.LESS, S.GREATER, S.EQUAL_GREATER, S.EQUAL_LESS],
)
symbol = raw[0]
symbol_i = raw[1]
colon_i = GreenBerrySearch.search_symbol(i, 1, words, S.COLON)[1]
to_do = GreenBerrySearch.search(colon_i, 0, words, [S.NL, S.EOF])
wds = GreenBerryLex.lex(to_do, KWDs)
if words[i + 1] == S.VAR_REF:
# print('L @ detected')
L = g_vars[words[i + 2]][0]
elif words[i + 1].isdigit():
# print('L int detected')
L = int(words[i + 1])
else:
# print('L str detected')
L = GreenBerrySearch.search(i, 0, words, [symbol, S.COLON])
if words[symbol_i + 1] == S.VAR_REF:
# print('R @ detected')
R = g_vars[words[symbol_i + 2]][0]
elif words[symbol_i + 1].isdigit():
# print("R", words[symbol_i+1])
R = int(words[symbol_i + 1])
else:
# print('R str detected')
R = GreenBerrySearch.search(symbol_i, 0, words, [S.COLON])
# print(L, R, symbol)
if symbol == S.EQUAL:
if L == R:
GreenBerryParse.simple_parse(g_vars, wds, line)
elif symbol == S.GREATER:
if L > R:
GreenBerryParse.simple_parse(g_vars, wds, line)
elif symbol == S.LESS:
if L < R:
GreenBerryParse.simple_parse(g_vars, wds, line)
elif symbol == S.EQUAL_GREATER:
if L >= R:
GreenBerryParse.simple_parse(g_vars, wds, line)
elif symbol == S.EQUAL_LESS:
if L <= R:
GreenBerryParse.simple_parse(g_vars, wds, line)
# colon_i = search_symbol(i, 1, words, [S.COLON])[1]
F.bEnd = GreenBerrySearch.search_symbol(i, 1, words, [S.NL, S.EOF])[1]
except:
print(E.IF, line)
# resolve flag
#
# function definition
#
elif elem == S.FUNCDEF: # func vector : print aaa #func vector x : print @x
params = []
try:
F.bStart = i
func_name = words[i + 1]
if words[i + 2] == S.COLON:
body = GreenBerrySearch.search(i, 2, words, [S.NL, S.EOF])
g_fs[func_name] = {"params": None, "body": body}
else:
params = GreenBerrySearch.search_toks(i, 1, words, [S.COLON])
col_i = GreenBerrySearch.search_symbol(i, 1, words, [S.COLON])[1]
body = GreenBerrySearch.search(col_i, 0, words, [S.NL, S.EOF])
registry = OrderedDict()
for param in params:
registry[param] = None
g_fs[func_name] = {"params": registry, "body": body}
# colon_i = search_symbol(i, 1, words, [S.COLON])[1]
F.bEnd = GreenBerrySearch.search_symbol(i, 1, words, [S.NL, S.EOF])[1]
except:
print(E.FUNCDEF, line)
#
# function call
#
elif elem == S.FUNCCALL: # call vector
try:
func_name = words[i + 1]
if g_fs[func_name]["params"] is None:
# print(g_fs)
# print(func_name)
wds = GreenBerryLex.lex(g_fs[func_name]["body"], KWDs)
GreenBerryParse.simple_parse(g_vars, wds, line)
else:
param_vals = GreenBerrySearch.search_toks(
i, 1, words, [S.NL, S.EOF]
)
registry = g_fs[func_name]["params"]
i = 0
for key in registry:
registry[key] = [
param_vals[i],
GreenBerryVarType.var_type(param_vals[i]),
] # data
i += 1
                    wds = GreenBerryLex.lex(g_fs[func_name]["body"], KWDs)
GreenBerryParse.simple_parse(registry, wds, line)
except:
print(E.FUNCCALL, line)
#
# class definition
#
elif elem == S.CLASS: # class Man : power = 10 action walk : print a
# attrs = {} future
try:
F.bStart = i
class_name = words[
i + 1
] # subsequent changed to action for mult attribs
attr_name = words[
i + 3
] # search_symbol var_data(i+4, words, [S.NL, S.EOF])
attr_val = GreenBerryVarType.var_data(i + 4, words, [S.ACTION])
action_name = words[i + 7]
action_body = GreenBerrySearch.search(i + 7, 1, words, [S.NL, S.EOF])
g_cls[class_name] = {
"attributes": {attr_name: attr_val},
"actions": {action_name: action_body},
}
# colon_i = search_symbol(i, 1, words, [S.COLON])[1]
F.bEnd = GreenBerrySearch.search_symbol(i, 1, words, [S.NL, S.EOF])[1]
"""
class_name = {
name = name,
attributes = {
x = 1,
y = 2
}
methods = {
walk = string here,
raise hand = string here
}
}
"""
except:
print(E.CLASSDEC, line)
#
# call class method.
#
elif elem == S.MAKE: # make Man walk
try:
class_name = words[i + 1]
if class_name not in g_cls:
print("wrong class name berry")
action_name = words[i + 2]
raw_text = g_cls[class_name]["actions"][action_name]
wds = GreenBerryLex.lex(raw_text, KWDs)
GreenBerryParse.simple_parse(g_vars, wds, line)
except:
print(E.CLASSACT, line)
#
# attribute viewing
#
elif elem == S.SEE: # see power of Man
try:
attr = words[i + 1]
class_name = words[i + 2]
print(g_cls[class_name]["attributes"][attr][0])
except:
print(E.CLASSATT, line)
#
# add attribute to class
#
elif elem == S.ADD: # add to Man attribute name = string i am me
try:
F.bStart = i
if words[i + 1] in g_cls:
if words[i + 2] == S.ATTRIB:
if words[i + 4] == S.EQUAL:
value = GreenBerryVarType.var_data(
i + 4, words, [S.NL, S.EOF]
)
g_cls[words[i + 1]]["attributes"][words[i + 3]] = value
else:
print(E.EQUAL, line)
elif (
words[i + 2] == S.ACTION
): # add to Man action run : print running...
if words[i + 4] == S.COLON:
g_cls[words[i + 1]]["actions"][
words[i + 3]
] = GreenBerrySearch.search(i, 4, words, [S.NL, S.EOF])
else:
print(E.COLON, line)
else:
print(E.CLASSNAME, line)
F.bEnd = GreenBerrySearch.search_symbol(i, 1, words, [S.NL, S.EOF])[1]
except:
print(E.ADD, line)
#
# debug on or off
#
elif elem == S.SET: # set debug on - set debug off
try:
if words[i + 1] == "debug":
if words[i + 2] == "on":
F.isDebugOn = 1
elif words[i + 2] == "off":
F.isDebugOn = 0
except:
print(E.DEBUG, line)
else:
if i < F.bStart or i > F.bEnd and elem != S.EOF:
F.bStart = i
F.bEnd = GreenBerrySearch.search_symbol(i, 1, words, [S.NL, S.EOF])[1]
to_do = GreenBerrySearch.search(i - 1, 0, words, [S.NL, S.EOF])
wds = GreenBerryLex.lex(to_do, KWDs)
GreenBerryParse.simple_parse(g_vars, wds, line)
GreenBerryPrint.printd(g_vars)
GreenBerryPrint.printd(g_fs)
GreenBerryPrint.printd(g_cls)
# python greenBerry_REPL.py
``` |
{
"source": "5aurabhpathak/all-I-ve-done",
"score": 3
} |
#### File: machine_learning/ann/irissupervised.py
```python
from matplotlib import pyplot as pl
from mpl_toolkits.mplot3d import Axes3D
from numpy import genfromtxt, matrix, zeros, exp, tanh
from ann import ANN
cols = {0, 1, 4}
data = genfromtxt('data/iris.data.train', delimiter=',', converters={4: lambda x: 0. if x == b'Iris-setosa' else 1. if x == b'Iris-versicolor' else 2.}, usecols=cols)
dataset, y = matrix(data[:,:2]), data[:,2]
def bitmapper():
y_new = matrix(zeros((y.shape[0], 3), 'float64'))
for i in range(y.size): y_new[i, y[i]] = 1
return y_new.T
y = bitmapper()
print(y)
#nn = ANN(lambda x: 1 / (1 + exp(-x)), (dataset.shape[1], 4, 5, 4, 3))
nn = ANN(tanh, (dataset.shape[1], 7, 3))
nn.learn(dataset, y)
``` |
{
"source": "5aurabhpathak/src",
"score": 3
} |
#### File: masters_courses/image_and_vision_processing/cameramansplit.py
```python
from matplotlib import pyplot as pl
from numpy import matrix, vsplit, hsplit
c = pl.imread('data/cameraman.tif')
c1, c2 = vsplit(c, 2)
(c11, c12), (c21, c22) = hsplit(c1, 2), hsplit(c2, 2)
def subpl(im, subfig):
pl.subplot(220 + subfig)
pl.axis('off')
pl.imshow(im, cmap=pl.get_cmap('Greys_r'), interpolation='none')
subpl(c11, 1)
subpl(c12, 2)
subpl(c21, 3)
subpl(c22, 4)
pl.figure()
subpl(c22, 1)
subpl(c21, 2)
subpl(c12, 3)
subpl(c11, 4)
pl.show()
```
#### File: masters_courses/image_and_vision_processing/intensityscale.py
```python
from matplotlib import pyplot as pl
from numpy import max, min
def disp(im, n, t):
pl.subplot(120 + n)
pl.title(t)
pl.axis('off')
pl.imshow(im, pl.get_cmap('Greys_r'))
im = pl.imread('data/cameraman.tif').astype('uint16')
print('In=\n',im)
disp(im,1, 'input')
out = (30 + im * 150/(max(im) - min(im))).astype('uint8')
print('out=\n', out,'\nMaximum intensity',max(out),'\nMinimum intensity', min(out))
disp(out,2,'output')
pl.show()
```
#### File: image_and_vision_processing/morph/morph.py
```python
from matplotlib import pyplot as pl
from numpy import matrix, vstack, hstack, zeros, any, logical_and, array_equal, ogrid
from numpy.ma import masked_equal
def move_mask(im, mask, op):
x = mask.shape[0] // 2
def pad(im):
padvert, padhoriz = zeros((im.shape[0], x)), zeros((x, im.shape[1]+2*x))
return vstack((padhoriz, hstack((padvert, im, padvert)), padhoriz))
im = pad(im)
out = zeros((im.shape[0], im.shape[1]))
for i in range(x, im.shape[0] - x):
for j in range(x, im.shape[1] - x):
out[i,j] = op(mask, im[i-x:i+x+1, j-x:j+x+1])
return out[x:out.shape[0]-x, x:out.shape[1]-x]
def work(im, mask, s):
pl.figure('SE size: '+s)
im = pl.imread(im)
outdilate, outerode = move_mask(im, mask, dilation), move_mask(im, mask, erosion)
outopen, outclose = move_mask(outerode, mask, dilation), move_mask(outdilate, mask, erosion)
def show(im, n, t):
pl.subplot(230+n)
pl.imshow(im, 'binary_r')
pl.title(t)
pl.axis('off')
show(im, 1, 'Input')
show(outerode, 2, 'Eroded')
show(outdilate, 3, 'Dilated')
show(outopen, 4, 'Opened')
show(outclose, 5, 'Closed')
pl.show()
def se(size):
s = size // 2
x, y = ogrid[-s:s+1, -s:s+1]
return x * x + y * y <= s * s
im = 'data/blobs.png'
erosion, dilation = lambda x,y: array_equal(x, logical_and(x,y)), lambda x,y: any(logical_and(x,y))
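# erosion keeps a pixel only if the SE fits entirely inside the foreground window (mask == mask AND window);
# dilation keeps it if the SE overlaps the window anywhere.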
work(im, se(3), '3X3')
work(im, se(5), '5X5')
work(im, se(7), '7X7')
```
#### File: masters_courses/image_and_vision_processing/rampcircledisc.py
```python
from matplotlib import pyplot as pl
from numpy import zeros, sqrt, around
def display(im, cmap='Greys_r'):
pl.imshow(im, cmap=pl.get_cmap(cmap))
pl.axis('off')
pl.show()
im = []
for i in range(256): im += [i]*256,
display(im)
def draw_circle(im, centre, radius):
'''Bresenham's circle drawing algorithm'''
for i in range(int(around(radius / sqrt(2), decimals=0) + 1)):
k = int(around(sqrt(radius**2 - i**2), decimals=0))
        im[centre[0] + i, centre[1] + k] = True
        im[centre[0] - i, centre[1] - k] = True
        im[centre[0] + k, centre[1] + i] = True
        im[centre[0] - k, centre[1] - i] = True
        im[centre[0] + i, centre[1] - k] = True
        im[centre[0] - i, centre[1] + k] = True
        im[centre[1] - k, centre[0] + i] = True
        im[centre[1] + k, centre[0] - i] = True
im = zeros((256, 256), bool)
draw_circle(im, (128, 128), 50)
display(im, 'binary_r')
cam = pl.imread('data/cameraman.tif')
croppedcam = zeros((256,256))
for i in range(cam.shape[0]):
for j in range(cam.shape[1]):
if im[i,j] or (any(im[i, :j]) and any(im[i, j:])): croppedcam[i,j] = cam[i,j]
display(croppedcam)
draw_circle(im, (128, 128), 100)
for i in range(im.shape[0]):
boundary = False
x = []
for j in range(im.shape[1]):
if not boundary and im[i,j]:
boundary = True
x += j,
elif boundary and not im[i,j]:
boundary = False
if len(x) > 1:
im[i, x[0]:x[1]] = True
if len(x) == 3:
im[i, x[1]:x[2]] = True
elif len(x) > 3:
im[i, x[2]:x[3]] = True
display(im, 'binary_r')
```
#### File: pca/singleeigenface/singleface.py
```python
from matplotlib import pyplot as pl
from cv2 import cvtColor, COLOR_RGB2GRAY
from numpy import cov, matrix, argsort, abs, around
from numpy.linalg import eig
from sys import argv
n = 30 if len(argv) == 1 else int(argv[1])
dataset = matrix(cvtColor(pl.imread('me.jpg'), COLOR_RGB2GRAY), 'float64')
if (n > dataset.shape[1]):
print('Wrong parameter.')
exit(1)
e, v = eig(cov(dataset, rowvar=0))
v = v[:, argsort(abs(e))[::-1]][:,:n]
def disp(n, string, im):
pl.subplot(120 + n)
pl.title(string)
pl.imshow(im, 'Greys_r')
pl.axis('off')
disp(1, 'Input', dataset)
reduced = around(dataset * v * v.T).astype('uint8')
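# reconstruction: projecting the image onto the top-n covariance eigenvectors and back (X * V * V^T)
# rebuilds it from n principal components of the column space.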
disp(2, 'Output with '+str(n)+' Principal components', reduced)
pl.show()
```
#### File: machine_learning/svm/svm.py
```python
from matplotlib import pyplot as pl, patches
from mpl_toolkits.mplot3d import Axes3D
from numpy import genfromtxt, multiply, ones, zeros, identity, around, meshgrid, amin, argsort, vstack, matrix as mat, array, hstack
from numpy.random import random_integers
from scipy.spatial.distance import cdist, pdist, squareform
from cvxopt.solvers import qp, options
from cvxopt import matrix
cols, condition, s_size, test = {0, 1, 2, 4}, b'Iris-setosa', 3, False
data = genfromtxt('data/iris.data.train', delimiter=',', converters={4: lambda x: 1.0 if x == condition else -1.0}, usecols=cols)
dataset_primal, y_primal = data[:,:3], data[:,3]
#training data selection - begin
def selectfromtrainset(s):
dataset_1, dataset_2, y1, y2 = [], [], [], []
for i in range(dataset_primal.shape[0]):
if y_primal[i] == -1:
dataset_1 += dataset_primal[i],
y1 += y_primal[i],
else:
dataset_2 += dataset_primal[i],
y2 += y_primal[i],
dataset_1, dataset_2, y1, y2 = mat(dataset_1), mat(dataset_2), array(y1), array(y2)
def choosemetric():
crossdist = cdist(dataset_1, dataset_2, 'euclidean')
m_1, m_2 = amin(crossdist, 1), amin(crossdist, 0)
s1, s2 = 2 * s_size // 3, s_size // 3
if s == 'hausdorff': samples_1, samples_2 = argsort(m_1)[:s1], argsort(m_2)[:s2]
if s == 'confidence':
withindist_1, withindist_2 = squareform(pdist(dataset_1, 'euclidean')), squareform(pdist(dataset_2, 'cityblock'))
def radius(w, m):
ni = zeros(w.shape[0])
for i in range(w.shape[0]):
for j in range(w.shape[1]):
if w[i,j] > ni[i] and w[i,j] < m[i]: ni[i] = w[i,j]
return ni
ni_1, ni_2 = radius(withindist_1, m_1), radius(withindist_2, m_2)
samples_1, samples_2 = argsort(ni_1)[:s1], argsort(ni_2)[:s2]
elif s == 'random': samples_1, samples_2 = random_integers(0, dataset_1.shape[0] - 1, s1), random_integers(0, dataset_2.shape[0] - 1, s2)
return dataset_1[samples_1], dataset_2[samples_2], y1[samples_1], y2[samples_2]
dataset_1, dataset_2, y1, y2 = choosemetric()
return matrix(vstack((dataset_1, dataset_2))), matrix(hstack((y1, y2)))
def plotsurface(w, b, maxrange, ax, color):
surface_x, surface_y = meshgrid(range(maxrange), range(maxrange))
surface_z = (-w[0] * surface_x - w[1] * surface_y - b) / w[2]
ax.plot_surface(surface_x, surface_y, surface_z, color=color, alpha=0.5)
def applysvm(selection_metric, test):
if selection_metric == 'none': dataset, y = matrix(dataset_primal), matrix(y_primal)
else: dataset, y = selectfromtrainset(selection_metric)
innerproducts = matrix(multiply(y * y.T, dataset * dataset.T))
options['show_progress'] = False
solution = qp(innerproducts, matrix(-ones(dataset.size[0])), matrix(-identity(dataset.size[0])), matrix(zeros(dataset.size[0])), y.T, matrix(0.0))
alpha = matrix(around(solution['x'], 5))
params = [(alpha[i], y[i], dataset[i,:]) for i in range(dataset.size[0]) if alpha[i] != 0.0]
w = sum([params[i][0] * params[i][1] * params[i][2] for i in range(len(params))])
b = 1 / params[0][1] - w * params[0][2].T
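    # hard-margin SVM recovery: w = sum_i alpha_i * y_i * x_i over the support vectors, and b comes
    # from y_i * (w . x_i + b) = 1 applied to the first support vector.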
#training complete. Rest is plotting and test
maxrange = None
def plot(ax, s):
nonlocal maxrange
for i in range(dataset.size[0]):
if y[i] == -1:
if alpha[i] == 0.0: ax.scatter(dataset[i, 0], dataset[i,1], dataset[i,2], s=30, c='b')
else: ax.scatter(dataset[i, 0], dataset[i,1], dataset[i,2], s=40, c='b', marker='D')
else:
if alpha[i] == 0.0: ax.scatter(dataset[i, 0], dataset[i,1], dataset[i,2], s=30, c='r')
else: ax.scatter(dataset[i, 0], dataset[i,1], dataset[i,2], s=40, c='r', marker='D')
pl.title('Linear SVM - Hard Margin - ' + s)
maxrange = int(max(dataset[:,:2])) + 1
plotsurface(w, b, maxrange, ax, 'g')
if not test:
pl.figure('Using selection method: ' + selection_metric)
ax = pl.subplot(111, projection='3d')
plot(ax, 'Train')
return w, b, maxrange
#begin testing
pl.figure('Using selection method: ' + selection_metric)
ax = pl.subplot(111, projection='3d')
data = genfromtxt('data/iris.data.test', delimiter=',', converters={4: lambda x: 1.0 if x == condition else -1.0}, usecols=cols)
testset, y_actual= matrix(data[:,:3]), matrix(data[:,3])
y_obs, numerrors = zeros(testset.size[0]), 0
for i in range(testset.size[0]):
y_obs[i] = 1 if (w * testset[i,:].T + b)[0] > 0.0 else -1
if y_actual[i] != y_obs[i]: numerrors += 1
if y_actual[i] == -1: ax.scatter(testset[i,0], testset[i,1], testset[i,2], c='aqua')
else: ax.scatter(testset[i,0], testset[i,1], testset[i,2], c='plum')
plot(ax, 'Test')
print('% Error using {}:'.format(selection_metric), 100.0 * numerrors / testset.size[0])
return w, b, maxrange
pl.figure('Comparison')
handles = []
ax = pl.subplot(111, projection='3d')
def compare(t, c):
global handles
plotsurface(*applysvm(t, test), ax, c)
handles += patches.Patch(color=c, label=t),
compare('none', 'r')
compare('hausdorff', 'g')
compare('confidence', 'b')
compare('random', 'y')
pl.figure('Comparison')
pl.figlegend(handles, ('none', 'hausdorff', 'confidence', 'random'), 'upper right')
pl.show()
```
#### File: masters_courses/nlp/hobbs.py
```python
from collections import deque
from nltk.tree import ParentedTree
from classes import NounPhrase, ispleonastic
def findX(np):
x = np.parent()
p = [np]
while x.label() not in {'S', 'NP'}:
p += x,
x = x.parent()
p += x,
return x, p[::-1]
def breadth_first(tree):
queue = deque([(tree,0)])
while queue:
node, depth = queue.popleft()
if isinstance(node, ParentedTree):
queue.extend((c, depth + 1) for c in iter(node))
yield node
def breadth_first_left(tree, p):
queue = deque([(tree,0)])
while queue:
node, depth = queue.popleft()
if isinstance(node, ParentedTree):
if node in p:
flag = True
try:
for c in iter(node):
if c is p[p.index(node) + 1]:
flag = False
queue.append((c, depth + 1))
if flag: queue.append((c, depth + 1))
except IndexError: pass
else: queue.extend((c, depth + 1) for c in iter(node))
yield node
def breadth_first_no_np(tree, p):
queue = deque([(tree,0)])
while queue:
node, depth = queue.popleft()
if isinstance(node, ParentedTree) and node.label() != 'NP':
if node in p:
flag = False
try:
for c in iter(node):
if c is p[p.index(node) + 1]: flag = True
except IndexError: pass
else: queue.extend((c, depth + 1) for c in iter(node))
yield node
def bfs(x, p, np, direction='left'):
func = breadth_first_no_np if direction == 'right' else breadth_first_left
for node in func(x, p):
if node.label() == 'NP' and consider(node, np): return node
return None
def consider(node, np):
if NounPhrase(np).agreeswith(NounPhrase(node)): return True
# print('rejected', NounPhrase(node).name)
return False
def bfsresolve(np, i):
if i < 0: return None
for node in breadth_first(ptlist[i]):
if isinstance(node, ParentedTree) and node.label() == 'NP' and consider(node, np): return node
return bfsresolve(np, i-1)
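# Orientation note (added comment; the code below follows the spirit of Hobbs (1978)):
# starting from the pronoun NP, climb to the nearest NP or S node (findX), breadth-first
# search the part of that subtree left of the path for an NP that agrees with the pronoun,
# and keep climbing if nothing is found; at S nodes also search to the right while skipping
# embedded NPs, and fall back to earlier sentences (bfsresolve) once ROOT is reached.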
def hobbsresolve(np, i):
x, p = findX(np)
node = bfs(x, p, np)
if node: return node
if x.parent().label() == 'ROOT': return bfsresolve(np, i-1)
    x, p = findX(x)
if x.label() == 'NP' and x[0].label() not in {'NNS', 'NNP', 'NNPS', 'NN', 'CD'} and consider(x, np): return x
node = bfs(x, p, np)
if node: return node
    if x.label() == 'S': return bfs(x, p, np, 'right')
return hobbsresolve(x, i)
ptlist = None
def hobbs(parsetree, i, l):
global ptlist
ptlist = l
for np in parsetree.subtrees(lambda x: x.label() == 'NP'):
if 'PRP' in np[0].label():
if np[0,0].lower() == 'it' and ispleonastic(np, parsetree): continue
referent = hobbsresolve(np, i)
if not referent: continue
referent = NounPhrase(referent)
orig = np[0,0]
if np[0].label() == 'PRP$':
np[0] = ParentedTree.fromstring('(PRP$ <'+ referent.name + "'s>)")
print('PRP$ substitution', orig, '-->', referent.name)
else:
np[0] = ParentedTree.fromstring('(PRP <' + referent.name + '>)')
print('PRP substitution', orig, '-->', referent.name)
```
#### File: competitive_coding/dynamic_programming/knapsack01td.py
```python
(w, n), wt, v = (map(int, input().split())), [], []
for _ in range(n):
a,b = map(int, input().split())
wt += a,
v += b,
mem = [[0 for _ in range(w+1)] for _ in range(n+1)]
def knapsack(n, w, wt, v):
if n == 0 or w == 0: return 0
if mem[n][w] != 0: return mem[n][w]
if wt[n-1] > w: mem[n][w] = knapsack(n-1, w, wt, v)
else: mem[n][w] = max(v[n-1] + knapsack(n-1, w - wt[n-1], wt, v), knapsack(n-1, w, wt, v))
return mem[n][w]
print(knapsack(n, w, wt, v))
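# Illustrative run (example input made up for this note): with stdin
#   50 3
#   10 60
#   20 100
#   30 120
# the capacity is 50 and there are 3 items (weight value per line); the memoized
# recursion prints 220, i.e. taking the items of weight 20 and 30.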
``` |
{
"source": "5boon/backend",
"score": 2
} |
#### File: api/users/serializers.py
```python
import re
from django.conf import settings
from rest_framework import serializers
from apps.users.models import User
class SimpleUserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ['id', 'name', 'email']
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ['id', 'date_joined', 'name', 'username', 'email']
class UserRegisterSerializer(serializers.ModelSerializer):
password = serializers.CharField(min_length=6, write_only=True)
email = serializers.EmailField(allow_blank=False, required=True)
class Meta:
model = User
fields = ('username', 'name', 'password', 'email')
def validate_username(self, username):
        code_regex = re.compile(r'[a-zA-Z0-9\-_]')  # allow letters, digits, '-' and '_'
if code_regex.sub('', username):
raise serializers.ValidationError('유효하지 않은 정규식입니다.', 'regex_error')
return username
def validate(self, data):
        # User.objects.filter() never raises DoesNotExist, so check existence directly
        if User.objects.filter(username=data.get('username')).exists():
            raise serializers.ValidationError("Username already exists")
if User.objects.filter(email=data.get('email')).exists():
raise serializers.ValidationError("email already exists")
return data
def create(self, validated_data):
instance = User.objects.create_user(**validated_data)
return instance
class PasswordFindSerializer(serializers.Serializer):
email = serializers.EmailField(allow_null=False, allow_blank=False, required=True)
username = serializers.CharField(max_length=150, required=True)
class Meta:
fields = ('email', 'username')
class IDFindSerializer(serializers.Serializer):
email = serializers.EmailField(allow_null=False, allow_blank=False, required=True)
name = serializers.CharField(max_length=50, required=True)
class Meta:
fields = ('email', 'name')
class SNSLoginSerializer(serializers.Serializer):
type = serializers.ChoiceField(choices=['apple', 'kakao'], required=True)
email = serializers.EmailField(allow_null=False, allow_blank=False, required=True)
name = serializers.CharField(max_length=50, required=True)
class Meta:
fields = ['type', 'unique_id', 'email', 'name']
class SNSUserPasswordSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ['id', 'username', 'email', 'date_joined']
def to_representation(self, user):
new_password = '{}{}{}'.format(
user.email.split('@')[0],
settings.SNS_AUTH_USER_KEY,
user.date_joined.strftime('%y%m%d')
)
ret = {
'username': user.username,
'password': <PASSWORD>
}
return ret
```
#### File: users/tests/test_users.py
```python
import mock
import pytest
from rest_framework import status
from rest_framework.reverse import reverse
from apps.users.models import User
# Mock out OAuth2 authentication (TODO: add logic so OAuth 2.0 itself can also be tested)
from tests.request_helper import pytest_request
@pytest.fixture(scope='function')
def mock_is_authenticated():
with mock.patch('rest_framework.permissions.IsAuthenticated') as patch:
yield patch
@pytest.fixture(scope='function')
def mock_update_employment_center_name():
with mock.patch('api.users.views.slack_notify_new_user') as patch:
yield patch
@pytest.fixture(scope='function')
def mock_send_pw_email():
with mock.patch('api.users.views.send_pw_email') as patch:
yield patch
@pytest.mark.urls(urls='urls')
@pytest.mark.django_db
def test_user_register(rf, client, mock_update_employment_center_name):
url = reverse(viewname="users:user_register")
data = {
'username': 'test',
'password': '<PASSWORD>',
'name': 'name',
'email': '<EMAIL>'
}
response = pytest_request(rf,
method='post',
url=url,
user=None,
data=data)
assert response.status_code == status.HTTP_201_CREATED
assert response.data.get('username') == data.get('username')
assert response.data.get('name') == data.get('name')
@pytest.mark.urls(urls='urls')
@pytest.mark.django_db
def test_user_password_find(rf, client, user_context, mock_send_pw_email):
url = reverse(viewname="users:user_password")
user = user_context.init.create_user()
data = {
'username': user.username,
'email': user.email
}
response = pytest_request(rf,
method='post',
url=url,
user=user,
data=data)
assert response.status_code == status.HTTP_200_OK
@pytest.mark.urls(urls='urls')
@pytest.mark.django_db
def test_user_password_update(rf, client, user_context, mock_is_authenticated):
user = user_context.init.create_user()
data = {
'new_password': '<PASSWORD>'
}
url = reverse(viewname="users:user_password")
response = pytest_request(rf,
method='patch',
url=url,
data=data,
user=user)
assert response.status_code == status.HTTP_200_OK
@pytest.mark.urls(urls='urls')
@pytest.mark.django_db
def test_user_id_find(rf, client, user_context, mock_send_pw_email):
url = reverse(viewname="users:user_id")
user = user_context.init.create_user()
data = {
'name': user.name,
'email': user.email
}
response = pytest_request(rf,
method='post',
url=url,
user=user,
data=data)
assert response.status_code == status.HTTP_200_OK
@pytest.mark.urls(urls='urls')
@pytest.mark.django_db
def test_user_email_check(rf, client, user_context, mock_send_pw_email):
url = reverse(viewname="users:user_check")
user = user_context.init.create_user()
data = {
'email': user.email
}
response = pytest_request(rf,
method='get',
url=url,
user=user,
data=data)
assert response.status_code == status.HTTP_200_OK
@pytest.mark.urls(urls='urls')
@pytest.mark.django_db
@pytest.mark.parametrize(
'social_type',
['kakao', 'apple']
)
def test_sns_first_login(rf, client, user_context, social_type):
"""
    data is the payload the client received from social authentication.
    A user is created on the first login.
"""
url = reverse(viewname="users:user_sns")
data = {
'type': social_type,
'email': '<EMAIL>',
'name': '5boon_user'
}
response = pytest_request(rf,
method='post',
url=url,
data=data)
assert response.status_code == status.HTTP_201_CREATED
@pytest.mark.urls(urls='urls')
@pytest.mark.django_db
@pytest.mark.parametrize(
'social_type',
['kakao', 'apple']
)
def test_sns_login(rf, client, user_context, social_type):
"""
    data is the payload the client received from social authentication.
"""
url = reverse(viewname="users:user_sns")
data = {
'type': social_type,
'email': '<EMAIL>',
'name': '5boon_user'
}
User.objects.create(
username='{}-{}'.format(data.get('type'), data.get('email').split('@')[0]),
name=data.get('name'),
password="<PASSWORD>",
email=data.get('email'),
)
response = pytest_request(rf,
method='post',
url=url,
data=data)
assert response.status_code == status.HTTP_200_OK
```
#### File: apps/mood_groups/signals.py
```python
from django.db.models.signals import post_delete
from django.dispatch import receiver
from apps.mood_groups.models import UserMoodGroup, MoodGroup
@receiver(post_delete, sender=UserMoodGroup)
def delete_user_mood_group(sender, instance, **kwargs):
if not UserMoodGroup.objects.filter(mood_group_id=instance.mood_group.id).exists():
MoodGroup.objects.filter(id=instance.mood_group.id).delete()
``` |
{
"source": "5cs/spdk",
"score": 2
} |
#### File: scripts/rpc/cmd_parser.py
```python
args_global = ['server_addr', 'port', 'timeout', 'verbose', 'dry_run', 'conn_retries',
'is_server', 'rpc_plugin', 'called_rpc_name', 'func', 'client']
def strip_globals(kwargs):
for arg in args_global:
kwargs.pop(arg, None)
def remove_null(kwargs):
keys = []
for key, value in kwargs.items():
if value is None:
keys.append(key)
for key in keys:
kwargs.pop(key, None)
def apply_defaults(kwargs, **defaults):
for key, value in defaults.items():
if key not in kwargs:
kwargs[key] = value
def group_as(kwargs, name, values):
group = {}
for arg in values:
if arg in kwargs and kwargs[arg] is not None:
group[arg] = kwargs.pop(arg, None)
kwargs[name] = group
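# Illustrative usage (added sketch; the argument names below are made up, not real RPCs):
# given parsed CLI kwargs such as
#   {'server_addr': '127.0.0.1', 'verbose': False, 'name': 'dev0', 'trtype': 'tcp', 'adrfam': None}
# a caller would typically run strip_globals(kwargs) to drop the framework arguments,
# remove_null(kwargs) to drop unset options, apply_defaults(kwargs, adrfam='ipv4') to
# fill defaults, and group_as(kwargs, 'address', ['trtype', 'adrfam']) to nest the
# transport fields under a single 'address' key before issuing the RPC.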
```
#### File: scripts/rpc/subsystem.py
```python
from .helpers import deprecated_alias
@deprecated_alias('get_subsystems')
def framework_get_subsystems(client):
return client.call('framework_get_subsystems')
@deprecated_alias('get_subsystem_config')
def framework_get_config(client, name):
params = {'name': name}
return client.call('framework_get_config', params)
def framework_get_pci_devices(client):
return client.call('framework_get_pci_devices')
``` |
{
"source": "5d-jh/circruit",
"score": 3
} |
#### File: app/blueprints/user.py
```python
import os, sys
sys.path.append(os.path.dirname(__file__))
from flask import Blueprint, request, render_template, redirect, url_for
from flask_dance.contrib.github import github
from modules.api_func import get_gh_user_info
from modules import db_required
blueprint = Blueprint('user', __name__)
@blueprint.route("/create", methods=['GET', 'POST'])
@db_required
def create_user(db):
user = get_gh_user_info()
if request.method == "GET":
return render_template(
"user/create_user.html",
username=user["login"],
dev_stacks=db.devstacks.find()
)
elif request.method == "POST":
db.users.insert_one({
"username": user["login"],
"avatar_url": user["avatar_url"],
"bio": user['bio'],
"rank": 0,
"dev_stacks": request.form["dev_stacks"].split(" ")[1:],
"contacts": request.form["contacts"],
"joined_projects": []
})
return redirect("/mypage")
@blueprint.route("/authorize")
@db_required
def authorize(db):
if not github.authorized:
        # redirect to the GitHub authorization page
return redirect(url_for("github.login"))
    # after authorization, GitHub redirects back here and we fetch the user info
user = get_gh_user_info()
users_collection = db.users
    # check whether this user has already signed up
result = users_collection.find_one({
"username": user["login"]
})
    # new user: go to the sign-up page to create their document
    if not result:
        return redirect("/user/create")
    # existing user: go to the feed
return redirect("/")
@blueprint.route("/login")
def user_login_view():
return render_template("user/login.html")
``` |
{
"source": "5ebec/Ameyadar",
"score": 3
} |
#### File: src/accounts/AccountEditor.py
```python
import json
from pathlib import Path
from decouple import config
from mastodon import Mastodon
from requests_oauthlib import OAuth1Session
class AccountEditor:
def __init__(self):
self.name = "default"
def get_name(self):
return self.name
def post_name(self, name):
self.name = name
print(name)
return True
class TwitterAccountEditor(AccountEditor):
def __init__(self):
super().__init__()
self.account = OAuth1Session(config("CONSUMER_KEY"),
config("CONSUMER_SECRET"),
config("ACCESS_TOKEN"),
config("ACCESS_TOKEN_SECRET"))
get_url = 'https://api.twitter.com/1.1/account/verify_credentials.json'
self.name = json.loads(self.account.get(get_url).text)['name']
def post_name(self, name):
super().post_name(name)
post_url = 'https://api.twitter.com/1.1/account/' \
'update_profile.json?name=%s' % name
try:
self.account.post(post_url)
except Exception:
            raise
return True
class MastodonAccountEditor(AccountEditor):
def __init__(self):
super().__init__()
self.server_url = config("MASTODON_SERVER")
mastodon_data_path = Path(__file__).parents[
2] / "data" / "mastodon"
self.mastodon = Mastodon(
client_id=(mastodon_data_path / "my_clientcred.txt").as_posix(),
access_token=(mastodon_data_path / "my_usercred.txt").as_posix(),
api_base_url=self.server_url
)
self.name = self.mastodon.account_verify_credentials()['username']
def post_name(self, name):
super().post_name(name)
self.mastodon.account_update_credentials(display_name=name)
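# Illustrative usage (added sketch; assumes the CONSUMER_*/ACCESS_*/MASTODON_SERVER
# settings and the mastodon credential files referenced above are already configured):
#
#   editor = TwitterAccountEditor()
#   print(editor.get_name())          # current display name
#   editor.post_name("new name")      # pushes the new display name to the account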
``` |
{
"source": "5elenay/hyaline",
"score": 3
} |
#### File: hyaline/models/Activity.py
```python
from dataclasses import dataclass
from datetime import datetime
from typing import Union
@dataclass
class Activity:
# Attrs
def __init__(self, json) -> None:
self.name: str = json['name']
self.type: int = json['type']
self.url: Union[str, None] = json['url'] if 'url' in json else None
self.created_at: datetime = datetime.fromtimestamp(json['created_at'])
```
#### File: hyaline/models/AuditLog.py
```python
from dataclasses import dataclass
@dataclass
class AuditLog:
# Attrs
def __init__(self, json, token) -> None:
self.__token: str = token
from .Webhook import Webhook
from .User import User
for key in json:
if key == "webhooks":
setattr(self, key, [Webhook(i, self.__token)
for i in json[key]])
elif key == "users":
setattr(self, key, [User(i, self.__token) for i in json[key]])
else:
setattr(self, key, json[key])
```
#### File: hyaline/models/Member.py
```python
from dataclasses import dataclass
from dateutil.parser import parse
@dataclass
class Member:
# Attrs
def __init__(self, json, token) -> None:
self.__token: str = token
from .User import User
for key in json:
if key == "user":
setattr(self, key, User(json[key], self.__token))
elif key in ("joined_at", "premium_since"):
setattr(self, key, parse(json[key]) if json[key] else None)
else:
setattr(self, key, json[key])
```
#### File: hyaline/models/MessageReference.py
```python
from dataclasses import dataclass
from typing import Union
@dataclass
class MessageReference:
# Attrs
def __init__(self, json) -> None:
self.message_id: Union[str,
None] = json['message_id'] if 'message_id' in json else None
self.channel_id: Union[str,
None] = json['channel_id'] if 'channel_id' in json else None
self.guild_id: Union[str,
None] = json['guild_id'] if 'guild_id' in json else None
self.fail_if_not_exists: Union[bool,
None] = json['fail_if_not_exists'] if 'fail_if_not_exists' in json else None
```
#### File: hyaline/models/Reaction.py
```python
from dataclasses import dataclass
@dataclass
class Reaction:
# Attrs
def __init__(self, json, token) -> None:
self.__token: str = token
from .Emoji import Emoji
self.count: int = json['count']
self.reacted: bool = json['me']
self.emoji: Emoji = Emoji(json['emoji'], self.__token)
```
#### File: hyaline/models/RoleTags.py
```python
from dataclasses import dataclass
from typing import Union
@dataclass
class RoleTags:
# Attrs
def __init__(self, json) -> None:
self.bot_id: Union[str,
None] = json['bot_id'] if 'bot_id' in json else None
self.integration_id: Union[str,
None] = json['integration_id'] if 'integration_id' in json else None
self.premium_subscriber: None = json['premium_subscriber'] if 'premium_subscriber' in json else None
``` |
{
"source": "5elenay/pewn",
"score": 3
} |
#### File: pewn/pewn/http.py
```python
from typing import Union
from pewn.classes import Option, NotSavedData
from pewn.utils import raise_error
from aiohttp import ClientSession
from aiofiles import open as aiopen
from os import makedirs, path
from asyncio import gather
async def download(url: str, option: Option = None, **kwargs) -> Union[str, NotSavedData]:
"""Download data from URL.
Parameters:
url (str): URL for fetch and download.
option (Option): Option object. [Optional]
**kwargs: Settings for aiohttp request.
Returns:
str: Saved path.
NotSavedData: NotSavedData object if you don't add option parameter.
"""
raise_error(url, "url", str)
write_file = False
full_path = None
if option is not None:
raise_error(option, "option", Option)
write_file = True
async with ClientSession(trust_env=True) as session:
async with session.get(url, **kwargs) as response:
data = await response.read()
if write_file:
if not path.isdir(option.folder):
makedirs(option.folder)
full_path = f"{option.folder}/{option.file_name}"
async with aiopen(full_path, mode="wb") as file:
await file.write(data)
return full_path or NotSavedData(data, url)
async def download_multiple(urls: tuple, options: Union[tuple, Option] = None, **kwargs):
"""Download multiple file.
Parameters:
urls (tuple): List of URL that will be downloaded.
options (tuple, Option): List of Option or only one Option object. [Optional]
**kwargs: Settings for aiohttp request.
Returns:
list (str): Saved paths.
list (NotSavedData): List of NotSavedData object if you don't add options parameter.
"""
raise_error(urls, "urls", tuple)
results = ()
if options is not None:
raise_error(options, "option", (tuple, Option))
if isinstance(options, tuple):
results = await gather(*[
download(url, opt, **kwargs) for url, opt in zip(urls, options)
])
elif isinstance(options, Option):
def change_file_name(option: Option, number: int):
splitted_name = option.file_name.split('.')
real_file_name = splitted_name[-2]
real_file_name += f"_{number}"
splitted_name[-2] = real_file_name
return ".".join(splitted_name)
results = await gather(*[
download(url, opt, **kwargs) for url, opt in zip(urls, [Option(file_name=change_file_name(options, i + 1), folder=options.folder) for i, _ in enumerate(urls)])
])
else:
results = await gather(*[
download(url, **kwargs) for url in urls
])
return results
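# Illustrative usage (added sketch; the URL and file names are made up):
#
#   import asyncio
#
#   async def main():
#       path = await download("https://example.com/image.png",
#                             Option(file_name="image.png", folder="downloads"))
#       print(path)  # e.g. "downloads/image.png"
#
#   asyncio.run(main())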
``` |
{
"source": "5elenay/postgrey",
"score": 3
} |
#### File: postgrey/postgrey/utils.py
```python
def raise_error(variable, name, check) -> None:
if isinstance(check, tuple):
if not type(variable) in check:
raise TypeError(
f"Argument '{name}' type must be {' / '.join([i.__name__ if i != None else 'None' for i in check])}, not {type(variable).__name__}.")
else:
if not isinstance(variable, check):
raise TypeError(
f"Argument '{name}' type must be {check.__name__ if check != None else 'None'}, not {type(variable).__name__}.")
def parse_data(data) -> tuple:
d_keys, d_values, d_count = [], [], 1
for key in data.keys():
if not key.startswith("__") and not key.endswith("__"):
d_keys.append(
f"{key} {data.get(f'__{key}__') or '='} ${d_count}")
d_values.append(data.get(key))
d_count += 1
return d_keys, d_values, d_count
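# Illustrative example (added note): parse_data({'name': 'foo', 'age': 30, '__age__': '>'})
# returns (['name = $1', 'age > $2'], ['foo', 30], 3) -- keys wrapped in double
# underscores override the comparison operator for the matching column, the values
# become the ordered query parameters, and the count is the next free placeholder index.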
``` |
{
"source": "5emu7/roadtofreelancer",
"score": 3
} |
#### File: 5emu7/roadtofreelancer/capture_image.py
```python
import pyscreenshot as ImageGrab
import os
import time
class ScreenGrab:
def __init__(self):
self.name = "sample_{}.png".format(str(int(time.time())))
def screenGrab(self):
self.im = ImageGrab.grab()
self.im.save(os.getcwd() + "\\"+self.name)
if __name__ =='__main__':
m = ScreenGrab()
print(m.name)
m.screenGrab()
``` |
{
"source": "5enxia/parallel-krylov",
"score": 2
} |
#### File: v1/processes/common.py
```python
import numpy as np
import scipy
from numpy.linalg import norm
from mpi4py import MPI
from ..common import _start, _end
def start(method_name='', k=None):
_start(method_name, k)
return MPI.Wtime()
def end(start_time, isConverged, num_of_iter, final_residual, final_k=None):
elapsed_time = MPI.Wtime() - start_time
_end(elapsed_time, isConverged, num_of_iter, final_residual, final_k)
return elapsed_time
def krylov_base_start():
return MPI.Wtime()
def krylov_base_finish(start_time):
return MPI.Wtime() - start_time
def init_mpi():
comm = MPI.COMM_WORLD
return comm, comm.Get_rank(), comm.Get_size()
def init_gpu(rank):
import cupy as cp
num_of_gpu = cp.cuda.runtime.getDeviceCount()
cp.cuda.Device(rank % num_of_gpu).use()
pool = cp.cuda.MemoryPool(cp.cuda.malloc_managed)
cp.cuda.set_allocator(pool.malloc)
return num_of_gpu
def init(A, b, num_of_process, T, pu):
"""[summary]
Args:
A(
numpy.ndarray,
cupy.ndarray,
scipy.sparse.csr.csr_matrix,
cupyx.scipy.sparse.csr.csr_matrix
): [係数行列]
b(
numpy.ndarray,
cupy.ndarray,
): [右辺ベクトル]
num_of_process(int): [mpiプロセス数]
T ([type]): [精度]
"""
old_N = b.size
    num_of_append = num_of_process - (old_N % num_of_process)  # number of rows missing to make N divisible
num_of_append = 0 if num_of_append == num_of_process else num_of_append
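    # Worked example (comment added for clarity): with old_N = 10 and num_of_process = 4,
    # num_of_append = 4 - (10 % 4) = 2, so N = 12 and each process owns local_N = 3 rows;
    # if old_N is already divisible, the line above resets num_of_append to 0.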
N = old_N + num_of_append
local_N = N // num_of_process
x = None
b_norm = None
if pu == 'cpu':
if isinstance(A, np.ndarray):
if num_of_append:
                A = np.append(A, np.zeros((old_N, num_of_append)), axis=1)  # pad zero columns on the right
                A = np.append(A, np.zeros((num_of_append, N)), axis=0)  # pad zero rows at the bottom
elif isinstance(A, scipy.sparse.csr.csr_matrix):
from scipy.sparse import hstack, vstack, csr_matrix
if num_of_append:
                A = hstack([A, csr_matrix((old_N, num_of_append))], 'csr')  # pad empty columns on the right
                A = vstack([A, csr_matrix((num_of_append, N))], 'csr')  # pad empty rows at the bottom
if num_of_append:
            b = np.append(b, np.zeros(num_of_append))  # pad b with zeros
x = np.zeros(N, T)
b_norm = np.linalg.norm(b)
else:
import cupy as cp
import cupyx
if isinstance(A, np.ndarray):
if num_of_append:
A = cp.array(A)
                A = cp.append(A, cp.zeros((old_N, num_of_append)), axis=1)  # pad zero columns on the right
                A = cp.append(A, cp.zeros((num_of_append, N)), axis=0)  # pad zero rows at the bottom
elif isinstance(A, scipy.sparse.csr.csr_matrix):
from cupyx.scipy.sparse import hstack, vstack, csr_matrix
if num_of_append:
A = csr_matrix(A)
                A = hstack([A, csr_matrix((old_N, num_of_append))], 'csr')  # pad empty columns on the right
                A = vstack([A, csr_matrix((num_of_append, N))], 'csr')  # pad empty rows at the bottom
if num_of_append:
            b = cp.append(b, cp.zeros(num_of_append))  # pad b with zeros
x = cp.zeros(N, T)
b_norm = cp.linalg.norm(b)
max_iter = old_N * 2
residual = np.zeros(max_iter+1, T)
num_of_solution_updates = np.zeros(max_iter+1, np.int)
num_of_solution_updates[0] = 0
return A, b, x,\
b_norm, N, local_N, max_iter, residual, num_of_solution_updates
```
#### File: v2/gpu/common.py
```python
import time
import numpy as np
import scipy
import cupy as cp
from cupy.cuda import Device
from ..common import _start, _finish
# start timing
def start(method_name: str = '', k: int = None) -> float:
_start(method_name, k)
return time.perf_counter()
# stop timing and report
def finish(start_time: float, isConverged: bool, num_of_iter: int, final_residual: float, final_k: int = None) -> float:
elapsed_time = time.perf_counter() - start_time
_finish(elapsed_time, isConverged, num_of_iter, final_residual, final_k)
return elapsed_time
# initialize parameters
def init(A, b, T, num_of_thread):
    # work out how many elements need to be appended
old_N = b.size
    num_of_append: int = num_of_thread - (old_N % num_of_thread)  # number of rows missing to make N divisible
num_of_append = 0 if num_of_append == num_of_thread else num_of_append
N: int = old_N + num_of_append
# A
if num_of_append:
        # pad the data
if isinstance(A, np.ndarray):
if num_of_append:
                A = np.append(A, np.zeros((old_N, num_of_append)), axis=1)  # pad zero columns on the right
                A = np.append(A, np.zeros((num_of_append, N)), axis=0)  # pad zero rows at the bottom
elif isinstance(A, scipy.sparse.csr.csr_matrix):
from scipy.sparse import hstack, vstack, csr_matrix
if num_of_append:
                A = hstack([A, csr_matrix((old_N, num_of_append))], 'csr')  # pad empty columns on the right
                A = vstack([A, csr_matrix((num_of_append, N))], 'csr')  # pad empty rows at the bottom
# b
b = cp.array(b, T)
if num_of_append:
        b = cp.append(b, cp.zeros(num_of_append))  # pad b with zeros
b_norm = cp.linalg.norm(b)
# x
x = cp.zeros(N, T)
    # other parameters
max_iter = old_N
residual = cp.zeros(max_iter+16, T)
num_of_solution_updates = cp.zeros(max_iter+16, np.int)
num_of_solution_updates[0] = 0
return A, b, x, b_norm, N, max_iter, residual, num_of_solution_updates
class MultiGpu(object):
# numbers
begin: int = 0
end: int = 0
num_of_gpu: int = 0
    # dimensional size
N: int = 0
local_N: int = 0
# matrix
A: list = []
# vector
x: list = []
y: list = []
out: np.ndarray = None
# byte size
nbytes: int = 0
local_nbytes: int = 0
# gpu stream
streams = None
    # initialize the GPUs
@classmethod
def init_gpu(cls, begin: int, end: int):
cls.begin = begin
cls.end = end
cls.num_of_gpu = end - begin + 1
cls.streams = [None] * cls.num_of_gpu
# init memory allocator
for i in range(cls.begin, cls.end+1):
Device(i).use()
pool = cp.cuda.MemoryPool(cp.cuda.malloc_managed)
cp.cuda.set_allocator(pool.malloc)
cls.streams[i-begin] = cp.cuda.Stream()
# Enable P2P
for j in range(4):
if i == j:
continue
cp.cuda.runtime.deviceEnablePeerAccess(j)
    # allocate memory regions
@classmethod
def alloc(cls, A, b, T):
        # dimensional size
cls.N = b.size
cls.local_N = cls.N // cls.num_of_gpu
# byte size
cls.nbytes = b.nbytes
cls.local_nbytes = b.nbytes // cls.num_of_gpu
# init list
cls.A = [None] * cls.num_of_gpu
cls.x = [None] * cls.num_of_gpu
cls.y = [None] * cls.num_of_gpu
# divide single A -> multi local_A
# allocate x, y
for i in range(cls.begin, cls.end+1):
Device(i).use()
index = i-cls.begin
# npy
if isinstance(A, np.ndarray):
cls.A[index] = cp.array(A[i*cls.local_N:(i+1)*cls.local_N], T)
# npz
elif isinstance(A, scipy.sparse.csr.csr_matrix):
from cupyx.scipy.sparse import csr_matrix
cls.A[index] = csr_matrix(A[i*cls.local_N:(i+1)*cls.local_N])
cls.x[index] = cp.zeros(cls.N, T)
cls.y[index] = cp.zeros(cls.local_N, T)
# init out vector
cls.out = cp.zeros(cls.N, T)
    # matrix-vector product across multiple GPUs
@classmethod
def dot(cls, A, x):
# Copy vector data to All devices
for i in range(cls.begin, cls.end+1):
Device(i).use()
index = i-cls.begin
# cp.cuda.runtime.memcpyPeer(cls.x[index].data.ptr, i, x.data.ptr, cls.begin, cls.nbytes)
cp.cuda.runtime.memcpyPeerAsync(cls.x[index].data.ptr, i, x.data.ptr, cls.end, cls.nbytes, cls.streams[index].ptr)
# dot
cls.y[index] = cls.A[index].dot(cls.x[index])
        # Gather calculated elements from all devices
for i in range(cls.begin, cls.end+1):
index = i-cls.begin
# cp.cuda.runtime.memcpyPeer(cls.out[index*cls.local_N].data.ptr, cls.begin, cls.y[index].data.ptr, i, cls.y[index].nbytes)
cp.cuda.runtime.memcpyPeerAsync(cls.out[index*cls.local_N].data.ptr, cls.end, cls.y[index].data.ptr, i, cls.y[index].nbytes, cls.streams[index].ptr)
# sync
for i in range(cls.begin, cls.end+1):
index = i-cls.begin
cls.streams[index].synchronize()
# Device(i).synchronize()
# return
return cls.out
```
#### File: gpu/mpi/cg.py
```python
import cupy as cp
from cupy import dot
from cupy.linalg import norm
from mpi4py import MPI
from .common import start, finish, init, MultiGpu, init_mpi, calc_alloc_gpu
def cg(A, b, epsilon, T):
# MPI
# rank 0-7
# num_of_process = 8
comm, rank, num_of_process = init_mpi()
    # initialize GPUs
begin, end = calc_alloc_gpu(rank, num_of_process)
MultiGpu.init_gpu(begin, end, num_of_process)
    # initialization
local_A, b, x, b_norm, N, max_iter, residual, num_of_solution_updates = init(A, b, T, rank, num_of_process, 16)
MultiGpu.alloc(local_A, b, T)
Ax = cp.zeros(N, T)
v = cp.zeros(N, T)
    # initial residual
comm.Allgather(MultiGpu.dot(local_A, x), Ax)
r = b - Ax
p = r.copy()
gamma = dot(r, r)
    # iterative loop
i = 0
if rank == 0:
start_time = start(method_name='cg + gpu + mpi')
while i < max_iter:
        # convergence check
residual[i] = norm(r) / b_norm
if residual[i] < epsilon:
isConverged = True
break
        # update the solution
comm.Allgather(MultiGpu.dot(local_A, p), v)
sigma = dot(p, v)
alpha = gamma / sigma
x += alpha * p
r -= alpha * v
old_gamma = gamma.copy()
gamma = dot(r, r)
beta = gamma / old_gamma
p = r + beta * p
i += 1
num_of_solution_updates[i] = i
else:
isConverged = False
if rank == 0:
elapsed_time = finish(start_time, isConverged, i, residual[i])
return elapsed_time, num_of_solution_updates[:i+1], residual[:i+1]
else:
exit(0)
```
#### File: cpu/mpi/kskipcg.py
```python
import numpy as np
from numpy import float64, dot
from numpy.linalg import norm
from .common import start, finish, init, MultiCpu
def kskipcg(comm, local_A, b, x=None, tol=1e-05, maxiter=None, k=0, M=None, callback=None, atol=None) -> tuple:
    # initialize MPI
rank = comm.Get_rank()
MultiCpu.joint_mpi(comm)
    # initialization
T = float64
x, maxiter, b_norm, N, residual, num_of_solution_updates = init(
b, x, maxiter)
MultiCpu.alloc(local_A, T)
Ax = np.zeros(N, T)
Ar = np.zeros((k + 2, N), T)
Ap = np.zeros((k + 3, N), T)
a = np.zeros(2*k + 2, T)
f = np.zeros(2*k + 4, T)
c = np.zeros(2*k + 2, T)
    # initial residual
MultiCpu.dot(local_A, x, out=Ax)
Ar[0] = b - Ax
Ap[0] = Ar[0].copy()
    # iterative loop
i = 0
index = 0
if rank == 0:
start_time = start(method_name='k-skip CG + MPI', k=k)
while i < maxiter:
        # convergence check
residual[index] = norm(Ar[0]) / b_norm
if residual[index] < tol:
isConverged = True
break
        # compute the Krylov basis
for j in range(1, k + 1):
MultiCpu.dot(local_A, Ar[j-1], out=Ar[j])
for j in range(1, k + 2):
MultiCpu.dot(local_A, Ap[j-1], out=Ap[j])
        # compute the coefficients
for j in range(2 * k + 1):
jj = j // 2
a[j] = dot(Ar[jj], Ar[jj + j % 2])
for j in range(2 * k + 4):
jj = j // 2
f[j] = dot(Ap[jj], Ap[jj + j % 2])
for j in range(2 * k + 2):
jj = j // 2
c[j] = dot(Ar[jj], Ap[jj + j % 2])
        # one CG iteration
        # update the solution
alpha = a[0] / f[1]
beta = alpha ** 2 * f[2] / a[0] - 1
x += alpha * Ap[0]
Ar[0] -= alpha * Ap[1]
Ap[0] = Ar[0] + beta * Ap[0]
MultiCpu.dot(local_A, Ap[0], out=Ap[1])
        # k CG iterations
for j in range(k):
for l in range(0, 2*(k-j)+1):
a[l] += alpha*(alpha*f[l+2] - 2*c[l+1])
d = c[l] - alpha*f[l+1]
c[l] = a[l] + d*beta
f[l] = c[l] + beta*(d + beta*f[l])
            # update the solution
alpha = a[0] / f[1]
beta = alpha ** 2 * f[2] / a[0] - 1
x += alpha * Ap[0]
Ar[0] -= alpha * Ap[1]
Ap[0] = Ar[0] + beta * Ap[0]
MultiCpu.dot(local_A, Ap[0], out=Ap[1])
i += (k + 1)
index += 1
num_of_solution_updates[index] = i
else:
isConverged = False
residual[index] = norm(Ar[0]) / b_norm
if rank == 0:
elapsed_time = finish(start_time, isConverged, i, residual[index])
info = {
'time': elapsed_time,
'nosl': num_of_solution_updates[:index+1],
'residual': residual[:index+1],
}
return x, info
else:
exit(0)
```
#### File: gpu/mpi/common.py
```python
import os
import numpy as np
import scipy
import cupy as cp
from cupy.cuda import Device
from cupy.cuda.runtime import getDeviceCount
from mpi4py import MPI
# import socket
from ..common import _start, _finish
# start timing
def start(method_name='', k=None):
_start(method_name, k)
return MPI.Wtime()
# stop timing and report
def finish(start_time, isConverged, num_of_iter, final_residual, final_k=None):
elapsed_time = MPI.Wtime() - start_time
_finish(elapsed_time, isConverged, num_of_iter, final_residual, final_k)
return elapsed_time
# initialize parameters
def init(b, x=None, maxiter=None) -> tuple:
T = np.float64
b = cp.array(b)
b_norm = cp.linalg.norm(b)
N = b.size
if isinstance(x, np.ndarray):
x = cp.array(x)
else:
x = cp.zeros(N, dtype=T)
if maxiter == None:
maxiter = N
residual = cp.zeros(maxiter+1, T)
num_of_solution_updates = cp.zeros(maxiter+1, np.int)
return b, x, maxiter, b_norm, N, residual, num_of_solution_updates
class MultiGpu(object):
# numbers
begin: int = 0
end: int = 0
num_of_gpu: int = 0
num_of_process: int = 0
    # dimensional size
N: int = 0
local_N: int = 0
local_local_N: int = 0
# matrix
A: list = []
# vector
x: list = []
y: list = []
out: np.ndarray = None
# byte size
nbytes: int = 0
local_nbytes: int = 0
local_local_nbytes: int = 0
# mpi
comm = None
# gpu stream
streams = None
    # initialize the GPUs
@classmethod
def init(cls):
# ip = socket.gethostbyname(socket.gethostname())
# rank = os.environ['MV2_COMM_WORLD_RANK']
# local_rank = os.environ['MV2_COMM_WORLD_LOCAL_RANK']
if os.environ.get('GPU_IDS') != None:
ids = os.environ['GPU_IDS'].split(',')
cls.begin = int(ids[0])
cls.end = int(ids[-1])
else:
cls.begin = 0
cls.end = getDeviceCount() - 1
cls.num_of_gpu = cls.end - cls.begin + 1
cls.streams = [None] * cls.num_of_gpu
# init memory allocator
for i in range(cls.begin, cls.end+1):
Device(i).use()
pool = cp.cuda.MemoryPool(cp.cuda.malloc_managed)
cp.cuda.set_allocator(pool.malloc)
cls.streams[i-cls.begin] = cp.cuda.Stream(non_blocking=False)
# Enable P2P
for j in range(4):
if i == j:
continue
cp.cuda.runtime.deviceEnablePeerAccess(j)
    # allocate memory regions
@classmethod
def alloc(cls, local_A, b, T):
        # dimensional size
cls.local_N, cls.N = local_A.shape
cls.local_local_N = cls.local_N // cls.num_of_gpu
# byte size
cls.nbytes = b.nbytes
cls.local_nbytes = cls.nbytes // cls.num_of_process
cls.local_local_nbytes = cls.local_nbytes // cls.num_of_gpu
# init list
cls.A = [None] * cls.num_of_gpu
cls.x = [None] * cls.num_of_gpu
cls.y = [None] * cls.num_of_gpu
# divide single A -> multi local_A
# allocate x, y
for i in range(cls.begin, cls.end+1):
Device(i).use()
index = i-cls.begin
            # local_A is 1/8 (each GPU takes its own slice below)
begin, end = index*cls.local_local_N, (index+1)*cls.local_local_N
# npy
if isinstance(local_A, np.ndarray):
cls.A[index] = cp.array(local_A[begin:end], T)
# npz
elif isinstance(local_A, scipy.sparse.csr.csr_matrix):
from cupyx.scipy.sparse import csr_matrix
cls.A[index] = csr_matrix(local_A[begin:end])
cls.x[index] = cp.zeros(cls.N, T)
cls.y[index] = cp.zeros(cls.local_local_N, T)
# init out vector
cls.out = cp.zeros(cls.local_N, T)
    # matrix-vector product across multiple GPUs
@classmethod
def dot(cls, local_A, x, out):
# Copy vector data to All devices
for i in range(cls.begin, cls.end+1):
# Device(i).use()
index = i-cls.begin
# cp.cuda.runtime.memcpyPeerAsync(cls.x[index].data.ptr, i, x.data.ptr, cls.end, cls.nbytes, cls.streams[index].ptr)
cp.cuda.runtime.memcpyPeer(cls.x[index].data.ptr, i, x.data.ptr, cls.end, cls.nbytes)
# dot
for i in range(cls.begin, cls.end+1):
index = i-cls.begin
Device(i).use()
# cls.streams[index].synchronize()
cls.y[index] = cls.A[index].dot(cls.x[index])
        # Gather calculated elements from all devices
for i in range(cls.begin, cls.end+1):
Device(i).synchronize()
index = i-cls.begin
# cp.cuda.runtime.memcpyPeerAsync(cls.out[index*cls.local_local_N].data.ptr, cls.end, cls.y[index].data.ptr, i, cls.local_local_nbytes, cls.streams[index].ptr)
cp.cuda.runtime.memcpyPeer(cls.out[index*cls.local_local_N].data.ptr, cls.end, cls.y[index].data.ptr, i, cls.y[index].nbytes)
# # sync
# for i in range(cls.begin, cls.end+1):
# index = i-cls.begin
# cls.streams[index].synchronize()
cls.comm.Allgather(cls.out, out)
# return
return out
# joint comm
@classmethod
def joint_mpi(cls, comm):
cls.comm = comm
cls.num_of_process = comm.Get_size()
``` |
{
"source": "5fcgdaeb/hexagonal_python",
"score": 3
} |
#### File: secondary_ports/rawforecastrepository/RawForecastRepository.py
```python
import abc
class RawForecastRepository(object):
@abc.abstractmethod
def upload_raw_reading_list(self, for_run):
raise NotImplementedError(f"Class {self.__class__.__name__} doesnt implement upload_raw_reading_list method.")
@abc.abstractmethod
def upload_compiled_forecast_file_of_location_id_and_day_id(self, forecast_file, measurement_location_id, day_id):
raise NotImplementedError(f"Class {self.__class__.__name__} doesnt implement upload_compiled_forecast_file_of_location_id_and_day_id method.")
@abc.abstractmethod
def prepare_raw_forecasts_of_locations_for_base_hour(self, measurement_location_ids, targetedCrawlIDOfBaseHour):
raise NotImplementedError(f"Class {self.__class__.__name__} doesnt implement prepare_raw_forecasts_of_locations_for_base_hour method.")
@abc.abstractmethod
def prepare_forecasts_of_location_id_and_crawl_id_and_distance(self, measurement_location_id, crawlID, distance):
raise NotImplementedError(f"Class {self.__class__.__name__} doesnt implement prepare_forecasts_of_location_id_and_crawl_id_and_distance method.")
@abc.abstractmethod
def prepare_forecasts_for_all_possible_distances_of_location_id_and_crawl_id(self, measurement_location_id, crawl_id):
raise NotImplementedError(f"Class {self.__class__.__name__} doesnt implement prepare_forecasts_for_all_possible_distances_of_location_id_and_crawl_id method.")
``` |
{
"source": "5foot8/maho-contrail",
"score": 3
} |
#### File: maho-contrail/maho/cli.py
```python
import argparse
import multiprocessing
import queue
import time
import threading
from maho.camera import IPCamera
from maho.adsb import Dump1090
from maho.util import AzimuthAltitudeDistance
from time import sleep
from metar import Metar
#from hanging_threads import start_monitoring
import numpy as np
import cv2
#import subprocess
import pylab
import os
import sys
import math
#import faulthandler
#faulthandler.enable()
#start_monitoring(seconds_frozen=10, test_interval=100)
timeNow = time.strftime("%Y%m%d-%H%M%S")
def restart_program():
print("Restarting")
time.sleep(0)
python = sys.executable
os.execl(sys.executable, 'python', __file__, *sys.argv[1:])
min_height = 1000
def camera_control(camera_host, camera_port, camera_user, camera_pass, q):
"""Control a maho.Camera based on inputs from a multiprocessing queue"
On startup this function will place the stream_url in the queue if camera
communication works.
If it fails the exception for why will be placed in the queue before exiting
"""
try:
camera = IPCamera(camera_host, camera_port, camera_user, camera_pass)
q.put(camera.get_rtsp_url())
except RuntimeError as exc:
q.put(exc)
try:
while True:
camera.move_to(*q.get())
except KeyboardInterrupt:
pass
def track_closest_aircraft(latitude, longitude, elevation, host, port, q):
"""Forward adsb messages to a Queue
Args:
host (str): The dump1090 host
port (int): The dump1090 port
q (queue): Messages will be placed in this queue
On startup this function will place True in the queue if dump1090 starts properly
If it fails the exception will be placed in the queue before exiting
"""
try:
d = Dump1090(host, port)
q.put(True)
except IOError as exc:
q.put(exc)
return
target = None
target_distance = None
frame_count = 0
aad = AzimuthAltitudeDistance(latitude, longitude, elevation)
try:
for aircraft in d.updates():
lat, lng = aircraft.position
azimuth, altitude, distance = aad.calculate(
lat,
lng,
aircraft.altitude
)
if aircraft.position[0] and lat > latitude:
northSouth = ('N')
else:
northSouth = ('S')
# if we don't have a target we do now
# or target is old, then use this new aircraft
# or new aircraft isn't the target, but it is closer, so we switch!
# make sure aircraft swaps between cameras as it passes over
# so the correct camera is pointing at the aircraft, if this is not
# here the camera continues to follow until an aircraft is closer.
# if you are using only one camera you dont require this section
my_lat = 53.43450
timeNow = time.strftime("%Y%m%d-%H%M%S")
if latitude >= my_lat and target is None:
pass
elif latitude >= my_lat > lat and target.icao == aircraft.icao:
target = None
else:
pass
if latitude <= my_lat and target is None:
pass
elif latitude <= my_lat < lat and target.icao == aircraft.icao:
target = None
else:
pass
# Mondified code to split between the North and South facing cameras.
#North facing camera:
above = (min_height * 0.3048)
if (latitude >= my_lat) and (aircraft.altitude >= (30000 * 0.3048)) or (aircraft.position[0] < latitude):
if (target is None or target.age > 20 or target.icao != aircraft.icao and distance < target_distance) \
                        and northSouth == 'N' and (altitude > 15):
target = aircraft
elif aircraft and target is None:
pass
else:
#print('.', end='')
#print ((timeNow), end='\r', flush=True))
#print(min_height * 0.3048)
print(time.ctime(), (above), end="\r", flush=True)
pass
#South facing camera:
frame_count = 0
if (latitude <= my_lat) and (aircraft.altitude >= (30000 * 0.3048)) or (aircraft.position[0] > latitude):
if (target is None or target.age > 20 or target.icao != aircraft.icao and distance < target_distance) \
                        and northSouth == 'S' and (altitude > 15):
target = aircraft
elif aircraft and target is None:
pass
else:
#print (timeNow(), end='\r', flush=True)
print(time.ctime(), (above), end="\r", flush=True)
#pass#'''
'''tracker.terminate()
tracker.join()
camera.terminate()
camera.join()
cap.release()
cv2.destroyAllWindows()'''
pass
#if frame_count > 50:
# restart_program()
else:
pass
# if we aren't the target at this point then bail
old_distance = 0
if target != aircraft:
continue
target = aircraft
target_distance = distance
if old_distance == distance or altitude < 10:
pass
else:
old_distance = distance
q.put((target, azimuth, altitude, distance))
except KeyboardInterrupt:
pass
def go_maho(
latitude,
longitude,
elevation,
camera_host,
camera_port,
camera_user,
camera_pass,
adsb_host,
adsb_port,
min_height
):
# fork a process to communicate with dump1090
targets = multiprocessing.Queue()
tracker = multiprocessing.Process(
target=track_closest_aircraft,
args=(latitude, longitude, elevation, adsb_host, adsb_port, targets,)
)
tracker.start()
# fist thing in the queue will be startup status
# True if good
# an Exception if bad
status = targets.get()
if isinstance(status, Exception):
raise RuntimeError("Unable to connect to dump1090 on {}:{}: {}".format(
adsb_host,
adsb_port,
status
))
# run camera control in own process as moving the camera can block for seconds
camera_queue = multiprocessing.Queue()
camera = multiprocessing.Process(
target=camera_control,
args=(camera_host, camera_port, camera_user, camera_pass, camera_queue,)
)
camera.start()
# fist thing in the queue will be startup status
# Stream URL if good
# an Exception if bad
stream_url = camera_queue.get()
if isinstance(stream_url, Exception):
raise RuntimeError("Unable to connect to camera on {}:{}: {}".format(
camera_host,
camera_port,
stream_url
))
cap = cv2.VideoCapture(stream_url)
ret, frame = cap.read()
#cv2.namedWindow("maho")
#orb = cv2.ORB_create()
# build a mask that's the center of the frame
# we'll focus searching for aircraft in this region
search_mask = np.zeros((frame.shape[0], frame.shape[1], 1), dtype=np.uint8)
cx = frame.shape[1] / 2
cy = frame.shape[0] / 2
size = 0.3
search_rect = (
(int(cy - (cy * size)), int(cx - (cx * size))),
(int(cy + (cy * size)), int(cx + (cx * size)))
)
# openCV UI main loops
start = None
end = None
fps = 0
elapsed = 0
target = None
last_target = None
try:
while True:
start = time.time()
#'''
# fill our mask back to full, we may have chopped it smaller on the last frame
search_mask[
search_rect[0][0]:search_rect[0][1],
search_rect[1][0]:search_rect[1][1]
] = 255#'''
# grab a frame from the camera
ret, frame = cap.read()
#gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# check for new / updated target info
try:
target, azimuth, altitude, distance = targets.get(False)
if last_target is None or target.icao != last_target.icao:
last_target = target
print("Now tracking {} / {} - Distance: {}m Time: {}".format(
target.icao,
target.callsign,
int(distance),
time.strftime("%Y%m%d-%H%M%S")
))
print("{} | azi: {:.3f}, alt: {:.3f}".format(
target,
azimuth,
altitude,
#int(distance)
))
'''print("{} | azi: {:.3f}, alt: {:.3f}, dist: {}m.".format('''
camera_queue.put((azimuth, altitude))
except queue.Empty:
if target is None:
camera_queue.put((0,0))
else:
pass
# annotate the frame
if target:
cv2.putText(
frame,
target.callsign or target.icao,
(0, 50),
cv2.FONT_HERSHEY_DUPLEX,
2,
(255, 255, 255),
4,
cv2.LINE_AA
)
txt = "{0:.3f}, {1:.3f} @ {2:.0f}m (dist: {3:.0f}m)".format(
target.position[0],
target.position[1],
target.altitude,
distance
)
cv2.putText(
frame,
txt,
(10, 75),
cv2.FONT_HERSHEY_SIMPLEX,
.5,
(255, 255, 255),
1,
cv2.LINE_AA
)
cv2.rectangle(frame, search_rect[0][::-1], search_rect[1][::-1], (0, 0, 255), 2)
'''
kp = orb.detect(gray, search_mask)
kp, des = orb.compute(gray, kp)
cv2.drawKeypoints(frame, kp, frame, color=(0, 255, 0), flags=0)#'''
cv2.putText(
frame,
"Camera Position: Az: {:.0f}, Alt: {:.0f}".format(azimuth, altitude),
(10, 100),
cv2.FONT_HERSHEY_SIMPLEX,
.5,
(255, 255, 255),
1,
cv2.LINE_AA
)
            # different ways of displaying the final output;
            # uncomment to activate (unclear whether more than three can be active at once).
my_lat = 53.43450
if latitude >= my_lat:
window_name = ('<NAME>')
else:
window_name = ('<NAME>')
# display it
# small = cv2.resize(frame, (0,0), fx=1, fy=1)
# small = frame
# small = cv2.resize(frame, (0,0), width, height)
# cv2.imshow('maho', frame)
cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
# cv2.setWindowProperty('Maho', cv2.WINDOW_FULLSCREEN)
# cv2.setWindowProperty('Maho', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
# cv2.setWindowProperty('Maho', cv2.WINDOW_AUTOSIZE, cv2.WINDOW_FULLSCREEN)
cv2.setWindowProperty(window_name, cv2.WINDOW_AUTOSIZE, cv2.WND_PROP_FULLSCREEN)
# cv2.namedWindow('Maho', cv2.WND_PROP_FULLSCREEN)
# cv2.resizeWindow('Maho', 1000, 700)
cv2.imshow(window_name, frame)
# handle input
keypress = cv2.waitKey(1) & 0xFF
end = time.time()
elapsed = int((end - start) * 1000)
if elapsed == 0:
pass
else:
fps = int(1000 / elapsed)
if keypress == ord('q'):
raise KeyboardInterrupt
def saveScreen():
timestr = time.strftime("%Y%m%d-%H%M%S")
cv2.imwrite('MC' + timestr + '-' + target.icao + '.png', frame)
# cv2.imwrite('MC' + timestr + '.png', frame)
print("Image saved MC" + timestr)
if keypress == ord('s'):
saveScreen()
if keypress == ord('r'):
restart_program()
if keypress == ord('+'):
min_height += 1000
elif keypress == ord('-'):
min_height -= 1000
except KeyboardInterrupt:
tracker.terminate()
tracker.join()
camera.terminate()
camera.join()
cap.release()
cv2.destroyAllWindows()
def main():
parser = argparse.ArgumentParser(
prog='maho',
        description='ADS-B assisted aircraft spotting',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--latitude', type=float, required=True, help='Latitude of the camera')
parser.add_argument('--longitude', type=float, required=True, help='Longitude of the camera')
parser.add_argument('--elevation', type=float, required=True, help='Elevation of the camera')
parser.add_argument('--camera-host', type=str, required=True, help='Camera hostname/ip')
parser.add_argument('--camera-port', type=int, default=80, help='Camera port')
parser.add_argument('--camera-user', type=str, required=True, help='Camera username')
parser.add_argument('--camera-pass', type=str, required=True, help='Camera password')
parser.add_argument('--adsb-host', type=str, default='localhost', help='dump1090 hostname/ip')
parser.add_argument('--adsb-port', type=int, default=30002, help='dump1090 TCP raw output port')
parser.add_argument('--min-height', type=float, default=30000, help='Minimum Altitude ft')
args = parser.parse_args()
try:
go_maho(
args.latitude,
args.longitude,
args.elevation,
args.camera_host,
args.camera_port,
args.camera_user,
args.camera_pass,
args.adsb_host,
args.adsb_port,
args.min_height
)
except KeyboardInterrupt:
pass
except RuntimeError as exc:
parser.error(exc)
raise SystemExit
if __name__ == "__main__":
main()
``` |
{
"source": "5G-Center-Chinatelecom/vpp",
"score": 2
} |
#### File: urpf/test/test_urpf.py
```python
import unittest
from framework import VppTestCase, VppTestRunner
from scapy.packet import Raw
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP, ICMP
from scapy.layers.inet6 import IPv6
from vpp_papi import VppEnum
N_PKTS = 63
class TestURPF(VppTestCase):
""" Unicast Reverse Path Forwarding Test Case """
@classmethod
def setUpClass(cls):
super(TestURPF, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestURPF, cls).tearDownClass()
def setUp(self):
super(TestURPF, self).setUp()
# create 4 pg interfaces so there are a few addresses
# in the FIB
self.create_pg_interfaces(range(4))
for i in self.pg_interfaces:
i.admin_up()
i.config_ip4()
i.resolve_arp()
i.config_ip6()
i.resolve_ndp()
def tearDown(self):
for i in self.pg_interfaces:
i.unconfig_ip4()
i.unconfig_ip6()
i.admin_down()
super(TestURPF, self).tearDown()
def test_urpf4(self):
""" uRPF IP4 """
e = VppEnum
p_spoof_loose = (Ether(dst=self.pg0.local_mac,
src=self.pg0.remote_mac) /
IP(src="3.3.3.3", dst=self.pg1.remote_ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100)) * N_PKTS
p_spoof_strict = (Ether(dst=self.pg0.local_mac,
src=self.pg0.remote_mac) /
IP(src=self.pg2.remote_ip4,
dst=self.pg1.remote_ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100)) * N_PKTS
p_good = (Ether(dst=self.pg0.local_mac,
src=self.pg0.remote_mac) /
IP(src=self.pg0.remote_ip4,
dst=self.pg1.remote_ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100)) * N_PKTS
#
# before adding the uRPF, ensure all packets are forwarded
#
self.send_and_expect(self.pg0, p_good, self.pg1)
self.send_and_expect(self.pg0, p_spoof_strict, self.pg1)
self.send_and_expect(self.pg0, p_spoof_loose, self.pg1)
#
# apply loose uRPF check on pg0 rx
#
self.vapi.urpf_update(is_input=True,
mode=e.vl_api_urpf_mode_t.URPF_API_MODE_LOOSE,
af=e.vl_api_address_family_t.ADDRESS_IP4,
sw_if_index=self.pg0.sw_if_index)
# good packets still pass
self.send_and_expect(self.pg0, p_good, self.pg1)
# packets from address for which there is a route are forwarded
self.send_and_expect(self.pg0, p_spoof_strict, self.pg1)
# packets from address to which there is no route are dropped
self.send_and_assert_no_replies(self.pg0, p_spoof_loose)
self.assert_error_counter_equal("ip4-rx-urpf-loose", N_PKTS)
#
# crank it up to strict mode
#
self.vapi.urpf_update(is_input=True,
mode=e.vl_api_urpf_mode_t.URPF_API_MODE_STRICT,
af=e.vl_api_address_family_t.ADDRESS_IP4,
sw_if_index=self.pg0.sw_if_index)
# good packets still pass
self.send_and_expect(self.pg0, p_good, self.pg1)
# packets that would not be routed back thru pg0 are dropped
self.send_and_assert_no_replies(self.pg0, p_spoof_strict)
self.send_and_assert_no_replies(self.pg0, p_spoof_loose)
self.assert_error_counter_equal("ip4-rx-urpf-strict", 2 * N_PKTS)
#
# disable uRPF, all traffic should pass
#
self.vapi.urpf_update(is_input=True,
mode=e.vl_api_urpf_mode_t.URPF_API_MODE_OFF,
af=e.vl_api_address_family_t.ADDRESS_IP4,
sw_if_index=self.pg0.sw_if_index)
self.send_and_expect(self.pg0, p_good, self.pg1)
self.send_and_expect(self.pg0, p_spoof_strict, self.pg1)
self.send_and_expect(self.pg0, p_spoof_loose, self.pg1)
#
# Now apply in the TX direction
# for loose it is the same deal, they should not be forwarded
# if there's no route
# for strict they should not be forwarded if they would be
# forwarded thru that interface.
#
self.vapi.urpf_update(is_input=False,
mode=e.vl_api_urpf_mode_t.URPF_API_MODE_LOOSE,
af=e.vl_api_address_family_t.ADDRESS_IP4,
sw_if_index=self.pg1.sw_if_index)
self.send_and_expect(self.pg0, p_good, self.pg1)
self.send_and_expect(self.pg0, p_spoof_strict, self.pg1)
self.send_and_assert_no_replies(self.pg0, p_spoof_loose)
self.assert_error_counter_equal("ip4-tx-urpf-loose", N_PKTS)
self.vapi.urpf_update(is_input=False,
mode=e.vl_api_urpf_mode_t.URPF_API_MODE_STRICT,
af=e.vl_api_address_family_t.ADDRESS_IP4,
sw_if_index=self.pg1.sw_if_index)
self.send_and_expect(self.pg0, p_good, self.pg1)
# the strict packet, from a peer is allowed, since it does
# not forward via pg1
self.send_and_expect(self.pg0, p_spoof_strict, self.pg1)
self.send_and_assert_no_replies(self.pg0, p_spoof_loose)
self.assert_error_counter_equal("ip4-tx-urpf-strict", N_PKTS)
# change the strict packet so that it would forward through pg1
p_spoof_strict = (Ether(dst=self.pg0.local_mac,
src=self.pg0.remote_mac) /
IP(src=self.pg1.remote_ip4,
dst=self.pg1.remote_ip4) /
UDP(sport=1234, dport=1234) /
Raw(b'\xa5' * 100)) * N_PKTS
self.send_and_assert_no_replies(self.pg0, p_spoof_strict)
self.assert_error_counter_equal("ip4-tx-urpf-strict", 2 * N_PKTS)
# cleanup
self.vapi.urpf_update(is_input=False,
mode=e.vl_api_urpf_mode_t.URPF_API_MODE_OFF,
af=e.vl_api_address_family_t.ADDRESS_IP4,
sw_if_index=self.pg1.sw_if_index)
def test_urpf6(self):
""" uRPF IP6 """
e = VppEnum
p_spoof_loose = (Ether(dst=self.pg0.local_mac,
src=self.pg0.remote_mac) /
IPv6(src="fc00:e968:6179::de52:7100", dst=self.pg1.remote_ip6) /
UDP(sport=1236, dport=1236) /
Raw(b'\xa5' * 100)) * N_PKTS
p_spoof_strict = (Ether(dst=self.pg0.local_mac,
src=self.pg0.remote_mac) /
IPv6(src=self.pg2.remote_ip6,
dst=self.pg1.remote_ip6) /
UDP(sport=1236, dport=1236) /
Raw(b'\xa5' * 100)) * N_PKTS
p_good = (Ether(dst=self.pg0.local_mac,
src=self.pg0.remote_mac) /
IPv6(src=self.pg0.remote_ip6,
dst=self.pg1.remote_ip6) /
UDP(sport=1236, dport=1236) /
Raw(b'\xa5' * 100)) * N_PKTS
#
# before adding the uRPF, ensure all packets are forwarded
#
self.send_and_expect(self.pg0, p_good, self.pg1)
self.send_and_expect(self.pg0, p_spoof_strict, self.pg1)
self.send_and_expect(self.pg0, p_spoof_loose, self.pg1)
#
# apply loose uRPF check on pg0 rx
#
self.vapi.urpf_update(is_input=True,
mode=e.vl_api_urpf_mode_t.URPF_API_MODE_LOOSE,
af=e.vl_api_address_family_t.ADDRESS_IP6,
sw_if_index=self.pg0.sw_if_index)
# good packets still pass
self.send_and_expect(self.pg0, p_good, self.pg1)
# packets from address for which there is a route are forwarded
self.send_and_expect(self.pg0, p_spoof_strict, self.pg1)
# packets from address to which there is no route are dropped
self.send_and_assert_no_replies(self.pg0, p_spoof_loose)
self.assert_error_counter_equal("ip6-rx-urpf-loose", N_PKTS)
#
# crank it up to strict mode
#
self.vapi.urpf_update(is_input=True,
mode=e.vl_api_urpf_mode_t.URPF_API_MODE_STRICT,
af=e.vl_api_address_family_t.ADDRESS_IP6,
sw_if_index=self.pg0.sw_if_index)
# good packets still pass
self.send_and_expect(self.pg0, p_good, self.pg1)
# packets that would not be routed back thru pg0 are dropped
self.send_and_assert_no_replies(self.pg0, p_spoof_strict)
self.send_and_assert_no_replies(self.pg0, p_spoof_loose)
self.assert_error_counter_equal("ip6-rx-urpf-strict", 2 * N_PKTS)
#
# disable uRPF, all traffic should pass
#
self.vapi.urpf_update(is_input=True,
mode=e.vl_api_urpf_mode_t.URPF_API_MODE_OFF,
af=e.vl_api_address_family_t.ADDRESS_IP6,
sw_if_index=self.pg0.sw_if_index)
self.send_and_expect(self.pg0, p_good, self.pg1)
self.send_and_expect(self.pg0, p_spoof_strict, self.pg1)
self.send_and_expect(self.pg0, p_spoof_loose, self.pg1)
#
# Now apply in the TX direction
# for loose it is the same deal, they should not be forwarded
# if there's no route
# for strict they should not be forwarded if they would be
# forwarded thru that interface.
#
self.vapi.urpf_update(is_input=False,
mode=e.vl_api_urpf_mode_t.URPF_API_MODE_LOOSE,
af=e.vl_api_address_family_t.ADDRESS_IP6,
sw_if_index=self.pg1.sw_if_index)
self.send_and_expect(self.pg0, p_good, self.pg1)
self.send_and_expect(self.pg0, p_spoof_strict, self.pg1)
self.send_and_assert_no_replies(self.pg0, p_spoof_loose)
self.assert_error_counter_equal("ip6-tx-urpf-loose", N_PKTS)
self.vapi.urpf_update(is_input=False,
mode=e.vl_api_urpf_mode_t.URPF_API_MODE_STRICT,
af=e.vl_api_address_family_t.ADDRESS_IP6,
sw_if_index=self.pg1.sw_if_index)
self.send_and_expect(self.pg0, p_good, self.pg1)
# the strict packet, from a peer is allowed, since it does
# not forward via pg1
self.send_and_expect(self.pg0, p_spoof_strict, self.pg1)
self.send_and_assert_no_replies(self.pg0, p_spoof_loose)
self.assert_error_counter_equal("ip6-tx-urpf-strict", N_PKTS)
# change the strict packet so that it would forward through pg1
p_spoof_strict = (Ether(dst=self.pg0.local_mac,
src=self.pg0.remote_mac) /
IPv6(src=self.pg1.remote_ip6,
dst=self.pg1.remote_ip6) /
UDP(sport=1236, dport=1236) /
Raw(b'\xa5' * 100)) * N_PKTS
self.send_and_assert_no_replies(self.pg0, p_spoof_strict)
self.assert_error_counter_equal("ip6-tx-urpf-strict", 2 * N_PKTS)
# cleanup
self.vapi.urpf_update(is_input=False,
mode=e.vl_api_urpf_mode_t.URPF_API_MODE_OFF,
af=e.vl_api_address_family_t.ADDRESS_IP6,
sw_if_index=self.pg1.sw_if_index)
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
```
#### File: src/scripts/fts.py
```python
import sys
import os
import ipaddress
import yaml
from pprint import pprint
import re
from jsonschema import validate, exceptions
import argparse
from subprocess import run, PIPE
from io import StringIO
# VPP feature JSON schema
schema = {
"$schema": "http://json-schema.org/schema#",
"type": "object",
"properties": {
"name": {"type": "string"},
"description": {"type": "string"},
"maintainer": {"$ref": "#/definitions/maintainers"},
"state": {"type": "string",
"enum": ["production", "experimental", "development"]},
"features": {"$ref": "#/definitions/features"},
"missing": {"$ref": "#/definitions/features"},
"properties": {"type": "array",
"items": {"type": "string",
"enum": ["API", "CLI", "STATS",
"MULTITHREAD"]},
},
},
"additionalProperties": False,
"definitions": {
"maintainers": {
"anyof": [{
"type": "array",
"items": {"type": "string"},
"minItems": 1,
},
{"type": "string"}],
},
"featureobject": {
"type": "object",
"patternProperties": {
"^.*$": {"$ref": "#/definitions/features"},
},
},
"features": {
"type": "array",
"items": {"anyOf": [{"$ref": "#/definitions/featureobject"},
{"type": "string"},
]},
"minItems": 1,
},
},
}
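# Illustrative example (not taken from the repo): a minimal FEATURE.yaml that
# would validate against the schema above might look like:
#
#   name: My Feature
#   maintainer: Jane Doe <[email protected]>
#   description: Short description of the feature.
#   state: development
#   features:
#     - basic forwarding
#   properties: [API, CLI]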
def filelist_from_git_status():
filelist = []
git_status = 'git status --porcelain */FEATURE.yaml'
rv = run(git_status.split(), stdout=PIPE, stderr=PIPE)
if rv.returncode != 0:
sys.exit(rv.returncode)
for l in rv.stdout.decode('ascii').split('\n'):
if len(l):
filelist.append(l.split()[1])
return filelist
def filelist_from_git_ls():
filelist = []
git_ls = 'git ls-files :(top)*/FEATURE.yaml'
rv = run(git_ls.split(), stdout=PIPE, stderr=PIPE)
if rv.returncode != 0:
sys.exit(rv.returncode)
for l in rv.stdout.decode('ascii').split('\n'):
if len(l):
filelist.append(l)
return filelist
def version_from_git():
git_describe = 'git describe'
rv = run(git_describe.split(), stdout=PIPE, stderr=PIPE)
if rv.returncode != 0:
sys.exit(rv.returncode)
return rv.stdout.decode('ascii').split('\n')[0]
class MarkDown():
_dispatch = {}
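# Each print_* method below registers itself in _dispatch under the YAML field
# name it renders, so print() can route a (field, value) pair to the matching
# renderer without a long if/elif chain.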
def __init__(self, stream):
self.stream = stream
self.toc = []
def print_maintainer(self, o):
write = self.stream.write
if type(o) is list:
write('Maintainers: ' +
', '.join(f'{m}' for m in
o) + ' \n')
else:
write(f'Maintainer: {o} \n')
_dispatch['maintainer'] = print_maintainer
def print_features(self, o, indent=0):
write = self.stream.write
for f in o:
indentstr = ' ' * indent
if type(f) is dict:
for k, v in f.items():
write(f'{indentstr}- {k}\n')
self.print_features(v, indent + 2)
else:
write(f'{indentstr}- {f}\n')
write('\n')
_dispatch['features'] = print_features
def print_markdown_header(self, o):
write = self.stream.write
write(f'## {o}\n')
version = version_from_git()
write(f'VPP version: {version}\n\n')
_dispatch['markdown_header'] = print_markdown_header
def print_name(self, o):
write = self.stream.write
write(f'### {o}\n')
self.toc.append(o)
_dispatch['name'] = print_name
def print_description(self, o):
write = self.stream.write
write(f'\n{o}\n\n')
_dispatch['description'] = print_description
def print_state(self, o):
write = self.stream.write
write(f'Feature maturity level: {o} \n')
_dispatch['state'] = print_state
def print_properties(self, o):
write = self.stream.write
write(f'Supports: {" ".join(o)} \n')
_dispatch['properties'] = print_properties
def print_missing(self, o):
write = self.stream.write
write('\nNot yet implemented: \n')
self.print_features(o)
_dispatch['missing'] = print_missing
def print_code(self, o):
write = self.stream.write
write(f'Source Code: [{o}]({o}) \n')
_dispatch['code'] = print_code
def print(self, t, o):
write = self.stream.write
if t in self._dispatch:
self._dispatch[t](self, o,)
else:
write(f'NOT IMPLEMENTED: {t}\n')
def output_toc(toc, stream):
write = stream.write
write('## VPP Feature list:\n')
for t in toc:
ref = t.lower().replace(' ', '-')
write(f'[{t}](#{ref}) \n')
def featuresort(k):
return k[1]['name']
def featurelistsort(k):
orderedfields = {
'name': 0,
'maintainer': 1,
'description': 2,
'features': 3,
'state': 4,
'properties': 5,
'missing': 6,
'code': 7,
}
return orderedfields[k[0]]
def output_markdown(features, fields, notfields):
stream = StringIO()
m = MarkDown(stream)
m.print('markdown_header', 'Feature Details:')
for path, featuredef in sorted(features.items(), key=featuresort):
codeurl = 'https://git.fd.io/vpp/tree/src/' + '/'.join(os.path.normpath(path).split('/')[1:-1])
featuredef['code'] = codeurl
for k, v in sorted(featuredef.items(), key=featurelistsort):
if notfields:
if k not in notfields:
m.print(k, v)
elif fields:
if k in fields:
m.print(k, v)
else:
m.print(k, v)
tocstream = StringIO()
output_toc(m.toc, tocstream)
return tocstream, stream
def main():
parser = argparse.ArgumentParser(description='VPP Feature List.')
parser.add_argument('--validate', dest='validate', action='store_true',
help='validate the FEATURE.yaml file')
parser.add_argument('--git-status', dest='git_status', action='store_true',
help='Get filelist from git status')
parser.add_argument('--all', dest='all', action='store_true',
help='Validate all files in repository')
parser.add_argument('--markdown', dest='markdown', action='store_true',
help='Output feature table in markdown')
parser.add_argument('infile', nargs='?', type=argparse.FileType('r'),
default=sys.stdin)
group = parser.add_mutually_exclusive_group()
group.add_argument('--include', help='List of fields to include')
group.add_argument('--exclude', help='List of fields to exclude')
args = parser.parse_args()
features = {}
if args.git_status:
filelist = filelist_from_git_status()
elif args.all:
filelist = filelist_from_git_ls()
else:
filelist = args.infile
if args.include:
fields = args.include.split(',')
else:
fields = []
if args.exclude:
notfields = args.exclude.split(',')
else:
notfields = []
for featurefile in filelist:
featurefile = featurefile.rstrip()
# Load configuration file
with open(featurefile, encoding='utf-8') as f:
cfg = yaml.load(f, Loader=yaml.SafeLoader)
try:
validate(instance=cfg, schema=schema)
except exceptions.ValidationError:
print(f'File does not validate: {featurefile}',
file=sys.stderr)
raise
features[featurefile] = cfg
if args.markdown:
stream = StringIO()
tocstream, stream = output_markdown(features, fields, notfields)
print(tocstream.getvalue())
print(stream.getvalue())
stream.close()
if __name__ == '__main__':
main()
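# Illustrative invocations (based on the flags defined in main(); paths are examples):
#   python fts.py --all --markdown                       # render every FEATURE.yaml as markdown
#   python fts.py --git-status                           # check only FEATURE.yaml files touched in git
#   python fts.py --all --markdown --include name,description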
```
#### File: vpp/test/test_node_variants.py
```python
import re
import unittest
import platform
from framework import VppTestCase
def checkX86():
return platform.machine() in ["x86_64", "AMD64"]
def skipVariant(variant):
with open("/proc/cpuinfo") as f:
cpuinfo = f.read()
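# /proc/cpuinfo advertises CPU features on a "flags :" line, e.g.
# "flags : fpu vme ... avx2 ... avx512f avx512_bitalg ...";
# the regex below captures the requested feature name from that line.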
exp = re.compile(
r'(?:flags\s+:)(?:\s\w+)+(?:\s(' + variant + r'))(?:\s\w+)+',
re.DOTALL | re.MULTILINE)
match = exp.search(cpuinfo)
return checkX86() and match is not None
class TestNodeVariant(VppTestCase):
""" Test Node Variants """
@classmethod
def setUpConstants(cls, variant):
super(TestNodeVariant, cls).setUpConstants()
# find the position of node_variants in the cmdline args.
if checkX86():
node_variants = cls.vpp_cmdline.index("node { ") + 1
cls.vpp_cmdline[node_variants] = ("default { variant default } "
"ip4-rewrite { variant " +
variant + " } ")
@classmethod
def setUpClass(cls):
super(TestNodeVariant, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestNodeVariant, cls).tearDownClass()
def setUp(self):
super(TestNodeVariant, self).setUp()
def tearDown(self):
super(TestNodeVariant, self).tearDown()
def getActiveVariant(self, node):
node_desc = self.vapi.cli("show node " + node)
self.logger.info(node_desc)
match = re.search(r'\s+(\S+)\s+(\d+)\s+(?:yes)',
node_desc, re.DOTALL | re.MULTILINE)
return match.groups(0)
def checkVariant(self, variant):
""" Test node variants defaults """
variant_info = self.getActiveVariant("ip4-lookup")
self.assertEqual(variant_info[0], "default")
variant_info = self.getActiveVariant("ip4-rewrite")
self.assertEqual(variant_info[0], variant)
class TestICLVariant(TestNodeVariant):
""" Test icl Node Variants """
VARIANT = "icl"
LINUX_VARIANT = "avx512_bitalg"
@classmethod
def setUpConstants(cls):
super(TestICLVariant, cls).setUpConstants(cls.VARIANT)
@classmethod
def setUpClass(cls):
super(TestICLVariant, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestICLVariant, cls).tearDownClass()
@unittest.skipUnless(skipVariant(LINUX_VARIANT),
VARIANT + " not a supported variant, skip.")
def test_icl(self):
self.checkVariant(self.VARIANT)
class TestSKXVariant(TestNodeVariant):
""" Test skx Node Variants """
VARIANT = "skx"
LINUX_VARIANT = "avx512f"
@classmethod
def setUpConstants(cls):
super(TestSKXVariant, cls).setUpConstants(cls.VARIANT)
@classmethod
def setUpClass(cls):
super(TestSKXVariant, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestSKXVariant, cls).tearDownClass()
@unittest.skipUnless(skipVariant(LINUX_VARIANT),
VARIANT + " not a supported variant, skip.")
def test_skx(self):
self.checkVariant(self.VARIANT)
class TestHSWVariant(TestNodeVariant):
""" Test avx2 Node Variants """
VARIANT = "hsw"
LINUX_VARIANT = "avx2"
@classmethod
def setUpConstants(cls):
super(TestHSWVariant, cls).setUpConstants(cls.VARIANT)
@classmethod
def setUpClass(cls):
super(TestHSWVariant, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestHSWVariant, cls).tearDownClass()
@unittest.skipUnless(skipVariant(LINUX_VARIANT),
VARIANT + " not a supported variant, skip.")
def test_hsw(self):
self.checkVariant(self.VARIANT)
```
#### File: vpp/test/test_offload.py
```python
import unittest
from framework import VppTestCase, VppTestRunner, running_gcov_tests
from vpp_ip_route import VppIpTable, VppIpRoute, VppRoutePath
class TestOffload(VppTestCase):
""" Offload Unit Test Cases """
@classmethod
def setUpClass(cls):
super(TestOffload, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestOffload, cls).tearDownClass()
def setUp(self):
super(TestOffload, self).setUp()
def tearDown(self):
super(TestOffload, self).tearDown()
def test_offload_unittest(self):
""" Checksum Offload Test """
cmds = ["loop create",
"set int ip address loop0 11.22.33.1/24",
"set int state loop0 up",
"loop create",
"set int ip address loop1 11.22.34.1/24",
"set int state loop1 up",
"set ip neighbor loop1 11.22.34.44 03:00:11:22:34:44",
"packet-generator new {\n"
" name s0\n"
" limit 100\n"
" size 128-128\n"
" interface loop0\n"
" tx-interface loop1\n"
" node loop1-output\n"
" buffer-flags ip4 offload-ip-cksum offload-udp-cksum\n"
" data {\n"
" IP4: 1.2.3 -> dead.0000.0001\n"
" UDP: 11.22.33.44 -> 11.22.34.44\n"
" ttl 2 checksum 13\n"
" UDP: 1234 -> 2345\n"
" checksum 11\n"
" incrementing 114\n"
" }\n"
"}",
"trace add pg-input 1",
"pa en",
"show error"]
for cmd in cmds:
r = self.vapi.cli_return_response(cmd)
if r.retval != 0:
if hasattr(r, 'reply'):
self.logger.info(cmd + " FAIL reply " + r.reply)
else:
self.logger.info(cmd + " FAIL retval " + str(r.retval))
r = self.vapi.cli_return_response("show trace")
self.assertTrue(r.retval == 0)
self.assertTrue(hasattr(r, 'reply'))
rv = r.reply
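# The trace should show the packets reaching ethernet-input; because the
# buffers carry the checksum-offload flags set above, the trace must not
# contain a "should be" note flagging a bad checksum.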
look_here = rv.find('ethernet-input')
self.assertFalse(look_here == -1)
bad_checksum_index = rv[look_here:].find('should be')
self.assertTrue(bad_checksum_index == -1)
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
``` |
{
"source": "5GCity/5GCity-AAA",
"score": 2
} |
#### File: 5GCity-AAA/commands/configuration.py
```python
import os
import subprocess
import sys
from getpass import getpass
import bcrypt
from commands import Command
from commands.docker import DockerBuilder
from config import YamlLoader
class Configuration(Command):
VALUES = {}
@classmethod
def build_parser(mcs, parser):
pass
@classmethod
def execute(mcs, args):
base = YamlLoader.load(f'{mcs.COMPOSE_DIR}/compose-aaa-base.yml')
mcs.common_config(base)
mcs.nginx(base)
mcs.gravitee(base)
YamlLoader.save(base, f'{mcs.COMPOSE_DIR}/compose-aaa.yml')
# Ensure Compose ENVs are enforced
subprocess.run(DockerBuilder.COMMAND_MAPPER["start"].split(' '))
@classmethod
def common_config(mcs, base):
parameters = {
"SERVER_NAME": "Insert the Dashboard server name to be used with the http protocol."
"E.g. IP: http://192.168.1.1 or FQDN: http://5g-dashboard.i2cat.net: ",
"MONITORING_GRAFANA": "Insert the Grafana monitoring UI server to be used."
"E.g., http://192.168.1.1 or FQDN: http://monitoring.5gcity.com: "
}
for key, value in parameters.items():
val = input(value)
mcs.VALUES[key] = val
mcs.dashboard_config(base)
@classmethod
def dashboard_config(mcs, base):
dash_path = input("Insert the Dashboard's full path (e.g. /home/5GCITY/dev/5GCity-Dashboard-new): ")
if dash_path.endswith("/"):
dash_path = dash_path[:-1]
# Validate the folder
if not os.path.exists(dash_path):
sys.exit("The provided path don't exist.")
if not os.path.isdir(dash_path):
sys.exit("The provided path is not a dir.")
dockerfile = os.path.join(dash_path, "Dockerfile")
keycloak_json = os.path.join(dash_path, "public", "keycloak_base.json")
if not os.path.exists(dockerfile) or not os.path.exists(keycloak_json):
sys.exit("Dockerfile or public/keycloak_base.json are missing don't exist.")
with open(keycloak_json, "r") as file:
kj = file.read()
with open(os.path.join(dash_path, "public", "keycloak.json"), "w") as file:
kj = kj.replace("<AUTH_SERVER_URL>", f"{mcs.VALUES['SERVER_NAME']}/auth")
file.write(kj)
base["services"]["dashboard"]["build"]["context"] = dash_path
for enum, _ in enumerate(base["services"]["dashboard"]["build"]["args"]):
for key in mcs.VALUES.keys():
if key in base["services"]["dashboard"]["build"]["args"][enum]:
base["services"]["dashboard"]["build"]["args"][enum] = \
base["services"]["dashboard"]["build"]["args"][
enum].replace(key, mcs.VALUES[key], 1)
for enum, _ in enumerate(base["services"]["dashboard"]["volumes"]):
base["services"]["dashboard"]["volumes"][enum] = base["services"]["dashboard"]["volumes"][enum].replace(
"DASH_PATH", dash_path)
@classmethod
def gravitee(mcs, base):
def load_base_yml():
return YamlLoader.load(f'{mcs.COMPOSE_DIR}/gravitee/api/gravitee_base.yml')
def save_yaml():
YamlLoader.save(gravitee_base, f'{mcs.COMPOSE_DIR}/gravitee/api/gravitee.yml')
def set_pwd():
username = input("Please provide the admin's username: ")
while True:
pwd = getpass(prompt="Please set admin password: ")
pwd_confirm = getpass(prompt="Please confirm your password: ")
if pwd != pwd_confirm:
print("The password and password confirmation don't match.")
elif len(pwd) < 5:
print("Please provide a password with 5 characters at least")
else:
break
# Encrypt new password
hashed = bcrypt.hashpw(pwd.encode("UTF-8"), bcrypt.gensalt(prefix=b"2a")).decode("utf-8")
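# bcrypt.hashpw with the b"2a" prefix produces a "$2a$..." style hash, so only
# the hash (not the plain-text password) ends up in gravitee.yml below.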
users = gravitee_base.get("security").get("providers")[0]
users["username"] = username
users["password"] = str(<PASSWORD>)
users["users"][0]["username"] = username
def set_mng_uri():
env = base["services"]["management_ui"]["environment"][0]
base["services"]["management_ui"]["environment"][0] = env.replace("SERVER_NAME", mcs.VALUES["SERVER_NAME"])
print("***Gravitee configuration***")
gravitee_base = load_base_yml()
set_pwd()
save_yaml()
set_mng_uri()
@classmethod
def nginx(mcs, base):
print("***NGINX configuration***")
ssl = None
while ssl not in ["y", "n"]:
ssl = input("Are you going to use SSL (y/n): ")
ssl = ssl == "y" # Convert variable to bool
folder = 'https' if ssl else 'http'
# Configure nginx Files
mcs.nginx_conf(f'{mcs.COMPOSE_DIR}/nginx/{folder}/')
if ssl == "y":
print(f"Please put the cert file and private key file on {mcs.COMPOSE_DIR}/nginx/{folder}/cert")
print("With the names bundle.cert and privatekey.key")
# Configure compose file
mcs.nginx_compose(base, ssl)
@classmethod
def nginx_conf(mcs, config_folder):
with open(config_folder + "nginx_base.conf", "r") as file:
conf = file.read()
value = mcs.VALUES["SERVER_NAME"].split("//")[1]
conf = conf.replace(f"@SERVER_NAME", value)
with open(config_folder + 'nginx.conf', "w") as file:
file.write(conf)
@classmethod
def nginx_compose(mcs, base, ssl):
nginx = base.get("services").get("nginx-service")
folder = "./nginx/https/" if ssl else "./nginx/http/"
nginx["volumes"] = []
nginx.get("volumes").append(f"{folder}nginx.conf:/etc/nginx/conf.d/aaa.conf")
if ssl:
nginx.get("ports").append("443:443")
nginx.get("volumes").append(f"{folder}cert:/etc/nginx/ssl/cert/")
nginx.get("volumes").append(
f"{folder}server_names_hash_bucket_size.conf:/etc/nginx/conf.d/server_names_hash_bucket_size.conf")
```
#### File: 5GCity-AAA/gravitee/gravitee.py
```python
import json
import os
import re
import sys
from dataclasses import dataclass
from getpass import getpass
from typing import ClassVar, Dict
import requests
from requests.auth import HTTPBasicAuth
# TODO: Handle Errors
@dataclass
class Gravitee:
ENDPOINTS: ClassVar[Dict[str, str]] = {
'apis': 'apis',
'apis_deploy': 'apis/{}/deploy',
'apis_lifecycle': 'apis/{}',
'export_api': 'apis/{}/export',
'export_subscriptions': 'apis/{}/subscriptions',
'export_applications': 'applications/{}',
'import_api': 'apis/import',
'import_applications': 'applications',
'import_subscriptions': 'apis/{}/subscriptions',
'plans': 'apis/{}/plans'
}
username: str
password: str
base_url: str
@classmethod
def build(cls, base_url):
_args = [input("Gravitee username: "), getpass(prompt="Gravitee password: "), base_url]
instance = Gravitee(*_args)
for key in instance.ENDPOINTS.keys():
instance.ENDPOINTS[key] = instance.base_url + instance.ENDPOINTS[key]
return instance
@property
def authentication(self):
return HTTPBasicAuth(self.username, self.password)
@staticmethod
def write_directory(api, create_dir=True):
path = os.path.join(os.getcwd(), 'gravitee', 'data', api)
if not os.path.exists(path) and create_dir:
os.makedirs(path)
elif not os.path.exists(path):
raise ValueError("Provided API don't exist")
return path
@staticmethod
def __validate_response__(response):
if response.status_code <= 299:
return
exit_message = f"Command error with code {response.status_code} "
if response.text:
exit_message += f"and message {response.text}"
sys.exit(exit_message)
def export_api(self, name):
def __api_id__():
# Collect API id
r = requests.get(self.ENDPOINTS['apis'], params={'name': name}, auth=self.authentication)
Gravitee.__validate_response__(r)
data = r.json()
if not data:
raise ValueError("The provided name don't exist")
return data[0]['id']
def __api__():
# Export the API
r = requests.get(self.ENDPOINTS['export_api'].format(api_id), auth=self.authentication)
Gravitee.__validate_response__(r)
with open(os.path.join(self.write_directory(name), "api.json"), 'w') as f:
f.write(json.dumps(r.json(), indent=4))
def __subscriptions__():
# Export Subscriptions
r = requests.get(self.ENDPOINTS['export_subscriptions'].format(api_id), auth=self.authentication)
Gravitee.__validate_response__(r)
data = r.json()
with open(os.path.join(self.write_directory(name), "subscriptions.json"), 'w') as f:
f.write(json.dumps(data, indent=4))
return [subscription['application'] for subscription in data['data']]
def __application__():
# Export applications
data = []
for app in applications:
r = requests.get(self.ENDPOINTS['export_applications'].format(app), auth=self.authentication)
Gravitee.__validate_response__(r)
to_remove = ('id', 'status', 'created_at', 'updated_at', 'owner')
app_data = r.json()
for k in to_remove:
app_data.pop(k, None)
data.append(app_data)
with open(os.path.join(self.write_directory(name), "applications.json"), 'w') as f:
f.write(json.dumps(data, indent=4))
api_id = __api_id__()
__api__()
applications = __subscriptions__()
__application__()
def import_api(self, name, dev=False):
def change_values(data):
if not dev:
params = re.findall("@.*?@", data)
for param in set(params):
if '_ip' in param.lower():
print("\nPlease provide the protocol, e.g., http://192.168.1.1")
val = input(f"{param.replace('@', '')} value: ")
if val.endswith("/"):
val = val[:-1]
data = data.replace(param, val)
return json.loads(data)
def __api__():
with open(os.path.join(reading_directory, 'api.json'), 'r') as f:
data = f.read()
data = change_values(data)
r = requests.post(self.ENDPOINTS['import_api'], auth=self.authentication, json=data)
Gravitee.__validate_response__(r)
r = requests.get(self.ENDPOINTS['apis'], params={'name': name}, auth=self.authentication)
Gravitee.__validate_response__(r)
data = r.json()
if not data:
raise ValueError("The provided name don't exist")
return data[0]['id']
def __application__():
with open(os.path.join(reading_directory, 'applications.json'), 'r') as f:
data = json.load(f)
for app in data:
requests.post(self.ENDPOINTS['import_applications'], auth=self.authentication, json=app)
# Ignore response validation
# Gravitee.__validate_response__(r)
def __subscriptions__():
with open(os.path.join(reading_directory, 'subscriptions.json'), 'r') as f:
data = json.load(f)
for sub in data['data']:
payload = {
'application': self.get_app_by_name(data['metadata'][sub['application']]),
'plan': self.get_plan_by_name(api_id, data['metadata'][sub['plan']])
}
r = requests.post(self.ENDPOINTS['import_subscriptions'].format(api_id), auth=self.authentication,
params=payload)
Gravitee.__validate_response__(r)
def __start_api__():
r = requests.post(self.ENDPOINTS['apis_deploy'].format(api_id), auth=self.authentication)
Gravitee.__validate_response__(r)
r = requests.post(self.ENDPOINTS['apis_lifecycle'].format(api_id), auth=self.authentication,
params={'action': 'START'})
Gravitee.__validate_response__(r)
reading_directory = self.write_directory(name)
api_id = __api__()
__application__()
__subscriptions__()
__start_api__()
def get_plan_by_name(self, api, name):
r = requests.get(self.ENDPOINTS['plans'].format(api), auth=self.authentication,
params={'name': name})
Gravitee.__validate_response__(r)
for plan in r.json():
if plan['name'] == name['name']:
return plan['id']
def get_app_by_name(self, name):
r = requests.get(self.ENDPOINTS['import_applications'], auth=self.authentication)
Gravitee.__validate_response__(r)
for app in r.json():
if app['name'] == name['name']:
return app['id']
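# Illustrative usage (the URL and API name below are placeholders, not values
# defined by this module):
#   g = Gravitee.build("http://localhost:8083/management/")
#   g.export_api("My-API")   # writes gravitee/data/My-API/{api,subscriptions,applications}.json
#   g.import_api("My-API")   # re-creates the API, applications and subscriptions, then starts it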
```
#### File: 5GCity-AAA/keycloak/keycloak.py
```python
import json
import os
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from typing import Dict, Any, ClassVar
import requests
from config import DockerParser
@dataclass
class KeyCloak:
ENDPOINTS: ClassVar[Dict[str, str]] = {
'open_id': '/realms/master/protocol/openid-connect/token',
'realm': '/admin/realms'
}
username: str
password: str
client_id: str
base_url: str
_access_token: Dict[str, Any] = field(init=False, default=None)
@classmethod
def build(cls):
_args = [
DockerParser().get_docker_service('keycloak', 'KEYCLOAK_USER'),
DockerParser().get_docker_service('keycloak', 'KEYCLOAK_PASSWORD'),
'admin-cli',
DockerParser().get_docker_service('keycloak', 'AAA_AUTH_BASE_URL')
]
instance = KeyCloak(*_args)
for key in instance.ENDPOINTS.keys():
instance.ENDPOINTS[key] = instance.base_url + instance.ENDPOINTS[key]
return instance
# === Auth methods ===
@property
def access_token(self):
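# Return the cached bearer token while it is still valid, refresh it when
# only the refresh token remains valid, and otherwise request a new token
# with the password grant against the openid-connect token endpoint.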
if self._is_token_valid():
return self._access_token['access_token']
elif self._is_token_valid(refresh=True):
self._renew_token()
return self._access_token['access_token']
payload = {
'username': self.username, 'password': self.password, 'client_id': self.client_id, 'grant_type': 'password'
}
r = requests.post(self.ENDPOINTS['open_id'], data=payload)
if r.status_code > 299:
raise BaseException(f'Invalid return code {r.status_code} with message: {r.text}')
self._access_token = r.json()
self._set_token_expiration_dates()
return self._access_token['access_token']
@property
def auth_header(self):
return {'Authorization': f'bearer {self.access_token}'}
def _is_token_valid(self, refresh=False):
if not self._access_token:
return False
token = 'refresh_expires_in' if refresh else 'expires_in'
return datetime.now() < self._access_token[token]
def _renew_token(self):
if not self._is_token_valid(refresh=True):
return self.access_token
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
data = {
'username': self.username, 'password': self.password, 'client_id': self.client_id,
'grant_type': 'refresh_token', 'refresh_token': self._access_token['refresh_token']
}
r = requests.post(self.ENDPOINTS['open_id'], headers=headers, data=data)
if r.status_code > 299:
raise BaseException(f'Invalid return code {r.status_code} with message: {r.text}')
self._access_token = r.json()
self._set_token_expiration_dates()
def _set_token_expiration_dates(self):
# Set expiration to python datetime objects
self._access_token['expires_in'] = datetime.now() + timedelta(seconds=self._access_token['expires_in'])
self._access_token['refresh_expires_in'] = datetime.now() + timedelta(
seconds=self._access_token['refresh_expires_in'])
# === Getters ===
def get_realms(self):
r = requests.get(self.ENDPOINTS['realm'], headers=self.auth_header)
if r.status_code > 299:
raise BaseException(f'Invalid return code {r.status_code} with message: {r.text}')
return r.json()
# === Import Methods ===
def import_realm(self, file):
with open(file, 'r') as file:
realm = json.load(file)
r = requests.post(self.ENDPOINTS['realm'], headers=self.auth_header, json=realm)
if r.status_code > 299:
raise BaseException(f'Invalid return code {r.status_code} with message: {r.text}')
else:
print(f'Successfully imported realm {realm["realm"]}')
# === Export Methods ===
def export_realm(self, folder=None):
write_directory = os.path.join(folder, 'realms') if folder else 'realms'
realms = self.get_realms()
if not os.path.exists(write_directory):
os.makedirs(write_directory)
for realm in realms:
with open(os.path.join(write_directory, f"{realm['realm']}.json"), 'w') as f:
f.write(json.dumps(realm, indent=4))
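# Illustrative usage (credentials and base URL are read from the docker-compose
# configuration via DockerParser; the realm file path below is a placeholder):
#   kc = KeyCloak.build()
#   kc.import_realm('realms/my-realm.json')
#   kc.export_realm()   # dumps every realm as JSON under ./realms/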
``` |
{
"source": "5GCity/5GCity-infrastructure-abstraction",
"score": 2
} |
#### File: lib/adapters/ruckus.py
```python
import json, requests
import uuid
from urllib3.exceptions import InsecureRequestWarning
from copy import deepcopy
_headers = {
'Content-type': 'application/json;charset=UTF-8',
}
EMPTY_TOPOLOGY = {
"boxes": [],
"links": []
}
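# Note on phy_id_mapping (RUCKUS_ID_MAPPING in conf/config.py): judging by how
# this adapter indexes it, the mapping is assumed to contain a top-level
# "zone_id" (used when creating/removing WLANs) plus one entry per physical
# interface id, roughly:
#   {
#       "zone_id": "<ruckus zone id>",
#       "<phy id>": {"type": "2.4GHZ" | "5GHZ" | "WIRED",
#                    "zone_id": "...", "apgroup_id": "...", "wlangroup_id": "..."},
#       ...
#   }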
class RuckusWiFi(object):
"""Integration for Ruckus Controller
Arguments:
Wireless {[type]} -- [description]
"""
def __init__(self,
controller_id=None,
ip='127.0.0.1',
port=8080,
url="http://{}:{}/",
topology=EMPTY_TOPOLOGY,
chunks=[],
phy_id_mapping=None,
username=None,
password=None):
self.id = controller_id
self._ip = ip
self._port = port
self._url = url.format(ip,port)
self._topology = topology
self._phy_id_mapping = phy_id_mapping
self._username = username
self._password = password
self._chunks = {}
for chunk in chunks:
self._chunks[chunk["id"]] = chunk
# Disable Warnings about insecure SSL being used
requests.packages.urllib3.disable_warnings(
category=InsecureRequestWarning)
# GET WIRELESS CONFIG FROM RUCKUS CONTROLLER
# {
# "channelNumber": 36,
# "channelBandwidth": 40,
# "txPower": 2000
# }
ticket = self.__login()
# TODO: Implement GET tx Power, by default now is "Full"
for box in self._topology["boxes"]:
for phy in box["phys"]:
if self._phy_id_mapping[phy["id"]]["type"] == "2.4GHZ":
u_path = '/v7_0/rkszones/{p[zone_id]}/apgroups/{p[apgroup_id]}?serviceTicket={t}'.\
format(p=self._phy_id_mapping[phy["id"]],t=ticket)
url = self._url + u_path
resp = requests.get(
url,
headers=_headers,
verify = False
)
if resp.status_code == 200:
ap_group = json.loads(resp.text)
phy["config"] = {
"channelNumber": ap_group["wifi24"]["channel"],
"channelBandwidth": ap_group["wifi24"]["channelWidth"],
"txPower": 3600
}
if self._phy_id_mapping[phy["id"]]["type"] == "5GHZ":
u_path = '/v7_0/rkszones/{p[zone_id]}/apgroups/{p[apgroup_id]}?serviceTicket={t}'.\
format(p=self._phy_id_mapping[phy["id"]],t=ticket)
url = self._url + u_path
resp = requests.get(
url,
headers=_headers,
verify = False
)
if resp.status_code == 200:
ap_group = json.loads(resp.text)
phy["config"] = {
"channelNumber": ap_group["wifi50"]["indoorChannel"],
"channelBandwidth": ap_group["wifi50"]["channelWidth"],
"txPower": 3600
}
self.__logoff(ticket)
# chunkete-topology-controller implementation
def getChunketeTopology(self):
return deepcopy(self._topology), 200
def putInterfaceLTEConfig(self, phy_id, parameters):
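# This Wi-Fi adapter has no LTE backend, so LTE configuration requests are
# always rejected.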
return '',401
def putInterfaceType(self, phy_id, phy_type):
for box in self._topology["boxes"]:
for phy in box["phys"]:
if phy["id"] == phy_id:
if phy["type"] != phy_type:
return '', 401
else:
return '', 200
def putInterfaceWiredConfig(self, phy_id, parameters):
for box in self._topology["boxes"]:
for phy in box["phys"]:
if phy["id"] == phy_id:
if phy["config"] != json.loads(parameters):
return '',401
else:
return '',200
def putInterfaceWirelessConfig(self, phy_id, parameters):
# Create a session on Ruckus controller
ticket = self.__login()
# TODO: Implement tx Power, by default now is "Full"
for box in self._topology["boxes"]:
for phy in box["phys"]:
if phy["id"] == phy_id:
if self._phy_id_mapping[phy_id]["type"] == "2.4GHZ":
config = {
"wifi24": {
"channelWidth": json.loads(
parameters)["channelBandwidth"],
"channel": json.loads(
parameters)["channelNumber"],
}
}
u_path = '/v7_0/rkszones/{p[zone_id]}/'.format(
p=self._phy_id_mapping[phy_id])
u_path += '/apgroups/{p[apgroup_id]}'.format(
p=self._phy_id_mapping[phy_id])
u_path += '?serviceTicket={t}'.format(
p=self._phy_id_mapping[phy_id], t=ticket)
url = self._url + u_path
resp = requests.patch(
url,
data=json.dumps(config),
headers=_headers,
verify = False
)
self.__logoff(ticket)
if resp.status_code == 204:
phy["config"] = json.loads(parameters)
return '', 201
else:
return '', 401
elif self._phy_id_mapping[phy_id]["type"] == "5GHZ":
config = {
"wifi50": {
"channelWidth": json.loads(
parameters)["channelBandwidth"],
"indoorChannel": json.loads(
parameters)["channelNumber"],
"outdoorChannel": json.loads(
parameters)["channelNumber"]
}
}
u_path = '/v7_0/rkszones/{p[zone_id]}/'.format(
p=self._phy_id_mapping[phy_id])
u_path += '/apgroups/{p[apgroup_id]}'.format(
p=self._phy_id_mapping[phy_id])
u_path += '?serviceTicket={t}'.format(
p=self._phy_id_mapping[phy_id], t=ticket)
url = self._url + u_path
resp = requests.patch(
url,
data=json.dumps(config),
headers=_headers,
verify = False
)
self.__logoff(ticket)
if resp.status_code == 204:
phy["config"] = json.loads(parameters)
return '', 201
else:
return '', 401
elif self._phy_id_mapping[phy_id]["type"] == "WIRED":
# TODO: Wired interface config implied
return '', 201
# chunkete-chunk-controller implementation
def getAllChunks(self):
return [self._chunks[key] for key in self._chunks.keys()], 200
def registerNewChunk(self, content):
chunk = json.loads(content)
id = str(uuid.uuid4())
self._chunks[id] = chunk
self._chunks[id]["id"]=id
data = {
"id": id
}
return data, 201
def getChunkById(self, chunk_id):
return self._chunks[chunk_id]
def removeExistingChunk(self, chunk_id):
del self._chunks[chunk_id]
return '', 200
# chunkete-swam-controller implementation
def getAllSWAMServices(self, chunk_id):
return [
service for service in self._chunks[chunk_id]["serviceList"]
]
def registerNewSWAMService(self, chunk_id, content):
service = json.loads(content)
if service["wirelessConfig"]["encryption"] == "NONE":
encryption = {
"method": "None",
}
elif service["wirelessConfig"]["encryption"] == "WPA":
encryption = {
"method": "WPA_Mixed",
"algorithm": "TKIP_AES",
"passphrase": service["wirelessConfig"]["password"],
}
elif service["wirelessConfig"]["encryption"] == "WPA2":
encryption = {
"method": "WPA2",
"algorithm": "AES",
"mfp": "disabled",
"passphrase": service["wirelessConfig"]["password"],
}
elif service["wirelessConfig"]["encryption"] == "WEP":
encryption = {
"method": "WEP_64",
"keyIndex": 1,
"keyInHex": service["wirelessConfig"]["password"],
}
else:
return '',401
# Create WLAN
ticket = self.__login()
u_path = '/v7_0/rkszones/{p[zone_id]}/wlans?serviceTicket={t}'.format(
p=self._phy_id_mapping, t=ticket)
url = self._url + u_path
wlan = {
"name": service["wirelessConfig"]["ssid"],
"ssid": service["wirelessConfig"]["ssid"],
"description": "Created by 5GCity Slice Manager",
"encryption": encryption,
"vlan": {
"accessVlan": service["vlanId"],
}
}
resp = requests.post(
url,
data= json.dumps(wlan),
headers=_headers,
verify= False
)
# If WLAN Created OK, raise it up on each of the interfaces
if resp.status_code == 201:
service["id"] = json.loads(resp.text)["id"]
return_data = {
"id": service["id"]
}
data = {
"id": service["id"]
}
for phy in service["selectedPhys"]:
if self._phy_id_mapping[phy]["type"] in ["2.4GHZ","5GHZ"]:
u_path = '/v7_0/rkszones/{p[zone_id]}/'.format(
p=self._phy_id_mapping[phy])
u_path += 'wlangroups/{p[wlangroup_id]}/'.format(
p=self._phy_id_mapping[phy]
)
u_path += 'members?serviceTicket={t}'.format(
t=ticket)
url = self._url + u_path
resp = requests.post(
url,
data= json.dumps(data),
headers=_headers,
verify= False
)
if resp.status_code != 201:
u_path = '/v7_0/rkszones/{}/wlangroups/{}/members?serviceTicket={}'.format(
self._phy_id_mapping["zone_id"], data["id"],ticket)
url = self._url + u_path
resp = requests.delete(
url,
headers=_headers,
verify = False
)
return resp.text, 401
self._chunks[chunk_id]["serviceList"].append(service)
self.__logoff(ticket)
return return_data, 201
else:
return resp.text, 401
def getSWAMServiceById(self, chunk_id, service_id):
for service in self._chunks[chunk_id]["serviceList"]:
if service["id"] == service_id:
return service
return '', 404
def removeExistingSWAMService(self, chunk_id, service_id):
service_list = self._chunks[chunk_id]["serviceList"]
for index in range(len(service_list)):
if service_list[index]["id"] == service_id:
# Remove WLAN
ticket = self.__login()
u_path = '/v7_0/rkszones/{}/wlans/{}?serviceTicket={}'.format(
self._phy_id_mapping["zone_id"], service_id, ticket)
url = self._url + u_path
resp = requests.delete(
url,
headers=_headers,
verify= False
)
if resp.status_code == 204:
self._chunks[chunk_id]["serviceList"].pop(index)
self.__logoff(ticket)
return '', 200
else:
self.__logoff(ticket)
return resp.text, 401
return '', 404
# Session management on Ruckus controller
def __login(self):
url = (self._url + "/v7_0/serviceTicket")
ticket_request = {
"username": self._username,
"password": self._password,
}
resp = requests.post(
url,
data= json.dumps(ticket_request),
headers=_headers,
verify=False
)
return json.loads(resp.text)["serviceTicket"]
def __logoff(self, ticket):
url = self._url
url += "/v7_0/serviceTicket?serviceTicket={}".format(ticket)
requests.delete(
url,
headers=_headers,
verify=False)
```
#### File: proxy/web/app_web.py
```python
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy import Column, ForeignKey, Integer, String
from datetime import datetime
from werkzeug.middleware.proxy_fix import ProxyFix
from flask import Flask, Response, jsonify, render_template, request
import logging
import os
import sys
import json
import uuid
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from lib.adapters.ruckus import RuckusWiFi
from lib.adapters.i2cat import I2catController
from conf.config import CONTROLLERS, RUCKUS_ID_MAPPING, RUCKUS_INIT_TOPOLOGY
# Logger configuration
log_filename = "logs/output.log"
os.makedirs(os.path.dirname(log_filename), exist_ok=True)
logging.basicConfig(
format="%(asctime)s [%(levelname)s] %(funcName)s %(message)s",
datefmt='%Y-%m-%d %H:%M:%S', filename=log_filename, level=logging.INFO)
logging.getLogger('requests').setLevel(logging.ERROR)
logger = logging.getLogger()
log_base = "{}:{}:{}" # INTERFACE,endpoint,REQ/RESP,content
# Flask app
app = Flask(__name__)
app.config.from_object(__name__)
# Define database
Base = declarative_base()
engine = create_engine('sqlite:///file.db', echo=False)
def generate_uuid():
return str(uuid.uuid4())
class Chunk(Base):
__tablename__ = 'chunks'
id = Column(String, primary_key=True, default=generate_uuid)
# controllers_chunk is a dictionary where the keys are the ids of the
# controller and the value is a list of the chunk in that controller
# in the form "{controllerid1:[chunkid,...],controllerid2:...}"
controllers_chunk = Column(String)
# controllers_phys is a dictionary where the keys are the ids of the
# controller and the value is a list of the chunk in that controller
# in the form "{controllerid1:[chunkid,...],controllerid2:...}"
controllers_phys = Column(String)
phyList = Column(String)
name = Column(String)
assignedQuota = Column(String)
serviceList = Column(String)
linkList = Column(String)
chunk_json = Column(String)
def __repr__(self):
return "{}, {}, {}, {}, {}, {}, {}, {}, {}".format(
self.id,
self.controllers_chunk,
self.controllers_phys,
self.phyList,
self.name,
self.assignedQuota,
self.serviceList,
self.linkList,
self.chunk_json
)
class Box(Base):
__tablename__ = 'boxes'
id = Column(String, primary_key=True, default=generate_uuid)
controller_id = Column(Integer)
box_id_controller = Column(String)
name = Column(String)
location = Column(String)
phys = Column(String)
box_json = Column(String)
def __repr__(self):
return "{}, {}, {}, {}, {}, {}, {}".format(
self.id,
self.controller_id,
self.box_id_controller,
self.name,
self.location,
self.phys,
self.box_json
)
class Phy(Base):
__tablename__ = 'phys'
id = Column(String, primary_key=True, default=generate_uuid)
controller_id = Column(Integer)
phy_id_controller = Column(String)
type = Column(String)
name = Column(String)
config = Column(String)
virtualInterfaceList = Column(String)
phy_json = Column(String)
def __repr__(self):
return "{}, {}, {}, {}, {}, {}, {}, {}".format(
self.id,
self.controller_id,
self.phy_id_controller,
self.type, self.name,
self.config,
self.virtualInterfaceList,
self.phy_json
)
class Vif(Base):
__tablename__ = 'vifs'
id = Column(String, primary_key=True, default=generate_uuid)
service_id = Column(String)
controller_id = Column(Integer)
phy_id = Column(String)
name = Column(String)
vif_json = Column(String)
def __repr__(self):
return "{}, {}, {}, {}, {}, {}".format(
self.id,
self.service_id,
self.controller_id,
self.phy_id,
self.name,
self.vif_json
)
class Vlan(Base):
__tablename__ = 'vlans'
id = Column(String, primary_key=True, default=generate_uuid)
service_id = Column(String)
tag = Column(Integer)
controllers_vlans_id = Column(String)
def __repr__(self):
return "{}, {}, {}".format(
self.id,
self.service_id,
self.tag,
self.controller_vlans_id,
)
class Service(Base):
__tablename__ = 'services'
id = Column(String, primary_key=True, default=generate_uuid)
# controllers_services is a dictionary where the keys are the ids of the
# controller and the value is a list of the chunk in that controller
# in the form "{controllerid1:[serviceid,...],controllerid2:...}"
controllers_services = Column(String)
# controllers_phys is a dictionary where the keys are the ids of the
# controller and the value is a list of the chunk in that controller
# in the form "{controllerid1:[chunkid,...],controllerid2:...}"
controllers_phys = Column(String)
lteConfigCellReserved = Column(String)
lteConfigMMEAddress = Column(String)
lteConfigMMEPort = Column(Integer)
lteConfigPLMNId = Column(String)
selectedPhys = Column(String)
selectedVifs = Column(String)
wirelessConfigEncryption = Column(String)
wirelessConfigPassword = Column(String)
wirelessConfigSSID = Column(String)
vlanId = Column(String)
service_json = Column(String)
def __repr__(self):
return "{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}".format(
self.id,
self.controllers_services,
self.controllers_phys,
self.lteConfigCellReserved,
self.lteConfigMMEAddress,
self.lteConfigMMEPort,
self.lteConfigPLMNId,
self.selectedPhys,
self.selectedVifs,
self.wirelessConfigSSID,
self.wirelessConfigEncryption,
self.wirelessConfigPassword,
self.vlanId,
self.service_json
)
# helpers to translate dabatase type class objects into dictionaries
def _dictService(service):
vlan = session.query(Vlan).filter(Vlan.service_id == service.id).one()
if service.wirelessConfigSSID:
wirelessConfig = {
"ssid": service.wirelessConfigSSID,
"encryption": service.wirelessConfigEncryption,
"password": service.wirelessConfigPassword
}
else:
wirelessConfig = None
if service.lteConfigPLMNId:
lteConfig = {
"plmnId": service.lteConfigPLMNId,
"cellReserved": service.lteConfigCellReserved,
"mmeAddress": service.lteConfigMMEAddress,
"mmePort": service.lteConfigMMEPort
}
else:
lteConfig = None
response_data = {
"id": service.id,
"serviceType": "SWAM_SERVICE",
"selectedRoot": 0,
"vlanId": {
"id": vlan.id,
"vlanId": vlan.tag
},
"selectedVifs": [{"id": x} for x in eval(service.selectedVifs)],
"wirelessConfig": wirelessConfig,
"lteConfig": lteConfig
}
return response_data
def _dictChunk(chunk):
services = session.query(Service).filter(
Service.id.in_(eval(chunk.serviceList))).all()
phys = session.query(Phy).filter(Phy.id.in_(eval(chunk.phyList))).all()
response_data = {
"id": chunk.id,
"name": chunk.name,
"assignedQuota": 0,
"serviceList": [_dictService(service) for service in services],
"physicalInterfaceList": [_dictPhy(phy) for phy in phys],
"linkList": []
}
return response_data
def _dictPhy(phy):
vifs = session.query(Vif).filter(
Vif.id.in_(eval(phy.virtualInterfaceList))).all()
if phy.config:
config = eval(phy.config)
else:
config = phy.config
response_data = {
"id": phy.id,
"name": phy.name,
"type": phy.type,
"virtualInterfaceList": [_dictVif(vif) for vif in vifs],
"config": config
}
return response_data
def _dictVif(vif):
response_data = {
"id": vif.id,
"name": vif.name,
"toRootVlan": 0,
"toAccessVlan": 0,
"toAccessPort": 0,
"toRootPort": 0,
"openFlowPortList": []
}
return response_data
# Create database session
Base.metadata.create_all(engine)
DBSession = sessionmaker(bind=engine)
session = DBSession()
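# Design note: every object exposed on the northbound API gets a local UUID
# (the primary keys above), while the *_id_controller / controllers_* columns
# keep the mapping to the ids used by each southbound controller, so requests
# can be translated between the proxy's ids and the controllers' ids.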
# Initialize controller list
controllers = []
# controllers = {}
# formatter for the returned errors
API_RESPONSE = {
"OK": {
"content": '',
"code": 200
},
"CREATED": {
"content": '',
"code": 201
},
"CONTROLLER": {
"content": 'Controller Error',
"code": 503
},
"NOTFOUND": {
"content": 'Not Found',
"code": 404
},
"DB_INTEGRITY": {
"content": 'DB Integrity',
"code": 401
},
"VERIFICATION_ERROR": {
"content": 'Verification Error',
"code": 401
}
}
def errorResponder(error, message):
# TODO: implement timestamp
dt = datetime.today()
return json.dumps({
"timestamp": dt.isoformat(sep='T'),
"status": API_RESPONSE[error]["code"],
"error": API_RESPONSE[error]["content"],
"message": message,
"path": request.path
}), API_RESPONSE[error]["code"]
NORTHBOUND = "NORTHBOUND"
SOUTHBOUND = "SOUTHBOUND"
INTERNAL = "INTERNAL"
REQUEST = "REQUEST"
RESPONSE = "RESPONSE"
REQRESP = "REQ/RESP"
ROLLBACK = "ROLLBACK"
# Load controllers info from config.py and register topologies
# Look for first phy_id free in database
db_id_phy_id_list = session.query(Phy.id, Phy.phy_id_controller).all()
# db_id_list = [r for (r, a) in db_id_phy_id_list]
# db_id_list.sort()
# if len(db_id_list) == 0:
# new_phy_id = 1
# else:
# new_phy_id = db_id_list[len(db_id_list)-1]+1
# # Look for first box_id free in database
db_id_box_id_list = session.query(Box.id, Box.box_id_controller).all()
# db_id_list = [r for (r, a) in db_id_box_id_list]
# db_id_list.sort()
# if len(db_id_list) == 0:
# new_box_id = 1
# else:
# new_box_id = db_id_list[len(db_id_list)-1]+1
new_box_id = str(uuid.uuid4())
# *******************************
# Initialize proxy runtime status
# *******************************
#
# INITIAL TOPOLOGY RECOVERY (Boxes, Phys):
# =========================
# -RUCKUS type controller initial topology recovered from config.py
# -I2CAT type controller initial topology recovered from live
# SOUTHBOUND REQUEST to controller
#
# CURRENT STATE (Chunks, Services, VirtualInterfaces):
# ==============
# -RUCKUS type controller current state recovered from database and
# controllers runtime status
# -I2CAT type controller current state kept on controller
#
for item in CONTROLLERS:
if item['type'] == 'ruckus':
# Recover the list of chunks from the database
db_chunks = session.query(Chunk).all()
chunks = []
for db_chunk in db_chunks:
if eval(db_chunk.controllers_chunk)[len(controllers)]:
chunk = _dictChunk(db_chunk)
phys_to_pop = []
services_to_pop = []
for service in chunk["serviceList"]:
db_service = session.query(Service).filter(
Service.id == service["id"]).one()
if len(controllers) in \
eval(db_service.controllers_services).keys():
service["id"] = eval(db_service.controllers_services)[
len(controllers)]
else:
services_to_pop.append(service)
[chunk["serviceList"].remove(service)
for service in services_to_pop]
for phy in chunk["physicalInterfaceList"]:
try:
db_phy = session.query(Phy).filter(
Phy.id == phy["id"],
Phy.controller_id == len(controllers)).one()
phy = db_phy.phy_id_controller
except NoResultFound:
phys_to_pop.append(phy)
[chunk["physicalInterfaceList"].remove(
phy) for phy in phys_to_pop]
chunk["id"] = eval(db_chunk.controllers_chunk)[
len(controllers)]
chunks.append(chunk)
phy_id_mapping = RUCKUS_ID_MAPPING
controller = RuckusWiFi(
controller_id=item['id'],
ip=item['ip'],
port=item['port'],
url=item['url'],
topology=item['topology'],
chunks=chunks,
phy_id_mapping=phy_id_mapping,
username=item['username'],
password=item['password']
)
controllers.append(controller)
# controllers[controller.controller_id] = controller
elif item['type'] == 'i2cat':
controller = I2catController(
controller_id=item['id'],
ip=item['ip'],
port=item['port'],
url=item['url']
)
controllers.append(controller)
# controllers[controller.controller_id] = controller
for box in controller.getChunketeTopology()[0]["boxes"]:
if box['id'] not in [r for (a, r) in db_id_box_id_list]:
try:
# initial_topology["boxes"].append(box)
new_box = Box(
name=box["name"],
location=json.dumps(box["location"]),
controller_id=item['id'],
box_id_controller=box['id'],
phys=json.dumps(box["phys"]),
box_json=json.dumps(box))
session.add(new_box)
# count_phys = 0
for phy in box["phys"]:
if phy['id'] not in [r for (a, r) in db_id_phy_id_list]:
new_phy = Phy(
name=phy["name"], type=phy["type"],
controller_id=item['id'],
phy_id_controller=phy['id'],
config=str(phy["config"]),
virtualInterfaceList=json.dumps([]),
phy_json=json.dumps(phy))
session.add(new_phy)
# count_phys += 1
session.commit()
# new_phy_id += count_phys
# new_box_id += 1
except IntegrityError as ex:
session.rollback()
session.close()
def root_dir():
return os.path.abspath(os.path.dirname(__file__))
def get_file(filename):
try:
src = os.path.join(root_dir(), filename)
# Figure out how flask returns static files
# Tried:
# - render_template
# - send_file
# This should not be so non-obvious
return open(src).read()
except IOError as exc:
logger.error("Impossible to read file", exc_info=True)
return str(exc)
@app.route('/')
def root_page():
# return render_template('proxy.html')
return API_RESPONSE["OK"]["content"], API_RESPONSE["OK"]["code"]
@app.after_request
def flaskResponse(response):
body = ""
if response.get_data():
response.headers["Content-Type"] = "application/json;charset=UTF-8"
body = json.loads(response.get_data())
log_content = " '{}' {} :code:{}:body:{}".format(
request.method, request.path, response.status_code, body)
logger.info(log_base.format(NORTHBOUND, RESPONSE, log_content))
return response
@app.before_request
def before():
# todo with request
# e.g. print request.headers
pass
# Topology API implementation
@app.route('/chunkete/topology', methods=['GET'])
def getChunketeTopology():
resp = {
"boxes": [],
"links": []
}
log_content = ""
logger.info(log_base.format(NORTHBOUND, REQUEST, log_content))
for index_controller in range(len(controllers)):
try:
boxes = session.query(Box).filter(
Box.controller_id == index_controller).all()
(controller_resp,
code) = controllers[index_controller].getChunketeTopology()
log_content = "controller:{}:response:{}/{}".format(
index_controller, code, controller_resp)
logger.info(log_base.format(SOUTHBOUND, REQRESP, log_content))
if code == API_RESPONSE["OK"]["code"]:
for box in controller_resp["boxes"]:
for index_phy in range(len(box["phys"])):
phy = session.query(Phy).filter(
Phy.controller_id == index_controller).filter(
Phy.phy_id_controller ==
box["phys"][index_phy]["id"]
).one()
box["phys"][index_phy]["id"] = phy.id
for db_box in boxes:
if db_box.box_id_controller == box["id"]:
box["id"] = db_box.id
break
resp["boxes"].append(box)
else:
return controller_resp, code
except NoResultFound:
return json.dumps({
"timestamp": "2019-09-10T14:18:24.866+0000",
"status": API_RESPONSE["NOTFOUND"]["code"],
"error": API_RESPONSE["NOTFOUND"]["content"],
"message": "No Result Found for the request",
"path": request.path
}), API_RESPONSE["NOTFOUND"]["code"]
except IntegrityError:
return errorResponder(
"DB_INTEGRITY", "Database integrity error")
finally:
session.close()
response = jsonify(resp)
return response, API_RESPONSE["OK"]["code"]
@app.route(
'/chunkete/topology/physicalInterface/<phy_id>/LTEConfig',
methods=['PUT'])
def putInterfaceLTEConfig(phy_id):
# {
# "cellIdentity": 256,
# "earfcndl": 41690,
# "phyCellId": 5,
# "prachrootseqindex": 100,
# "primaryMMEAddress": "192.168.100.25",
# "primaryMMEPort": 333,
# "primaryPlmnId": "00101",
# "refSignalPower": -40,
# "reservedForOperatorUse": "not-reserved",
# "trackingAreaCode": 67
# }
try:
content = request.data
content_dict = json.loads(content)
log_content = "phy_id:{}:content:{}".format(phy_id, content_dict)
logger.info(log_base.format(NORTHBOUND, REQUEST, log_content))
if 0 > content_dict["cellIdentity"] > 256:
return errorResponder(
"VERIFICATION_ERROR", "Malformed request")
if content_dict["earfcndl"] not in [i for j in (
range(2750, 3449),
range(41690, 43489),
range(37750, 38249)) for i in j]:
return errorResponder(
"VERIFICATION_ERROR", "Malformed request")
if 0 > content_dict["phyCellId"] > 500:
return errorResponder(
"VERIFICATION_ERROR", "Malformed request")
if 0 > content_dict["prachrootseqindex"] > 1023:
return errorResponder(
"VERIFICATION_ERROR", "Malformed request")
if "primaryMMEAddress" not in content_dict.keys():
return errorResponder(
"VERIFICATION_ERROR", "Malformed request")
if "primaryMMEPort" not in content_dict.keys():
return errorResponder(
"VERIFICATION_ERROR", "Malformed request")
if "primaryPlmnId" not in content_dict.keys():
return errorResponder(
"VERIFICATION_ERROR", "Malformed request")
if -40 > content_dict["refSignalPower"] > -10:
return errorResponder(
"VERIFICATION_ERROR", "Malformed request")
if content_dict["reservedForOperatorUse"] != "not-reserved":
return errorResponder(
"VERIFICATION_ERROR", "Malformed request")
if 0 > content_dict["trackingAreaCode"] > 65535:
return errorResponder(
"VERIFICATION_ERROR", "Malformed request")
if content:
phy = session.query(Phy).filter(Phy.id == phy_id).one()
response, code = controllers[phy.controller_id].\
putInterfaceLTEConfig(
phy.phy_id_controller, content)
log_content = "controller:{}:phy_id_controller:{}:phy_id:{}"
log_content += ":content:{}:response:{}/{}".\
format(
phy.controller_id, phy.phy_id_controller,
phy_id, content, code, response)
logger.info(log_base.format(SOUTHBOUND, REQRESP, log_content))
return jsonify(response), code
else:
return errorResponder(
"VERIFICATION_ERROR", "Malformed request")
except KeyError:
return errorResponder(
"VERIFICATION_ERROR", "Malformed request")
except NoResultFound:
return errorResponder(
"NOTFOUND", "Item not found")
except IntegrityError:
return errorResponder(
"DB_INTEGRITY", "Database integrity error")
finally:
session.close()
@app.route(
'/chunkete/topology/physicalInterface/<phy_id>/type/<phy_type>',
methods=['PUT'])
def putInterfaceType(phy_id, phy_type):
try:
log_content = "phy_id:{}:phy_type:{}".format(phy_id, phy_type)
logger.info(log_base.format(NORTHBOUND, REQUEST, log_content))
phy = session.query(Phy).filter(Phy.id == phy_id).one()
response, code = controllers[phy.controller_id].putInterfaceType(
phy.phy_id_controller, phy_type)
log_content = "controller:{}:phy_id_controller:{}:phy_id:{}"
log_content += ":phy_type:{}:response:{}/{}".\
format(
phy.controller_id, phy.phyid_controller,
phy_id, phy_type, code, response)
logger.info(
log_base.format(SOUTHBOUND, REQRESP, log_content))
return response, code
except NoResultFound:
return errorResponder(
"NOTFOUND", "Item not found")
except IntegrityError:
return errorResponder(
"DB_INTEGRITY", "Database integrity error")
finally:
session.close()
@app.route(
'/chunkete/topology/physicalInterface/<phy_id>/wiredConfig',
methods=['PUT'])
def putInterfaceWiredConfig(phy_id):
try:
content = request.data
log_content = "phy_id:{}:content:{}".format(
phy_id, json.loads(content))
logger.info(
log_base.format(NORTHBOUND, REQUEST, log_content))
if content:
phy = session.query(Phy).filter(Phy.id == phy_id).one()
response, code = controllers[phy.controller_id].\
putInterfaceWiredConfig(
phy.phy_id_controller, content)
log_content = "controller:{}:phy_id_controller:{}:phy_id:{}"
log_content += ":response:{}/{}".\
format(
phy.controller_id, phy.phy_id_controller,
phy_id, code, response)
logger.info(
log_base.format(SOUTHBOUND, REQRESP, log_content))
return response, code
except KeyError:
return errorResponder(
"VERIFICATION_ERROR", "Malformed request")
except NoResultFound:
return errorResponder(
"NOTFOUND", "Item not found")
except IntegrityError:
return errorResponder(
"DB_INTEGRITY", "Database integrity error")
finally:
session.close()
@app.route(
'/chunkete/topology/physicalInterface/<phy_id>/wirelessConfig',
methods=['PUT'])
def putInterfaceWirelessConfig(phy_id):
# Verify content
# {
# "channelBandwidth": 20,
# (accepted values: 20, 40 and 80)
# "channelNumber": 36,
# (any channel in the 2.4 GHz and/or 5 GHz bands is accepted; depending on
# the node, DFS may or may not be supported, so the value is not restricted
# to "normal" channels)
# "txPower": 2000
# (value in mBm; 0 to 3500 is accepted, although 2300 is the usual value)
# }
try:
content = request.data
log_content = "phy_id:{}:content:{}".format(
phy_id, json.loads(content))
logger.info(log_base.format(NORTHBOUND, REQUEST, log_content))
content_dict = json.loads(content)
if content_dict["channelBandwidth"] not in [20, 40, 80]:
return errorResponder(
"VERIFICATION_ERROR", "Malformed request")
if content_dict["channelNumber"] not in [i for j in (
range(1, 11),
range(36, 68, 4),
range(100, 140, 4)) for i in j]:
return errorResponder(
"VERIFICATION_ERROR", "Malformed request")
if 0 >= content_dict["txPower"] > 3500:
return errorResponder(
"VERIFICATION_ERROR", "Malformed request")
if content:
phy = session.query(Phy).filter(Phy.id == phy_id).one()
response, code = controllers[phy.controller_id].\
putInterfaceWirelessConfig(phy.phy_id_controller, content)
log_content = "controller:{}:phy_id_controller:{}:phy_id:{}"
log_content += ":content:{}:response:{}/{}".\
format(
phy.controller_id, phy.phy_id_controller,
phy_id, content, code, response)
logger.info(
log_base.format(SOUTHBOUND, REQRESP, log_content))
return jsonify(response), code
else:
return errorResponder(
"VERIFICATION_ERROR", "Malformed request")
return (API_RESPONSE["CREATED"]["content"],
API_RESPONSE["CREATED"]["code"])
except KeyError:
logger.error("Malformed request")
return errorResponder(
"VERIFICATION_ERROR", "Malformed request")
except NoResultFound:
return errorResponder(
"NOTFOUND", "Item not found")
except IntegrityError:
return errorResponder(
"DB_INTEGRITY", "Database integrity error")
finally:
session.close()
# Chunk API implementation
@app.route('/chunkete/chunk', methods=['GET'])
def getAllChunks():
log_content = ""
logger.info(
log_base.format(NORTHBOUND, REQUEST, log_content))
# chunks = {}
# chunk_id_list =[]
response = []
try:
db_chunks = session.query(Chunk).all()
for db_chunk in db_chunks:
response.append(_dictChunk(db_chunk))
return jsonify(response), API_RESPONSE["OK"]["code"]
except NoResultFound:
return errorResponder(
"NOTFOUND", "Item not found")
except IntegrityError:
return errorResponder(
"DB_INTEGRITY", "Database integrity error")
finally:
session.close()
@app.route('/chunkete/chunk', methods=['POST'])
def registerNewChunk():
try:
content = request.data
log_content = "content:{}".format(json.loads(content))
logger.info(log_base.format(NORTHBOUND, REQUEST, log_content))
if content:
chunk_dict = json.loads(content)
controllers_phys = {}
controllers_content = {}
# Split the phys included in the chunk per controller
for phy in chunk_dict["physicalInterfaceList"]:
phy = session.query(Phy).filter(Phy.id == phy["id"]).one()
phy_dict = json.loads(phy.phy_json)
phy_id_dict = {"id": phy_dict["id"]}
if phy.controller_id in controllers_phys.keys():
controllers_phys[phy.controller_id].append(
phy.phy_id_controller)
controllers_content[
phy.controller_id][
"physicalInterfaceList"].append(phy_id_dict)
else:
controllers_phys[phy.controller_id] = [phy.phy_id_controller]
controllers_content[phy.controller_id] = {
"name": chunk_dict["name"],
"physicalInterfaceList": [phy_id_dict],
}
if "assignedQuota" in chunk_dict.keys():
controllers_content[phy.controller_id]["assignedQuota"] = \
chunk_dict["assignedQuota"]
else:
chunk_dict["assignedQuota"] = 0
controllers_content[phy.controller_id]["assignedQuota"] = 0
if "linkList" in chunk_dict.keys():
controllers_content[phy.controller_id]["linkList"] = \
chunk_dict["linkList"]
else:
chunk_dict["linkList"] = []
controllers_content[phy.controller_id]["linkList"] = []
if "serviceList" in chunk_dict.keys():
controllers_content[phy.controller_id]["serviceList"] = \
chunk_dict["serviceList"]
else:
chunk_dict["serviceList"] = []
controllers_content[phy.controller_id]["serviceList"] = []
# # Create a new chunk and add to database
# # Get the next free ID in db
# db_id_list = session.query(Chunk.id).all()
# db_id_list = [r for (r, ) in db_id_list]
# db_id_list.sort()
# if len(db_id_list) == 0:
# new_chunk_id = 1
# else:
# new_chunk_id = db_id_list[len(db_id_list)-1]+1
# Add the chunk in the database
chunk = Chunk(
name=chunk_dict["name"],
serviceList=json.dumps([]),
assignedQuota=chunk_dict["assignedQuota"],
controllers_phys=str(controllers_phys),
phyList=str(
[phy["id"] for phy in chunk_dict["physicalInterfaceList"]]
),
linkList=json.dumps([]), chunk_json=json.dumps(chunk_dict))
session.add(chunk)
# Register the chunk on each of the controllers
controllers_chunk_dict = {}
for controller_id in controllers_content.keys():
response, code = controllers[controller_id].registerNewChunk(
json.dumps(controllers_content[controller_id]))
log_content = "controller:{}:content:{}"
log_content += ":response:{}/{}".\
format(
controller_id,
json.dumps(
controllers_content[controller_id]),
code, response)
logger.info(log_base.format(SOUTHBOUND, REQRESP, log_content))
if code == API_RESPONSE["CREATED"]["code"]:
controllers_chunk_dict[controller_id] = response["id"]
else:
return errorResponder(
"CONTROLLER", "Managed Controller returned an error")
# Update Service in Database
chunk_dict["id"] = chunk.id
chunk.chunk_json = json.dumps(chunk_dict)
chunk.controllers_chunk = str(controllers_chunk_dict)
session.commit()
return json.dumps(
{'id': chunk.id}), API_RESPONSE["CREATED"]["code"]
except KeyError:
return errorResponder(
"VERIFICATION_ERROR", "Malformed request")
except NoResultFound:
return errorResponder(
"NOTFOUND", "Item not found")
except IntegrityError:
return errorResponder(
"DB_INTEGRITY", "Database integrity error")
finally:
session.close()
@app.route('/chunkete/chunk/<chunk_id>', methods=['GET'])
def getChunkById(chunk_id):
log_content = "chunk_id:{}".format(chunk_id)
logger.info(
log_base.format(NORTHBOUND, REQUEST, log_content))
try:
chunk = session.query(Chunk).filter(Chunk.id == chunk_id).one()
response_data = _dictChunk(chunk)
return jsonify(
response_data), API_RESPONSE["OK"]["code"]
except NoResultFound:
return errorResponder(
"NOTFOUND", "Object not found")
except IntegrityError:
return errorResponder(
"DB_INTEGRITY", "Database integrity error")
finally:
session.close()
@app.route('/chunkete/chunk/<chunk_id>', methods=['DELETE'])
def removeExistingChunk(chunk_id):
log_content = "chunk_id:{}".format(chunk_id)
logger.info(log_base.format(NORTHBOUND, REQUEST, log_content))
try:
chunk = session.query(Chunk).filter(Chunk.id == chunk_id).one()
session.close()
controllers_phys = eval(chunk.controllers_phys)
serviceList = eval(chunk.serviceList)
# Remove the Services from the chunk
while serviceList:
removeExistingSWAMService(
chunk_id, serviceList[0], interface=INTERNAL)
serviceList.pop(0)
for controller_id in controllers_phys.keys():
response, code = controllers[controller_id].removeExistingChunk(
eval(chunk.controllers_chunk)[controller_id])
log_content = "controller:{}:chunk_id:{}"
log_content += ":response:{}/{}".\
format(controller_id, chunk_id, code, response)
logger.info(log_base.format(SOUTHBOUND, REQRESP, log_content))
# Remove the chunk from the database
session.delete(chunk)
session.commit()
return API_RESPONSE["OK"]["content"], API_RESPONSE["OK"]["code"]
except NoResultFound:
return errorResponder("NOTFOUND", "Item not found")
except IntegrityError:
return errorResponder("DB_INTEGRITY", "Database integrity error")
finally:
session.close()
# Service API implementation
@app.route('/chunkete/chunk/<chunk_id>/service/SWAM', methods=['GET'])
def getAllSWAMServices(chunk_id):
log_content = "chunk_id:{}".format(chunk_id)
logger.info(log_base.format(NORTHBOUND, REQUEST, log_content))
response = []
try:
db_chunk = session.query(Chunk).filter(Chunk.id == chunk_id).one()
for service_id in eval(db_chunk.serviceList):
db_service = session.query(Service).filter(
Service.id == service_id).one()
response.append(_dictService(db_service))
return jsonify(response), API_RESPONSE["OK"]["code"]
except NoResultFound:
return errorResponder("NOTFOUND", "Item not found")
finally:
session.close()
@app.route('/chunkete/chunk/<chunk_id>/service/SWAM', methods=['POST'])
def registerNewSWAMService(chunk_id):
# VERIFY CONTENT
# {
# "lteConfig": { (Más info en los mails que te he pasado de accelleran)
# "cellReserved": "not-reserved",
# "mmeAddress": "192.168.50.2",
# "mmePort": 333,
# "plmnId": "00101"
# },
# "selectedPhys": [
# (Sólo se aceptan interfaces de tipo SUB6_ACCESS,
# LTE_PRIMARY_PLMN y WIRED_TUNNEL)
# 14, 23
# ],
# "vlanId": 201, (1-4095)
# "wirelessConfig": {
# "encryption": "WPA", (NONE, WPA, WPA2, WEP aceptados)
# "password": "<PASSWORD>",
# (No se aceptan espacios. Debe contener un mínimo de
# 8 caracteres o estar vacia en caso de encryption == "NONE")
# "ssid": "Test" (No se aceptan espacios)
# }
# }
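    # Illustrative client call (host is hypothetical; the app at the bottom
    # of this file binds to 0.0.0.0:8008, and the payload mirrors the
    # example values above):
    #   curl -X POST http://<host>:8008/chunkete/chunk/<chunk_id>/service/SWAM \
    #        -H "Content-Type: application/json" \
    #        -d '{"selectedPhys": [14, 23], "vlanId": 201,
    #             "lteConfig": {"cellReserved": "not-reserved",
    #                           "mmeAddress": "192.168.50.2",
    #                           "mmePort": 333, "plmnId": "00101"},
    #             "wirelessConfig": {"encryption": "WPA2",
    #                                "password": "<PASSWORD>",
    #                                "ssid": "Test"}}'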
PHY_TYPES = ["SUB6_ACCESS", "LTE_PRIMARY_PLMN", "WIRED_TUNNEL"]
ENCRYPTION_TYPES = ["NONE", "WPA", "WPA2", "WEP"]
    # Action record for rollback in case something fails:
    # {
    # <controller>: {
    # "chunk_id": <chunk id of this chunk on that controller>,
    # "service_id": <service id returned by that controller>
    # }
    # }
rollback_flag = True
rollback = {}
try:
content = request.data
log_content = "chunk_id:{}:content:{}".format(
chunk_id, json.loads(content))
logger.info(log_base.format(NORTHBOUND, REQUEST, log_content))
if content:
service_dict = json.loads(content)
# if "lteConfig" in service_dict.keys():
if "lteConfig" in service_dict.keys():
if service_dict["lteConfig"]:
pass
# if service_dict["lteConfig"]["encryption"] not in \
# ENCRYPTION_TYPES:
# return errorResponder(
# "VERIFICATION_ERROR", "Malformed request")
# elif len(service_dict["lteConfig"]["password"]) < 8:
# if service_dict[
# "wirelessConfig"]["encryption"] != "NONE":
# return errorResponder(
# "VERIFICATION_ERROR", "Malformed request")
# elif ' ' in service_dict["lteConfig"]["ssid"]:
# return errorResponder(
# "VERIFICATION_ERROR", "Malformed request")
else:
service_dict["lteConfig"] = {
"cellReserved": None,
"mmeAddress": None,
"mmePort": None,
"plmnId": None
}
if "wirelessConfig" in service_dict.keys():
if service_dict["wirelessConfig"]:
if service_dict["wirelessConfig"]["encryption"] not in \
ENCRYPTION_TYPES:
return errorResponder(
"VERIFICATION_ERROR", "Malformed request")
elif len(service_dict["wirelessConfig"]["password"]) < 8:
if service_dict[
"wirelessConfig"]["encryption"] != "NONE":
return errorResponder(
"VERIFICATION_ERROR", "Malformed request")
elif ' ' in service_dict["wirelessConfig"]["ssid"]:
return errorResponder(
"VERIFICATION_ERROR", "Malformed request")
else:
service_dict["wirelessConfig"] = {
"encryption": None,
"password": None,
"ssid": None
}
            if not 1 <= service_dict["vlanId"] <= 4095:
return errorResponder(
"VERIFICATION_ERROR", "Malformed request")
controllers_phys = {}
controllers_content = {}
controllers_xref = {}
selected_vifs = []
db_vifs = []
chunk = session.query(Chunk).filter(Chunk.id == chunk_id).one()
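            # For each selected phy: check that it belongs to this chunk and
            # has an allowed type, group it per managed controller, and
            # create a new virtual interface (vif) for it in the local
            # database.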
for phy_id in service_dict["selectedPhys"]:
if phy_id not in eval(chunk.phyList):
return errorResponder(
"VERIFICATION_ERROR", "Malformed request")
phy = session.query(Phy).filter(Phy.id == phy_id).one()
if phy.type not in PHY_TYPES:
return errorResponder(
"VERIFICATION_ERROR", "Malformed request")
if phy.controller_id in controllers_phys.keys():
controllers_phys[phy.controller_id].append(phy.id)
controllers_xref[phy.controller_id].append(
phy.phy_id_controller)
controllers_content[phy.controller_id]["selectedPhys"].\
append(phy.phy_id_controller)
else:
controllers_phys[phy.controller_id] = [phy.id]
controllers_xref[phy.controller_id] = [
phy.phy_id_controller]
controllers_content[phy.controller_id] = {
"selectedPhys": [phy.phy_id_controller],
"vlanId": service_dict["vlanId"]
}
if "lteConfig" in service_dict.keys():
controllers_content[phy.controller_id]["lteConfig"] = \
service_dict["lteConfig"]
if "wirelessConfig" in service_dict.keys():
controllers_content[phy.controller_id][
"wirelessConfig"] = service_dict["wirelessConfig"]
# Create a new vif and add to database
# Get the next free ID in db
# db_id_list = session.query(Vif.id).all()
# db_id_list = [r for (r, ) in db_id_list]
# db_id_list.sort()
# if len(db_id_list) == 0:
# new_vif_id = 1
# else:
# new_vif_id = db_id_list[len(db_id_list)-1]+1
# Create a new service and add to database
# Get the next free ID in db
# db_id_list = session.query(Service.id).all()
# db_id_list = [r for (r, ) in db_id_list]
# db_id_list.sort()
# if len(db_id_list) == 0:
# new_service_id = 1
# else:
# new_service_id = db_id_list[len(db_id_list)-1]+1
# TODO: Name the new vif. At the moment, it just takes the
# phy name followed by the new_vif_id
new_vif_dict = {
'id': str(uuid.uuid4()),
'name': "",
"toRootVlan": 0,
"toAccessVlan": 0,
"toAccessPort": 0,
"toRootPort": 0,
"openFlowPortList": []
}
new_vif_dict['name'] = "{}_{}".\
format(phy.name, new_vif_dict['id'])
vif = Vif(
id=new_vif_dict['id'],
service_id="",
phy_id=phy.id,
controller_id=phy.controller_id,
vif_json=json.dumps(new_vif_dict))
session.add(vif)
db_vifs.append(vif)
selected_vifs.append(new_vif_dict['id'])
phy = session.query(Phy).filter(Phy.id == phy.id).one()
virtualInterfaceList = json.loads(phy.virtualInterfaceList)
virtualInterfaceList.append(vif.id)
phy.virtualInterfaceList = json.dumps(virtualInterfaceList)
phy_dict = json.loads(phy.phy_json)
if "virtualInterfaceList" in phy_dict:
phy_dict["virtualInterfaceList"].append(new_vif_dict)
else:
phy_dict["virtualInterfaceList"] = [new_vif_dict]
phy.phy_json = json.dumps(phy_dict)
# Add the service in the database
service = Service(
controllers_services=str({}),
controllers_phys=str(controllers_xref),
lteConfigCellReserved=service_dict[
"lteConfig"]["cellReserved"],
lteConfigMMEAddress=service_dict["lteConfig"]["mmeAddress"],
lteConfigMMEPort=service_dict["lteConfig"]["mmePort"],
lteConfigPLMNId=service_dict["lteConfig"]["plmnId"],
selectedPhys=str(service_dict["selectedPhys"]),
selectedVifs=str(selected_vifs),
wirelessConfigEncryption=service_dict[
"wirelessConfig"]["encryption"],
wirelessConfigPassword=service_dict[
"wirelessConfig"]["password"],
wirelessConfigSSID=service_dict["wirelessConfig"]["ssid"],
vlanId=service_dict["vlanId"],
service_json=json.dumps(service_dict)
)
vlan = Vlan(
tag=service_dict["vlanId"],
service_id="",
controllers_vlans_id="")
session.add(vlan)
session.add(service)
session.flush()
# Update Chunk in database
# update serviceList
serviceList = json.loads(chunk.serviceList)
serviceList.append(service.id)
chunk.serviceList = json.dumps(serviceList)
# update chunk json
service_dict["id"] = service.id
vlan.service_id = service.id
for db_vif in db_vifs:
db_vif.service_id = service.id
updated_chunk = json.loads(chunk.chunk_json)
updated_chunk["serviceList"].append(service_dict)
chunk.chunk_json = json.dumps(updated_chunk)
service.service_json = json.dumps(service_dict)
session.flush()
# Register the service on each controller
controllers_services_dict = {}
for controller_id in controllers_phys.keys():
data, code = controllers[controller_id].\
registerNewSWAMService(
eval(chunk.controllers_chunk)[controller_id],
json.dumps(controllers_content[controller_id]))
log_content = "controller:{}:chunk_id:{}:content:{}"
log_content += ":response:{}/{}".\
format(
controller_id, chunk_id,
json.dumps(controllers_content[controller_id]),
code, data)
logger.info(log_base.format(
SOUTHBOUND, REQRESP, log_content))
if code == API_RESPONSE["CREATED"]["code"]:
rollback[controller_id] = {
'chunk_id': eval(
chunk.controllers_chunk)[controller_id],
'service_id': data["id"]
}
controllers_services_dict[controller_id] = data["id"]
else:
return errorResponder(
"CONTROLLER",
"Managed Controller returned an error")
# Update and add vlan object
# vlan.service_id = service.id
# vlan.controllers_vlans_id = controllers_services_dict['vlanId']
# Update Service in Database
service.controllers_services = str(controllers_services_dict)
session.commit()
rollback_flag = False
return json.dumps(
{'id': service.id}), API_RESPONSE["CREATED"]["code"]
    except KeyError:
        return errorResponder(
            "VERIFICATION_ERROR", "Malformed request")
    except NoResultFound:
        return errorResponder(
            "NOTFOUND", "Item not found")
except IntegrityError:
return errorResponder(
"DB_INTEGRITY", "Database integrity error")
finally:
if rollback_flag:
if rollback:
for controller_id in rollback.keys():
data, code = controllers[controller_id].\
removeExistingSWAMService(
rollback[controller_id]["chunk_id"],
rollback[controller_id]["service_id"])
log_content = "controller:{}:chunk_id:{}:service_id:{}"
log_content += ":response:{}/{}".\
format(
controller_id,
rollback[controller_id]["chunk_id"],
rollback[controller_id]["service_id"],
code, data)
logger.info(log_base.format(
SOUTHBOUND, ROLLBACK, log_content))
session.close()
@app.route(
'/chunkete/chunk/<chunk_id>/service/SWAM/<service_id>',
methods=['GET'])
def getSWAMServiceById(chunk_id, service_id):
log_content = "chunk_id:{}:service_id:{}".format(chunk_id, service_id)
logger.info(log_base.format(NORTHBOUND, REQUEST, log_content))
try:
service = session.query(Service).filter(Service.id == service_id).one()
response_data = _dictService(service)
return jsonify(response_data), API_RESPONSE["OK"]["code"]
except NoResultFound:
return errorResponder(
"NOTFOUND", "Item not found")
except IntegrityError:
return errorResponder(
"DB_INTEGRITY", "Database integrity error")
finally:
session.close()
@app.route(
'/chunkete/chunk/<chunk_id>/service/SWAM/<service_id>',
methods=['DELETE'])
def removeExistingSWAMService(chunk_id, service_id, interface=NORTHBOUND):
log_content = "chunk_id:{}:service_id:{}".format(chunk_id, service_id)
logger.info(log_base.format(interface, REQUEST, log_content))
controllers_phys = {}
try:
# Update Chunk in database
chunk = session.query(Chunk).filter(Chunk.id == chunk_id).one()
vifs = session.query(Vif).filter(
Vif.service_id == service_id).all()
for vif in vifs:
phy = session.query(Phy).filter(Phy.id == vif.phy_id).one()
if phy.controller_id in controllers_phys.keys():
controllers_phys[phy.controller_id].append(phy.id)
else:
controllers_phys[phy.controller_id] = [phy.id]
virtualInterfaceList = eval(phy.virtualInterfaceList)
virtualInterfaceList.remove(vif.id)
phy.virtualInterfaceList = json.dumps(virtualInterfaceList)
session.delete(vif)
chunk_dict = json.loads(chunk.chunk_json)
serviceList = json.loads(chunk.serviceList)
for index in range(len(serviceList)):
if serviceList[index] == service_id:
service = session.query(Service).filter(
Service.id == service_id).one()
controllers_services_dict = eval(service.controllers_services)
for controller_id in controllers_phys.keys():
response, code = controllers[controller_id].\
removeExistingSWAMService(
eval(chunk.controllers_chunk)[controller_id],
controllers_services_dict[controller_id])
log_content = "controller:{}:chunk_id:{}:service_id:{}"
log_content += ":service_id_controller:{}:response:{}/{}".\
format(
controller_id, chunk_id,
service_id,
controllers_services_dict[controller_id],
code, response)
logger.info(log_base.format(
SOUTHBOUND, REQRESP, log_content))
chunk_dict["serviceList"].pop(index)
serviceList.pop(serviceList.index(service_id))
chunk.serviceList = json.dumps(serviceList)
chunk.chunk_json = json.dumps(chunk_dict)
vlan = session.query(Vlan).filter(
Vlan.service_id == service_id).one()
session.delete(vlan)
session.delete(service)
session.commit()
return (API_RESPONSE["OK"]["content"],
API_RESPONSE["OK"]["code"])
return errorResponder(
"NOTFOUND", "Item not found")
except NoResultFound:
return errorResponder(
"NOTFOUND", "Item not found")
except IntegrityError:
return errorResponder(
"DB_INTEGRITY", "Database integrity error")
finally:
session.close()
app.wsgi_app = ProxyFix(app.wsgi_app)
if __name__ == '__main__':
"""main function
Default host: 0.0.0.0
    Default port: 8008
Default debug: False
"""
try:
app.run(
host='0.0.0.0',
port=8008,
debug=False)
except Exception:
logging.critical(
'server: CRASHED: Got exception on main handler')
raise
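# Illustrative local run (the filename below is hypothetical; this excerpt
# does not show the module name):
#   python chunkete_api.py
# The REST API is then reachable at http://0.0.0.0:8008/chunkete/...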
``` |