{
"source": "jerry73204/ms-agv-car",
"score": 2
}
#### File: ms-agv-car/tf_ncsdk_source/train_tensorflow_model.py
```python
import os
import glob
import argparse
import logging
import cv2
import numpy as np
import tensorflow as tf
X_FEATURE_KEY = 'x'
def load_data(data_dir, input_height, input_width, input_channel, n_classes):
    # Find all image files
match_left = os.path.join(data_dir, 'left', '*.jpg')
paths_left = glob.glob(match_left)
match_right = os.path.join(data_dir, 'right', '*.jpg')
paths_right = glob.glob(match_right)
match_stop = os.path.join(data_dir, 'stop', '*.jpg')
paths_stop = glob.glob(match_stop)
match_other = os.path.join(data_dir, 'other', '*.jpg')
paths_other = glob.glob(match_other)
match_test = os.path.join(data_dir, 'test', '*.jpg')
paths_test = glob.glob(match_test)
n_train = len(paths_left) + len(paths_right) + len(paths_stop) + len(paths_other)
n_test = len(paths_test)
    # Initialize the dataset arrays
train_x = np.zeros(
shape=(n_train, input_height, input_width, input_channel),
dtype='float32',
)
train_y = np.zeros(
shape=(n_train,),
dtype='int32',
)
test_x = np.zeros(
shape=(n_test, input_height, input_width, input_channel),
dtype='float32',
)
    # Read the images into the datasets
paths_train = paths_left + paths_right + paths_stop + paths_other
for ind, path in enumerate(paths_train):
image = cv2.imread(path)
resized_image = cv2.resize(image, (input_width, input_height))
train_x[ind] = resized_image
for ind, path in enumerate(paths_test):
image = cv2.imread(path)
resized_image = cv2.resize(image, (input_width, input_height))
test_x[ind] = resized_image
    # Set the training-set labels
n_left = len(paths_left)
n_right = len(paths_right)
n_stop = len(paths_stop)
n_other = len(paths_other)
begin_ind = 0
end_ind = n_left
train_y[begin_ind:end_ind] = 0
begin_ind = n_left
end_ind = n_left + n_right
train_y[begin_ind:end_ind] = 1
begin_ind = n_left + n_right
end_ind = n_left + n_right + n_stop
train_y[begin_ind:end_ind] = 2
begin_ind = n_left + n_right + n_stop
end_ind = n_left + n_right + n_stop + n_other
train_y[begin_ind:end_ind] = 3
    # Normalize values into the 0-1 range
train_x = train_x / 255.0
test_x = test_x / 255.0
return (paths_train, train_x, train_y), (paths_test, test_x,)
def custom_model_fn(features, labels, mode, params):
training = bool(mode == tf.estimator.ModeKeys.TRAIN)
    # Block function built from stacked conv2d and batch_normalization layers
def conv_block(x, filters):
# Movidius compiler does not support FusedBatchNorm operator.
# To avoid this error, pass fused=False to batch_normalization()
# x = tf.layers.batch_normalization(
# x,
# training=training,
# fused=False,
# )
x = tf.layers.conv2d(
x,
filters=filters,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
activation=tf.nn.relu,
)
# x = tf.layers.batch_normalization(
# x,
# training=training,
# fused=False,
# )
shortcut = x
x = tf.layers.conv2d(
x,
filters=filters,
kernel_size=(3, 3),
padding='same',
activation=tf.nn.relu,
)
x = tf.add(x, shortcut)
x = tf.layers.max_pooling2d(
x,
pool_size=(2, 2),
strides=(1, 1),
padding='same',
)
return x
    # Input layer
    input_image = features[X_FEATURE_KEY]
    # Repeat the conv block, each time with a different number of filters
x = conv_block(input_image, 32)
x = conv_block(x, 64)
x = conv_block(x, 128)
x = conv_block(x, 256)
x = conv_block(x, 512)
    # Final block
x = tf.layers.flatten(x)
# x = tf.layers.batch_normalization(
# x,
# training=training,
# fused=False,
# )
x = tf.layers.dense(
x,
units=512,
activation=tf.nn.relu,
)
x = tf.layers.dense(
x,
units=512,
activation=tf.nn.relu,
)
    # Model outputs
logits = tf.layers.dense(
x,
units=params['n_classes'],
)
probabilities = tf.nn.softmax(
logits,
name=params['output_name'],
)
predicted_classes = tf.argmax(logits, axis=1)
    # Build the prediction spec (PREDICT mode)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'class_ids': predicted_classes[:, tf.newaxis],
'probabilities': probabilities,
}
# export_outputs = {
# params['output_name']: tf.estimator.export.PredictOutput(probabilities),
# }
return tf.estimator.EstimatorSpec(
mode,
predictions=predictions,
# export_outputs=export_outputs,
)
    # Compute the loss and accuracy
loss = tf.losses.sparse_softmax_cross_entropy(
labels=labels,
logits=logits,
)
accuracy = tf.metrics.accuracy(
labels=labels,
predictions=predicted_classes,
        name='accuracy_op',
)
    tf.summary.scalar('accuracy', accuracy[1])  # log the accuracy
    # Return the evaluation spec (EVAL mode)
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(
mode,
loss=loss,
eval_metric_ops={ 'accuracy': accuracy },
)
    # Return the training spec (TRAIN mode)
assert mode == tf.estimator.ModeKeys.TRAIN
optimizer = tf.train.AdamOptimizer()
# Due to TensorFlow bug https://stackoverflow.com/questions/43234667/tf-layers-batch-normalization-large-test-error
# batch_normalization() may not work properly. Here is workaround.
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(extra_update_ops):
train_op = optimizer.minimize(
loss,
global_step=tf.train.get_global_step(),
)
return tf.estimator.EstimatorSpec(
mode,
loss=loss,
train_op=train_op,
)
def main():
    # Set the logging level so TensorFlow shows more information
logging.getLogger().setLevel(logging.INFO)
    # Define command-line arguments
    arg_parser = argparse.ArgumentParser(description='Example of building a model with TensorFlow')
arg_parser.add_argument(
'--model-base-dir',
required=True,
        help='model output directory',
)
arg_parser.add_argument(
'--data-dir',
required=True,
        help='data directory',
)
arg_parser.add_argument(
'--epochs',
type=int,
default=64,
        help='number of training epochs',
)
arg_parser.add_argument(
'--output-file',
default='-',
        help='prediction output file',
)
arg_parser.add_argument(
'--input-width',
type=int,
default=48,
        help='model input width',
)
arg_parser.add_argument(
'--input-height',
type=int,
default=48,
        help='model input height',
)
arg_parser.add_argument(
'--batch-size',
type=int,
default=64,
        help='batch size',
)
arg_parser.add_argument(
'--input-tensor-name',
default='input_image',
        help='input layer name',
)
arg_parser.add_argument(
'--output-tensor-name',
default='probabilities',
        help='output layer name',
)
args = arg_parser.parse_args()
    # Common constants
    input_channel = 3  # three channels: red, green, blue
    n_classes = 4  # four classes: left, right, stop, other
    # Load the raw data
    # By convention, x is the input data and y is the expected label
(paths_train, train_x, train_y), (paths_test, test_x) = load_data(
args.data_dir,
args.input_height,
args.input_width,
input_channel,
n_classes,
)
assert len(set([len(paths_train), train_x.shape[0], train_y.shape[0]])) == 1
assert len(set([len(paths_test), test_x.shape[0]])) == 1
    # Build the data input functions for the training, evaluation, and test passes
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={ X_FEATURE_KEY: train_x },
y=train_y,
num_epochs=args.epochs,
shuffle=True,
batch_size=args.batch_size,
)
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
x={ X_FEATURE_KEY: train_x },
y=train_y,
num_epochs=1,
shuffle=False,
batch_size=args.batch_size,
)
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={ X_FEATURE_KEY: test_x },
y=None,
num_epochs=1,
shuffle=False,
batch_size=args.batch_size,
)
input_tensor = tf.placeholder(
'float',
shape=[1, args.input_height, args.input_width, input_channel],
name=args.input_tensor_name,
)
serving_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(
{ X_FEATURE_KEY: input_tensor },
default_batch_size=1,
)
    # Build the model
params = {
'n_classes': n_classes,
'output_name': args.output_tensor_name,
}
custom_classifier = tf.estimator.Estimator(
model_fn=custom_model_fn,
params=params,
)
    # Train the model
custom_classifier.train(
input_fn=train_input_fn,
)
    # Evaluate the model
eval_results = custom_classifier.evaluate(
input_fn=eval_input_fn,
)
    print('Loss\t%f' % eval_results['loss'])
    print('Accuracy\t%f' % eval_results['accuracy'])
    # Run predictions
ind2name = [
'left', # 0
'right', # 1
'stop', # 2
'other', # 3
]
if test_x.shape[0] > 0:
predictions = custom_classifier.predict(
input_fn=test_input_fn,
)
if args.output_file == '-':
for path, pred_dict in zip(paths_test, predictions):
class_id = pred_dict['class_ids'][0]
class_name = ind2name[class_id]
probabilities = pred_dict['probabilities']
                print('Path\t%s' % path)
                print('Prediction\t%s' % class_name)
                print('Probabilities\t%s' % probabilities)
print()
else:
with open(args.output_file, 'w') as file_out:
for path, pred_dict in zip(paths_test, predictions):
class_id = pred_dict['class_ids'][0]
class_name = ind2name[class_id]
probabilities = pred_dict['probabilities']
                    file_out.write('Path\t%s\n' % path)
                    file_out.write('Prediction\t%s\n' % class_name)
                    file_out.write('Probabilities\t%s\n' % probabilities)
file_out.write('\n')
    # Export the model for inference
export_dir = custom_classifier.export_savedmodel(
export_dir_base=args.model_base_dir,
serving_input_receiver_fn=serving_fn,
)
    print('Model exported to', export_dir)
if __name__ == '__main__':
main()
```
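For reference, a minimal sketch of exercising `load_data` above on its own, assuming a `data` directory laid out with the `left/right/stop/other/test` subfolders the globs expect:

```python
# Minimal sketch (assumed layout: data/{left,right,stop,other,test}/*.jpg)
(paths_train, train_x, train_y), (paths_test, test_x) = load_data(
    data_dir='data',
    input_height=48,   # matches the script's --input-height default
    input_width=48,    # matches --input-width
    input_channel=3,
    n_classes=4,
)
print(train_x.shape, train_y.shape)  # (n_train, 48, 48, 3), (n_train,)
print(test_x.shape)                  # (n_test, 48, 48, 3)
```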
#### File: ms-agv-car/utility_source/data_collect.py
```python
import os
import time
import argparse
import RPi.GPIO as GPIO
import cv2
# Pin configuration
PWM_PIN_left = 17
PWM_PIN_right = 18
IR_LEFT_PIN = 2
IR_MIDDLE_PIN = 3
IR_RIGHT_PIN = 4
DUTY_CYCLE = 65
IMAGE_QUEUE_LIMIT = 400
def main():
    # Define command-line arguments
    arg_parser = argparse.ArgumentParser(description='Line-following car program.')
arg_parser.add_argument('--data-dir', required=True)
    # Parse command-line arguments
args = arg_parser.parse_args()
    # Open the video source
video_dev = cv2.VideoCapture(0)
    # Initialize the GPIO pins
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(PWM_PIN_left, GPIO.OUT)
GPIO.setup(PWM_PIN_right, GPIO.OUT)
pwm1 = GPIO.PWM(PWM_PIN_left, 500)
pwm2 = GPIO.PWM(PWM_PIN_right, 500)
pwm1.start(0)
pwm2.start(0)
    GPIO.setup(IR_RIGHT_PIN, GPIO.IN)   # GPIO 4 -> right IR out
    GPIO.setup(IR_MIDDLE_PIN, GPIO.IN)  # GPIO 3 -> middle IR out
    GPIO.setup(IR_LEFT_PIN, GPIO.IN)    # GPIO 2 -> left IR out
pwm1.ChangeDutyCycle(DUTY_CYCLE)
pwm2.ChangeDutyCycle(DUTY_CYCLE)
images = list()
def forward():
pwm1.ChangeDutyCycle(DUTY_CYCLE)
pwm2.ChangeDutyCycle(DUTY_CYCLE)
def turn_left():
pwm1.ChangeDutyCycle(DUTY_CYCLE)
pwm2.ChangeDutyCycle(0)
def turn_right():
pwm1.ChangeDutyCycle(0)
pwm2.ChangeDutyCycle(DUTY_CYCLE)
def stop():
pwm1.ChangeDutyCycle(0)
pwm2.ChangeDutyCycle(0)
def track_line():
# TODO verify
middle_val = GPIO.input(IR_MIDDLE_PIN)
left_val = GPIO.input(IR_LEFT_PIN)
right_val = GPIO.input(IR_RIGHT_PIN)
        if middle_val:
            if left_val and right_val:        # white white white
                return 'stop'
            elif left_val and not right_val:  # white white black
                return 'left'
            elif not left_val and right_val:  # black white white
                return 'right'
            else:                             # black white black
                return 'forward'
        else:
            if left_val and right_val:        # white black white
                return 'stall'
            elif left_val and not right_val:  # white black black
                return 'left'
            elif not left_val and right_val:  # black black white
                return 'right'
            else:                             # black black black
                return 'stall'
try:
while True:
            # Decide the action from the IR sensor readings;
            # advice is one of 'left', 'right', 'stop', 'forward', 'stall'
advice = track_line()
print('advice', advice)
if advice == 'left':
turn_left()
elif advice == 'right':
turn_right()
elif advice == 'stop':
stop()
elif advice == 'forward':
forward()
elif advice == 'stall':
forward()
            # Take a photo and append it to the queue
ret, image = video_dev.read()
images.append((os.path.join(args.data_dir, '%d-%s.jpg' % (int(time.time() * 1000), advice)), image))
            print('queue size: %d' % len(images))
            # When the queue reaches its limit, stop the car and write the queued photos to disk
if len(images) == IMAGE_QUEUE_LIMIT:
stop()
                # Save the images
for path, image in images:
print('Write %s' % path)
cv2.imwrite(path, image)
                images = []
except KeyboardInterrupt:
pass
    # Stop the motors
pwm1.stop()
pwm2.stop()
    # Write any remaining queued photos to disk
for path, image in images:
print('Write %s' % path)
cv2.imwrite(path, image)
if __name__ == '__main__':
main()
```
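Because `track_line` reads the sensors directly through `RPi.GPIO`, its truth table is easiest to verify off-device. A minimal sketch with a hypothetical pure helper (not part of the script) that mirrors the white/black logic above, where 1 means the sensor sees white:

```python
def advice_from_sensors(left_val, middle_val, right_val):
    # Hypothetical stand-alone copy of track_line's truth table, for testing.
    if middle_val:
        if left_val and right_val:
            return 'stop'
        return 'left' if left_val else ('right' if right_val else 'forward')
    if left_val and right_val:
        return 'stall'
    return 'left' if left_val else ('right' if right_val else 'stall')

assert advice_from_sensors(1, 1, 1) == 'stop'     # white white white
assert advice_from_sensors(0, 1, 0) == 'forward'  # black white black
assert advice_from_sensors(0, 0, 0) == 'stall'    # black black black
```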
{
"source": "jerry800416/3dbinpacking",
"score": 3
}
#### File: 3dbinpacking/noused/test2.py
```python
def getCynByAxis(radius=1, heightStart=0, heightEnd=5,
                 offset=[0, 0, 0], division=20, mainAxis='z'):
    '''Return meshgrid coordinates (cx, cy, cz) of a cylinder surface with
    the given radius and height range, oriented along mainAxis.'''
    import numpy as np
    mainAxis = mainAxis.lower()
    theta = np.linspace(0, 2 * np.pi, division)
    cx = np.array([radius * np.cos(theta)])
    cz = np.array([heightStart, heightEnd])
    cx, cz = np.meshgrid(cx, cz)
    cy = np.array([radius * np.sin(theta)] * 2)
    if mainAxis == 'z':
        return offset[0] + cx, offset[1] + cy, offset[2] + cz
    elif mainAxis == 'y':
        return offset[0] + cx, offset[1] + cz, offset[2] + cy
    elif mainAxis == 'x':
        return offset[0] + cz, offset[1] + cy, offset[2] + cx
    else:
        raise ValueError("mainAxis must be 'x', 'y' or 'z'")
def drawCylinder():
    from mpl_toolkits.mplot3d import Axes3D
    import matplotlib.pyplot as plt
    import numpy as np
    cx, cy, cz = getCynByAxis(offset=[1, 2, 3], division=40,
                              mainAxis='z', heightEnd=100, heightStart=0,
                              radius=0.5)
    fig = plt.figure(figsize=(11, 10))
    ax = plt.axes(projection='3d')
    ax.plot_surface(cx, cy, cz, rstride=1, cstride=1,
                    linewidth=0, alpha=0.25)
    ax.set_xlim(-5, 5)
    ax.set_ylim(-5, 5)
    ax.set_zlim(0, 10)
    plt.show()
if __name__ == '__main__':
drawCylinder()
```
{
"source": "jerry8137/nctu-tracking",
"score": 3
}
#### File: jerry8137/nctu-tracking/iou_utils.py
```python
from numba import jit
import numpy as np
from scipy.spatial import ConvexHull
from shapely.geometry import Polygon
def shapely_polygon_intersection(poly1: np.ndarray, poly2: np.ndarray) -> float:
"""
Args:
- poly1: vertices must be in sequential order
- poly2: vertices must be in sequential order
Returns:
- float representing area of intersection
"""
poly1 = Polygon(poly1)
poly2 = Polygon(poly2)
return poly1.intersection(poly2).area
def shapely_polygon_area(poly: np.ndarray) -> float:
"""
Args:
- poly: vertices must be in sequential order
Returns:
- float representing polygon's area
"""
return Polygon(poly).area
def compute_iou_2d(bbox1: np.ndarray, bbox2: np.ndarray) -> float:
"""
Args:
- bbox1: vertices must be in sequential order
- bbox2: vertices must be in sequential order
Returns:
- iou_2d: intersection over union
"""
inter_area = shapely_polygon_intersection(bbox1, bbox2)
iou_2d = inter_area / (shapely_polygon_area(bbox1) + shapely_polygon_area(bbox2) - inter_area)
return iou_2d
def iou3d(corners1, corners2):
''' Compute 3D bounding box IoU.
Args:
- corners1: numpy array (8,3), assume up direction is negative Y
- corners2: numpy array (8,3), assume up direction is negative Y
Returns:
- iou: 3D bounding box IoU
- iou_2d: bird's eye view 2D bounding box IoU
'''
# corner points are in counter clockwise order
rect1 = [(corners1[i,0], corners1[i,1]) for i in range(3,-1,-1)]
rect2 = [(corners2[i,0], corners2[i,1]) for i in range(3,-1,-1)]
area1 = poly_area(np.array(rect1)[:,0], np.array(rect1)[:,1])
area2 = poly_area(np.array(rect2)[:,0], np.array(rect2)[:,1])
inter_area = shapely_polygon_intersection(rect1, rect2)
#inter, inter_area = convex_hull_intersection(rect1, rect2)
iou_2d = inter_area/(area1+area2-inter_area)
ymax = min(corners1[0,1], corners2[0,1])
ymin = max(corners1[4,1], corners2[4,1])
inter_vol = inter_area * max(0.0, ymax-ymin)
vol1 = box3d_vol(corners1)
vol2 = box3d_vol(corners2)
iou = inter_vol / (vol1 + vol2 - inter_vol)
return iou, iou_2d
def compute_iou_2d_bboxes(corners1, corners2):
"""
"""
rect1 = [(corners1[i,0], corners1[i,1]) for i in range(3,-1,-1)]
rect2 = [(corners2[i,0], corners2[i,1]) for i in range(3,-1,-1)]
return compute_iou_2d(rect1, rect2)
@jit
def poly_area(x,y):
return 0.5*np.abs(np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)))
@jit
def box3d_vol(corners):
''' corners: (8,3) no assumption on axis direction '''
a = np.sqrt(np.sum((corners[0,:] - corners[1,:])**2))
b = np.sqrt(np.sum((corners[1,:] - corners[2,:])**2))
c = np.sqrt(np.sum((corners[0,:] - corners[4,:])**2))
return a*b*c
@jit
def convex_hull_intersection(p1, p2):
""" Compute area of two convex hull's intersection area.
p1,p2 are a list of (x,y) tuples of hull vertices.
return a list of (x,y) for the intersection and its volume
"""
inter_p = polygon_clip(p1,p2)
if inter_p is not None:
hull_inter = ConvexHull(inter_p)
return inter_p, hull_inter.volume
else:
return None, 0.0
def polygon_clip(subjectPolygon, clipPolygon):
""" Clip a polygon with another polygon.
Args:
subjectPolygon: a list of (x,y) 2d points, any polygon.
clipPolygon: a list of (x,y) 2d points, has to be *convex*
Note:
**points have to be counter-clockwise ordered**
Return:
a list of (x,y) vertex point for the intersection polygon.
"""
def inside(p):
        return (cp2[0] - cp1[0]) * (p[1] - cp1[1]) > (cp2[1] - cp1[1]) * (p[0] - cp1[0])
def computeIntersection():
dc = [ cp1[0] - cp2[0], cp1[1] - cp2[1] ]
dp = [ s[0] - e[0], s[1] - e[1] ]
n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]
n2 = s[0] * e[1] - s[1] * e[0]
n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])
return [(n1*dp[0] - n2*dc[0]) * n3, (n1*dp[1] - n2*dc[1]) * n3]
outputList = subjectPolygon
cp1 = clipPolygon[-1]
for clipVertex in clipPolygon:
cp2 = clipVertex
inputList = outputList
outputList = []
s = inputList[-1]
for subjectVertex in inputList:
e = subjectVertex
if inside(e):
if not inside(s):
outputList.append(computeIntersection())
outputList.append(e)
elif inside(s):
outputList.append(computeIntersection())
s = e
cp1 = cp2
    if len(outputList) == 0:
        return None
    return outputList
```
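As a quick sanity check of `compute_iou_2d`, two overlapping unit squares have a known answer; a minimal sketch (the sequential vertex ordering is required by the docstrings above):

```python
import numpy as np

# Two axis-aligned unit squares, vertices in sequential order.
sq1 = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
sq2 = np.array([[0.5, 0.0], [1.5, 0.0], [1.5, 1.0], [0.5, 1.0]])

# Intersection 0.5, union 1.0 + 1.0 - 0.5 = 1.5, so IoU = 1/3.
print(compute_iou_2d(sq1, sq2))  # ~0.3333
```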
#### File: jerry8137/nctu-tracking/run_ab3dmot.py
```python
import argparse
import copy
import numpy as np
import os
from pathlib import Path
import pdb
from tqdm import tqdm
import uuid
from ab3dmot import AB3DMOT
import argoverse
from argoverse.data_loading.object_label_record import json_label_dict_to_obj_record
from argoverse.data_loading.simple_track_dataloader import SimpleArgoverseTrackingDataLoader
from argoverse.utils.se2 import SE2
from transform_utils import (
yaw_to_quaternion3d,
se2_to_yaw,
get_B_SE2_A,
rotmat2d
)
from json_utils import read_json_file, save_json_dict
def check_mkdir(dirpath):
""" """
if not Path(dirpath).exists():
os.makedirs(dirpath, exist_ok=True)
class UUIDGeneration():
def __init__(self):
self.mapping = {}
def get_uuid(self,seed):
if seed not in self.mapping:
self.mapping[seed] = uuid.uuid4().hex
return self.mapping[seed]
uuid_gen = UUIDGeneration()
def yaw_from_bbox_corners(det_corners: np.ndarray) -> float:
"""
Use basic trigonometry on cuboid to get orientation angle.
Args:
- det_corners: corners of bounding box
Returns:
- yaw
"""
p1 = det_corners[1]
p5 = det_corners[5]
dy = p1[1] - p5[1]
dx = p1[0] - p5[0]
# the orientation angle of the car
yaw = np.arctan2(dy, dx)
return yaw
def run_ab3dmot(
classname: str,
pose_dir: str,
dets_dump_dir: str,
tracks_dump_dir: str,
max_age: int = 3,
min_hits: int = 1,
min_conf: float = 0.3
) -> None:
"""
#path to argoverse tracking dataset test set, we will add our predicted labels into per_sweep_annotations_amodal/
#inside this folder
Filtering occurs in the city frame, not the egovehicle frame.
Args:
- classname: string, either 'VEHICLE' or 'PEDESTRIAN'
- pose_dir: string
- dets_dump_dir: string
- tracks_dump_dir: string
- max_age: integer
    - min_hits: integer
    - min_conf: float
Returns:
- None
"""
dl = SimpleArgoverseTrackingDataLoader(data_dir=pose_dir, labels_dir=dets_dump_dir)
for log_id in tqdm(dl.sdb.get_valid_logs()):
print(log_id)
labels_folder = dets_dump_dir + "/" + log_id + "/per_sweep_annotations_amodal/"
lis = os.listdir(labels_folder)
lidar_timestamps = [ int(file.split(".")[0].split("_")[-1]) for file in lis]
lidar_timestamps.sort()
previous_frame_bbox = []
ab3dmot = AB3DMOT(max_age=max_age,min_hits=min_hits)
print(labels_folder)
tracked_labels_copy = []
for j, current_lidar_timestamp in enumerate(lidar_timestamps):
#print(current_lidar_timestamp)
dets = dl.get_labels_at_lidar_timestamp(log_id, current_lidar_timestamp)
#print(f'There are {len(dets)} detections!')
dets_copy = dets
transforms = []
city_SE3_egovehicle = dl.get_city_to_egovehicle_se3(log_id, current_lidar_timestamp)
egovehicle_SE3_city = city_SE3_egovehicle.inverse()
transformed_labels = []
sdc_labels = []
for l_idx, l in enumerate(dets):
if l['label_class'] != classname:
# will revisit in other tracking pass
continue
if l["score"] < min_conf:
# print('Skipping det with confidence ', l["score"])
continue
det_obj = json_label_dict_to_obj_record(l)
det_corners_egovehicle_fr = det_obj.as_3d_bbox()
transforms += [city_SE3_egovehicle]
if city_SE3_egovehicle is None:
print('Was None')
# convert detection from egovehicle frame to city frame
det_corners_city_fr = city_SE3_egovehicle.transform_point_cloud(det_corners_egovehicle_fr)
ego_xyz = np.mean(det_corners_city_fr, axis=0)
origin = np.zeros((1,3))
origin = city_SE3_egovehicle.transform_point_cloud(origin)
#get vehicle frame xyz
sdc_xyz = np.mean(det_corners_city_fr, axis=0)
sdc_xyz -= origin[0]
# print(origin)
sdc_labels += [ [sdc_xyz[0], sdc_xyz[1], sdc_xyz[2]] ]
yaw = yaw_from_bbox_corners(det_corners_city_fr)
transformed_labels += [ [ego_xyz[0], ego_xyz[1], ego_xyz[2], yaw, l["length"],l["width"],l["height"]] ]
if len(transformed_labels) > 0:
transformed_labels = np.array(transformed_labels)
else:
transformed_labels = np.empty((0,7))
if len(sdc_labels) > 0:
sdc_labels = np.array(sdc_labels)
else:
sdc_labels = np.empty((0,3))
# print(sdc_labels)
dets_all = {
"dets":transformed_labels,
"info": np.zeros(transformed_labels.shape),
"sdc":sdc_labels
}
# perform measurement update in the city frame.
dets_with_object_id = ab3dmot.update(dets_all)
tracked_labels = []
for det in dets_with_object_id:
# move city frame tracks back to ego-vehicle frame
xyz_city = np.array([det[0].item(), det[1].item(), det[2].item()]).reshape(1,3)
city_yaw_object = det[3]
city_se2_object = SE2(rotation=rotmat2d(city_yaw_object), translation=xyz_city.squeeze()[:2])
city_se2_egovehicle, city_yaw_ego = get_B_SE2_A(city_SE3_egovehicle)
ego_se2_city = city_se2_egovehicle.inverse()
egovehicle_se2_object = ego_se2_city.right_multiply_with_se2(city_se2_object)
# recreate all 8 points
# transform them
# compute yaw from 8 points once more
egovehicle_SE3_city = city_SE3_egovehicle.inverse()
xyz_ego = egovehicle_SE3_city.transform_point_cloud(xyz_city).squeeze()
# update for new yaw
# transform all 8 points at once, then compute yaw on the fly
ego_yaw_obj = se2_to_yaw(egovehicle_se2_object)
qx,qy,qz,qw = yaw_to_quaternion3d(ego_yaw_obj)
tracked_labels.append({
"center": {"x": xyz_ego[0], "y": xyz_ego[1], "z": xyz_ego[2]},
"rotation": {"x": qx , "y": qy, "z": qz , "w": qw},
"length": det[4],
"width": det[5],
"height": det[6],
"track_label_uuid": uuid_gen.get_uuid(det[7]),
"timestamp": current_lidar_timestamp ,
"label_class": classname
})
tracked_labels_copy = copy.deepcopy(tracked_labels)
label_dir = os.path.join(tracks_dump_dir, log_id, "per_sweep_annotations_amodal")
check_mkdir(label_dir)
json_fname = f"tracked_object_labels_{current_lidar_timestamp}.json"
json_fpath = os.path.join(label_dir, json_fname)
if Path(json_fpath).exists():
# accumulate tracks of another class together
prev_tracked_labels = read_json_file(json_fpath)
tracked_labels.extend(prev_tracked_labels)
save_json_dict(json_fpath, tracked_labels)
if __name__ == '__main__':
"""
Run the tracker. The tracking is performed in the city frame, but the
tracks will be dumped into the egovehicle frame for evaluation.
2d IoU only is used for data association.
Note:
"max_age" denotes maximum allowed lifespan of a track (in timesteps of 100 ms)
since it was last updated with an associated measurement.
Argparse args:
- split: dataset split
- max_age: max allowed track age since last measurement update
- min_hits: minimum number of required hits for track birth
- pose_dir: should be path to raw log files e.g.
'/Users/johnlamb/Downloads/ARGOVERSE-COMPETITION/test' or
'/Users/johnlamb/Downloads/ARGOVERSE-COMPETITION/val/argoverse-tracking/val'
- dets_dataroot: should be path to 3d detections e.g.
'/Users/johnlamb/Downloads/argoverse_detections_2020'
- tracks_dump_dir: where to dump the generated tracks
"""
parser = argparse.ArgumentParser()
parser.add_argument("--split", type=str, required=True, help="val or test")
parser.add_argument("--max_age", type=int, default=15,
help="max allowed track age since last measurement update")
parser.add_argument("--min_hits", type=int, default=5,
help="minimum number of required hits for track birth")
parser.add_argument("--dets_dataroot", type=str,
required=True, help="path to 3d detections")
parser.add_argument("--pose_dir", type=str,
required=True, help="path to raw log data (including pose data) for validation or test set")
parser.add_argument("--tracks_dump_dir", type=str,
default='temp_files',
help="path to dump generated tracks (as .json files)")
parser.add_argument("--min_conf", type=float,
default=0.3,
help="minimum allowed confidence for 3d detections to be considered valid")
args = parser.parse_args()
# tracks will be dumped into a subfolder of this name
save_dirname = f'{args.split}-split-track-preds'
save_dirname += f'-maxage{args.max_age}-minhits{args.min_hits}-conf{args.min_conf}'
if args.split == 'train':
args.dets_dataroot += '/training'
elif args.split == 'val':
args.dets_dataroot += '/validation'
elif args.split == 'test':
args.dets_dataroot += '/testing'
args.tracks_dump_dir = f'{args.tracks_dump_dir}/{save_dirname}'
# Run tracker over vehicle detections separately from ped. detections
for classname in ['VEHICLE', 'PEDESTRIAN']:
run_ab3dmot(
classname,
args.pose_dir,
args.dets_dataroot,
args.tracks_dump_dir,
max_age=args.max_age,
min_hits=args.min_hits,
min_conf=args.min_conf
)
```
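A hypothetical invocation, with placeholder paths modeled on the argparse help strings above:

```python
# python run_ab3dmot.py \
#     --split val \
#     --pose_dir /path/to/argoverse-tracking/val \
#     --dets_dataroot /path/to/argoverse_detections_2020 \
#     --tracks_dump_dir temp_files \
#     --max_age 15 --min_hits 5 --min_conf 0.3
```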
{
"source": "jerry960331/Retailpia",
"score": 3
}
#### File: src/models/society.py
```python
import pygame
class Society(pygame.sprite.Sprite):
LEVEL_MULTIPLIER = 34
NORMAL_HOUSE = 0
NEXT_HOUSE = 1
BOBA_HOUSE = 2
LAND = 3
def __init__(self, x=100, y=100, w=112, h=112, floor=0, type=0, layer=0):
pygame.sprite.Sprite.__init__(self)
self.hidden_image = pygame.transform.scale(pygame.image.load("assets/1px.png").convert_alpha(), (1, 1))
self.initialize(x, y, w, h, floor, type, layer)
def initialize(self, x=100, y=100, w=112, h=112, floor=0, type=0, layer=0):
self.w = w
self.h = h
self._w = w
self._h = h
self.image = self.hidden_image
self.images = []
self.images_focused = []
self.floor = floor
self.level = 0
self.type = type
self.rect = self.image.get_rect()
# original x and y
self._x = x
self._y = y
self.rect.x = x
self.rect.y = y
self.isshow = False
self.isfocused = False
self._layer = layer
def show(self, type = None):
if self.isshow:
print("已經show過了")
return False
if self.type == self.LAND:
self.show_land()
return True
self.isshow = True
self.init_images(type)
return True
def init_images(self, type = None):
if type is not None:
image_path = self.get_image_path(type)
print(image_path)
else:
image_path = self.get_image_path(self.type)
w = self.w
h = self.h
self.images = []
self.images_focused = []
# self.images.append(pygame.transform.scale(pygame.image.load(image_path+".png").convert_alpha(), (int(w), int(h))))
# self.images.append(pygame.transform.scale(pygame.image.load(image_path+".png").convert_alpha(), (int(w), int(h) * 2)))
# self.images.append(pygame.transform.scale(pygame.image.load(image_path+".png").convert_alpha(), (int(w), int(h) * 3)))
# self.images_focused.append(pygame.transform.scale(pygame.image.load(image_path+".png").convert_alpha(), (int(w)+8, int(h) + 8)))
# self.images_focused.append(pygame.transform.scale(pygame.image.load(image_path+".png").convert_alpha(), (int(w)+4, int(h) * 2 + 4)))
# self.images_focused.append(pygame.transform.scale(pygame.image.load(image_path+".png").convert_alpha(), (int(w)+4, int(h) * 3 + 4)))
self.images.append(pygame.transform.scale(pygame.image.load(image_path+".png").convert_alpha(), (int(w), int(h))))
self.images.append(pygame.transform.scale(pygame.image.load(image_path+"_1.png").convert_alpha(), (int(w), int(h * 1.298))))
self.images.append(pygame.transform.scale(pygame.image.load(image_path+"_2.png").convert_alpha(), (int(w), int(h * 1.596))))
self.images_focused.append(pygame.transform.scale(pygame.image.load(image_path+".png").convert_alpha(), (int(w)+8, int(h) + 8)))
self.images_focused.append(pygame.transform.scale(pygame.image.load(image_path+"_1.png").convert_alpha(), (int(w)+7, int(h * 1.298 + 6))))
self.images_focused.append(pygame.transform.scale(pygame.image.load(image_path+"_2.png").convert_alpha(), (int(w)+4, int(h * 1.596 + 4))))
if self.isshow:
self.image = self.images[self.level]
else:
self.image = pygame.transform.scale(pygame.image.load("assets/1px.png").convert_alpha(), (1, 1))
self.rect = self.image.get_rect()
self.rect.x = self._x
self.rect.y = self._y
def set_type(self, type):
self.type = type
def get_image_path(self, type):
if type == self.NORMAL_HOUSE:
image_path = "assets/normal_house"
elif type == self.NEXT_HOUSE:
image_path = "assets/next_house"
elif type == self.BOBA_HOUSE:
image_path = "assets/bubble_tea_house"
elif type == self.LAND:
image_path = "assets/soil"
self.type = type
return image_path
def show_land(self):
if not self.type == self.LAND:
return
self.images.append(pygame.transform.scale(pygame.image.load("assets/1px.png").convert_alpha(), (int(self.w), int(self.h))))
self.images_focused.append(pygame.transform.scale(pygame.image.load("assets/soil.png").convert_alpha(), (int(self.w)+8, int(self.h) + 8)))
self.image = self.images[0]
self.rect = self.images_focused[0].get_rect()
self.rect.x = self._x
self.rect.y = self._y
self.isshow = True
def level_up(self):
"""
Make the society level up.
"""
if not self.isshow:
return False
if self.type == self.LAND:
print("ERROR: LAND 無法升級")
return False
if self.level >= 2:
print("ERROR: 樓層數大於3")
return False
self.level += 1
self.image = self.images[self.level]
self.rect = self.image.get_rect()
self.rect.x = self._x
self.rect.y = self._y - (self.level * self.LEVEL_MULTIPLIER)
print(f"房子升至{self.level}級")
return True
def set_focused(self, isfocused):
if not self.isshow:
return
if isfocused:
self.isfocused = True
self.rect.x = self._x - 2
self.rect.y = self._y - 4 - (self.floor*2) - (self.level * self.LEVEL_MULTIPLIER)
self.image = self.images_focused[self.level]
        else:
            self.isfocused = False
            self.rect.x = self._x
            self.rect.y = self._y - (self.level * self.LEVEL_MULTIPLIER)
            self.image = self.images[self.level]
def level_down(self):
if not self.isshow:
return False
if self.level <= 0:
print("ERROR: 樓層數小於0")
return False
if self.type == self.LAND:
print("ERROR: LAND 無法降級")
return False
self.level -= 1
self.image = self.images[self.level]
self.rect = self.image.get_rect()
self.rect.x = self._x
self.rect.y = self._y
print(f"房子降至{self.level}級")
return True
def redeemed(self):
if self.level != 2:
return
self.clear_society()
def clear_society(self):
print("clear")
self.initialize(x=self._x, y=self._y, w=self._w, h=self._h)
```
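A minimal sketch of the `Society` upgrade flow, assuming the `assets/` images exist and noting that `convert_alpha()` (called in `__init__` and `show`) requires an active pygame display:

```python
import pygame

pygame.init()
pygame.display.set_mode((1280, 720))  # convert_alpha() needs an active display

s = Society(x=100, y=100)
s.set_type(Society.NORMAL_HOUSE)
s.show()        # loads assets/normal_house*.png and becomes visible
s.level_up()    # level 0 -> 1; the sprite shifts up by LEVEL_MULTIPLIER
s.level_down()  # back to level 0
```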
#### File: Retailpia/src/save.py
```python
import pygame
import json
from datetime import datetime
import time
import getpass
from src.models import society
import os
def save(societies: dict):
if not os.path.isfile("src/game_info.json"):
generate()
with open(r"src/game_info.json", "r") as jsonFile1:
record = json.load(jsonFile1)
for society_key, society_value in societies.items():
record[society_key] = {}
for i in range(9):
record[society_key][str(i)] = {}
record[society_key][str(i)]["x"] = society_value[i]._x
record[society_key][str(i)]["y"] = society_value[i]._y
record[society_key][str(i)]["w"] = society_value[i].w
record[society_key][str(i)]["h"] = society_value[i].h
record[society_key][str(i)]["type"] = society_value[i].type
record[society_key][str(i)]["floor"] = society_value[i].floor
record[society_key][str(i)]["level"] = society_value[i].level
record[society_key][str(i)]["isshow"] = society_value[i].isshow
with open("src/game_info.json", "w", encoding='utf-8') as jsonFile2:
json.dump(record, jsonFile2, indent=4)
jsonFile1.close()
jsonFile2.close()
def generate():
new = {}
with open("src/game_info.json", 'w') as new_file:
#pass
json.dump(new, new_file, indent=4)
def restore(societies: dict):
try:
with open(r"src/game_info.json", "r") as jsonFile1:
record = json.load(jsonFile1)
for society_key, society_value in societies.items():
for i in range(9):
society_value[i]._x = record[society_key][str(i)]["x"]
society_value[i]._y = record[society_key][str(i)]["y"]
society_value[i].w = record[society_key][str(i)]["w"]
society_value[i].h = record[society_key][str(i)]["h"]
society_value[i].type = record[society_key][str(i)]["type"]
society_value[i].floor = record[society_key][str(i)]["floor"]
society_value[i].level = record[society_key][str(i)]["level"]
society_value[i].isshow = record[society_key][str(i)]["isshow"]
society_value[i].init_images()
return societies
except Exception as e:
print("No society save record.")
print(e)
return None
```
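For reference, the shape of the `src/game_info.json` file that `save()` writes, with illustrative values (the keys follow the loops above):

```python
# {
#   "society_0": {
#     "0": {"x": 583, "y": 144, "w": 112, "h": 112,
#           "type": 0, "floor": 0, "level": 1, "isshow": true},
#     ... entries "1" through "8" have the same shape ...
#   },
#   "society_1": { ... },   # 2nd-floor societies
#   "society_2": { ... }    # 3rd-floor societies
# }
```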
#### File: Retailpia/src/view.py
```python
import pygame
import random
import time
from src.event_manager import *
from src.local_config import LocalConfig
import src.model as model
from src.models import story
from src.models import society
from src.models import button
from src.models import input_box
from src.models import island
from src.models import code
from src.models import show_history
from src.models import instruction
from src.models import interface
from src.models import message
from src import save
class GraphicalView(object):
"""
Draws the model state onto the screen.
"""
def __init__(self, ev_mgr, model):
"""
ev_mgr (EventManager): Allows posting messages to the event queue.
model (GameEngine): a strong reference to the game Model.
Attributes:
isinitialized (bool): pygame is ready to draw.
screen (pygame.Surface): the screen surface.
clock (pygame.time.Clock): keeps the fps constant.
"""
self.ev_mgr = ev_mgr
ev_mgr.register_listener(self)
self.model = model
self.isinitialized = False
self.screen = None
self.clock = None
self.mouse_pos = (0, 0)
self.local_config = None
self.line_index = 0
self.key = 0
def notify(self, event):
"""
Receive events posted to the message queue.
"""
if isinstance(event, InitializeEvent):
self.initialize()
self.create_all_objects()
elif isinstance(event, QuitEvent):
# shut down the pygame graphics
self.isinitialized = False
pygame.quit()
elif isinstance(event, TickEvent):
if not self.isinitialized:
return
currentstate = self.model.state.peek()
if currentstate == model.STATE_INTRO:
self.render_intro()
if currentstate == model.STATE_STORY:
self.render_story()
if currentstate == model.STATE_INSTRUCTION:
self.render_instruction()
if currentstate == model.STATE_ISLAND:
self.render_island()
if currentstate == model.STATE_ADD:
self.render_add()
if currentstate == model.STATE_BUILD:
self.render_build()
if currentstate == model.STATE_HISTORY:
self.render_history()
if currentstate == model.STATE_CODE:
self.render_code()
self.clock.tick(60)
elif isinstance(event, InputEvent):
currentstate = self.model.state.peek()
if event.click_pos is not None:
self.mouse_pos = event.click_pos
if currentstate == model.STATE_ISLAND:
if event.char == pygame.K_q:
self.delete_house(3)
if event.char == pygame.K_w:
self.delete_house(1)
if event.char == pygame.K_e:
self.delete_house(0)
if event.char == pygame.K_a:
self.delete_house(6)
if event.char == pygame.K_s:
self.delete_house(4)
if event.char == pygame.K_d:
self.delete_house(2)
if event.char == pygame.K_z:
self.delete_house(8)
if event.char == pygame.K_x:
self.delete_house(7)
if event.char == pygame.K_c:
self.delete_house(5)
if currentstate == model.STATE_STORY:
if not(self.story.isDone()):
                    # Skip printing character by character and show the whole line at once
self.story.text_pointer = len(self.story.line)
elif self.line_index < len(self.story.lines) - 1:
                    # Next line of text
self.story.select_line(self.line_index + 1)
self.line_index += 1
elif self.line_index == len(self.story.lines) - 1:
                    # When the dialogue ends, change state automatically
self.line_index = 0
self.skip_story_sub()
if currentstate == model.STATE_ADD:
self.key = event.char
if currentstate == model.STATE_CODE:
pass
print('self.mouse_pos:' + str(self.mouse_pos))
def initialize(self):
"""
Set up the pygame graphical display and loads graphical resources.
"""
self.local_config = LocalConfig()
resolution_width, resolution_height, screen_flags, display_index, vsync = self.local_config.load_screen_config()
pygame.init()
pygame.font.init()
pygame.display.set_caption('RETAILPIA')
self.screen = pygame.display.set_mode(size = (resolution_width, resolution_height),
flags = screen_flags,
display = display_index,
vsync = vsync)
self.window_width, self.window_height = pygame.display.get_window_size()
self.clock = pygame.time.Clock()
self.isinitialized = True
# background
self.background_image = pygame.image.load("assets/background.png").convert_alpha()
self.background_image = pygame.transform.scale(self.background_image,
(self.window_width, self.window_height))
self.island_image = pygame.image.load("assets/3x3island.png").convert_alpha()
self.island_image = self.scale_keep_aspect_ratio(self.island_image, height = self.window_height * 0.8)
def scale_keep_aspect_ratio(self, image, width = 100000, height = 100000):
ref_width, ref_height = image.get_size()
        # Lock the aspect ratio, scaling against the larger of the reference width/height
if width <= height:
height = int(width * ref_height / ref_width)
else:
width = int(height * ref_width / ref_height)
image = pygame.transform.scale(image, (int(width), int(height)))
return image
def create_all_objects(self):
        '''
        Create the objects each screen needs.
        '''
self.create_share_objects()
# intro page
self.create_intro_objects()
# story page
self.create_story_objects()
# instruction page
self.create_instruction_objects()
# island page
self.create_island_objects()
# add page
self.create_add_objects()
# build page
self.create_build_objects()
# history page
self.create_history_objects()
# code page
self.create_code_objects()
def create_share_objects(self):
# share
self.help_btn = button.Button(1120, 50, 100, 0, "assets/help_button.png", True) # used by island, add, history, code
self.return_btn = button.Button(50, 50, 100, 0, "assets/return_button.png", True) # used by add, history, code
def create_intro_objects(self):
self.intro_objs = pygame.sprite.Group()
self.start_btn = button.Button(1040, 540, 200, 0, "assets/start_button.png", True)
self.intro_objs.add(self.start_btn)
def create_story_objects(self):
self.story_objs = pygame.sprite.Group()
self.skip_btn = button.Button(1040, 50, 180, 0, "assets/skip_button.png", True)
self.story_objs.add(self.skip_btn)
self.story = story.Story(100, 620, 1280, 720)
def create_instruction_objects(self):
self.instruction_objs = pygame.sprite.Group()
self.instruction_objs.add(self.return_btn)
self.story_btn = button.Button(1040, 50, 180, 0, "assets/story_button.png", True)
self.instruction_objs.add(self.story_btn)
self.small_add_btn = button.Button(705, 320, 70, 0, "assets/add_button.png", True)
self.instruction_objs.add(self.small_add_btn)
self.small_history_btn = button.Button(880, 440, 70, 0, "assets/history_button.png", True)
self.instruction_objs.add(self.small_history_btn)
self.instruction = instruction.Instruction(90, 200)
self.instruction_interface = interface.Interface(1220, 540, "assets/help_box.png", height_offset = 75)
self.ori_island = island.Island()
def create_island_objects(self):
self.island_objs = pygame.sprite.Group()
self.island_objs.add(self.help_btn)
self.history_btn = button.Button(1120, 500, 100, 0, "assets/history_button.png", True)
self.island_objs.add(self.history_btn)
self.add_btn = button.Button(1120, 600, 100, 0, "assets/add_button.png", True)
self.island_objs.add(self.add_btn)
self.lands = []
self.societies = []
self.societies_2nd_floor = []
self.societies_3rd_floor = []
# for 3x3
society_positions = [(583, 144), (479, 210), (686, 204), (375, 272), (582, 271), (793, 266), (477, 334), (690, 330), (580, 392)]
self.society_num = len(society_positions)
self.society_objs = pygame.sprite.LayeredUpdates()
for i in range(self.society_num):
x, y = society_positions[i][0], society_positions[i][1]
            # Load all of the default buildings
self.lands.append(society.Society(x-7, y+30, w=125, h=85, layer=0))
self.lands[i].set_type(society.Society.LAND)
self.lands[i].show()
self.societies.append(society.Society(x, y, layer=i*10))
self.societies_2nd_floor.append(society.Society(x, y-38, floor=1, layer=10*i+1))
self.societies_3rd_floor.append(society.Society(x, y-76, floor=2, layer=10*i+2))
# print(f"({x}, {y})")
society_dict = {
"society_0" : self.societies,
"society_1" : self.societies_2nd_floor,
"society_2" : self.societies_3rd_floor,
}
society_dict = save.restore(society_dict)
self.society_objs.add(self.lands)
if society_dict is not None:
print("society json is not None")
for society_key, society_value in society_dict.items():
self.society_objs.add(society_value)
else:
            # Automatically add every house
print("society json is None")
self.society_objs.add(self.societies)
self.society_objs.add(self.societies_2nd_floor)
self.society_objs.add(self.societies_3rd_floor)
self.save_society()
        # For testing: automatically spawn one house
# self.societies[0].show()
def create_add_objects(self):
self.add_objs = pygame.sprite.Group()
self.input_name = input_box.Input_box(int((1280-200)/2), 244, 140, 32, "Name")
self.input_brand = input_box.Input_box(int((1280-200)/2), 294, 140, 32, "Brand")
self.input_money = input_box.Input_box(int((1280-200)/2), 344, 140, 32, "Pounds") # let's go British!
self.input_code = input_box.Input_box(int((1280-200)/2), 394, 140, 32, "Code")
self.input_type = input_box.Input_box(int((1280-200)/2), 444, 140, 32, "Type")
self.add_objs.add(self.return_btn)
self.add_objs.add(self.help_btn)
self.submit_btn = button.Button(1120, 600, 100, 0, "assets/submit_button.png", True)
self.add_objs.add(self.submit_btn)
self.add_interface = interface.Interface(300, 350, "assets/ui_box.png")
def create_build_objects(self):
self.build_objs = pygame.sprite.Group()
self.build_message = message.Message(50, 40, "Choose a land or building to upgrade!")
def create_history_objects(self):
self.history_objs = pygame.sprite.Group()
self.history_name = show_history.Show_history("Name")
self.history_name.read_data()
self.history_brand = show_history.Show_history("Brand")
self.history_brand.read_data()
self.history_Pounds = show_history.Show_history("Pounds")
self.history_Pounds.read_data()
self.history_code = show_history.Show_history("Code")
self.history_code.read_data()
self.history_type = show_history.Show_history("Type")
self.history_type.read_data()
self.history_objs.add(self.return_btn)
self.history_objs.add(self.help_btn)
self.history_interface = interface.Interface(850, 600, "assets/ui_box.png")
def create_code_objects(self):
self.code_objs = pygame.sprite.Group()
self.code_objs.add(self.return_btn)
self.code_objs.add(self.help_btn)
self.code_objs.add(self.submit_btn)
self.code = code.Code()
self.code_interface = interface.Interface(250, 100, "assets/ui_box.png")
def render_intro(self):
"""
Render the game intro.
"""
self.start_btn.set_focus(self.start_btn.rect.collidepoint(pygame.mouse.get_pos()))
if self.start_btn.rect.collidepoint(self.mouse_pos):
print("按下「enter按鈕」")
# 首次遊玩自動進story
if self.local_config.skip_story:
self.ev_mgr.post(StateChangeEvent(model.STATE_ISLAND))
else:
self.line_index = 0
self.story.select_line(self.line_index)
self.ev_mgr.post(StateChangeEvent(model.STATE_STORY))
self.draw_all(self.intro_objs)
def render_story(self):
"""
Render the game story.
"""
self.skip_btn.set_focus(self.skip_btn.rect.collidepoint(pygame.mouse.get_pos()))
if self.skip_btn.rect.collidepoint(self.mouse_pos):
print("按下「skip按鈕」")
self.skip_story_sub()
self.story.render_line()
self.draw_all((self.story, self.story_objs))
def render_instruction(self):
"""
Render the game instruction.
"""
self.story_btn.set_focus(self.story_btn.rect.collidepoint(pygame.mouse.get_pos()))
if self.story_btn.rect.collidepoint(self.mouse_pos):
print("按下「Story again按鈕」")
self.line_index = 0
self.story.select_line(self.line_index)
self.ev_mgr.post(StateChangeEvent(model.STATE_STORY))
self.return_btn.set_focus(self.return_btn.rect.collidepoint(pygame.mouse.get_pos()))
if self.return_btn.rect.collidepoint(self.mouse_pos):
print("按下「return按鈕」")
self.ev_mgr.post(StateChangeEvent(None))
self.draw_all((self.ori_island, self.instruction_interface, self.instruction, self.instruction_objs))
def render_island(self):
"""
Render the game island.
"""
self.help_btn.set_focus(self.help_btn.rect.collidepoint(pygame.mouse.get_pos()))
if self.help_btn.rect.collidepoint(self.mouse_pos):
print("按下「help按鈕」")
self.ev_mgr.post(StateChangeEvent(model.STATE_INSTRUCTION))
self.history_btn.set_focus(self.history_btn.rect.collidepoint(pygame.mouse.get_pos()))
if self.history_btn.rect.collidepoint(self.mouse_pos):
print("按下「history按鈕」")
self.ev_mgr.post(StateChangeEvent(model.STATE_HISTORY))
self.add_btn.set_focus(self.add_btn.rect.collidepoint(pygame.mouse.get_pos()))
if self.add_btn.rect.collidepoint(self.mouse_pos):
print("按下「add按鈕」")
self.ev_mgr.post(StateChangeEvent(model.STATE_ADD))
        # Test deleting houses
# self.society_objs.remove(self.societies_3rd_floor[2])
# self.society_objs.remove(self.societies_3rd_floor[6])
# self.society_objs.remove(self.societies_2nd_floor[6])
focused_societies = []
selected_societies = []
for i in range(self.society_num):
            # Handle focused buildings (a focused building is enlarged)
if self.societies[i].rect.collidepoint(pygame.mouse.get_pos()) or \
self.societies_2nd_floor[i].rect.collidepoint(pygame.mouse.get_pos()) or \
self.lands[i].rect.collidepoint(pygame.mouse.get_pos()):
focused_societies.append(i)
            # Handle clicked buildings
if self.societies[i].rect.collidepoint(self.mouse_pos) or \
self.societies_2nd_floor[i].rect.collidepoint(self.mouse_pos):
print(f"Island按下「{i}號房子」, level = {self.societies[i].level}")
self.mouse_pos = (0, 0)
selected_societies.append(i)
            # Reset building focus (a focused building is enlarged)
self.lands[i].set_focused(False)
self.societies[i].set_focused(False)
self.societies_2nd_floor[i].set_focused(False)
self.societies_3rd_floor[i].set_focused(False)
        # Of the selected buildings, act on the one with the largest ID
if selected_societies:
i = max(selected_societies)
            # Show the discount code
if self.societies[i].level == 2 or \
self.societies_2nd_floor[i].level == 2 or \
self.societies_3rd_floor[i].level == 2:
                self.code.update(i)  # update the code
self.ev_mgr.post(StateChangeEvent(model.STATE_CODE))
# print(focused_societies)
if focused_societies:
i = max(focused_societies)
self.lands[i].set_focused(True)
self.societies[i].set_focused(True)
self.societies_2nd_floor[i].set_focused(True)
self.societies_3rd_floor[i].set_focused(True)
self.draw_all((self.society_objs, self.island_objs))
def render_add(self):
"""
Render the game add.
"""
self.input_name.update()
self.input_name.enter(self.mouse_pos, self.key)
self.input_brand.update()
self.input_brand.enter(self.mouse_pos, self.key)
self.input_money.update()
self.input_money.enter(self.mouse_pos, self.key)
self.input_code.update()
self.input_code.enter(self.mouse_pos, self.key)
self.input_type.update()
self.input_type.enter(self.mouse_pos, self.key)
self.key = None # prevent duplicate input
self.return_btn.set_focus(self.return_btn.rect.collidepoint(pygame.mouse.get_pos()))
if self.return_btn.rect.collidepoint(self.mouse_pos):
print("按下「return按鈕」")
self.ev_mgr.post(StateChangeEvent(None))
self.help_btn.set_focus(self.help_btn.rect.collidepoint(pygame.mouse.get_pos()))
if self.help_btn.rect.collidepoint(self.mouse_pos):
print("按下「help按鈕」")
self.ev_mgr.post(StateChangeEvent(model.STATE_INSTRUCTION))
self.submit_btn.set_focus(self.submit_btn.rect.collidepoint(pygame.mouse.get_pos()))
if self.submit_btn.rect.collidepoint(self.mouse_pos):
print("按下「submit按紐」")
self.input_name.record(cache=False)
self.input_name.clear_cache()
self.ev_mgr.post(StateChangeEvent(model.STATE_BUILD))
self.draw_all((self.society_objs, self.add_interface, self.add_objs, self.input_name, self.input_brand, self.input_money, self.input_code, self.input_type))
def render_build(self):
"""
Render the game build.
"""
selected_societies = []
focused_societies = []
succeed = None
self.return_btn.set_focus(self.return_btn.rect.collidepoint(pygame.mouse.get_pos()))
if self.return_btn.rect.collidepoint(self.mouse_pos):
self.ev_mgr.post(StateChangeEvent(None))
current_mouse_pos = pygame.mouse.get_pos()
for i in range(self.society_num):
            # Handle focused buildings (a focused building is enlarged)
if self.societies[i].rect.collidepoint(current_mouse_pos) or \
self.societies_2nd_floor[i].rect.collidepoint(current_mouse_pos) or \
self.lands[i].rect.collidepoint(current_mouse_pos):
focused_societies.append(i)
            # Handle clicked buildings:
            # add a building
if self.societies[i].rect.collidepoint(self.mouse_pos) or \
self.societies_2nd_floor[i].rect.collidepoint(self.mouse_pos) or \
self.lands[i].rect.collidepoint(self.mouse_pos):
print(f"Build按下「{i}號房子」")
selected_societies.append(i)
self.lands[i].set_focused(False)
self.societies[i].set_focused(False)
self.societies_2nd_floor[i].set_focused(False)
self.societies_3rd_floor[i].set_focused(False)
        # Of the selected buildings, act on the one with the largest ID
if selected_societies:
i = max(selected_societies)
type = self.history_brand.get_last()
succeed = self.society_upgrade(i, type)
print(f"succeed = {succeed}")
        # Of the focused buildings, act on the one with the largest ID
if focused_societies:
i = max(focused_societies)
self.lands[i].set_focused(True)
self.societies[i].set_focused(True)
self.societies_2nd_floor[i].set_focused(True)
self.societies_3rd_floor[i].set_focused(True)
self.mouse_pos = (0, 0)
if succeed:
self.save_society()
            # Pop the state machine twice
self.ev_mgr.post(StateChangeEvent(None))
self.ev_mgr.post(StateChangeEvent(None))
self.draw_all((self.build_objs, self.society_objs, self.build_message))
def society_upgrade(self, i, type):
"""
建築升級處理
判斷現在幾樓
如果是與該建築下方的建築是「不同」種類的
蓋一棟新的1級建築
如果是與該建築下方的建築是「相同」種類的
升級一棟建築
詳細訊息見print()資訊
以下註解樓層數為 0, 1, 2,變數為societies, 2nd_floor, 3rd_floor
"""
succeed = False
        # If all three floors are already shown, return False immediately
if (self.societies[i].isshow
and self.societies_2nd_floor[i].isshow
and self.societies_3rd_floor[i].isshow):
return False
if (self.societies_2nd_floor[i].isshow
and self.societies_2nd_floor[i].level == 0
and self.societies_2nd_floor[i].type != type):
print(f"{i}號房子1樓,建造2樓,類型為:{type}")
succeed = self.societies_3rd_floor[i].show(type)
return succeed
if (self.societies[i].isshow
and self.societies[i].level == 1
and self.societies[i].type != type):
print(f"{i}號房子0樓,建造2樓,類型為:{type}")
succeed = self.societies_3rd_floor[i].show(type)
return succeed
if (self.societies_2nd_floor[i].isshow
and self.societies_2nd_floor[i].level == 0
and self.societies_2nd_floor[i].type == type):
print(f"{i}號房子1樓,升級,相同類型")
succeed = self.societies_2nd_floor[i].level_up()
return succeed
if (self.societies[i].isshow
and self.societies[i].level != 2
and self.societies[i].type == type):
print(f"{i}號房子0樓,升級,相同類型")
succeed = self.societies[i].level_up()
return succeed
if (self.societies[i].isshow
and self.societies[i].level == 0
and self.societies[i].type != type):
print(f"{i}號房子0樓,建造1樓,類型為:{type}")
succeed = self.societies_2nd_floor[i].show(type)
return succeed
        # If there is no floor 0, add floor 0
if not self.societies[i].isshow:
succeed = self.societies[i].show(type)
print(f"{i}號房子0樓加入,類型為{type}")
return succeed
return succeed
def save_society(self):
society_dict = {
"society_0" : self.societies,
"society_1" : self.societies_2nd_floor,
"society_2" : self.societies_3rd_floor,
}
save.save(society_dict)
def render_history(self):
"""
Render the game history.
"""
self.return_btn.set_focus(self.return_btn.rect.collidepoint(pygame.mouse.get_pos()))
if self.return_btn.rect.collidepoint(self.mouse_pos):
print("按下「return按鈕」")
self.ev_mgr.post(StateChangeEvent(None))
self.help_btn.set_focus(self.help_btn.rect.collidepoint(pygame.mouse.get_pos()))
if self.help_btn.rect.collidepoint(self.mouse_pos):
print("按下「help按鈕」")
self.ev_mgr.post(StateChangeEvent(model.STATE_INSTRUCTION))
self.draw_all((self.society_objs, self.history_interface, self.history_objs, self.history_name, self.history_brand, self.history_Pounds, self.history_code, self.history_type))
def render_code(self):
"""
Render the game code.
"""
self.return_btn.set_focus(self.return_btn.rect.collidepoint(pygame.mouse.get_pos()))
if self.return_btn.rect.collidepoint(self.mouse_pos):
print("按下「return按鈕」")
self.ev_mgr.post(StateChangeEvent(None))
self.help_btn.set_focus(self.help_btn.rect.collidepoint(pygame.mouse.get_pos()))
if self.help_btn.rect.collidepoint(self.mouse_pos):
print("按下「help按鈕」")
self.ev_mgr.post(StateChangeEvent(model.STATE_INSTRUCTION))
self.submit_btn.set_focus(self.submit_btn.rect.collidepoint(pygame.mouse.get_pos()))
if self.submit_btn.rect.collidepoint(self.mouse_pos):
print("按下「submit按紐」")
self.mouse_pos = (0, 0)
code = input("Enter the code here: ")
if code == self.code.code_text:
print("success")
                # Delete that house
self.ev_mgr.post(StateChangeEvent(None))
self.delete_house(self.code.society_num)
else:
print("failure")
self.draw_all((self.society_objs, self.code_objs, self.code_interface, self.code))
def delete_house(self, num):
self.societies[num].clear_society()
self.societies_2nd_floor[num].clear_society()
self.societies_3rd_floor[num].clear_society()
self.save_society()
def skip_story_sub(self):
        # After entering the story on first play, update config.ini
if self.local_config.skip_story:
self.ev_mgr.post(StateChangeEvent(None))
else:
self.local_config.config['GAME']['skip_story'] = 'TRUE'
self.local_config.save_config()
self.local_config.load_game_config()
self.ev_mgr.post(StateChangeEvent(None))
self.ev_mgr.post(StateChangeEvent(model.STATE_ISLAND))
def draw_all(self, objects):
self.screen.fill((0, 0, 0))
self.screen.blit(self.background_image, (0, 0))
x, y = self.window_width * 0.5, self.window_height * 0.6
rect = self.island_image.get_rect(center=(x, y))
self.screen.blit(self.island_image, rect)
        # If given a list or tuple, draw each element to the screen in turn
if isinstance(objects, tuple) or isinstance(objects, list):
for object in objects:
object.draw(self.screen)
else:
objects.draw(self.screen)
pygame.display.flip()
```
{
"source": "jerry99s/second.pytorch",
"score": 2
}
#### File: second.pytorch/second/script.py
```python
from second.pytorch.train import train, evaluate
from google.protobuf import text_format
from second.protos import pipeline_pb2
from pathlib import Path
from second.utils import config_tool
def train_multi_rpn_layer_num():
config_path = "./configs/car.lite.config"
model_root = Path.home() / "second_test" # don't forget to change this.
config = pipeline_pb2.TrainEvalPipelineConfig()
with open(config_path, "r") as f:
proto_str = f.read()
text_format.Merge(proto_str, config)
input_cfg = config.eval_input_reader
model_cfg = config.model.second
layer_nums = [2, 4, 7, 9]
for l in layer_nums:
model_dir = str(model_root / f"car_lite_L{l}")
model_cfg.rpn.layer_nums[:] = [l]
train(config, model_dir)
def eval_multi_threshold():
config_path = "./configs/car.fhd.config"
ckpt_name = "/path/to/your/model_ckpt" # don't forget to change this.
assert "/path/to/your" not in ckpt_name
config = pipeline_pb2.TrainEvalPipelineConfig()
with open(config_path, "r") as f:
proto_str = f.read()
text_format.Merge(proto_str, config)
model_cfg = config.model.second
threshs = [0.3]
for thresh in threshs:
model_cfg.nms_score_threshold = thresh
# don't forget to change this.
result_path = Path.home() / f"second_test_eval_{thresh:.2f}"
evaluate(
config,
result_path=result_path,
ckpt_path=str(ckpt_name),
batch_size=1,
measure_time=True)
if __name__ == "__main__":
eval_multi_threshold()
```
#### File: utils/config_tool/train.py
```python
from second.protos.optimizer_pb2 import Optimizer, LearningRate, OneCycle, ManualStepping, ExponentialDecay
from second.protos.sampler_pb2 import Sampler
from second.utils.config_tool import read_config
from pathlib import Path
from google.protobuf import text_format
from second.data.all_dataset import get_dataset_class
def _get_optim_cfg(train_config, optim):
if optim == "adam_optimizer":
return train_config.optimizer.adam_optimizer
elif optim == "rms_prop_optimizer":
return train_config.optimizer.rms_prop_optimizer
elif optim == "momentum_optimizer":
return train_config.optimizer.momentum_optimizer
else:
raise NotImplementedError
def manual_stepping(train_config, boundaries, rates, optim="adam_optimizer"):
optim_cfg = _get_optim_cfg(train_config, optim)
optim_cfg.learning_rate.manual_stepping.CopyFrom(
ManualStepping(boundaries=boundaries, rates=rates))
def exp_decay(train_config,
init_lr,
decay_length,
decay_factor,
staircase=True,
optim="adam_optimizer"):
optim_cfg = _get_optim_cfg(train_config, optim)
optim_cfg.learning_rate.exponential_decay.CopyFrom(
ExponentialDecay(
initial_learning_rate=init_lr,
decay_length=decay_length,
decay_factor=decay_factor,
staircase=staircase))
def one_cycle(train_config,
lr_max,
moms,
div_factor,
pct_start,
optim="adam_optimizer"):
optim_cfg = _get_optim_cfg(train_config, optim)
optim_cfg.learning_rate.one_cycle.CopyFrom(
OneCycle(
lr_max=lr_max,
moms=moms,
div_factor=div_factor,
pct_start=pct_start))
def _div_up(a, b):
return (a + b - 1) // b
def set_train_step(config,
epochs,
eval_epoch):
input_cfg = config.train_input_reader
train_cfg = config.train_config
batch_size = input_cfg.batch_size
dataset_name = input_cfg.dataset.dataset_class_name
ds = get_dataset_class(dataset_name)(
root_path=input_cfg.dataset.kitti_root_path,
info_path=input_cfg.dataset.kitti_info_path,
)
num_examples_after_sample = len(ds)
step_per_epoch = _div_up(num_examples_after_sample, batch_size)
step_per_eval = step_per_epoch * eval_epoch
total_step = step_per_epoch * epochs
train_cfg.steps = total_step
train_cfg.steps_per_eval = step_per_eval
def disable_sample(config):
input_cfg = config.train_input_reader
input_cfg.database_sampler.CopyFrom(Sampler())
def disable_per_gt_aug(config):
prep_cfg = config.train_input_reader.preprocess
prep_cfg.groundtruth_localization_noise_std[:] = [0, 0, 0]
prep_cfg.groundtruth_rotation_uniform_noise[:] = [0, 0]
def disable_global_aug(config):
prep_cfg = config.train_input_reader.preprocess
prep_cfg.global_rotation_uniform_noise[:] = [0, 0]
prep_cfg.global_scaling_uniform_noise[:] = [0, 0]
prep_cfg.global_random_rotation_range_per_object[:] = [0, 0]
prep_cfg.global_translate_noise_std[:] = [0, 0, 0]
if __name__ == "__main__":
path = Path(__file__).resolve().parents[2] / "configs/car.lite.config"
config = read_config(path)
manual_stepping(config.train_config, [0.8, 0.9], [1e-4, 1e-5, 1e-6])
print(text_format.MessageToString(config, indent=2))
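    # Usage sketch (hypothetical epoch counts): step counts can also be derived
    # from the dataset size instead of being hard-coded, e.g.
    # set_train_step(config, epochs=50, eval_epoch=5)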
```
|
{
"source": "JerryALee/fraccalc",
"score": 3
}
|
#### File: fraccalc/numeric/diffintegral.py
```python
import numpy as np
from ..basic import gamma, gammaRatio
def coeff(v, N=7, method='2'):
'''
Return the fractional coefficients.
Parameters
----------
v : float
        Order of the diffintegration.
N : int, optional
Length of the corresponding coefficients. Default is 7.
method : str
Diffintegration operator. {'1' or '2' (default)}.
Returns
----------
coefficients : ndarray
        Coefficients from C_{0} to C_{N-1}.
'''
if method == '2':
n = N - 2
coefficients = np.zeros(N)
temp = np.array([v/4 + v**2 / 8, 1 - v**2 / 4, -v/4 + v**2 / 8])
coefficients[0] = temp[0]
coefficients[1] = 1 - v**2 / 2 - v**3 / 8
for k in range(1, n - 1):
coefficients[k + 1] = gammaRatio(k - v + 1, -v) / gamma(k + 2) * temp[0] + gammaRatio(
k - v, -v) / gamma(k + 1) * temp[1] + gammaRatio(k - v - 1, -v) / gamma(k) * temp[2]
coefficients[n] = gammaRatio(n - v - 1, -v) / gamma(n) * \
temp[1] + gammaRatio(n - v - 2, -v) / gamma(n - 1) * temp[2]
coefficients[-1] = gammaRatio(n - v - 1, -v) / gamma(n) * temp[2]
return coefficients
elif method == '1':
        coefficients = np.zeros(N)
coefficients[0] = 1
coefficients[1] = -v
for k in range(2, N):
coefficients[k] = gammaRatio(k - v, -v) / gamma(k + 1)
return coefficients
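# Usage sketch (hypothetical order v = 0.5): coeff(0.5, N=7, method='1')
# returns the first seven weights; for method '1' these follow the
# Grunwald-Letnikov-style recurrence implemented above.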
def dotPos(xq, N=7, a=0, method='2'):
'''
Return the position array for the mask convolution.
Parameters
----------
xq : float
Point at which function is diffintegrated.
N : int, optional
Length of the corresponding coefficients. Default is 7.
a : float, optional
Lower limit of the diffintegration. Default is 0.
method : str
Diffintegration operator. {'1' or '2' (default)}.
Returns
----------
h : float
Step size of the interval.
x_arr : ndarray
Positions for mask convolution.
'''
if method == '2':
h = (xq - a) / (N - 2)
x_arr = np.linspace(xq + h, a, N)
return h, x_arr
elif method == '1':
h = (xq - a) / N
x_arr = np.linspace(xq, a + h, N)
return h, x_arr
def deriv(fun, xq, v, N=7, a=0, method='2'):
'''
Calculate the fractional diffintegral.
Parameters
----------
fun : callable
Diffintegrand function.
xq : ndarray or float
Point at which fun is diffintegrated.
v : float
Diffintegration order.
N : int, optional
Length of the corresponding coefficients. Default is 7.
a : float, optional
Lower limit of the diffintegration. Default is 0.
method : str
Diffintegration operator. {'1' or '2' (default)}.
Returns
----------
yq : ndarray or float
The diffintegral value at xq.
'''
C = coeff(v, N, method)
if hasattr(xq, "__len__"):
num = len(xq)
yq = np.zeros(num)
for i in range(num):
h, x_tmp = dotPos(xq[i], N, a, method)
yq[i] = np.dot(C, fun(x_tmp)) / h**(v)
return yq
else:
h, x_tmp = dotPos(xq, N, a, method)
return np.dot(C, fun(x_tmp)) / h**(v)
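# Usage sketch (illustrative values only):
#     deriv(np.sin, np.linspace(1.0, 2.0, 5), 0.5)
# evaluates the half-order (v = 0.5) diffintegral of sin at five query points,
# with the default lower limit a = 0 and the default operator method '2'.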
def mask(v, N=13, method='Tiansi'):
'''
Return fractional mask operator.
Parameters
----------
v : float
Diffintegration order.
N : int, optional
Mask size of the corresponding operator. Default is 13 x 13.
method : str
Diffintegration operator. {'Tiansi' (1, default) or 'lcr' (2)}.
Returns
----------
result_mask : 2darray
The fractional mask.
'''
    center = (N - 1) // 2
result_mask = np.zeros((N, N))
if method == 'Tiansi' or method == '1':
C = coeff(v, center + 1, '1')
elif method == 'lcr' or method == '2':
C = coeff(v, center + 2, '2')
C[2] += C[0]
C = C[1:]
result_mask[center, center] = 8 * C[0]
for i in range(1, center + 1):
c = C[i]
result_mask[center - i, center] = c
result_mask[center + i, center] = c
result_mask[center, center - i] = c
result_mask[center, center + i] = c
result_mask[center + i, center - i] = c
result_mask[center - i, center + i] = c
result_mask[center - i, center - i] = c
result_mask[center + i, center + i] = c
return result_mask
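# Example: mask(0.5, N=5, method='Tiansi') builds a 5x5 operator whose center
# weight is 8 * C[0] and whose eight rays (rows, columns and both diagonals)
# carry C[1] and C[2] outward from the center.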
def deriv8(A, v, method='2', N=7):
'''
    Compute the fractional diffintegral in the eight directions of a matrix A.
Parameters
----------
A : 2darray
        Matrix (image) that needs to be diffintegrated.
v : float
Diffintegration order.
method : str
Diffintegration operator. {'1' or '2' (default)}.
N : int, optional
Length of the corresponding coefficients. Default is 7.
Returns
----------
d8 : 3darray
        Fractional diffintegral result. The first dimension represents the direction, in the following order: u, d, l, r, ld, ru, lu, rd.
'''
len_x, len_y = A.shape
C = coeff(v, N, method)
d8 = np.zeros((8, len_x, len_y))
if method == '1':
A_pad = np.pad(A, N - 1, mode='symmetric')
for k in range(N):
c = C[k]
d8[0] += c * A_pad[(N - 1 - k):(N - 1 - k + len_x), (N - 1):(N - 1 + len_y)]
d8[1] += c * A_pad[(N - 1 + k):(N - 1 + k + len_x), (N - 1):(N - 1 + len_y)]
d8[2] += c * A_pad[(N - 1):(N - 1 + len_x), (N - 1 - k):(N - 1 - k + len_y)]
d8[3] += c * A_pad[(N - 1):(N - 1 + len_x), (N - 1 + k):(N - 1 + k + len_y)]
d8[4] += c * A_pad[(N - 1 + k):(N - 1 + k + len_x), (N - 1 - k):(N - 1 - k + len_y)]
d8[5] += c * A_pad[(N - 1 - k):(N - 1 - k + len_x), (N - 1 + k):(N - 1 + k + len_y)]
d8[6] += c * A_pad[(N - 1 - k):(N - 1 - k + len_x), (N - 1 - k):(N - 1 - k + len_y)]
d8[7] += c * A_pad[(N - 1 + k):(N - 1 + k + len_x), (N - 1 + k):(N - 1 + k + len_y)]
elif method == '2':
A_pad = np.pad(A, N - 2, mode='symmetric')
for k in range(N):
c = C[k]
d8[0] += c * A_pad[(N - 1 - k):(N - 1 - k + len_x), (N - 2):(N - 2 + len_y)]
d8[1] += c * A_pad[(N - 3 + k):(N - 3 + k + len_x), (N - 2):(N - 2 + len_y)]
d8[2] += c * A_pad[(N - 2):(N - 2 + len_x), (N - 1 - k):(N - 1 - k + len_y)]
d8[3] += c * A_pad[(N - 2):(N - 2 + len_x), (N - 3 + k):(N - 3 + k + len_y)]
d8[4] += c * A_pad[(N - 3 + k):(N - 3 + k + len_x), (N - 1 - k):(N - 1 - k + len_y)]
d8[5] += c * A_pad[(N - 1 - k):(N - 1 - k + len_x), (N - 3 + k):(N - 3 + k + len_y)]
d8[6] += c * A_pad[(N - 1 - k):(N - 1 - k + len_x), (N - 1 - k):(N - 1 - k + len_y)]
d8[7] += c * A_pad[(N - 3 + k):(N - 3 + k + len_x), (N - 3 + k):(N - 3 + k + len_y)]
return d8
def derivTotal(d8, mode='sum'):
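    '''
    Combine the eight directional diffintegrals returned by deriv8 into a
    single map. mode selects the reduction: 'sum' (plain sum), 'L1' (sum of
    absolute values), 'L2' (sum of squares) or 'max' (per-pixel maximum of
    absolute values).
    '''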
if mode == 'sum':
d_total = np.sum(d8, axis=0)
elif mode == 'L1':
d_total = np.sum(np.abs(d8), axis=0)
elif mode == 'L2':
d_total = np.sum(np.square(d8), axis=0)
elif mode == 'max':
d_total = np.max(np.abs(d8), axis=0)
    else:
        raise ValueError("mode must be one of 'sum', 'L1', 'L2' or 'max'")
    return d_total
```
|
{
"source": "jerryan999/character-recognition",
"score": 3
}
|
#### File: data/image_process/character_image.py
```python
import os
import logging
import cv2
import numpy as np
from image_process import preprocess
from captcha.constant import CAPTCHA_TO_CATEGORY, CATEGORY_TO_CAPTCHA, APPEARED_LETTERS
logger = logging.getLogger(__name__)
def load_data(folder):
    # prepare X and y for processing
    img_list = [i for i in os.listdir(folder) if i.endswith('jpg')]
    count = 0
    for img_index, img_name in enumerate(img_list):
        img_path = os.path.join(folder, img_name)
        img = preprocess.load_img(img_path)
        sub_imgs = preprocess.gen_sub_img(raw_img=img, sub_img_num=4)
        for sub_index, sub_img in enumerate(sub_imgs):
            sub_img_shaped = sub_img.reshape(1, 40, 40, 1)
            count += 1
            if count == 1:
                data = sub_img_shaped.copy()
                label = np.array([CAPTCHA_TO_CATEGORY[img_name[sub_index]]])
            else:
                data = np.vstack((data, sub_img_shaped))
                label = np.append(label, [CAPTCHA_TO_CATEGORY[img_name[sub_index]]], axis=0)
            if count % 100 == 0:
                logger.info("{} letters of captcha loaded".format(count))
    return data, label
def output_character_images(input_folder, output_folder):
    # Split captcha images into individual characters and save them to folders
    d, l = load_data(input_folder)
    # Create a folder for each character
    for ch in APPEARED_LETTERS:
        folder = '{}/{}'.format(output_folder, ch)
        if not os.path.exists(folder):
            os.mkdir(folder)
    # Write each character image into its target folder
    for n, i in enumerate(d):
        char = CATEGORY_TO_CAPTCHA[l[n]]
        cv2.imwrite('{}/{}/{}:{}.jpg'.format(output_folder, char, char, n), i)
if __name__ == '__main__':
    d, l = load_data('./samples')
    for ch in APPEARED_LETTERS:
        folder = 'chars/{}'.format(ch)
        if not os.path.exists(folder):
            os.mkdir(folder)
    for n, i in enumerate(d):
        char = CATEGORY_TO_CAPTCHA[l[n]]
        cv2.imwrite('chars/{}/{}:{}.jpg'.format(char, char, n), i)
```
|
{
"source": "jerryarciaga/manga2pdf",
"score": 3
}
|
#### File: jerryarciaga/manga2pdf/manga2pdf.py
```python
import requests
import img2pdf
import os
from PyPDF2 import PdfFileReader, PdfFileMerger
from tqdm import tqdm
#TODO Generate config file for these things
headers = {
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:96.0) Gecko/20100101 Firefox/96.0'
}
#TODO Specify temporary image directory
#TODO Specify default destination for saved content
# Download images, then convert to pdf
def download_pdf(url, filename):
img_filename = filename + ".jpg"
pdf_filename = filename + ".pdf"
params = {"v": "12"} # Specific to https://readm.org
with open(img_filename, "wb") as image:
r = requests.get(url, headers=headers, params=params)
for chunk in r.iter_content(chunk_size=128):
image.write(chunk)
with open(pdf_filename, "wb") as pdf:
pdf.write(img2pdf.convert(img_filename))
os.remove(img_filename)
# Uses download_pdf function to download a whole chapter
def download_chapter(manga_id, chapter, chapter_title):
base_url = f"https://www.readm.org/uploads/chapter_files/{manga_id}/{chapter}/"
pages = 1 # Count the number of pages in the chapter
while True:
url = base_url + f"{pages}.jpg"
test = requests.get(url, headers=headers)
if test.status_code != 200:
pages -= 1
break
else:
pages += 1
    merger = PdfFileMerger(strict=False)
for i in tqdm(range(pages), desc=f"Downloading {chapter_title}...",
ascii=False, ncols=75):
page = i + 1
url = base_url + f"{page}.jpg"
download_pdf(url, str(page))
with open(f"{page}.pdf", "rb") as current_file:
current_page = PdfFileReader(current_file, strict=False)
            merger.append(current_page)
os.remove(f"{page}.pdf")
    merger.write(f"{chapter_title}.pdf")
if __name__ == '__main__':
download_chapter(17427, 1, "Chapter 1")
download_chapter(17427, 2, "Chapter 2")
download_chapter(17427, 3, "Chapter 3")
```
|
{
"source": "jerryarciaga/TheHRClerk",
"score": 2
}
|
#### File: TheHRClerk/home/views.py
```python
from django.shortcuts import render
def home(request):
return render(request, 'home/home.html')
def help(request):
return render(request, 'home/help.html')
def about(request):
return render(request, 'home/about.html')
```
|
{
"source": "JerryB32/intref",
"score": 3
}
|
#### File: src/python/interactive_interflow.py
```python
import cmd
import os
import re
import shlex
import glob
import interflow
from pprint import pprint
### Getting Started ###
#
# Heres a small read me to help get you up and running with this code. Hopefully by reading though this you get a little bit of insight on why things were done they way they are.
# To start off, this interactive console app is based around the 'cmd' module in python. This is a great module since it deals with the tab completion and help methods for you
# however there are some querks to it. If you are looking at this code to try and understand interflow, I would recommend starting with the console app.
#
# The autocomplete method gives you 'line' and 'text' as variables. the do_* methods give you 'line' only. In the do_* methods (which run when you go to run a command),
# the line variable is simply the non command part of the input. example: `mycommand some user/input` would yield "some user/input" in the line variable
# However, in the complete_* method you will see that line will give you the WHOLE LINE! so above, if a user was to press tab, line would yield "mycommand some user/input"
# To make matters worse, the text variable (that the cmd module docs say to use) contains a subset of the user input. it seems to only return the last part of the user input
# it seems to split on spaces and slashes, in order to only auto complete the last part the user is typing. the problem is when we are trying to build a path, we need to know
# the whole part the user tpyed in...
#
# This only gets more frustrating when you realize that users could be typing in quoted paths, or paths that contain escaped spaces. (which would cause the text variable to
# split incorrectly. Furthermore it prevents us from using .split() to get parts of the command.
# The solution to this was to create a few helper functions. parsePathsFromFullCommandline(). It does what the name implies. it takes a full command line and parses out paths.
# The function is smart enough to detect escaped spaces and quoted strings... however as the name implies it expects the full command. in order to not rewrite it twice,
# you may see the function being used in do_* commands by simply appending the line variabe to the string of the command. There isnt anything magic happening... the command is
# junk data which simply mimics the input which is given duing the complete_* methods. Its not an optimal solution, but i figure it would keep the simplicity of this example app
# more so than creating multiple methods to essentially do the same thing.
#
#
# Other than that function, it all should be fairly strait forward. the interflow.py file (imported above) has helpful functions that wrap the API. each function returns a dictionary
# with the http response (key: 'response') and the data (key: 'data') returned. for more info on this, look at the comments in interflow.py
#
# you will also notice on most of the os.path.* calls, we do a replace afterwards to make \\ change to /. this is because linux (as well as the api) uses / but windows uses \\ and we
# want to keep this as cross platform as possible.
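# A minimal sketch of the line/text quirk described above (hypothetical
# command, kept separate from the real app and never instantiated):
class _LineTextDemo(cmd.Cmd):
    def do_mycommand(self, line):
        # here line == "some user/input" (the command name is stripped)
        pass
    def complete_mycommand(self, text, line, begidx, endidx):
        # here line == "mycommand some user/input" (the whole typed line)
        # and text == "input" (only the last fragment, split on spaces/slashes)
        return []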
class Interflow(cmd.Cmd):
"""A simple interactive interflow command line utility."""
#variables
apiKey = ""
validatedAPI = False
interflowPathDir = ""
#customizations:
#make empty lines do nothing (default repeats last command)
def emptyline(self):
pass
#add a startup message and change the prompt from (cmd) to a # (until api key is set)
intro = "Welcome to the Interflow interactive console application. Please set your api key to begin\nType help to view a list of avalible commands, and help + <command> to see command specific help\nMost commands support tab completion"
prompt = "#"
#helper methods:
def printSetAPI(self):
print("*** Please run the setAPI command first before running this command")
def parsePathsFromFullCommandline(self, line):
"""a function that parses paths out of the command line. this function knows to treat quoted strings and ones with escaped spaces as single paths
this function is also used a lot to determine how many arguments a user has entered as it knows how to treat spaces in paths.
        IMPORTANT: one weird thing in this code is that "line" in the complete_* methods is the full line typed, whereas in the do_* methods
        the variable line only holds the parameters being passed... so you will see this function in the do_* methods with "<command>"+line, whereas in the
        complete_* methods it will just be called with line. I wish the cmd module in Python was a bit more consistent with that behavior."""
pathsOnly = line.partition(" ")[2]
#shlex.split will remove trailing spaces... for instance "download abc/123.txt " becomes "abc/123.txt " from the partition,
#the space on the end will be removed with the shlex below. if a user presses tab after the space, they would assume to be completing the 2nd part of the command
#however since the space is removed the parsePathsFromFullCommandline function would only return ["abc/123.txt"] and not ["abc/123.txt",""]. thus we do this check, and add the empty
#path on the end if needed.
        #the check for pathsOnly == "" is to catch a space after the command ('cd '), and return the correct thing (as doing pathsOnly[-1] would throw an index out of range error)
if pathsOnly == "":
return []
if(pathsOnly[-1] == " "):
return shlex.split(line.partition(' ')[2])+[""]
else:
return shlex.split(line.partition(' ')[2])
def getPathFileParts(self, line, position):
"""returns the path and file fragment at an index in the line being typed. First user entered path is at index 0. used in tab completion (fileFragment will be
part of a file. Or can be used to break up a path a user entered for downloading a file. in this case fileFragment will be a full file name
        IMPORTANT: getPathFileParts is meant to be used in the auto completion part where line contains the command. This function can be used in do_* but you MUST
add some junk string with a space before the paths! (see the important note under parsePathsFromFullCommandline for more info)"""
currentPath = self.parsePathsFromFullCommandline(line)[position]
if currentPath == "":
return {"path": "", "fileFragment": ""}
        #if the path doesn't start with / then we are using a relative path and must add the current working directory to what is typed.
#never add on the current interflow path to any path not in the 1st position... as multi position commands (download and upload) use local paths in position 2 (index 1)
if currentPath[0] != "/" and position == 0:
currentPath = os.path.join(self.interflowPathDir,currentPath).replace("\\","/")
if currentPath[-1] == "/":
            #normpath takes off the trailing /. We don't want that...
normalizedCurrentPath = os.path.normpath(currentPath).replace("\\","/")
normalizedCurrentPath = normalizedCurrentPath + "/"
path,fileFragment = os.path.split(normalizedCurrentPath)
else:
path,fileFragment = os.path.split(os.path.normpath(currentPath).replace("\\","/"))
#when using os.path.normpath(), an empty path will return '.' or '/'
#for example: os.path.normpath("test/..") will return .
#for example: os.path.normpath("/test/..") will return /
#we want path to equal "" in both of these cases... so do a quick check
if path == "." or path == "/":
path = ""
        #same kind of thing with file fragments. This _shouldn't_ happen under normal usage, but this check stops potential unwanted behavior
if fileFragment == ".." or fileFragment == ".":
fileFragment = ""
        #lastly, get rid of any / at the beginning of path. This makes the different auto complete behavior on Windows work, as well as making sure absolute paths don't
        #contain the / at the beginning when sending it in an API call.
path = path.lstrip("/")
return {"path": path, "fileFragment": fileFragment}
def fixWindowsAutoComplete(self,path, completionResult):
"""there seems to be a bug in pyreadline where the autocompletion return values overwrite what was written instead of appending. this is a small
helper function to return the full path in windows so that the returned value is correct"""
if os.name == "nt":
if not path:
path = "/"
return [os.path.join(path,result).replace("\\","/") for result in completionResult]
else:
return completionResult
#########################################################
#### BEGIN CMD METHODS #################################
#######################################################
#Exit methods
def do_EOF(self, line):
"""Called when ctrl-D is pressed / closes out of the interactive application."""
return True
def do_exit(self, line):
"""method that ends interactive mode"""
return True
def help_exit(self):
print('\n\t'.join(["exit",
"Exits the interactive program"]))
#Set API key methods
def do_setAPI(self, line):
"""Sets the api key internally for future commands."""
if len(line.split()) != 1:
print("There were too many arguments entered for setAPI\n")
self.help_setAPI()
return
result = interflow.getValidation(line)
if result["response"] == 200:
if result["data"]["IsAuthenticated"] == True:
print("Welcome "+result["data"]["Name"]+". Your api key is valid and ready to use.")
#now that the user has validated their api key, change the prompt to reflect that.
self.apiKey = line
self.prompt = "/>"
self.validatedAPI = True
else:
print("Warning: You don't appear to be an authenticated user but your API key is valid. You may need to log in to the API portal and reset your password.")
else:
print("*** The API key entered does not seem to be valid. Authentication check failed!")
def help_setAPI(self):
print('\n\t'.join(["setAPI [key]",
"Sets the API key to be used in future commands.",
"After calling, a check will be done to verify the api key was entered correctly"]))
#list directory commands and aliases
def do_ls(self, line):
"""Lists out all files and directories in the current path. equivalent to dir"""
if not self.validatedAPI:
self.printSetAPI()
return
if len(line) == 0:
path=self.interflowPathDir
else:
path = self.getPathFileParts("ls "+line,0)["path"]
#first get all directories from where the user is.
results = interflow.getInterflowDirs(self.apiKey, path)
if results["response"] == 200:
directories = results["data"]
else:
print("*** Directory does not exist")
return
#next get all files from where the user is.
#There are two end points that serve files. Download_ListFiles and File_ListSharedFiles. you have to download these files with different endpoints as well
#If we got it from Download_ListFiles, we download with Download_File.
#If its a file from File_ListSharedFiles it is downloaded with File_Download.
#
#We only show the shared files in the root. so below is a check for if we are in the root, and if so we call the shared files instead. This check is also done when downloading
#the files, and if they are downloading a file in the root, we call the file_download endpoint instead.
if path == "":
results = interflow.getInterflowSharedFileNamesOnly(self.apiKey)
else:
results = interflow.getInterflowFileNamesOnly(self.apiKey,path)
if results["response"] == 200:
files = results["data"]
#now list out the results we found.
print("Directories:")
print("---------------------")
print("\n".join(directories))
print("\n")
print("Files:")
print("---------------------")
print("\n".join(files))
def do_dir(self, line):
"""Lists out all files and directories in the current path. equivalent to ls"""
self.do_ls(line)
def complete_ls(self, text, line, begidx, endidx):
"""function to complete the ls command. should return the posible directories (same as complete_cd, so this just calls complete_cd"""
return self.complete_cd(text, line, begidx, endidx)
def complete_dir(self, text, line, begidx, endidx):
"""function to complete the dir command. should return the posible directories (same as complete_cd, so this just calls complete_cd"""
return self.complete_cd(text, line, begidx, endidx)
def help_ls(self):
print('\n\t'.join(["ls <dir>",
"Lists the contents of dir",
"if dir not defined, lists the contents of the current directory",
"alias - dir"]))
def help_dir(self):
print('\n\t'.join(["dir <dir>",
"Lists the contents of dir",
"if dir not defined, lists the contents of the current directory",
"alias - ls"]))
#Change directory (cd)
def do_cd(self, line):
"""function to do the directory changing. handels paths with .. in them, root referenced paths, and local paths"""
if not self.validatedAPI:
self.printSetAPI()
return
#if the user just types cd take them back to the root
if len(line) == 0:
self.prompt = "/>"
self.interflowPathDir = ""
return
#if the user just types "cd /". interflow doesnt return the root if you send / as a directory (it returns 404). We know it exists. so change the path internally and return
if line.strip() == "/":
self.prompt = "/>"
self.interflowPathDir = ""
return
path = self.getPathFileParts("cd "+line,0)["path"]
fileFragment = self.getPathFileParts("cd "+line,0)["fileFragment"]
path = os.path.join(path,fileFragment).replace("\\","/")
results = interflow.getInterflowDirs(self.apiKey, path)
if results["response"] == 200:
#we know the folder exists now
self.interflowPathDir = path
self.prompt = "/"+path+">"
else:
print("*** Unable to locate directory - please check your spelling and try again")
    #the cmd module will autocomplete "cd" but won't know what to do if someone presses tab while typing a path.
#this method will take the current path and try to find matches and return them to the user. so fancy :)
def complete_cd(self, text, line, begidx, endidx):
"""function to assist autocompletion. also called by complete_ls and complete_dir"""
#user presses <tab><tab> with nothing typed. return all directories in the current path
if len(self.parsePathsFromFullCommandline(line)) == 0:
pathData = {"path" : self.interflowPathDir, "fileFragment": ""}
else:
pathData = self.getPathFileParts(line,0)
results = interflow.getInterflowDirs(self.apiKey, pathData["path"])
if results["response"] == 200:
#we know the folder exists, now find all substring matches
matches = [found for found in results["data"] if found.startswith(pathData["fileFragment"])]
return self.fixWindowsAutoComplete(pathData["path"],matches)
else:
return []
def help_cd(self):
print('\n\t'.join(["cd <dir>",
"Changes current directory to <dir>",
"If dir not defined, then the current directory will change to /"]))
#TODO: add support so * can download all in directory
#download file
def do_download(self, line):
if not self.validatedAPI:
self.printSetAPI()
return
#use this instead of split as some paths might have spaces. this function expects the full command line so just add the command back on (as line is only user input)
if len(self.parsePathsFromFullCommandline("download "+ line))!= 2:
self.help_download()
return
path = self.getPathFileParts("download "+line,0)["path"]
file = self.getPathFileParts("download "+line,0)["fileFragment"]
        #get the output file parts and glue them together
outputFilePath = os.path.join(self.getPathFileParts("download "+line, 1)["path"],self.getPathFileParts("download "+line, 1)["fileFragment"]).replace("\\","/")
        #check if the path is not "" and if it starts with a slash. The length check is to avoid index out of bounds errors.
if len(path) and path[0] == "/":
#internally paths do not start with a / (ex: "test/test.txt". not "/test/test.txt) so we need to strip the left most slash
#then normalize to remove ..'s, then split into constituents
path = path[1:]
#There are two end points that serve files. Download_ListFiles and File_ListSharedFiles. you have to download these files with different endpoints as well
#If we got it from Download_ListFiles, we download with Download_File.
#If its a file from File_ListSharedFiles it is downloaded with File_Download.
#
#We only show the shared files in the root. so below is a check for if we are in the root, and if so we call the shared files endpoint instead when downloading.
if path == "":
results = interflow.downloadInterflowSharedFile(self.apiKey, file)
else:
results = interflow.downloadInterflowFile(self.apiKey, path, file)
if results["response"] == 200:
try:
if os.path.isfile(outputFilePath):
print("*** The file already exists. Do you want to overwrite it? (y/N)")
                    overwriteInput = input()
if not any(character in overwriteInput for character in ("y","Y")):
return
            with open(outputFilePath, 'wb') as outputFile:
                outputFile.write(results["data"])
except Exception as e:
print(e)
else:
print("*** Cannot open file for reading. Please check the file name given for download")
def complete_download(self, text, line, begidx, endidx):
"""a method to auto complete files and directories for download"""
#complete_download is a bit different as there are two arguments it takes. the first is a remote file, and is completed though api (similar to cd and ls).
#the difference being that it needs to return files and directories.
#The second part of the command is a local file or directory, and thus needs to be completed by looking at the local filesystem.
#part 1: this handles the remote interflow file
if len(self.parsePathsFromFullCommandline(line)) <= 1:
if len(self.parsePathsFromFullCommandline(line)) == 0:
pathData = {"path" : self.interflowPathDir, "fileFragment": ""}
else:
pathData = self.getPathFileParts(line,0)
results = interflow.getInterflowDirs(self.apiKey, pathData["path"])
if results["response"] == 200:
#we know the folder exists, now find all substring matches
dirResults = [found for found in results["data"] if found.startswith(pathData["fileFragment"])]
else:
dirResults = []
#try to find files to suggest
            #like in the do_download and do_ls commands, the root contains special files shared through another endpoint. Likewise, we do a check here to handle getting these files from
#the root directory!
if pathData["path"] == "":
results = interflow.getInterflowSharedFileNamesOnly(self.apiKey)
else:
results = interflow.getInterflowFileNamesOnly(self.apiKey, pathData["path"])
if results["response"] == 200:
#we know the folder with files exists, now find all substring matches
fileResults = [file for file in results["data"] if file.startswith(pathData["fileFragment"])]
else:
fileResults = []
return self.fixWindowsAutoComplete(pathData["path"],dirResults+fileResults)
#part 2: completing on the local file system
elif len(self.parsePathsFromFullCommandline(line)) == 2:
systemPath = os.path.join(self.getPathFileParts(line,1)["path"], self.getPathFileParts(line,1)["fileFragment"])
if ".." not in systemPath:
return [file for file in glob.glob(systemPath+"*/")]
def help_download(self):
print('\n\t'.join(["download [remote file] [output file]",
"Downloads the contents of the remote file specified, and writes to [output file]",
"Remote file can be an absolute path, or in your current directory",
"",
"Remote file supports full tab completion",
"Local file supports tab completion for folders only, and will only look in the current directory forward."]))
#upload file
def do_upload(self, line):
if not self.validatedAPI:
self.printSetAPI()
return
#use this instead of split as some paths might have spaces. this function expects the full command line so just add the command back on (as line is only user input)
if len(self.parsePathsFromFullCommandline("upload "+ line))!= 2:
self.help_upload()
return
#handle generic uploads
if self.parsePathsFromFullCommandline("upload "+line)[0] == "generic":
status = interflow.UploadInterflowGenericFile(self.apiKey, self.parsePathsFromFullCommandline("upload "+line)[1])
if status == 202:
print "The file upload sucessful"
else:
print "Error uploading file - " + str(status)
#handle indicator uploads
elif self.parsePathsFromFullCommandline("upload "+line)[0] == "indicator":
with open(self.parsePathsFromFullCommandline("upload "+line)[1]) as file:
status = interflow.UploadInterflowOneIndicator(self.apiKey, file.read())
if status == 201:
print "The OneIndicator upload sucessful"
else:
print "Error uploading OneIndicator - " + str(status)
else:
self.help_upload()
return
def complete_upload(self, text, line, begidx, endidx):
#handle the tab completion for "generic" and "indicator"
if len(self.parsePathsFromFullCommandline(line)) <= 1:
#handle tab press with nothing typed
if len(self.parsePathsFromFullCommandline(line)) == 0:
return ["generic","indicator"]
            #get what the user has typed. There shouldn't be any escaped spaces so a split could be used, but use this to keep with what is historically used.
            fileOption = self.getPathFileParts(line,0)["fileFragment"]
            matches = [found for found in ["generic","indicator"] if found.startswith(fileOption)]
return matches
#handle the tab completion for local files
elif len(self.parsePathsFromFullCommandline(line)) == 2:
#get the path the user has typed up to now
Path = self.parsePathsFromFullCommandline(line)[1]
if Path == "." or Path == "/":
Path = ""
results = []
for file in glob.glob(Path+"*"):
if os.path.isdir(file):
results.append((os.path.split(file)[1]+"/").replace(" ","\\ "))
else:
results.append(os.path.split(file)[1].replace(" ","\\ "))
return results
def help_upload(self):
print('\n\t'.join(["upload <generic|indicator> [file path]",
"Uploads either a generic file or indicator to interflow.",
"[file path] can be an absolute path, or in your current directory",
"",
"Example usage:",
" 'upload generic somefile.txt' - uploads the generic file somefile.txt",
" 'upload indicator someindicator.json' - uploads the OneIndicator stored in someindicator.json",
"",
"Upload supports full tab completion"]))
#program entry point
if __name__ == '__main__':
try:
import readline
    except ImportError:
        print("#====================================================================================#")
        print("# Welcome windows user. This program features tab completion, however to enable this #")
        print("# you must download the pyreadline library. #")
        print("# #")
        print("# To do this, first install pip if you do not have it installed: #")
        print("# https://pip.pypa.io/en/stable/installing/ #")
        print("# Next, from a cmd window: #")
        print("# cd c:/Python27/scripts #")
        print("# pip install pyreadline #")
        print("# #")
        print("# If you do not care about this feature, you may simply press a key and use the #")
        print("# program like normal, however navigating folders may be significantly more #")
        print("# difficult. #")
        print("#====================================================================================#")
        if os.name == "nt":
            import msvcrt as m
            m.getch()
            os.system("cls")
        else:
            input("\n\n\tCannot find a suitable readline library. Please install pyreadline through pip.\nThis program will still run, but tab completion will not be supported.\nPlease press enter to continue")
            os.system("clear")  # probably on a mac that doesn't have the right version of the readline library
try:
import requests
    except ImportError:
        print("ERROR! This program requires the 'requests' library which is missing from your system, please install this then re-run the program.")
        print("\tThis can be done with the following command: 'pip install requests'. For more information, please read the getting started documentation provided with this software!")
exit()
Interflow().cmdloop()
```
|
{
"source": "JerryBalan/DiscordBearBot",
"score": 3
}
|
#### File: JerryBalan/DiscordBearBot/BearBot.py
```python
import discord
import praw
redditCredentials = [line.rstrip('\n') for line in open('reddit.token')]
discordCredentials = [line.rstrip('\n') for line in open('discord.token')]
client = discord.Client()
reddit = praw.Reddit(client_id = redditCredentials[0],
client_secret = redditCredentials[1],
username = redditCredentials[2],
                     password = redditCredentials[3],
user_agent = redditCredentials[4])
token = discordCredentials[0]
@client.event
async def on_ready():
print(f"We have logged in as {client.user}")
@client.event
async def on_message(message):
    if message.author == client.user:
        return  # ignore the bot's own messages to avoid reply loops
    raw_msg = message.content.lower()
    split_msg = raw_msg.split()
    if split_msg and split_msg[0] == "!m":  # guard against empty messages
if len(split_msg) > 1:
if split_msg[1] == "hi":
await message.channel.send(f"Hello fellow gamers.")
elif split_msg[1] == "load":
if len(split_msg) > 2:
urls = f""
subreddit = reddit.subreddit(split_msg[2])
hot_subreddit = subreddit.hot(limit=10)
for submission in hot_subreddit:
if not submission.stickied:
urls += submission.url + "\n"
await message.channel.send(urls)
else:
await message.channel.send(f'You look like you need some help. Consult Jerry.')
else:
await message.channel.send(f"You look like you need some help. Consult Jerry.")
else:
await message.channel.send(f"You look like you need some help. Consult Jerry.")
client.run(token)
```
|
{
"source": "JerryBeGood/Cryptofolio",
"score": 2
}
|
#### File: JerryBeGood/Cryptofolio/app.py
```python
import sys
from cryptofolio import app
def create_app(config_file, app_mode):
app.config.from_pyfile(config_file)
if app_mode == '--real':
app.config['BINANCE'] = 'https://api.binance.com'
app.config['BYBIT'] = 'https://api.bybit.com'
else:
app.config['BINANCE'] = 'https://testnet.binance.vision'
app.config['BYBIT'] = 'https://api-testnet.bybit.com'
return app
if __name__ == "__main__":
create_app(*sys.argv[1:]).run()
```
#### File: resolvers/binance/orders_utility.py
```python
import requests
from cryptofolio import app
from .cache import update_binance_order_info, BINANCE_EXCHANGE_INFO
def make_order(params, api_key):
payload = {}
update_binance_order_info(params['symbol'])
with requests.post(f'{app.config.get("BINANCE")}/api/v3/order',
params=params,
headers={
'X-MBX-APIKEY': api_key,
'content-type': 'application/x-www-form-urlencoded'
}) as response:
response_json = response.json()
if response.status_code != 200:
payload['success'] = False
payload['code'] = response_json['code']
payload['msg'] = describe_order_error(params['symbol'], response_json['msg'])
else:
payload['success'] = True
payload['status'] = response_json['status']
return payload
def describe_order_error(symbol, error):
    error = error[16:]  # strip the leading 'Filter failure: ' prefix
symbol = BINANCE_EXCHANGE_INFO[symbol]
if error == 'PRICE_FILTER':
high = float(symbol['filters'][0]['maxPrice'])
low = float(symbol['filters'][0]['minPrice'])
        return f'Price for this symbol must be between {low:g} and {high:g}'
elif error == 'PERCENT_PRICE':
minutes = symbol['filters'][1]['avgPriceMins']
return f'Price is too low or too high from the average weighted price over the last {minutes} minutes'
elif error == 'LOT_SIZE':
high = float(symbol['filters'][2]['maxQty'])
low = float(symbol['filters'][2]['minQty'])
        return f'Quantity for this symbol must be between {low:g} and {high:g}'
elif error == 'MIN_NOTIONAL':
min_order_value = float(symbol['filters'][3]['minNotional'])
return f'Value of the order must be greater than {min_order_value:g}'
elif error == 'MAX_NUM_ORDERS':
return 'Account has too many open orders on the symbol'
return error
def prepare_stop_loss_order_request_body(order, timestamp):
request_body = ''
if 'icebergQty' in order.keys():
        request_body = f'symbol={order["symbol"]}&side={order["side"]}&type=STOP_LOSS_LIMIT&icebergQty={order["icebergQty"]}&quantity={order["quantity"]}&timeInForce={order["timeInForce"]}&price={order["price"]}&stopPrice={order["stopPrice"]}&newOrderRespType=RESULT&timestamp={timestamp}'
else:
        request_body = f'symbol={order["symbol"]}&side={order["side"]}&type=STOP_LOSS_LIMIT&quantity={order["quantity"]}&timeInForce={order["timeInForce"]}&price={order["price"]}&stopPrice={order["stopPrice"]}&newOrderRespType=RESULT&timestamp={timestamp}'
return request_body
def prepare_stop_loss_order_params(order, timestamp):
params = {}
params['symbol'] = order["symbol"]
params['side'] = order["side"]
params['type'] = 'STOP_LOSS_LIMIT'
params['quantity'] = order['quantity']
params['timeInForce'] = order['timeInForce']
params['price'] = order['price']
params['stopPrice'] = order['stopPrice']
params['newOrderRespType'] = 'RESULT'
if 'icebergQty' in order.keys():
params['icebergQty'] = order['icebergQty']
params['timestamp'] = timestamp
return params
def prepare_spot_market_order_request_body(order, timestamp):
request_body = ''
if order['base'] is True:
        request_body = f'symbol={order["symbol"]}&side={order["side"]}&type=MARKET&quantity={order["quantity"]}&timestamp={timestamp}'
else:
        request_body = f'symbol={order["symbol"]}&side={order["side"]}&type=MARKET&quoteOrderQty={order["quantity"]}&timestamp={timestamp}'
return request_body
def prepare_spot_market_order_params(order, timestamp):
params = {
'symbol': order["symbol"],
'side': order['side'],
'type': 'MARKET',
}
if order['base'] is True:
params['quantity'] = order['quantity']
else:
params['quoteOrderQty'] = order['quantity']
params['timestamp'] = timestamp
return params
def prepare_spot_market_limit_order_params(order, timestamp):
params = {}
params['symbol'] = order["symbol"]
params['side'] = order["side"]
params['type'] = 'LIMIT'
if 'icebergQty' in order.keys():
params['icebergQty'] = order['icebergQty']
params['quantity'] = order['quantity']
params['timeInForce'] = order['timeInForce'] if 'timeInForce' in order.keys(
) else 'GTC'
params['price'] = order['price']
params['timestamp'] = timestamp
return params
def prepare_spot_market_limit_order_request_body(order, timestamp):
request_body = ''
timeInForce = order['timeInForce'] if 'timeInForce' in order.keys(
) else 'GTC'
if 'icebergQty' in order.keys():
        request_body = f'symbol={order["symbol"]}&side={order["side"]}&type=LIMIT&icebergQty={order["icebergQty"]}&quantity={order["quantity"]}&timeInForce={timeInForce}&price={order["price"]}&timestamp={timestamp}'
else:
        request_body = f'symbol={order["symbol"]}&side={order["side"]}&type=LIMIT&quantity={order["quantity"]}&timeInForce={timeInForce}&price={order["price"]}&timestamp={timestamp}'
return request_body
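# Example shape of a request body built above (hypothetical values):
#   symbol=BTCUSDT&side=BUY&type=LIMIT&quantity=1&timeInForce=GTC&price=100&timestamp=1600000000000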
```
#### File: cryptofolio/resolvers/shared_utilities.py
```python
import jwt
import requests
import datetime
from cryptography.fernet import Fernet
from cryptofolio import app
from cryptofolio.models import Exchange
def validate_token(authToken):
try:
jwt_claims = jwt.decode(
jwt=authToken,
key=app.config.get('SECRET_KEY'),
algorithms=['HS256'],
options={
'verify_signature': True,
'require': ['exp', 'iat', 'iss'],
'verify_exp': True,
'verify_iat': True
}
)
    except jwt.exceptions.PyJWTError as error:
        # DecodeError, ExpiredSignatureError, InvalidIssuedAtError, InvalidKeyError,
        # InvalidAlgorithmError and MissingRequiredClaimError all subclass
        # PyJWTError, so a single handler covers every decode failure.
        return False, str(error)
else:
return True, jwt_claims
def fetch_exchange_credentials(token_claims, exchange):
exchange_credentials = Exchange.query.filter_by(
user_id=token_claims['iss']).filter_by(exchange=exchange).first()
if not exchange_credentials:
return False, f"{exchange} credentials doesn't exist for this account"
try:
cipher_suite = Fernet(app.config.get('EXCHANGE_SECRET_KEY'))
API_key = cipher_suite.decrypt(
exchange_credentials.api_key).decode('UTF-8')
secret = cipher_suite.decrypt(
exchange_credentials.secret).decode('UTF-8')
except Exception as error:
print(str(error))
return False, 'Decryption error'
else:
return True, API_key, secret
def prepare_bybit_exchange_info():
payload = {}
with requests.get(
f'{app.config.get("BYBIT")}/spot/v1/symbols') as response:
response_json = response.json()
for pair in response_json['result']:
payload[pair['name']] = pair
return payload
def prepare_bybit_asset_ticker_info():
payload = {}
with requests.get(
f'{app.config.get("BYBIT")}/spot/quote/v1/ticker/24hr') as response:
response_json = response.json()
if response_json['ret_code'] == 0:
for item in response_json['result']:
asset = {}
asset['price'] = item['bestAskPrice']
payload[item['symbol']] = asset
else:
payload = {'Msg': 'Asset ticker info error'}
return payload
def prepare_binance_exchange_info():
payload = {}
with requests.get(
'https://api1.binance.com/api/v3/exchangeInfo') as response:
response_json = response.json()
for pair in response_json['symbols']:
payload[pair['symbol']] = pair
return payload
def prepare_binance_asset_ticker_info():
payload = {}
with requests.get(
f'{app.config.get("BINANCE")}/api/v3/ticker/24hr') as response:
response_json = response.json()
for pair in response_json:
payload[pair['symbol']] = {
'symbol': pair['symbol'],
'priceChange': pair['priceChange'],
'priceChangePercent': pair['priceChangePercent'],
'price': pair['weightedAvgPrice']
}
return payload
def prepare_start_time():
startTime = datetime.datetime.now() - datetime.timedelta(days=7)
startTime = int(startTime.timestamp() * 1000)
return startTime
```
#### File: resolvers/user_account/user_account_utility.py
```python
import datetime
import jwt
from cryptofolio import app, db
from cryptofolio.resolvers.binance import validate_binance_credentials
from cryptofolio.resolvers.bybit.utility import validate_bybit_credentials
from cryptofolio.models import Code
def validate_exchange_credentials(API_key, secret, exchange):
if exchange == 'binance':
return validate_binance_credentials(API_key, secret)
elif exchange == 'bybit':
return validate_bybit_credentials(API_key, secret)
def generate_auth_token(user):
try:
payload = {
'exp': datetime.datetime.utcnow() + datetime.timedelta(days=0, minutes=60),
'iat': datetime.datetime.utcnow(),
'iss': user.id,
}
return True, jwt.encode(
payload,
app.config.get('SECRET_KEY'),
algorithm='HS256'
)
except Exception as e:
return False, e
def code_auth(user, type, code):
the_code = Code.query.filter_by(code=code).filter_by(
user_id=user.id).filter_by(type=type).first()
if not the_code:
        return False, f'Wrong {type} code'
elif the_code.timestamp - int(datetime.datetime.utcnow().timestamp()) < -300000:
db.session.delete(the_code)
db.session.commit()
return False, f'{type} code overdue'
else:
db.session.delete(the_code)
db.session.commit()
return True, 'Ok'
```
|
{
"source": "jerry-belaston/gopro-lib-node.gl",
"score": 2
}
|
#### File: gopro-lib-node.gl/tests/anim.py
```python
import itertools
import random
import pynodegl as ngl
from pynodegl_utils.tests.cmp_floats import test_floats
def _easing_split(easing):
name_split = easing.split(':')
easing_name = name_split[0]
args = [float(x) for x in name_split[1:]] if len(name_split) > 1 else None
return easing_name, args
def _easing_join(easing, args):
return easing if not args else easing + ':' + ':'.join('%g' % x for x in args)
_easing_specs = (
('linear', 0),
('quadratic', 3),
('cubic', 3),
('quartic', 3),
('quintic', 3),
('power:7.3', 3),
('sinus', 3),
('exp', 3),
('circular', 3),
('bounce', 1),
('elastic', 1),
('back', 3),
)
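# The integer paired with each easing name is a bitmask consumed by
# _get_easing_list() below: bit 0 adds the _in/_out variants, bit 1 adds the
# _in_out/_out_in variants, and 0 keeps only the bare name (e.g. 'linear').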
def _get_easing_list():
easings = []
for col, (easing, flags) in enumerate(_easing_specs):
versions = []
if flags & 1:
versions += ['_in', '_out']
if flags & 2:
versions += ['_in_out', '_out_in']
if not flags:
versions = ['']
for version in versions:
base_name, args = _easing_split(easing)
easing_name = _easing_join(base_name + version, args)
easings.append(easing_name)
return easings
_offsets = (None, (0.0, 0.7), (0.3, 1.0), (0.3, 0.7))
_easing_list = _get_easing_list()
@test_floats()
def anim_forward_api(nb_points=7):
scale = 1. / float(nb_points)
ret = []
times = [i * scale for i in range(nb_points + 1)]
for easing in _easing_list:
easing_name, easing_args = _easing_split(easing)
for offsets in _offsets:
values = [ngl.easing_evaluate(easing_name, t, easing_args, offsets) for t in times]
ret.append([easing_name] + values)
return ret
@test_floats()
def anim_resolution_api(nb_points=7):
scale = 1. / float(nb_points)
ret = []
times = [i * scale for i in range(nb_points + 1)]
for easing in _easing_list:
easing_name, easing_args = _easing_split(easing)
for offsets in _offsets:
try:
values = [ngl.easing_solve(easing_name, t, easing_args, offsets) for t in times]
            except Exception:
pass
else:
ret.append([easing_name] + values)
return ret
def _get_anim_func(size, animated_type, kf_func):
@test_floats()
def test_func():
offsets = ((None, None), (None, 0.7), (0.3, None), (0.3, 0.7))
nb_kf = len(_easing_specs) + 1
nb_queries = nb_kf - 1
scale = 1. / float(nb_kf)
random.seed(0)
kfvalues = [[random.uniform(0, 1) for r in range(size)] for i in range(nb_kf + 1)]
ret = []
for i, (easing_start_offset, easing_end_offset) in enumerate(offsets):
anim_kf = [kf_func(0, kfvalues[0])]
for j in range(nb_kf):
t = (j + 1) * scale
v = kfvalues[j + 1]
easing_name, easing_args = _easing_split(_easing_list[j])
anim_kf.append(kf_func(t, v,
easing=easing_name,
easing_args=easing_args,
easing_start_offset=easing_start_offset,
easing_end_offset=easing_end_offset))
anim = animated_type(anim_kf)
# Query between times
values = [anim.evaluate((t_id + 1) * scale) for t_id in range(nb_queries)]
# Query boundaries and out of them (to trigger a copy instead of a mix)
values += [anim.evaluate(0)]
values += [anim.evaluate(1)]
values += [anim.evaluate(5)]
if hasattr(values[0], '__iter__'):
values = list(itertools.chain(*values))
ret.append(['off%d' % i] + values)
return ret
return test_func
_float_kf_func = lambda t, v, **kw: ngl.AnimKeyFrameFloat(t, v[0], **kw)
_vec2_kf_func = lambda t, v, **kw: ngl.AnimKeyFrameVec2(t, v, **kw)
_vec3_kf_func = lambda t, v, **kw: ngl.AnimKeyFrameVec3(t, v, **kw)
_vec4_kf_func = lambda t, v, **kw: ngl.AnimKeyFrameVec4(t, v, **kw)
_quat_kf_func = lambda t, v, **kw: ngl.AnimKeyFrameQuat(t, v, **kw)
anim_forward_float = _get_anim_func(1, ngl.AnimatedFloat, _float_kf_func)
anim_forward_vec2 = _get_anim_func(2, ngl.AnimatedVec2, _vec2_kf_func)
anim_forward_vec3 = _get_anim_func(3, ngl.AnimatedVec3, _vec3_kf_func)
anim_forward_vec4 = _get_anim_func(4, ngl.AnimatedVec4, _vec4_kf_func)
anim_forward_quat = _get_anim_func(4, ngl.AnimatedQuat, _quat_kf_func)
```
#### File: gopro-lib-node.gl/tests/text.py
```python
import array
import pynodegl as ngl
from pynodegl_utils.misc import scene
from pynodegl_utils.toolbox.colors import COLORS
from pynodegl_utils.tests.cmp_fingerprint import test_fingerprint
@test_fingerprint(tolerance=1)
@scene()
def text_0_to_127(cfg):
s = ''
for y in range(8):
for x in range(16):
c = y << 4 | x
s += chr(c) if c else ' '
s += '\n'
return ngl.Text(s)
def _text(**params):
return ngl.Text('This\nis\nnode.gl', font_scale=0.7, padding=8, **params)
@test_fingerprint(tolerance=1)
@scene()
def text_colors(cfg):
return _text(fg_color=COLORS['rose'], bg_color=COLORS['cgreen'])
@test_fingerprint(tolerance=1)
@scene()
def text_align_cc(cfg):
return _text(valign="center", halign="center")
@test_fingerprint(tolerance=1)
@scene()
def text_align_cr(cfg):
return _text(valign="center", halign="right")
@test_fingerprint(tolerance=1)
@scene()
def text_align_cl(cfg):
return _text(valign="center", halign="left")
@test_fingerprint(tolerance=1)
@scene()
def text_align_bc(cfg):
return _text(valign="bottom", halign="center")
@test_fingerprint(tolerance=1)
@scene()
def text_align_br(cfg):
return _text(valign="bottom", halign="right")
@test_fingerprint(tolerance=1)
@scene()
def text_align_bl(cfg):
return _text(valign="bottom", halign="left")
@test_fingerprint(tolerance=1)
@scene()
def text_align_tc(cfg):
return _text(valign="top", halign="center")
@test_fingerprint(tolerance=1)
@scene()
def text_align_tr(cfg):
return _text(valign="top", halign="right")
@test_fingerprint(tolerance=1)
@scene()
def text_align_tl(cfg):
return _text(valign="top", halign="left")
```
|
{
"source": "jerrybelmonte/Algorithms-Python",
"score": 4
}
|
#### File: jerrybelmonte/Algorithms-Python/closest.py
```python
import sys
from math import sqrt, ceil
from collections import namedtuple
Point = namedtuple('Point', 'x y')
def euclidian_distance(x1: int, y1: int, x2: int, y2: int):
return sqrt(((x1 - x2) ** 2) + ((y1 - y2) ** 2))
def naive_min_distance(pts: list):
num_pts = len(pts)
if num_pts <= 2: # base case
return euclidian_distance(*pts[0], *pts[1])
min_distance = float(sys.maxsize)
for i in range(num_pts):
for j in range(i + 1, num_pts):
distance = euclidian_distance(*pts[i], *pts[j])
if distance <= min_distance:
min_distance = distance
return min_distance
def minimum_distance(x_coord: list, y_coord: list):
"""
Computes the minimum distance between the points.
:param x_coord: list of x coordinates of the points
:param y_coord: list of y coordinates of the points
:return: the minimum distance
>>> minimum_distance([0, 3], [0, 4])
5.0
>>> minimum_distance([7, 1, 7], [7, 100, 7])
0.0
>>> minimum_distance([7, 1, 4, 7], [7, 100, 8, 7])
0.0
>>> minimum_distance([0, 5, 3, 7], [0, 6, 4, 2])
2.8284271247461903
>>> minimum_distance([4, -2, -3, -1, 2, -4, 1, -1, 3, -4, -2], [4, -2, -4, 3, 3, 0, 1, -1, -1, 2, 4])
1.4142135623730951
"""
num_pts = len(x_coord)
if num_pts <= 2: # base case
return euclidian_distance(x_coord[0], y_coord[0],
x_coord[1], y_coord[1])
points = [Point(x_coord[i], y_coord[i]) for i in range(num_pts)]
x_points = sorted(points, key=lambda point: (point.x, point.y))
y_points = sorted(points, key=lambda point: (point.y, point.x))
return recursive_min_distance(x_points, y_points)
def recursive_min_distance(x_pts, y_pts):
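    # Divide and conquer: split the points at the median x, solve each half
    # recursively, then scan the strip around the dividing line for a closer
    # pair that straddles the two halves.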
num_pts = len(x_pts)
if num_pts <= 3:
return naive_min_distance(x_pts)
mid = ceil(num_pts/2)
x_left, x_right = x_pts[:mid], x_pts[mid:]
y_left, y_right = [], []
xl_set = set(x_left)
for pt in y_pts:
if pt in xl_set:
y_left.append(pt)
else:
y_right.append(pt)
delta_left = recursive_min_distance(x_left, y_left)
delta_right = recursive_min_distance(x_right, y_right)
delta = min(delta_left, delta_right)
delta_strip = strip_min_distance(x_pts, y_pts, delta)
return min(delta, delta_strip)
def strip_min_distance(x_pts, y_pts, delta):
num = len(x_pts)
mid = x_pts[num//2].x
strip = [pt for pt in y_pts if mid - delta <= pt.x <= mid + delta]
min_delta = delta
num = len(strip)
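    # Classic packing argument: among strip points sorted by y, any pair closer
    # than delta must be within a few positions of each other, so comparing
    # each point against only the next six indices is sufficient.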
for i in range(num - 1):
for j in range(i + 1, min(i + 7, num)):
distance = euclidian_distance(*strip[i], *strip[j])
if distance <= min_delta:
min_delta = distance
return min_delta
if __name__ == '__main__':
data = list(map(int, sys.stdin.read().split()))
n = data[0]
x = data[1::2]
y = data[2::2]
print("{0:.9f}".format(minimum_distance(x, y)))
```
#### File: jerrybelmonte/Algorithms-Python/fibonacci_partial_sum.py
```python
def fibonacci_partial_sum(m: int, n: int):
"""
    Finds the last digit of a partial sum of Fibonacci numbers:
    Fm + Fm+1 + ... + Fn.
    :param m: starting index in the Fibonacci sequence
    :param n: end index in the Fibonacci sequence
    :return: the last digit of the partial sum
    Example: F3 + F4 + F5 + F6 + F7 = 2 + 3 + 5 + 8 + 13 = 31
>>> fibonacci_partial_sum(3, 7)
1
"""
pisano_period = 60
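    # The last digits of Fibonacci numbers repeat with period 60 (the Pisano
    # period modulo 10), so indices can be reduced modulo 60 before summing.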
partial_sum = [0, 1]
if m == n: # base case for a single fibonacci number
fibonacci = n % pisano_period
if fibonacci <= 1: # base case for Fn <= 1
return fibonacci
for i in range(2, fibonacci + 1): # compute the fibonacci sequence
digit = (partial_sum[i - 1] + partial_sum[i - 2]) % 10
partial_sum.append(digit)
return partial_sum[fibonacci]
start_ndx = m % pisano_period
end_ndx = n % pisano_period + 1
if start_ndx >= end_ndx:
end_ndx += pisano_period
for i in range(2, end_ndx):
digit = (partial_sum[i - 1] + partial_sum[i - 2]) % 10
partial_sum.append(digit)
return sum(partial_sum[start_ndx:end_ndx]) % 10
if __name__ == '__main__':
start, end = input().split()
print(fibonacci_partial_sum(int(start), int(end)))
```
#### File: jerrybelmonte/Algorithms-Python/lcm.py
```python
def lcm(a, b):
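    """Return the least common multiple of a and b.
    Example: lcm(4, 6) == 12, since gcd(4, 6) == 2 and (4 // 2) * 6 == 12."""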
# greatest common divisor helper function
def gcd(a, b):
left, right = max(a, b), min(a, b)
if right == 0: # base case
return left # left is the greatest common divisor
# recursively call the gcd function
return gcd(right, (left % right))
# return the least common multiple
return (a // gcd(a, b)) * b
if __name__ == '__main__':
a, b = input().split()
print(lcm(int(a), int(b)))
```
#### File: jerrybelmonte/Algorithms-Python/sorting.py
```python
import sys
from random import randint
def partition3(seq, lo, hi):
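    # Three-way (Dutch national flag) partition around the pivot seq[lo]:
    # afterwards seq[lo:left] < pivot, seq[left:right+1] == pivot and
    # seq[right+1:hi+1] > pivot.
    # Example: partition3([2, 3, 9, 2, 2], 0, 4) rearranges the list to
    # [2, 2, 2, 9, 3] and returns (0, 2).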
left, right = lo, hi
ndx = lo + 1
pivot = seq[lo]
while ndx <= right:
if seq[ndx] < pivot:
seq[left], seq[ndx] = seq[ndx], seq[left]
left, ndx = left + 1, ndx + 1
elif seq[ndx] > pivot:
seq[ndx], seq[right] = seq[right], seq[ndx]
right -= 1
else:
ndx += 1
return left, right
def randomized_quick_sort(seq, left, right):
if left >= right: # base case
return
rand_ndx = randint(left, right) # random selection
seq[left], seq[rand_ndx] = seq[rand_ndx], seq[left]
mid_left, mid_right = partition3(seq, left, right)
randomized_quick_sort(seq, left, mid_left - 1)
randomized_quick_sort(seq, mid_right + 1, right)
def quick_sort(seq: list):
"""
Randomized quicksort with 3-way partition implementation.
:param seq: sequence of numbers
:return: sorted sequence in non-decreasing order
>>> quick_sort([2, 3, 9, 2, 2])
[2, 2, 2, 3, 9]
"""
randomized_quick_sort(seq, 0, len(seq) - 1)
return seq
if __name__ == '__main__':
n, *a = list(map(int, sys.stdin.read().split()))
randomized_quick_sort(a, 0, n - 1)
for x in a:
print(x, end=' ')
```
|
{
"source": "jerrybelmonte/DataStructures-Python",
"score": 4
}
|
#### File: jerrybelmonte/DataStructures-Python/job_queue.py
```python
from collections import namedtuple
from heapq import heapify, heappop, heappush
AssignedJob = namedtuple('AssignedJob', ['worker', 'started_at'])
def assign_jobs(n_workers: int, jobs: list):
"""
Uses n independent threads to process a given list of jobs.
:param n_workers: number of threads
:param jobs: list of job query times
:return: list of ith thread and time started
>>> assign_jobs(2, [1, 2, 3, 4, 5])
[(0, 0), (1, 0), (0, 1), (1, 2), (0, 4)]
"""
pq = [(0, w) for w in range(n_workers)]
heapify(pq)
result = []
for job in jobs:
worker = heappop(pq)
result.append(AssignedJob(worker[1], worker[0]))
heappush(pq, (worker[0] + job, worker[1]))
return result
def main():
n_workers, n_jobs = map(int, input().split())
jobs = list(map(int, input().split()))
assert len(jobs) == n_jobs
assigned_jobs = assign_jobs(n_workers, jobs)
for job in assigned_jobs:
print(job.worker, job.started_at)
if __name__ == "__main__":
main()
```
#### File: jerrybelmonte/DataStructures-Python/max_sliding_window.py
```python
from collections import deque
def max_sliding_window(sequence, k):
"""
Gets the maximum elements in the sliding window.
:param sequence: the sequence of elements
:param k: the size of the sliding window
:return: the list with the maximums
>>> max_sliding_window([2, 7, 3, 1, 5, 2, 6, 2], 4)
[7, 7, 5, 6, 6]
"""
maximums = []
dq = deque()
for i in range(k): # the initial k elements in the window
while dq and sequence[dq[-1]] <= sequence[i]:
dq.pop() # pop smaller elements than the current from the end
dq.append(i) # add the current element to the deque
for i in range(k, len(sequence)):
maximums.append(sequence[dq[0]]) # element at 0th index is the maximum
while dq and dq[0] <= i - k: # i - k is the current window
dq.popleft() # pop maximums not in the window
while dq and sequence[dq[-1]] <= sequence[i]:
dq.pop() # pop smaller elements than the current element
dq.append(i) # add the current element
maximums.append(sequence[dq.popleft()]) # add the last maximum
return maximums
if __name__ == '__main__':
n = int(input())
input_sequence = [int(i) for i in input().split()]
assert len(input_sequence) == n
window_size = int(input())
print(*max_sliding_window(input_sequence, window_size))
```
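The deque holds indices of candidate maximums in decreasing value order, so every index is pushed and popped at most once and the whole pass is O(n). A brute-force cross-check sketch (assuming the file above is importable as `max_sliding_window.py`):

```python
from random import randint

from max_sliding_window import max_sliding_window  # hypothetical import

for _ in range(100):
    seq = [randint(-10, 10) for _ in range(randint(1, 20))]
    k = randint(1, len(seq))
    expected = [max(seq[i:i + k]) for i in range(len(seq) - k + 1)]
    assert max_sliding_window(seq, k) == expected
print("deque answer matches the O(n*k) brute force")
```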
#### File: jerrybelmonte/DataStructures-Python/phone_book.py
```python
class PhoneBook:
def __init__(self):
self.contacts = dict()
def add(self, number: int, name: str):
"""Adds a contact to the phone book with the given name and number.
If the number already exists, the contact name will be overwritten."""
self.contacts[number] = name
def erase(self, number: int):
"""Erases a person with the given number from the phone book if and
and only if the contact number is in the phone book."""
if number in self.contacts:
del self.contacts[number]
def find(self, number: int):
"""Looks for a person with the given phone number. Returns a string
with the contact name, or 'not found' if the phone number is not in
the phone book."""
if number in self.contacts:
return self.contacts.get(number)
return 'not found'
class Query:
def __init__(self, query):
self.type = query[0]
self.number = int(query[1])
self.name = None
if self.type == 'add':
self.name = query[2]
def read_queries():
n = int(input())
return [Query(input().split()) for _ in range(n)]
def write_responses(result):
print('\n'.join(result))
def process_queries(queries):
result = []
phone_book = PhoneBook()
for cur_query in queries:
if cur_query.type == 'add':
phone_book.add(cur_query.number, cur_query.name)
elif cur_query.type == 'del':
phone_book.erase(cur_query.number)
elif cur_query.type == 'find':
result.append(phone_book.find(cur_query.number))
return result
if __name__ == '__main__':
write_responses(process_queries(read_queries()))
```
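A short usage sketch of the `PhoneBook` class on its own (the numbers are hypothetical):

```python
book = PhoneBook()
book.add(911, "police")
book.add(76213, "Mom")
print(book.find(76213))   # Mom
book.add(76213, "daddy")  # adding the same number overwrites the name
print(book.find(76213))   # daddy
book.erase(911)
print(book.find(911))     # not found
```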
|
{
"source": "JerryBian/mfe",
"score": 3
}
|
#### File: mfe/src/main.py
```python
import argparse
from package.sqlpackageparser import SqlPackageParser
from mysqlrepository import MySqlRepository
def main():
    parser = argparse.ArgumentParser(description='Execute the SQL files in the given package.')
parser.add_argument('-p', '--path', required=True)
args = parser.parse_args()
package = SqlPackageParser.parse(args.path)
for file in package.files:
with open(file, 'r') as f:
print(f'start execute sql at "{file}"')
MySqlRepository.execute(package, f.read())
print(f'execute sql at "{file}" successfully.')
if __name__ == "__main__":
print('mfe started.')
main()
print('mfe completed')
```
|
{
"source": "JerryBian/web-shutdown",
"score": 2
}
|
#### File: JerryBian/web-shutdown/app.py
```python
from user import User
from flask import Flask, jsonify, request, render_template, url_for
from flask_login import LoginManager
from flask_login.utils import login_required, login_user, logout_user
from werkzeug.utils import redirect
from concurrent.futures import ThreadPoolExecutor
from sendgrid.helpers.mail import *
import subprocess
import traceback
import logging
import sys
import os
import time
import datetime
import sendgrid
import socket
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
executor = ThreadPoolExecutor(5)
app = Flask(__name__, static_url_path='/assets', static_folder='assets')
app.secret_key = os.getenv('ENV_SECRET_KEY', 'SECRET_KEY_DEFAULT')
login_manager = LoginManager()
login_manager.init_app(app)
user_name = os.getenv('ENV_USER_NAME', 'admin')
user_pwd = os.getenv('ENV_PASSWORD', '<PASSWORD>')
@login_manager.unauthorized_handler
def unauthorized_callback():
return redirect(url_for('login'))
@login_manager.user_loader
def load_user(name):
if user_name == name:
u = User(name)
u.is_authenticated = True
return u
return None
@app.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('index'))
@app.route('/login', methods=['POST', 'GET'])
def login():
error = None
global user_name, user_pwd
if request.method == 'POST':
request_user_name = request.form.get('user_name')
request_user_pwd = request.form.get('user_pwd')
request_user_remember = request.form.get('user_remember')
if request_user_name == user_name:
u = User(user_name)
u.verify_pwd(request_user_pwd, user_pwd)
if u.is_authenticated:
login_user(u, remember=request_user_remember == 'on',
duration=datetime.timedelta(days=7))
return redirect(url_for('index'))
error = 'Invalid credentials'
return render_template('login.html', error=error)
@app.route('/')
@login_required
def index():
return render_template('index.html')
@app.route('/shutdown', methods=['POST'])
@login_required
def shutdown():
out = err = str()
try:
args = ['sudo', 'shutdown', '-h', 'now']
executor.submit(exeCmd, args)
out = f'<p class="text-center text-muted fs-6">{get_machine()}</p><p>has been shutdown successfully</p>'
except Exception:
err = traceback.format_exc()
logger.error(err)
logger.debug('about to send response')
return jsonify({'stdout': out, 'stderr': err})
@app.route('/reboot', methods=['POST'])
@login_required
def reboot():
out = err = str()
try:
args = ['sudo', 'shutdown', '-r', 'now']
executor.submit(exeCmd, args)
        out = f'<p class="text-center text-muted fs-6">{get_machine()}</p><p>has been rebooted successfully</p>'
except Exception:
err = traceback.format_exc()
logger.error(err)
logger.debug('about to send response')
return jsonify({'stdout': out, 'stderr': err})
def exeCmd(args):
logger.info(f'begin exec {args}')
time.sleep(1)
sendMail(f'<code>{args}</code> request finished.')
command = subprocess.run(args, capture_output=True)
logger.info(f'end exec {args}')
return command
def get_machine():
return f'{socket.gethostname()} @{get_ip()}'
def sendMail(message):
apiKey = os.getenv('ENV_SENDGRID_API_KEY')
if apiKey is None:
        logger.warning('ENV_SENDGRID_API_KEY is not set')
return
mailToAddr = os.getenv('ENV_MAIL_TO_ADDR')
if mailToAddr is None:
        logger.warning('ENV_MAIL_TO_ADDR is not set')
return
mailToName = os.getenv('ENV_MAIL_TO_NAME', 'admin')
mailFromAddr = os.getenv('ENV_MAIL_FROM_ADDR', '<EMAIL>')
mailFromName = os.getenv('ENV_MAIL_FROM_NAME', 'robot')
html_content = f'<div><p>{message}</p></div><hr/><div style="text-align:center;margin-top:1.2rem;"><small>{get_machine()}</small></div>'
sg = sendgrid.SendGridAPIClient(api_key=apiKey)
from_email = Email(mailFromAddr, name=mailFromName)
to_email = To(mailToAddr, name=mailToName)
subject = "Notification from web-shutdown"
content = HtmlContent(html_content)
tracking_settings = TrackingSettings()
tracking_settings.click_tracking = ClickTracking(False, False)
tracking_settings.open_tracking = OpenTracking(False)
tracking_settings.subscription_tracking = SubscriptionTracking(False)
tracking_settings.ganalytics = Ganalytics(False)
mail = Mail(from_email=from_email, to_emails=to_email, subject=subject, html_content=content)
mail.tracking_settings = tracking_settings
response = sg.client.mail.send.post(request_body=mail.get())
if response.status_code == 202:
logger.info(f'send email to {mailToAddr} successfully: {message}')
else:
        logger.error(
            f'send email failed. response code: {response.status_code}, message: {response.body}.')
def get_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
IP = s.getsockname()[0]
except Exception:
IP = '127.0.0.1'
finally:
s.close()
return IP
if __name__ == '__main__':
sendMail('<code>web-shutdown</code> is ready to run.')
app.run(host=os.getenv('ENV_HOST', '127.0.0.1'),
port=os.getenv('ENV_PORT', '5000'))
```
|
{
"source": "Jerrybibo/FlaskPlayground",
"score": 3
}
|
#### File: flask-tutorial/flaskr/db.py
```python
import sqlite3
# click ("Command Line Interface Creation Kit") creates helper commands for initializing our database.
import click
from flask import current_app, g
from flask.cli import with_appcontext
def init_app(app):
"""
Registers the relevant functions with the specific Flask application instance.
:param app: Flask app object
"""
app.teardown_appcontext(close_db)
app.cli.add_command(init_db_command)
def get_db():
"""
Establishes connection to SQLite database.
:return: SQLite connection object
"""
# g is a special object unique for each request, which stores cross-function data. ("the application context")
# If a DB connection has already been made, it won't reconnect to the SQLite DB.
if 'db' not in g:
g.db = sqlite3.connect(
current_app.config['DATABASE'], # current_app is a special object that points to the Flask application.
detect_types=sqlite3.PARSE_DECLTYPES
)
# sqlite3.Row tells SQLite to return rows that behave like dicts. (column names = dict keys)
g.db.row_factory = sqlite3.Row
return g.db
def close_db(e=None):
"""
Closes the database connection if any exists.
:param e:
"""
db = g.pop('db', None)
if db is not None:
db.close()
def init_db():
"""
Initializes a SQLite database using schema.sql.
"""
db = get_db()
# Opens schema.sql as relative to the flaskr package, then executes the commands in the SQL file on SQLite
with current_app.open_resource('schema.sql') as schema_file:
db.executescript(schema_file.read().decode('utf8'))
@click.command('init-db')
@with_appcontext
def init_db_command():
"""
Initializes a SQLite database using schema.sql.
"""
init_db()
click.echo('Initialized the database.')
```
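Because `get_db()` caches the connection on `g`, every function in the same request reuses one connection. A hypothetical view sketch built on top of this module (the blueprint and the `user` table are assumptions, not part of the file above; Flask 1.1+ converts the returned dict to JSON):

```python
from flask import Blueprint

from flaskr.db import get_db

bp = Blueprint('demo', __name__)

@bp.route('/users/count')
def user_count():
    db = get_db()  # same connection for the whole request
    row = db.execute('SELECT COUNT(*) AS n FROM user').fetchone()
    return {'count': row['n']}  # sqlite3.Row allows dict-style access
```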
|
{
"source": "JerryBii/computer-vision-python",
"score": 3
}
|
#### File: modules/videoDisplay/videoDisplay.py
```python
import cv2
import logging
import time
is_open = None
# method that displays webcam
def displayCamera():
cap = cv2.VideoCapture(0)
global is_open
is_open = False
    if not cap.isOpened():
        print("Unable to read camera feed")
# Default resolutions of the frame are obtained.The default resolutions are system dependent
# We convert the resolutions from float to integer
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
# Define the codec and create VideoWriter object
out = cv2.VideoWriter('outpy.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 10, (frame_width,frame_height))
timeout = 5 #5 seconds
timeout_start = time.time()
    while True:
        ret, frame = cap.read()
        is_open = True
        if ret:
            out.write(frame)
            cv2.imshow('frame', frame)
            if time.time() > timeout_start + timeout:
                break
            if cv2.waitKey(1) & 0xFF == ord('q'):  # close the window when 'q' is pressed
                break
        else:
            # break the loop when no frame can be read
            break
        if cv2.getWindowProperty('frame', cv2.WND_PROP_VISIBLE) < 1:  # close when the top-right 'X' is clicked
            break
is_open = False
cap.release()
out.release()
# Closes all the frames
cv2.destroyAllWindows()
def displayVideo(pause, exitRequest, frameIn):  # takes pipelineOut from decklinkSrcWorker and displays it as a popup window
    logger = logging.getLogger()
    logger.debug("videoDisplay: Started video display")
    displayCamera()
    logger.debug("videoDisplay: Stopped video display")
```
|
{
"source": "Jerry-BinWang/microservices-demo",
"score": 2
}
|
#### File: deploy/docker-swarm/deployfrontend.py
```python
import os
import sys
import subprocess
FRONTEND_STACK_PREFIX = "sockshop_frontend_"
BACKEND_STACK = "sockshop_backend"
def print_usage():
print("This script takes exactly one argument.")
print("Usage: {} HOSTNAME".format(sys.argv[0]))
sys.exit(1)
if __name__ == "__main__":
if len(sys.argv) != 2:
print_usage()
hostname = sys.argv[1]
print("Generating compose file for host {}".format(hostname))
temp_config_file = "{}.yml".format(hostname)
with open(temp_config_file, "w") as fout:
subprocess.run(["sed", "s/{{hostname}}/"+hostname+"/g", "frontend.yml"], check=True, stdout=fout)
print("Deploying frontend stack")
stack_name = FRONTEND_STACK_PREFIX + hostname
subprocess.run(["docker", "stack", "deploy", "-c", temp_config_file, stack_name], check=True)
print("Adding backend services to network")
process = subprocess.run(["docker", "stack", "services", BACKEND_STACK], check=True, stdout=subprocess.PIPE)
process = subprocess.run(["tail", "-n", "+2"], input=process.stdout, check=True, stdout=subprocess.PIPE)
process = subprocess.run(["awk", "{print $2}"], input=process.stdout, check=True, stdout=subprocess.PIPE)
for line in process.stdout.decode().split("\n"):
service = line.strip()
if service:
alias = service.replace(BACKEND_STACK+"_", "")
subprocess.run(
["docker", "service", "update", "--network-add",
"name={},alias={}".format(stack_name + "_default", alias), service], check=True)
print("Cleaning up")
os.remove(temp_config_file)
```
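The `tail`/`awk` pipeline above only drops the header row and extracts the second column; a pure-Python sketch of that step which avoids the two extra subprocesses (assuming the usual `docker stack services` table output):

```python
import subprocess

def backend_service_names(stack: str) -> list:
    proc = subprocess.run(["docker", "stack", "services", stack],
                          check=True, stdout=subprocess.PIPE)
    lines = proc.stdout.decode().splitlines()[1:]  # drop the header row
    return [line.split()[1] for line in lines if line.strip()]  # NAME column
```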
|
{
"source": "JerryBluesnow/JerryPythonLesson",
"score": 3
}
|
#### File: lesson-2/weChatAirCraft/AirCraft.py
```python
import random
import pygame
# screen size constant
SCREEN_RECT = pygame.Rect(0, 0, 1920, 1080)
# refresh frame rate (used by start_game, so it must be defined)
FRAME_PER_SEC = 60
# timer event constant for creating enemy planes
CREATE_ENEMY_EVENT = pygame.USEREVENT
# event for the hero firing bullets
HERO_FIRE_EVENT = pygame.USEREVENT + 1
HERO_FIRE_EVENT = pygame.USEREVENT + 1
class GameSprite(pygame.sprite.Sprite):
    """Base sprite for the plane-battle game"""
    def __init__(self, image_name, speed=1):
        # call the parent class initializer
        super().__init__()
        # define the object's attributes
        self.image = pygame.image.load(image_name)
        self.rect = self.image.get_rect()
        self.speed = speed
    def update(self):
        # move vertically down the screen
        self.rect.y += self.speed
class Background(GameSprite):
    """Background sprite"""
    def __init__(self, is_alt=False):
        # 1. call the parent method to create the sprite (image/rect/speed)
        super().__init__("./images/background.png")
        # 2. if this is the alternating image, set its initial position above the screen
        if is_alt:
            self.rect.y = -self.rect.height
    def update(self):
        # 1. call the parent implementation
        super().update()
        # 2. once the image moves off screen, place it back above the screen
        if self.rect.y >= SCREEN_RECT.height:
            self.rect.y = -self.rect.height
class Enemy(GameSprite):
    """Enemy plane sprite"""
    def __init__(self):
        # 1. call the parent method to create the enemy sprite with its image
        super().__init__("./images/enemy1.png")
        # 2. give the enemy a random initial speed between 1 and 3
        self.speed = random.randint(1, 3)
        # 3. give the enemy a random initial position
        self.rect.bottom = 0
        max_x = SCREEN_RECT.width - self.rect.width
        self.rect.x = random.randint(0, max_x)
    def update(self):
        # 1. call the parent method to keep flying vertically
        super().update()
        # 2. once the enemy flies off screen, remove it from the sprite groups
        if self.rect.y >= SCREEN_RECT.height:
            # print("flew off screen, removing from the sprite groups...")
            # kill() removes the sprite from all groups, so it is destroyed automatically
            self.kill()
    def __del__(self):
        # print("enemy destroyed %s" % self.rect)
        pass
class Hero(GameSprite):
    """Hero sprite"""
    def __init__(self):
        # 1. call the parent method to set image & speed
        super().__init__("./images/me1.png", 0)
        # 2. set the hero's initial position
        self.rect.centerx = SCREEN_RECT.centerx
        self.rect.bottom = SCREEN_RECT.bottom - 120
        # 3. create the bullet sprite group
        self.bullets = pygame.sprite.Group()
    def update(self):
        # the hero moves horizontally
        self.rect.x += self.speed
        # keep the hero on screen
        if self.rect.x < 0:
            self.rect.x = 0
        elif self.rect.right > SCREEN_RECT.right:
            self.rect.right = SCREEN_RECT.right
    def fire(self):
        print("firing bullets...")
        for i in (0, 1, 2):
            # 1. create a bullet sprite
            bullet = Bullet()
            # 2. set the sprite's position
            bullet.rect.bottom = self.rect.y - i * 20
            bullet.rect.centerx = self.rect.centerx
            # 3. add the sprite to the group
            self.bullets.add(bullet)
class Bullet(GameSprite):
    """Bullet sprite"""
    def __init__(self):
        # call the parent method to set the bullet image and initial speed
        super().__init__("./images/bullet1.png", -2)
    def update(self):
        # call the parent method so the bullet flies vertically
        super().update()
        # remove the bullet once it flies off the top of the screen
        if self.rect.bottom < 0:
            self.kill()
    def __del__(self):
        print("bullet destroyed...")
class PlaneGame(object):
    """Main plane-battle game"""
    def __init__(self):
        print("initializing game")
        pygame.init()  # initialize pygame
        pygame.mixer.init()
        # 1. create the game window
        self.screen = pygame.display.set_mode(SCREEN_RECT.size)
        # 2. create the game clock
        self.clock = pygame.time.Clock()
        # 3. create sprites and sprite groups via a private method
        self.__create_sprites()
        # 4. set timer events - spawn an enemy every 1 s, fire every 0.5 s
        pygame.time.set_timer(CREATE_ENEMY_EVENT, 1000)
        pygame.time.set_timer(HERO_FIRE_EVENT, 500)
    def __create_sprites(self):
        # create the background sprites and group
        bg1 = Background()
        bg2 = Background(True)
        self.back_group = pygame.sprite.Group(bg1, bg2)
        # create the enemy sprite group
        self.enemy_group = pygame.sprite.Group()
        # create the hero sprite and group
        self.hero = Hero()
        self.hero_group = pygame.sprite.Group(self.hero)
    def start_game(self):
        print("game started...")
        while True:
            # 1. cap the frame rate
            self.clock.tick(FRAME_PER_SEC)
            # 2. handle events
            self.__event_handler()
            # 3. collision detection
            self.__check_collide()
            # 4. update/draw the sprite groups
            self.__update_sprites()
            # 5. refresh the display
            pygame.display.update()
    def __event_handler(self):
        for event in pygame.event.get():
            # check whether to quit the game
            if event.type == pygame.QUIT:
                PlaneGame.__game_over()
            elif event.type == CREATE_ENEMY_EVENT:
                # print("enemy appears...")
                # create an enemy sprite
                enemy = Enemy()
                # add the enemy sprite to the enemy group
                self.enemy_group.add(enemy)
            elif event.type == HERO_FIRE_EVENT:
                self.hero.fire()
            # elif event.type == pygame.KEYDOWN and event.key == pygame.K_RIGHT:
            #     print("moving right...")
        # read the keyboard state - a tuple of pressed keys
        keys_pressed = pygame.key.get_pressed()
        # check the entries for the left/right arrow keys
        if keys_pressed[pygame.K_RIGHT]:
            self.hero.speed = 2
        elif keys_pressed[pygame.K_LEFT]:
            self.hero.speed = -2
        else:
            self.hero.speed = 0
    def __check_collide(self):
        # 1. bullets destroy enemies
        pygame.sprite.groupcollide(self.hero.bullets, self.enemy_group, True, True)
        # 2. enemies crash into the hero
        enemies = pygame.sprite.spritecollide(self.hero, self.enemy_group, True)
        # check whether the collision list has any entries
        if len(enemies) > 0:
            # the hero is destroyed
            self.hero.kill()
            # end the game
            PlaneGame.__game_over()
def __update_sprites(self):
self.back_group.update()
self.back_group.draw(self.screen)
self.enemy_group.update()
self.enemy_group.draw(self.screen)
self.hero_group.update()
self.hero_group.draw(self.screen)
self.hero.bullets.update()
self.hero.bullets.draw(self.screen)
    @staticmethod
    def __game_over():
        print("game over")
        pygame.quit()
        exit()
if __name__ == '__main__':
    # create the game object
    game = PlaneGame()
    # start the game
    game.start_game()
```
|
{
"source": "jerrybox2/region-data",
"score": 2
}
|
#### File: apps/region_cn/models.py
```python
from __future__ import unicode_literals
from django.db import models
from django.utils.functional import cached_property
def populate_tree(node, grade):
    """
    Recursively traverse the tree of region nodes, stopping once the level given by grade is reached.
    Example:
        populate_tree(node(name="河北省"), grade="county")  # province-city-county
        populate_tree(node(name="河北省"), grade="city")  # province-city
{
"data": {
"name": "河北省",
"code": "130000",
"children": [
{
"name": "石家庄市",
"code": "130100"
},
{
"name": "唐山市",
"code": "130200"
},
{
"name": "秦皇岛市",
"code": "130300"
},
{
"name": "邯郸市",
"code": "130400"
},
{
"name": "邢台市",
"code": "130500"
},
{
"name": "保定市",
"code": "130600"
},
{
"name": "张家口市",
"code": "130700"
},
{
"name": "承德市",
"code": "130800"
},
{
"name": "沧州市",
"code": "130900"
},
{
"name": "廊坊市",
"code": "131000"
},
{
"name": "衡水市",
"code": "131100"
}
]
}
}
    :param node: region node object
    :param grade: the lowest level type to descend to
    :return:
    """
data = {"name": node.name,
"code": node.code}
if node.children.all().count() and not(node.grade == grade):
data["children"] = []
for child in node.children.all():
data["children"].append(populate_tree(child, grade=grade))
return data
class Region(models.Model):
"""
省市县三级区域数据:
0 整体是一个树形结构
1 每个区域都是树上的一个节点
2 外键关联节点间父子关系
"""
GRADE_CHOICES = (
("province", 'Province'),
("city", 'City'),
("county", "County"),
)
code = models.CharField(max_length=6)
name = models.CharField(max_length=100)
parent = models.ForeignKey(to='self',
related_name="children",
related_query_name="child",
null=True,
blank=True,
on_delete=models.SET_NULL)
grade = models.CharField(max_length=10, choices=GRADE_CHOICES, default="county")
@cached_property
def province(self):
if self.grade == self.GRADE_CHOICES[0][0]:
obj = self
elif self.grade == self.GRADE_CHOICES[1][0]:
obj = self.parent
elif self.grade == self.GRADE_CHOICES[2][0]:
obj = self.parent.parent
else:
raise Exception("Grade Error")
data = {"obj": obj, "name": obj.name, "code": obj.code}
return data
@cached_property
def city(self):
if self.grade == self.GRADE_CHOICES[0][0]:
data = []
for city in self.children.all():
obj = {"obj": city, "name": city.name, "code": city.code}
data.append(obj)
return data
elif self.grade == self.GRADE_CHOICES[1][0]:
obj = self
elif self.grade == self.GRADE_CHOICES[2][0]:
obj = self.parent
else:
raise Exception("Grade Error")
data = {"obj": obj, "name": obj.name, "code": obj.code}
return data
@cached_property
def county(self):
if self.grade == self.GRADE_CHOICES[0][0]:
raise Exception("Get city First")
elif self.grade == self.GRADE_CHOICES[1][0]:
data = []
for county in self.children.all():
obj = {"obj": county, "name": county.name, "code": county.code}
data.append(obj)
return data
elif self.grade == self.GRADE_CHOICES[2][0]:
obj = self
data = {"obj": obj, "name": obj.name, "code": obj.code}
return data
def __str__(self):
return self.name
```
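A hypothetical usage sketch of `populate_tree` from a Django shell (it assumes the region table has already been loaded with the standard data):

```python
from region_cn.models import Region, populate_tree

hebei = Region.objects.get(name="河北省")
tree = populate_tree(hebei, grade="city")  # province -> city, as in the docstring example
print(tree["name"], len(tree["children"]))
```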
|
{
"source": "jerryc05/python-progressbar",
"score": 2
}
|
#### File: python-progressbar/tests/test_multibar.py
```python
import pytest
import progressbar
def test_multi_progress_bar_out_of_range():
widgets = [
progressbar.MultiProgressBar('multivalues'),
]
bar = progressbar.ProgressBar(widgets=widgets, max_value=10)
with pytest.raises(ValueError):
bar.update(multivalues=[123])
with pytest.raises(ValueError):
bar.update(multivalues=[-1])
def test_multi_progress_bar_fill_left():
import examples
return examples.multi_progress_bar_example(False)
```
#### File: python-progressbar/tests/test_speed.py
```python
import pytest
import progressbar
@pytest.mark.parametrize('total_seconds_elapsed,value,expected', [
(1, 0, ' 0.0 s/B'),
(1, 0.01, '100.0 s/B'),
(1, 0.1, ' 0.1 B/s'),
(1, 1, ' 1.0 B/s'),
(1, 2 ** 10 - 1, '1023.0 B/s'),
(1, 2 ** 10 + 0, ' 1.0 KiB/s'),
(1, 2 ** 20, ' 1.0 MiB/s'),
(1, 2 ** 30, ' 1.0 GiB/s'),
(1, 2 ** 40, ' 1.0 TiB/s'),
(1, 2 ** 50, ' 1.0 PiB/s'),
(1, 2 ** 60, ' 1.0 EiB/s'),
(1, 2 ** 70, ' 1.0 ZiB/s'),
(1, 2 ** 80, ' 1.0 YiB/s'),
(1, 2 ** 90, '1024.0 YiB/s'),
])
def test_file_transfer_speed(total_seconds_elapsed, value, expected):
widget = progressbar.FileTransferSpeed()
assert widget(None, dict(
total_seconds_elapsed=total_seconds_elapsed,
value=value,
)) == expected
```
#### File: python-progressbar/tests/test_stream.py
```python
from __future__ import print_function
import io
import sys
import pytest
import progressbar
def test_nowrap():
# Make sure we definitely unwrap
for i in range(5):
progressbar.streams.unwrap(stderr=True, stdout=True)
stdout = sys.stdout
stderr = sys.stderr
progressbar.streams.wrap()
assert stdout == sys.stdout
assert stderr == sys.stderr
progressbar.streams.unwrap()
assert stdout == sys.stdout
assert stderr == sys.stderr
# Make sure we definitely unwrap
for i in range(5):
progressbar.streams.unwrap(stderr=True, stdout=True)
def test_wrap():
# Make sure we definitely unwrap
for i in range(5):
progressbar.streams.unwrap(stderr=True, stdout=True)
stdout = sys.stdout
stderr = sys.stderr
progressbar.streams.wrap(stderr=True, stdout=True)
assert stdout != sys.stdout
assert stderr != sys.stderr
# Wrap again
stdout = sys.stdout
stderr = sys.stderr
progressbar.streams.wrap(stderr=True, stdout=True)
assert stdout == sys.stdout
assert stderr == sys.stderr
# Make sure we definitely unwrap
for i in range(5):
progressbar.streams.unwrap(stderr=True, stdout=True)
def test_excepthook():
progressbar.streams.wrap(stderr=True, stdout=True)
try:
raise RuntimeError()
except RuntimeError:
progressbar.streams.excepthook(*sys.exc_info())
progressbar.streams.unwrap_excepthook()
progressbar.streams.unwrap_excepthook()
def test_fd_as_io_stream():
stream = io.StringIO()
with progressbar.ProgressBar(fd=stream) as pb:
for i in range(101):
pb.update(i)
stream.close()
def test_no_newlines():
kwargs = dict(
redirect_stderr=True,
redirect_stdout=True,
line_breaks=False,
is_terminal=True,
)
with progressbar.ProgressBar(**kwargs) as bar:
for i in range(5):
bar.update(i)
for i in range(5, 10):
try:
print('\n\n', file=progressbar.streams.stdout)
print('\n\n', file=progressbar.streams.stderr)
except ValueError:
pass
bar.update(i)
@pytest.mark.parametrize('stream', [sys.__stdout__, sys.__stderr__])
def test_fd_as_standard_streams(stream):
with progressbar.ProgressBar(fd=stream) as pb:
for i in range(101):
pb.update(i)
```
#### File: python-progressbar/tests/test_unicode.py
```python
import time
import pytest
import progressbar
from python_utils import converters
@pytest.mark.parametrize('name,markers', [
('line arrows', u'←↖↑↗→↘↓↙'),
('block arrows', u'◢◣◤◥'),
('wheels', u'◐◓◑◒'),
])
@pytest.mark.parametrize('as_unicode', [True, False])
def test_markers(name, markers, as_unicode):
if as_unicode:
markers = converters.to_unicode(markers)
else:
markers = converters.to_str(markers)
widgets = [
'%s: ' % name.capitalize(),
progressbar.AnimatedMarker(markers=markers),
]
bar = progressbar.ProgressBar(widgets=widgets)
bar._MINIMUM_UPDATE_INTERVAL = 1e-12
for i in bar((i for i in range(24))):
time.sleep(0.001)
```
#### File: python-progressbar/tests/test_utils.py
```python
import io
import pytest
import progressbar
@pytest.mark.parametrize('value,expected', [
(None, None),
('', None),
('1', True),
('y', True),
('t', True),
('yes', True),
('true', True),
('0', False),
('n', False),
('f', False),
('no', False),
('false', False),
])
def test_env_flag(value, expected, monkeypatch):
if value is not None:
monkeypatch.setenv('TEST_ENV', value)
assert progressbar.utils.env_flag('TEST_ENV') == expected
if value:
monkeypatch.setenv('TEST_ENV', value.upper())
assert progressbar.utils.env_flag('TEST_ENV') == expected
monkeypatch.undo()
def test_is_terminal(monkeypatch):
fd = io.StringIO()
monkeypatch.delenv('PROGRESSBAR_IS_TERMINAL', raising=False)
monkeypatch.delenv('JPY_PARENT_PID', raising=False)
assert progressbar.utils.is_terminal(fd) is False
assert progressbar.utils.is_terminal(fd, True) is True
assert progressbar.utils.is_terminal(fd, False) is False
monkeypatch.setenv('JPY_PARENT_PID', '123')
assert progressbar.utils.is_terminal(fd) is True
monkeypatch.delenv('JPY_PARENT_PID')
# Sanity check
assert progressbar.utils.is_terminal(fd) is False
monkeypatch.setenv('PROGRESSBAR_IS_TERMINAL', 'true')
assert progressbar.utils.is_terminal(fd) is True
monkeypatch.setenv('PROGRESSBAR_IS_TERMINAL', 'false')
assert progressbar.utils.is_terminal(fd) is False
monkeypatch.delenv('PROGRESSBAR_IS_TERMINAL')
# Sanity check
assert progressbar.utils.is_terminal(fd) is False
```
#### File: python-progressbar/tests/test_with.py
```python
import progressbar
def test_with():
with progressbar.ProgressBar(max_value=10) as p:
for i in range(10):
p.update(i)
def test_with_stdout_redirection():
with progressbar.ProgressBar(max_value=10, redirect_stdout=True) as p:
for i in range(10):
p.update(i)
def test_with_extra_start():
with progressbar.ProgressBar(max_value=10) as p:
p.start()
p.start()
```
|
{
"source": "jerryc05/uno",
"score": 3
}
|
#### File: uno/bot/util.py
```python
import re
import conf
def get_ui_line_nums(player_num, handcards_num):
base_line_num = conf.base_scale_of_view[player_num][0]
# -1 // 8 == -1 in python
extra_line_num = max((handcards_num - 1) // 8, 0)
return base_line_num + extra_line_num
def remove_escape(s):
return s.replace("\x1b[31m", "") \
.replace("\x1b[32m", "") \
.replace("\x1b[33m", "") \
.replace("\x1b[34m", "") \
.replace("\x1b[0m", "")
def is_special_card(card):
if card in ['W', '+4']:
return True
if card[1:] in ['S', 'R', '+2']:
return True
return False
def can_be_played_after(card_to_play, last_played_card, handcards_num):
_card_to_play = remove_escape(card_to_play)
_last_played_card = remove_escape(last_played_card)
_last_color = _last_played_card[0]
_last_text = _last_played_card[1:]
if handcards_num == 1 and is_special_card(_card_to_play):
return False
if _last_text == 'S':
return _card_to_play[1:] == 'S'
if _last_text == '+2':
return _card_to_play[1:] in ['+2', '+4']
if _last_text == '+4':
return _card_to_play[1:] == '+4'
if _card_to_play in ['W', '+4']:
return True
return _card_to_play[0] == _last_color or _card_to_play[1:] == _last_text
def update_handcards(handcards, line):
cursor_index = -1
_line = line.split()[1:-1]
for i in range(len(_line)):
if _line[i][0] == '>':
cursor_index = i
_line[i] = _line[i][1:]
break
return handcards + _line, cursor_index
def next_frame(game, player_num, debug=False):
cur_line_num = 0
is_updating_handcards = False
stat = 0
lines_left_num = -1
# return value
last_played_card = None
handcards = []
cursor_index = -1
_pos_of_last_played_card = conf.pos_of_last_played_card[player_num]
_pos_of_my_box = conf.pos_of_player_box[player_num][0]
while True:
line = game.stdout.readline().decode("UTF-8").replace("\n", "")
if debug:
print(cur_line_num, "\t", line)
else:
print(line)
line = remove_escape(line)
if cur_line_num == 0 and re.match(".*Want to play again", line):
stat = 4
return stat, last_played_card, handcards, cursor_index
if cur_line_num == _pos_of_last_played_card[0]:
# last played card
if len(line[_pos_of_last_played_card[1]:].split()) > 0:
last_played_card = line[_pos_of_last_played_card[1]:].split()[0].strip()
if cur_line_num == _pos_of_my_box[0] + 3:
# handcards
is_updating_handcards = True
if is_updating_handcards:
if re.match(".*\+--------", line):
is_updating_handcards = False
else:
handcards, _cursor_index = update_handcards(handcards, line)
if _cursor_index > -1:
cursor_index = _cursor_index + 8 * (cur_line_num - _pos_of_my_box[0] - 3)
if cur_line_num == get_ui_line_nums(player_num, len(handcards)) - 1:
if line[_pos_of_my_box[1]] != '[':
# it's not my turn
stat = 0
return stat, last_played_card, handcards, cursor_index
if cur_line_num == get_ui_line_nums(player_num, len(handcards)):
if re.match("Now it's your turn", line):
lines_left_num = 3
stat = 1
elif re.match("Press Enter to play the card just drawn immediately", line):
lines_left_num = 2
stat = 2
elif re.match("Specify the next color", line):
lines_left_num = 1
stat = 3
else:
assert False
lines_left_num -= 1
if lines_left_num == 0:
return stat, last_played_card, handcards, cursor_index
cur_line_num += 1
```
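A few illustrative calls to `can_be_played_after` (a sketch; the card strings follow the repo's color-prefix convention, e.g. `R5` for a red 5, with the ANSI escapes already stripped):

```python
assert can_be_played_after('R5', 'R8', 5)      # same color
assert can_be_played_after('G8', 'R8', 5)      # same text
assert can_be_played_after('W', 'R8', 5)       # wild card on a normal card
assert can_be_played_after('G+2', 'R+2', 5)    # a +2 answers a +2
assert not can_be_played_after('R5', 'RS', 5)  # a skip must be answered by a skip
assert not can_be_played_after('W', 'R8', 1)   # no special card as the last card
```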
|
{
"source": "JerryCastanos/CryptoTracker",
"score": 2
}
|
#### File: CryptoTracker/app/tracker.py
```python
from elasticsearch import Elasticsearch, helpers
from public.bitfinex import BitFinex_Market
from public.bitmex import BitMex_Market
from public.bittrex import BitTrex_Market
from public.gdax import GDAX_Market
from public.gemini import Gemini_Market
from public.kraken import Kraken_Market
from public.okcoin import OKCoin_Market
from public.poloniex import Poloniex_Market
from dotenv import Dotenv
from time import sleep
import logging
import schedule
import settings
import utils
import random
import time
def main():
logging.basicConfig(format='%(levelname)s:%(asctime)s %(message)s',level=settings.LOGLEVEL)
es = Elasticsearch(settings.ELASTICSEARCH_CONNECT_STRING)
logging.info('Market Refresh Rate: ' + str(settings.MARKET_REFRESH_RATE) + ' seconds.')
logging.info('Initial Sleep: ' + str(settings.INITIAL_SLEEP) + ' seconds.')
sleep(settings.INITIAL_SLEEP)
logging.info('Application Started.')
#supported_exchanges = [BitFinex_Market(), BitMex_Market(), BitTrex_Market(), GDAX_Market(), Gemini_Market(), Kraken_Market(), OKCoin_Market(), Poloniex_Market()]
exchanges = [BitFinex_Market(), BitMex_Market(), BitTrex_Market(), GDAX_Market(), Gemini_Market(), Kraken_Market(), OKCoin_Market(), Poloniex_Market()]
#print active exchanges and create indexes in kibana based on products listed in each market
for exchange in exchanges:
logging.info(exchange.exchange + ': activated and indexed.')
        for product, kibana_index in exchange.products.items():
utils.create_index(es, kibana_index)
    logging.warning('Initiating Market Tracking.')
#Record Ticks
while True:
sleep(settings.MARKET_REFRESH_RATE)
try:
for exchange in exchanges:
exchange.record_ticker(es)
except Exception as e:
logging.warning(e)
sleep(settings.RETRY_RATE)
if __name__ == '__main__':
main()
```
|
{
"source": "JerryCatLeung/Explaining-and-Harnessing-Adversarial-Examples",
"score": 3
}
|
#### File: MovieLens/source/rs4movie.py
```python
import datetime
import pickle
import random
import re
import time
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
def load_data():
"""
Load Data set from File
数据预处理
"""
# 读取User数据
users_title = ["UserID", "Gender", "Age", "JobID", "Zip-code"]
users = pd.read_csv(r"./users.dat", sep="::", header=None, names=users_title, engine="python")
users = users.filter(regex="UserID|Gender|Age|JobID")
users_orig = users.values
    # remap gender and age in the user data
gender_map = {"F": 0, "M": 1}
users["Gender"] = users["Gender"].map(gender_map)
age_map = {value: index for index, value in enumerate(set(users["Age"]))}
users["Age"] = users["Age"].map(age_map)
    # read the movie data set
movies_title = ["MovieID", "Title", "Genres"]
movies = pd.read_csv(r"./movies.dat", sep="::", header=None, names=movies_title, engine="python")
movies_orig = movies.values
    # strip the year from the Title
pattern = re.compile(r"^(.*)\((\d+)\)$")
title_map = {val: pattern.match(val).group(1).strip() for ii, val in enumerate(set(movies["Title"]))}
movies["Title"] = movies["Title"].map(title_map)
    # map movie genres to integers
genres_set = set()
for val in movies["Genres"].str.split("|"):
genres_set.update(val)
genres_set.add("<PAD>")
genres2int = {val: ii for ii, val in enumerate(genres_set)}
    # convert genres to equal-length integer lists (length 18)
genres_map = {val: [genres2int.get(row) for row in val.split("|")] for ii, val in enumerate(set(movies["Genres"]))}
for key in genres_map:
for cnt in range(max(genres2int.values()) - len(genres_map[key])):
genres_map[key].insert(len(genres_map.get(key)) + cnt, genres2int.get("<PAD>"))
movies["Genres"] = movies["Genres"].map(genres_map)
    # map movie title words to integers
title_set = set()
for val in movies["Title"].str.split():
title_set.update(val)
title_set.add("<PAD>")
title2int = {val: ii for ii, val in enumerate(title_set)}
    # convert titles to equal-length integer lists (length 15)
title_count = 15
title_map = {val: [title2int.get(row) for row in val.split()] for ii, val in enumerate(set(movies["Title"]))}
for key in title_map:
for cnt in range(title_count - len(title_map[key])):
title_map[key].insert(len(title_map[key]) + cnt, title2int.get("<PAD>"))
movies["Title"] = movies["Title"].map(title_map)
    # read the ratings data set
    ratings_title = ["UserID", "MovieID", "ratings", "timestamps"]  # a list, not a set: column order matters
ratings = pd.read_csv(r"./ratings.dat", sep='::', header=None, names=ratings_title, engine="python")
ratings = ratings.filter(regex="UserID|MovieID|ratings")
    # merge the three tables
data = pd.merge(pd.merge(ratings, users), movies)
    # split the data into an X table and a y table
target_fields = ['ratings']
features_pd, targets_pd = data.drop(target_fields, axis=1), data[target_fields]
features = features_pd.values
targets_values = targets_pd.values
return title_count, title_set, genres2int, features, targets_values, ratings, users, movies, data, movies_orig, users_orig
"""加载数据并保存到本地"""
title_count, title_set, genres2int, features, targets_values, ratings, users, movies, data, movies_orig, users_orig = load_data()
pickle.dump((title_count, title_set, genres2int, features, targets_values, ratings, users, movies, data, movies_orig,
users_orig), open("preprocessors.p", "wb"))
"""从本地读取数据"""
title_count, title_set, genres2int, features, targets_values, ratings, users, movies, data, movies_orig, users_orig = pickle.load(
open('preprocessors.p', mode='rb'))
def save_params(params):
"""
Save parameters to file
"""
pickle.dump(params, open('params.p', 'wb'))
def load_params():
"""
Load parameters from file
"""
return pickle.load(open('params.p', mode='rb'))
# dimension of the embedding matrices
embed_dim = 32
# number of user IDs
uid_max = max(features.take(0, 1)) + 1
# number of genders
gender_max = max(features.take(2, 1)) + 1
# number of age buckets
age_max = max(features.take(3, 1)) + 1
# number of jobs
job_max = max(features.take(4, 1)) + 1
# number of movie IDs
movie_id_max = max(features.take(1, 1)) + 1
# number of movie genres
movie_categories_max = max(genres2int.values()) + 1
# number of distinct words in movie titles
movie_title_max = len(title_set)
# how to aggregate the genre embedding vectors; "mean" averages them
combiner = "mean"
# movie title length
sentences_size = title_count
# text-convolution window sizes: slide over 2, 3, 4 and 5 words
window_sizes = {2, 3, 4, 5}
# number of text-convolution filters
filter_num = 8
# map from movie ID to row index; IDs do not match row indices (e.g. row 5 is not necessarily movie 5)
movieid2idx = {val[0]: i for i, val in enumerate(movies.values)}
"""神经网络超参数"""
# Number of Epochs
num_epochs = 6
# Batch Size
batch_size = 256
dropout_keep = 0.5
# Learning Rate
learning_rate = 0.0001
# Show stats for every n number of batches
show_every_n_batches = 20
save_dir = r"./save"
def get_inputs():
"""定义输入的占位符"""
uid = tf.placeholder(tf.int32, [None, 1], name="uid")
user_gender = tf.placeholder(tf.int32, [None, 1], name="user_gender")
user_age = tf.placeholder(tf.int32, [None, 1], name="user_age")
user_job = tf.placeholder(tf.int32, [None, 1], name="user_job")
movie_id = tf.placeholder(tf.int32, [None, 1], name="movie_id")
movie_categories = tf.placeholder(tf.int32, [None, 18], name="movie_categories")
movie_titles = tf.placeholder(tf.int32, [None, 15], name="movie_titles")
targets = tf.placeholder(tf.int32, [None, 1], name="targets")
LearningRate = tf.placeholder(tf.float32, name="LearningRate")
dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
return uid, user_gender, user_age, user_job, movie_id, movie_categories, movie_titles, targets, LearningRate, dropout_keep_prob
"""#####构建神经网络#####"""
def get_user_embedding(uid, user_gender, user_age, user_job):
"""
    Define the user embedding matrices.
:param uid:
:param user_gender:
:param user_age:
:param user_job:
:return:
"""
with tf.name_scope("user_embedding"):
uid_embed_matrix = tf.Variable(tf.random_uniform([uid_max, embed_dim], -1, 1), name="uid_embed_matrix")
uid_embed_layer = tf.nn.embedding_lookup(uid_embed_matrix, uid, name="uid_embed_layer")
gender_embed_matrix = tf.Variable(tf.random_uniform([gender_max, embed_dim // 2], -1, 1),
name="gender_embed_matrix")
gender_embed_layer = tf.nn.embedding_lookup(gender_embed_matrix, user_gender, name="gender_embed_layer")
age_embed_matrix = tf.Variable(tf.random_uniform([age_max, embed_dim // 2], -1, 1), name="age_embed_matrix")
age_embed_layer = tf.nn.embedding_lookup(age_embed_matrix, user_age, name="age_embed_layer")
job_embed_matrix = tf.Variable(tf.random_uniform([job_max, embed_dim // 2], -1, 1), name="job_embed_matrix")
job_embed_layer = tf.nn.embedding_lookup(job_embed_matrix, user_job, name="job_embed_layer")
return uid_embed_layer, gender_embed_layer, age_embed_layer, job_embed_layer
def get_user_feature_layer(uid_embed_layer, gender_embed_layer, age_embed_layer, job_embed_layer):
"""
    Fully connect the user embedding layers to produce the user feature.
:param uid_embed_layer:
:param gender_embed_layer:
:param age_embed_layer:
:param job_embed_layer:
:return:
"""
with tf.name_scope("user_fc"):
        # first fully connected layer
uid_fc_layer = tf.layers.dense(uid_embed_layer, embed_dim, name="uid_fc_layer", activation=tf.nn.relu)
gender_fc_layer = tf.layers.dense(gender_embed_layer, embed_dim, name="gender_fc_layer", activation=tf.nn.relu)
age_fc_layer = tf.layers.dense(age_embed_layer, embed_dim, name="age_fc_layer", activation=tf.nn.relu)
job_fc_layer = tf.layers.dense(job_embed_layer, embed_dim, name="job_fc_layer", activation=tf.nn.relu)
        # second fully connected layer
user_combine_layer = tf.concat([uid_fc_layer, gender_fc_layer, age_fc_layer, job_fc_layer], 2)
# user_combine_layer = tf.contrib.layers.fully_connected(user_combine_layer, 200, tf.tanh)
user_combine_layer = tf.layers.dense(user_combine_layer, 200, activation=tf.tanh)
user_combine_layer_flat = tf.layers.flatten(user_combine_layer)
return user_combine_layer, user_combine_layer_flat
def get_movie_id_embed_layer(movie_id):
"""
    Define the movie-ID embedding matrix.
:param movie_id:
:return:
"""
with tf.name_scope("movie_embedding"):
movie_id_embed_matrix = tf.Variable(tf.random_uniform([movie_id_max, embed_dim], -1, 1),
name="movie_id_embed_matrix")
movie_id_embed_layer = tf.nn.embedding_lookup(movie_id_embed_matrix, movie_id, name="movie_id_embed_layer")
return movie_id_embed_layer
def get_movie_categories_layers(movie_categories):
with tf.name_scope("movie_categories_layers"):
movie_categories_embed_matrix = tf.Variable(tf.random_uniform([movie_categories_max, embed_dim], -1, 1),
name="movie_categories_embed_matrix")
movie_categories_embed_layer = tf.nn.embedding_lookup(movie_categories_embed_matrix, movie_categories,
name="movie_categories_embed_layer")
if combiner == "mean":
movie_categories_embed_layer = tf.reduce_mean(movie_categories_embed_layer, axis=1, keepdims=True)
return movie_categories_embed_layer
"""Movie Title的文本卷积网络实现"""
def get_movie_cnn_layer(movie_titles):
"""
    Look up the embedding vector of each word in the movie title.
:param movie_titles:
:return:
"""
with tf.name_scope("movie_embedding"):
movie_title_embed_matrix = tf.Variable(tf.random_uniform([movie_title_max, embed_dim], -1, 1),
name="movie_title_embed_matrix")
movie_title_embed_layer = tf.nn.embedding_lookup(movie_title_embed_matrix, movie_titles,
name="movie_title_embed_layer")
movie_title_embed_layer_expand = tf.expand_dims(movie_title_embed_layer, -1)
    # convolve the text embedding layer with kernels of different sizes, then max-pool
pool_layer_lst = []
for window_size in window_sizes:
with tf.name_scope("movie_txt_conv_maxpool_{}".format(window_size)):
filter_weights = tf.Variable(tf.truncated_normal([window_size, embed_dim, 1, filter_num], stddev=0.1),
name="filter_weights")
filter_bias = tf.Variable(tf.constant(0.1, shape=[filter_num]), name="filter_bias")
conv_layer = tf.nn.conv2d(movie_title_embed_layer_expand, filter_weights, [1, 1, 1, 1], padding="VALID",
name="conv_layer")
relu_layer = tf.nn.relu(tf.nn.bias_add(conv_layer, filter_bias), name="relu_layer")
maxpool_layer = tf.nn.max_pool(relu_layer, [1, sentences_size - window_size + 1, 1, 1], [1, 1, 1, 1],
padding="VALID", name="maxpool_layer")
pool_layer_lst.append(maxpool_layer)
    # dropout layer
with tf.name_scope("pool_dropout"):
pool_layer = tf.concat(pool_layer_lst, 3, name="pool_layer")
max_num = len(window_sizes) * filter_num
pool_layer_flat = tf.reshape(pool_layer, [-1, 1, max_num], name="pool_layer_flat")
dropout_layer = tf.nn.dropout(pool_layer_flat, dropout_keep_prob, name="dropout_layer")
return pool_layer_flat, dropout_layer
def get_movie_feature_layer(movie_id_embed_layer, movie_categories_embed_layer, dropout_layer):
"""
    Fully connect all the movie layers together.
:param movie_id_embed_layer:
:param movie_categories_embed_layer:
:param dropout_layer:
:return:
"""
with tf.name_scope("movie_fc"):
        # first fully connected layer
movie_id_fc_layer = tf.layers.dense(movie_id_embed_layer, embed_dim, name="movie_id_fc_layer",
activation=tf.nn.relu)
movie_categories_fc_layer = tf.layers.dense(movie_categories_embed_layer, embed_dim,
name="movie_categories_fc_layer", activation=tf.nn.relu)
        # second fully connected layer
movie_combine_layer = tf.concat([movie_id_fc_layer, movie_categories_fc_layer, dropout_layer], 2) # (?, 1, 96)
movie_combine_layer = tf.layers.dense(movie_combine_layer, 200, activation=tf.tanh)
# movie_combine_layer_flat = tf.reshape(movie_combine_layer, [-1, 200])
movie_combine_layer_flat = tf.layers.flatten(movie_combine_layer)
return movie_combine_layer, movie_combine_layer_flat
"""构建计算图"""
tf.reset_default_graph()
train_graph = tf.Graph()
with train_graph.as_default():
    # get the input placeholders
uid, user_gender, user_age, user_job, movie_id, movie_categories, movie_titles, targets, lr, dropout_keep_prob = get_inputs()
    # get the four user embedding vectors
uid_embed_layer, gender_embed_layer, age_embed_layer, job_embed_layer = get_user_embedding(uid, user_gender,
user_age, user_job)
    # compute the user feature
user_combine_layer, user_combine_layer_flat = get_user_feature_layer(uid_embed_layer, gender_embed_layer,
age_embed_layer, job_embed_layer)
    # get the movie-ID embedding vector
movie_id_embed_layer = get_movie_id_embed_layer(movie_id)
    # get the movie-genre embedding vector
movie_categories_embed_layer = get_movie_categories_layers(movie_categories)
    # get the movie-title feature vector
pool_layer_flat, dropout_layer = get_movie_cnn_layer(movie_titles)
    # compute the movie feature
movie_combine_layer, movie_combine_layer_flat = get_movie_feature_layer(movie_id_embed_layer,
movie_categories_embed_layer,
dropout_layer)
    # compute the rating; note the two schemes give the inference op different names, and the recommender later fetches the tensor by name
with tf.name_scope("inference"):
        # scheme 1: feed the user and movie features through a dense layer that outputs a single value
# inference_layer = tf.concat([user_combine_layer_flat, movie_combine_layer_flat], 1) #(?, 200)
# inference = tf.layers.dense(inference_layer, 1,
# kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
# kernel_regularizer=tf.nn.l2_loss, name="inference")
        # scheme 2: simply multiply the user and movie features to get a predicted rating
# inference = tf.matmul(user_combine_layer_flat, tf.transpose(movie_combine_layer_flat))
inference = tf.reduce_sum(user_combine_layer_flat * movie_combine_layer_flat, axis=1)
inference = tf.expand_dims(inference, axis=1)
with tf.name_scope("loss"):
        # MSE loss, regressing the computed value onto the rating
cost = tf.losses.mean_squared_error(targets, inference)
loss = tf.reduce_mean(cost)
    # optimize the loss
global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = tf.train.AdamOptimizer(lr)
gradients = optimizer.compute_gradients(loss) # cost
train_op = optimizer.apply_gradients(gradients, global_step=global_step)
print(inference)
def get_batches(Xs, ys, batch_size):
"""
    Yield mini-batches.
:param Xs:
:param ys:
:param batch_size:
:return:
"""
for start in range(0, len(Xs), batch_size):
end = min(start + batch_size, len(Xs))
yield Xs[start:end], ys[start:end]
"""训练网络"""
losses = {'train': [], 'test': []}
with tf.Session(graph=train_graph) as sess:
timestamp = str(int(time.time()))
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
for epoch_i in range(num_epochs):
        # split the data set into training and test sets
train_X, test_X, train_y, test_y = train_test_split(features,
targets_values,
test_size=0.2,
random_state=0)
train_batches = get_batches(train_X, train_y, batch_size)
test_batches = get_batches(test_X, test_y, batch_size)
        # training iterations; record the training loss
for batch_i in range(len(train_X) // batch_size):
x, y = next(train_batches)
categories = np.zeros([batch_size, 18])
for i in range(batch_size):
categories[i] = x.take(6, 1)[i]
titles = np.zeros([batch_size, sentences_size])
for i in range(batch_size):
titles[i] = x.take(5, 1)[i]
feed = {
uid: np.reshape(x.take(0, 1), [batch_size, 1]),
user_gender: np.reshape(x.take(2, 1), [batch_size, 1]),
user_age: np.reshape(x.take(3, 1), [batch_size, 1]),
user_job: np.reshape(x.take(4, 1), [batch_size, 1]),
movie_id: np.reshape(x.take(1, 1), [batch_size, 1]),
movie_categories: categories, # x.take(6,1)
movie_titles: titles, # x.take(5,1)
targets: np.reshape(y, [batch_size, 1]),
dropout_keep_prob: dropout_keep, # dropout_keep
lr: learning_rate}
step, train_loss, _ = sess.run([global_step, loss, train_op], feed) # cost
losses['train'].append(train_loss)
# Show every <show_every_n_batches> batches
if (epoch_i * (len(train_X) // batch_size) + batch_i) % show_every_n_batches == 0:
time_str = datetime.datetime.now().isoformat()
print('{}: Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
time_str,
epoch_i,
batch_i,
(len(train_X) // batch_size),
train_loss))
        # iterate over the test data
for batch_i in range(len(test_X) // batch_size):
x, y = next(test_batches)
categories = np.zeros([batch_size, 18])
for i in range(batch_size):
categories[i] = x.take(6, 1)[i]
titles = np.zeros([batch_size, sentences_size])
for i in range(batch_size):
titles[i] = x.take(5, 1)[i]
feed = {
uid: np.reshape(x.take(0, 1), [batch_size, 1]),
user_gender: np.reshape(x.take(2, 1), [batch_size, 1]),
user_age: np.reshape(x.take(3, 1), [batch_size, 1]),
user_job: np.reshape(x.take(4, 1), [batch_size, 1]),
movie_id: np.reshape(x.take(1, 1), [batch_size, 1]),
movie_categories: categories, # x.take(6,1)
movie_titles: titles, # x.take(5,1)
targets: np.reshape(y, [batch_size, 1]),
dropout_keep_prob: 1,
lr: learning_rate}
step, test_loss = sess.run([global_step, loss], feed) # cost
            # record the test loss
losses['test'].append(test_loss)
time_str = datetime.datetime.now().isoformat()
if (epoch_i * (len(test_X) // batch_size) + batch_i) % show_every_n_batches == 0:
print('{}: Epoch {:>3} Batch {:>4}/{} test_loss = {:.3f}'.format(
time_str,
epoch_i,
batch_i,
(len(test_X) // batch_size),
test_loss))
# Save Model
saver.save(sess, save_dir) # , global_step=epoch_i
print('Model Trained and Saved')
save_params(save_dir)
load_dir = load_params()
def get_tensors(loaded_graph):
"""
    Fetch the tensors that the recommendation functions below need.
:param loaded_graph:
:return:
"""
uid = loaded_graph.get_tensor_by_name("uid:0")
user_gender = loaded_graph.get_tensor_by_name("user_gender:0")
user_age = loaded_graph.get_tensor_by_name("user_age:0")
user_job = loaded_graph.get_tensor_by_name("user_job:0")
movie_id = loaded_graph.get_tensor_by_name("movie_id:0")
movie_categories = loaded_graph.get_tensor_by_name("movie_categories:0")
movie_titles = loaded_graph.get_tensor_by_name("movie_titles:0")
targets = loaded_graph.get_tensor_by_name("targets:0")
dropout_keep_prob = loaded_graph.get_tensor_by_name("dropout_keep_prob:0")
lr = loaded_graph.get_tensor_by_name("LearningRate:0")
    # the two rating schemes expose the inference tensor under different names
# inference = loaded_graph.get_tensor_by_name("inference/inference/BiasAdd:0")
inference = loaded_graph.get_tensor_by_name("inference/ExpandDims:0")
movie_combine_layer_flat = loaded_graph.get_tensor_by_name("movie_fc/flatten/Reshape:0")
user_combine_layer_flat = loaded_graph.get_tensor_by_name("user_fc/flatten/Reshape:0")
return uid, user_gender, user_age, user_job, movie_id, movie_categories, movie_titles, targets, lr, dropout_keep_prob, inference, movie_combine_layer_flat, user_combine_layer_flat
def rating_movie(user_id_val, movie_id_val):
"""
    Predict the rating a given user would give a given movie.
:param user_id_val:
:param movie_id_val:
:return:
"""
loaded_graph = tf.Graph() #
with tf.Session(graph=loaded_graph) as sess: #
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
uid, user_gender, user_age, user_job, movie_id, movie_categories, movie_titles, targets, lr, dropout_keep_prob, inference, _, __ = get_tensors(
loaded_graph) # loaded_graph
categories = np.zeros([1, 18])
categories[0] = movies.values[movieid2idx[movie_id_val]][2]
titles = np.zeros([1, sentences_size])
titles[0] = movies.values[movieid2idx[movie_id_val]][1]
feed = {
uid: np.reshape(users.values[user_id_val - 1][0], [1, 1]),
user_gender: np.reshape(users.values[user_id_val - 1][1], [1, 1]),
user_age: np.reshape(users.values[user_id_val - 1][2], [1, 1]),
user_job: np.reshape(users.values[user_id_val - 1][3], [1, 1]),
movie_id: np.reshape(movies.values[movieid2idx[movie_id_val]][0], [1, 1]),
movie_categories: categories, # x.take(6,1)
movie_titles: titles, # x.take(5,1)
dropout_keep_prob: 1}
# Get Prediction
inference_val = sess.run([inference], feed)
return inference_val
print(rating_movie(234, 1401))
"""生成Movie特征矩阵将训练好的电影特征组合成电影特征矩阵并保存到本地"""
loaded_graph = tf.Graph() #
movie_matrics = []
with tf.Session(graph=loaded_graph) as sess: #
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
uid, user_gender, user_age, user_job, movie_id, movie_categories, movie_titles, targets, lr, dropout_keep_prob, _, movie_combine_layer_flat, __ = get_tensors(
loaded_graph) # loaded_graph
for item in movies.values:
categories = np.zeros([1, 18])
categories[0] = item.take(2)
titles = np.zeros([1, sentences_size])
titles[0] = item.take(1)
feed = {
movie_id: np.reshape(item.take(0), [1, 1]),
movie_categories: categories, # x.take(6,1)
movie_titles: titles, # x.take(5,1)
dropout_keep_prob: 1}
movie_combine_layer_flat_val = sess.run([movie_combine_layer_flat], feed)
movie_matrics.append(movie_combine_layer_flat_val)
pickle.dump((np.array(movie_matrics).reshape(-1, 200)), open('movie_matrics.p', 'wb'))
movie_matrics = pickle.load(open('movie_matrics.p', mode='rb'))
"""生成User特征矩阵将训练好的用户特征组合成用户特征矩阵并保存到本地"""
loaded_graph = tf.Graph() #
users_matrics = []
with tf.Session(graph=loaded_graph) as sess: #
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
uid, user_gender, user_age, user_job, movie_id, movie_categories, movie_titles, targets, lr, dropout_keep_prob, _, __, user_combine_layer_flat = get_tensors(
loaded_graph) # loaded_graph
for item in users.values:
feed = {
uid: np.reshape(item.take(0), [1, 1]),
user_gender: np.reshape(item.take(1), [1, 1]),
user_age: np.reshape(item.take(2), [1, 1]),
user_job: np.reshape(item.take(3), [1, 1]),
dropout_keep_prob: 1}
user_combine_layer_flat_val = sess.run([user_combine_layer_flat], feed)
users_matrics.append(user_combine_layer_flat_val)
pickle.dump((np.array(users_matrics).reshape(-1, 200)), open('users_matrics.p', 'wb'))
users_matrics = pickle.load(open('users_matrics.p', mode='rb'))
"""开始推荐电影使用生产的用户特征矩阵和电影特征矩阵做电影推荐"""
"""
1、推荐同类型的电影
思路是计算当前看的电影特征向量与整个电影特征矩阵的余弦相似度,取相似度最大的top_k个,这里加了些随机选择在里面,保证每次的推荐稍稍有些不同。
"""
def recommend_same_type_movie(movie_id_val, top_k=20):
loaded_graph = tf.Graph() #
with tf.Session(graph=loaded_graph) as sess: #
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
norm_movie_matrics = tf.sqrt(tf.reduce_sum(tf.square(movie_matrics), 1, keepdims=True))
normalized_movie_matrics = movie_matrics / norm_movie_matrics
        # recommend movies of the same type
probs_embeddings = (movie_matrics[movieid2idx[movie_id_val]]).reshape([1, 200])
probs_similarity = tf.matmul(probs_embeddings, tf.transpose(normalized_movie_matrics))
sim = (probs_similarity.eval())
# results = (-sim[0]).argsort()[0:top_k]
# print(results)
print("您看的电影是:{}".format(movies_orig[movieid2idx[movie_id_val]]))
print("以下是给您的推荐:")
p = np.squeeze(sim)
p[np.argsort(p)[:-top_k]] = 0
p = p / np.sum(p)
results = set()
while len(results) != 5:
c = np.random.choice(3883, 1, p=p)[0]
results.add(c)
for val in results:
print(val)
print(movies_orig[val])
return results
print(recommend_same_type_movie(1401, 20))
"""
2、推荐您喜欢的电影
思路是使用用户特征向量与电影特征矩阵计算所有电影的评分,取评分最高的top_k个,同样加了些随机选择部分。
"""
def recommend_your_favorite_movie(user_id_val, top_k=10):
loaded_graph = tf.Graph() #
with tf.Session(graph=loaded_graph) as sess: #
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
        # recommend movies the user may like
probs_embeddings = (users_matrics[user_id_val - 1]).reshape([1, 200])
probs_similarity = tf.matmul(probs_embeddings, tf.transpose(movie_matrics))
sim = (probs_similarity.eval())
# print(sim.shape)
# results = (-sim[0]).argsort()[0:top_k]
# print(results)
# sim_norm = probs_norm_similarity.eval()
# print((-sim_norm[0]).argsort()[0:top_k])
print("以下是给您的推荐:")
p = np.squeeze(sim)
p[np.argsort(p)[:-top_k]] = 0
p = p / np.sum(p)
results = set()
while len(results) != 5:
c = np.random.choice(3883, 1, p=p)[0]
results.add(c)
for val in results:
print(val)
print(movies_orig[val])
return results
print(recommend_your_favorite_movie(234, 10))
"""
3、看过这个电影的人还看了(喜欢)哪些电影
- 首先选出喜欢某个电影的top_k个人,得到这几个人的用户特征向量。
- 然后计算这几个人对所有电影的评分
- 选择每个人评分最高的电影作为推荐
- 同样加入了随机选择
"""
def recommend_other_favorite_movie(movie_id_val, top_k=20):
loaded_graph = tf.Graph() #
with tf.Session(graph=loaded_graph) as sess: #
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
probs_movie_embeddings = (movie_matrics[movieid2idx[movie_id_val]]).reshape([1, 200])
probs_user_favorite_similarity = tf.matmul(probs_movie_embeddings, tf.transpose(users_matrics))
favorite_user_id = np.argsort(probs_user_favorite_similarity.eval())[0][-top_k:]
# print(normalized_users_matrics.eval().shape)
# print(probs_user_favorite_similarity.eval()[0][favorite_user_id])
# print(favorite_user_id.shape)
print("您看的电影是:{}".format(movies_orig[movieid2idx[movie_id_val]]))
print("喜欢看这个电影的人是:{}".format(users_orig[favorite_user_id - 1]))
probs_users_embeddings = (users_matrics[favorite_user_id - 1]).reshape([-1, 200])
probs_similarity = tf.matmul(probs_users_embeddings, tf.transpose(movie_matrics))
sim = (probs_similarity.eval())
# results = (-sim[0]).argsort()[0:top_k]
# print(results)
# print(sim.shape)
# print(np.argmax(sim, 1))
p = np.argmax(sim, 1)
print("喜欢看这个电影的人还喜欢看:")
results = set()
while len(results) != 5:
c = p[random.randrange(top_k)]
results.add(c)
for val in results:
print(val)
print(movies_orig[val])
return results
print(recommend_other_favorite_movie(1401, 20))
```
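The same-type recommendation above is, at its core, cosine similarity between one movie vector and the whole movie matrix. A framework-free NumPy sketch of that step (assuming `movie_matrics` is the (n_movies, 200) matrix saved above; unlike the TF code, this normalizes both sides, giving true cosine similarity):

```python
import numpy as np

def top_k_similar(movie_matrix: np.ndarray, idx: int, top_k: int = 20) -> np.ndarray:
    # normalize the rows so the dot product becomes cosine similarity
    norms = np.sqrt((movie_matrix ** 2).sum(axis=1, keepdims=True))
    normalized = movie_matrix / norms
    sims = normalized @ normalized[idx]
    order = np.argsort(-sims)           # indices sorted by descending similarity
    return order[order != idx][:top_k]  # drop the query movie itself
```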
|
{
"source": "JerryCatLeung/leetcode",
"score": 4
}
|
#### File: leetcode/python/024_Swap_Nodes_in_Pairs.py
```python
# minimal ListNode definition (normally provided by the LeetCode judge; added so the file runs standalone)
class ListNode(object):
    def __init__(self, x):
        self.val = x
        self.next = None
class Solution(object):
# def swapPairs(self, head):
# current = last = last2 = head
# while current is not None:
# nex = current.next
# if current == last.next:
# last.next = nex
# current.next = last
# if last == head:
# head = current
# else:
# last2.next = current
# last2 = last
# last = nex
# current = nex
# return head
def swapPairs(self, head):
dummyHead = ListNode(-1)
dummyHead.next = head
prev, p = dummyHead, head
        while p is not None and p.next is not None:
q, r = p.next, p.next.next
prev.next = q
q.next = p
p.next = r
prev = p
p = r
return dummyHead.next
```
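A tiny round-trip check for `swapPairs`, using the `ListNode` defined above (a sketch, not part of the original solution):

```python
def build(vals):
    dummy = ListNode(-1)
    tail = dummy
    for v in vals:
        tail.next = ListNode(v)
        tail = tail.next
    return dummy.next

def to_list(node):
    out = []
    while node:
        out.append(node.val)
        node = node.next
    return out

head = Solution().swapPairs(build([1, 2, 3, 4]))
print(to_list(head))  # [2, 1, 4, 3]
```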
#### File: leetcode/python/034_Search_for_a_Range.py
```python
class Solution(object):
def searchRange(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
length = len(nums)
if length == 0:
return [-1, -1]
        # lo/hi instead of min/max, to avoid shadowing the builtins
        lo = 0
        hi = length - 1
        while lo <= hi:
            pos = (lo + hi) // 2  # integer division; '/' would give a float index in Python 3
            if nums[pos] > target:
                hi = pos - 1
            elif nums[pos] < target:
                lo = pos + 1
            else:
                # when nums[pos] == target,
                # scan the remaining window for the first and last occurrence
                for i in range(lo, hi + 1):
                    if nums[i] == target:
                        if lo < i and nums[lo] != nums[i]:
                            lo = i
                        hi = i
                return [lo, hi]
        return [-1, -1]
```
#### File: leetcode/python/043_Multiply_Strings.py
```python
class Solution(object):
# def multiply(self, num1, num2):
# """
# :type num1: str
# :type num2: str
# :rtype: str
# """
# res = ''
# curr, pos = 0, 0
    # if len(num1) > len(num2):
# num1, num2 = num2, num1
# ls1, ls2 = len(num1), len(num2)
# mid = []
# for i in reversed(range(ls1)):
# curr = 0
# fact = ''
# for j in reversed(range(ls2)):
# curr = curr + int(num1[i]) * int(num2[j])
# fact = str(curr % 10) + fact
# curr /= 10
# if curr > 0:
# fact = str(curr) + fact
# if int(fact) == 0:
# pass
# else:
# print fact
# mid.append(fact + '0' * pos)
# pos += 1
# res = self.add_strings(mid)
# return res
#
#
# def add_strings(self, s_list):
# if len(s_list) == 0:
# return '0'
# res = ''
# curr, pos = 0, 0
# max_ls = max([len(t) for t in s_list])
# while pos < max_ls:
# for s in s_list:
# if len(s) <= pos:
# continue
# curr += int(s[len(s) - pos - 1])
# res = str(curr % 10) + res
# curr /= 10
# pos += 1
# if curr > 0:
# res = str(curr) + res
# return res
def multiply(self, num1, num2):
if num1 == '0' or num2 == '0':
return '0'
res = ''
        ls1, ls2 = len(num1), len(num2)
ls = ls1 + ls2
# list stores int
arr = [0] * ls
for i in reversed(range(ls1)):
for j in reversed(range(ls2)):
# store the direct results of multiply two ints
arr[i + j + 1] += int(num1[i]) * int(num2[j])
for i in reversed(range(1, ls)):
# digital plus
            arr[i - 1] += arr[i] // 10
arr[i] %= 10
pos = 0
# to string
if arr[pos] == 0:
pos += 1
while pos < ls:
res = res + str(arr[pos])
pos += 1
return res
if __name__ == '__main__':
s = Solution()
    print(s.multiply("98", "9"))
```
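A quick way to gain confidence in the digit-array approach is to spot-check it against Python's arbitrary-precision integers (a test sketch, not part of the original file):

```python
import random

s = Solution()
for _ in range(100):
    a, b = random.randint(0, 10 ** 12), random.randint(0, 10 ** 12)
    assert s.multiply(str(a), str(b)) == str(a * b)
print("all checks passed")
```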
#### File: leetcode/python/046_Permutations.py
```python
class Solution:
# import itertools
# def permute(self, nums):
# """
# :type nums: List[int]
# :rtype: List[List[int]]
# """
# result = itertools.permutations(nums)
# result = [list(t) for t in result]
# return result
def permute(self, nums):
        # DFS with swapping
res = []
if len(nums) == 0:
return res
self.get_permute(res, nums, 0)
return res
def get_permute(self, res, nums, index):
if index == len(nums):
res.append(list(nums))
return
for i in range(index, len(nums)):
nums[i], nums[index] = nums[index], nums[i]
# s(n) = 1 + s(n-1)
self.get_permute(res, nums, index + 1)
nums[i], nums[index] = nums[index], nums[i]
# def permute(self, nums):
# # iterative solution
# res = [[]]
# for i in range(len(nums)):
# cache = set()
# while len(res[0]) == i:
# curr = res.pop(0)
# for j in range(len(curr) + 1):
# # generate new n permutations from n -1 permutations
# new_perm = curr[:j] + [nums[i]] + curr[j:]
# stemp = ''.join(map(str, new_perm))
# if stemp not in cache:
# cache.add(stemp)
# res.append(new_perm)
# return res
```
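Usage sketch for the swap-based DFS; note that it emits permutations in swap order rather than lexicographic order:

```python
s = Solution()
print(s.permute([1, 2, 3]))
# [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]]
```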
#### File: leetcode/python/049_Group_Anagrams.py
```python
class Solution(object):
# def groupAnagrams(self, strs):
# """
# :type strs: List[str]
# :rtype: List[List[str]]
# """
# hash = {}
# for s in strs:
# key = self.hash_key(s)
# try:
# hash[key].append(s)
# except KeyError:
# hash[key] = [s]
# for k, v in hash.items():
# if len(v) > 1:
# # sort
# v.sort()
# return hash.values()
#
# def hash_key(self, s):
# # hash string with 26 length array
# table = [0] * 26
# for ch in s:
# index = ord(ch) - ord('a')
# table[index] += 1
# return str(table)
    def groupAnagrams(self, strs):
        strs.sort()
        groups = {}
        for s in strs:
            key = self.hash_key(s)
            try:
                groups[key].append(s)
            except KeyError:
                groups[key] = [s]
        return groups.values()
def hash_key(self, s):
# hash string with 26 length array
table = [0] * 26
for ch in s:
index = ord(ch) - ord('a')
table[index] += 1
return str(table)
```
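Usage sketch; `hash_key` assumes lowercase ASCII input, and the groups are sorted here only to make the output deterministic:

```python
s = Solution()
groups = s.groupAnagrams(["eat", "tea", "tan", "ate", "nat", "bat"])
print(sorted(sorted(g) for g in groups))
# [['ate', 'eat', 'tea'], ['bat'], ['nat', 'tan']]
```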
#### File: leetcode/python/052_N-Queens II.py
```python
class Solution(object):
# def totalNQueens(self, n):
# """
# :type n: int
# :rtype: int
# """
# if n == 0:
# return 0
# res = [0]
# board = [['.'] * n for t in range(n)]
# self.do_solveNQueens(res, board, n)
# return res[0]
#
# def do_solveNQueens(self, res, board, num):
# if num == 0:
# res[0] += 1
# return
# ls = len(board)
# pos = ls - num
# check = [True] * ls
# for i in range(pos):
# for j in range(ls):
# if board[i][j] == 'Q':
# check[j] = False
# step = pos - i
# if j + step < ls:
# check[j + step] = False
# if j - step >= 0:
# check[j - step] = False
# break
# if pos == 0:
# # mirror on the first row
# for j in range(ls / 2):
# if check[j]:
# board[pos][j] = 'Q'
# self.do_solveNQueens(res, board, num - 1)
# board[pos][j] = '.'
# res[0] *= 2
# if ls % 2 != 0:
# if check[ls / 2]:
# board[pos][ls / 2] = 'Q'
# self.do_solveNQueens(res, board, num - 1)
# board[pos][ls / 2] = '.'
# else:
# for j in range(ls):
# if check[j]:
# board[pos][j] = 'Q'
# self.do_solveNQueens(res, board, num - 1)
# board[pos][j] = '.'
def __init__(self):
self.count = 0
def totalNQueens(self, n):
self.dfs(0, n, 0, 0, 0)
return self.count
def dfs(self, row, n, column, diag, antiDiag):
# https://leetcode.com/discuss/89951/share-my-java-code-beats-97-83%25-run-times
if row == n:
self.count += 1
return
for index in range(n):
# column check
isColSafe = (1 << index) & column == 0
# diagonal, all nodes have the same n - 1 + row - index
isDigSafe = (1 << (n - 1 + row - index)) & diag == 0
# anti diagonal, all nodes have the same row + index
isAntiDiagSafe = (1 << (row + index)) & antiDiag == 0
if isAntiDiagSafe and isColSafe and isDigSafe:
self.dfs(row + 1, n, (1 << index) | column,
(1 << (n - 1 + row - index)) | diag,
(1 << (row + index)) | antiDiag)
if __name__ == '__main__':
# begin
s = Solution()
    print(s.totalNQueens(4))
```
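The bitmask trick packs each constraint into one integer: bit `index` of `column`, bit `n - 1 + row - index` of `diag` (constant along a diagonal), and bit `row + index` of `antiDiag` (constant along an anti-diagonal). A sketch checking the counts against the known sequence:

```python
# Known totals for n = 1..8: 1, 0, 0, 2, 10, 4, 40, 92.
for n in range(1, 9):
    print(n, Solution().totalNQueens(n))  # fresh instance: count is per-object state
```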
#### File: leetcode/python/067_Add_Binary.py
```python
class Solution(object):
# def addBinary(self, a, b):
# """
# :type a: str
# :type b: str
# :rtype: str
# """
# res = ''
# lsa, lsb = len(a), len(b)
# pos = -1
# plus = 0
# while (lsa + pos) >= 0 and (lsb + pos) >= 0:
# curr = int(a[pos]) + int(b[pos]) + plus
# if curr >= 2:
# plus = 1
# curr %= 2
# else:
# plus = 0
# res = str(curr) + res
# pos -= 1
# if lsa + pos >= 0:
# while (lsa + pos) >= 0:
# curr = int(a[pos]) + plus
# if curr >= 2:
# plus = 1
# curr %= 2
# else:
# plus = 0
# res = str(curr) + res
# pos -= 1
# if lsb + pos >= 0:
# while (lsb + pos) >= 0:
# curr = int(b[pos]) + plus
# if curr >= 2:
# plus = 1
# curr %= 2
# else:
# plus = 0
# res = str(curr) + res
# pos -= 1
# if plus == 1:
# res = '1' + res
# return res
def addBinary(self, a, b):
res = ''
lsa, lsb = len(a), len(b)
pos, plus, curr = -1, 0, 0
# plus a[pos], b[pos] and curr % 2
while (lsa + pos) >= 0 or (lsb + pos) >= 0:
if (lsa + pos) >= 0:
curr += int(a[pos])
if (lsb + pos) >= 0:
curr += int(b[pos])
res = str(curr % 2) + res
            curr //= 2
pos -= 1
if curr == 1:
res = '1' + res
return res
```
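Usage sketch for the merged single-loop version:

```python
s = Solution()
print(s.addBinary("1010", "1011"))  # "10101"
assert s.addBinary("11", "1") == "100"
```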
#### File: leetcode/python/081_Search_in_Rotated_Sorted_Array_II.py
```python
class Solution(object):
def search(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: bool
"""
def get(start, end):
if start > end:
return False
            mid = (start + end) // 2
# handle duplicate
while mid < end and nums[mid + 1] == nums[mid]:
mid += 1
while start < mid and nums[start + 1] == nums[start]:
start += 1
if nums[mid] == target:
return True
elif mid == end:
return get(start, mid - 1)
elif start == mid:
return get(mid + 1, end)
elif nums[mid] >= nums[start]:
# First half is sorted
if target >= nums[start] and target < nums[mid]:
return get(start, mid - 1)
else:
return get(mid + 1, end)
elif nums[mid] <= nums[end]:
# Second half is sorted
if target > nums[mid] and target <= nums[end]:
return get(mid + 1, end)
else:
return get(start, mid - 1)
return get(0, len(nums) - 1)
```
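Usage sketch; with duplicates the worst case is still O(n), because the mid/start adjustment loops may walk over long runs of equal values:

```python
s = Solution()
print(s.search([2, 5, 6, 0, 0, 1, 2], 0))  # True
print(s.search([2, 5, 6, 0, 0, 1, 2], 3))  # False
```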
|
{
"source": "JerryCCHuang/robotframework-historic",
"score": 3
}
|
#### File: robotframework-historic/robotframework_historic/rfhistoricsetup.py
```python
import mysql.connector
import logging
def rfhistoric_setup(opts):
# connect to database
print("INFO: Connecting to dB")
mydb = connect_to_mysql(opts.host, opts.username, opts.password)
# create new user
obj = mydb.cursor()
print("INFO: Creating superuser with local access")
try:
obj.execute("CREATE USER IF NOT EXISTS 'superuser'@'localhost' IDENTIFIED BY '<PASSWORD>';")
obj.execute("GRANT ALL PRIVILEGES ON *.* TO 'superuser'@'localhost' WITH GRANT OPTION;")
except Exception as e:
print(str(e))
print("INFO: Creating superuser with remote access")
try:
obj.execute("CREATE USER 'superuser'@'%' IDENTIFIED BY '<PASSWORD>';")
obj.execute("GRANT ALL PRIVILEGES ON *.* TO 'superuser'@'%' WITH GRANT OPTION;")
except Exception as e:
print(str(e))
print("INFO: Reloading grant table")
try:
obj.execute("FLUSH PRIVILEGES;")
except Exception as e:
print(str(e))
print("INFO: Creating robothistoric dB")
try:
obj.execute("CREATE DATABASE IF NOT EXISTS robothistoric;")
except Exception as e:
print(str(e))
print("INFO: Creating TB_PROJECT table")
    rfdb = connect_to_mysql_db(opts.host, opts.username, opts.password, "robothistoric")
try:
rfobj = rfdb.cursor()
rfobj.execute("CREATE TABLE IF NOT EXISTS TB_PROJECT ( Project_Id INT NOT NULL auto_increment primary key, Project_Name TEXT, Project_Desc TEXT, Project_Image TEXT, Created_Date DATETIME, Last_Updated DATETIME, Total_Executions INT, Recent_Pass_Perc FLOAT, Overall_Pass_Perc FLOAT);")
except Exception as e:
print(str(e))
commit_and_close_db(mydb)
def connect_to_mysql(host, user, pwd):
try:
mydb = mysql.connector.connect(
host=host,
user=user,
            passwd=pwd
)
return mydb
except Exception as e:
print(e)
def connect_to_mysql_db(host, user, pwd, db):
try:
mydb = mysql.connector.connect(
host=host,
user=user,
            passwd=pwd,
database=db
)
return mydb
except Exception as e:
print(e)
def use_db(cursor, db_name):
cursor.execute("USE %s;" % db_name)
def commit_and_close_db(db):
db.commit()
db.close()
```
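A minimal driver sketch; `opts` stands in for the parsed CLI arguments, and every value below (host, user, password placeholder) is illustrative:

```python
from types import SimpleNamespace

# Hypothetical options object; real values come from the CLI parser.
opts = SimpleNamespace(host="localhost", username="root", password="<PASSWORD>")
rfhistoric_setup(opts)
```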
|
{
"source": "jerrycearley/MatchZoo",
"score": 3
}
|
#### File: contrib/layers/spatial_gru.py
```python
import typing
import tensorflow as tf
from keras import backend as K
# from keras.engine import Layer
from keras.layers import Layer  # changed from the keras.engine import above for TensorFlow-bundled Keras compatibility
from keras.layers import Permute
from keras.layers import Reshape
from keras import activations
from keras import initializers
class SpatialGRU(Layer):
"""
Spatial GRU layer.
:param units: Number of SpatialGRU units.
:param activation: Activation function to use. Default:
hyperbolic tangent (`tanh`). If you pass `None`, no
activation is applied (ie. "linear" activation: `a(x) = x`).
:param recurrent_activation: Activation function to use for
the recurrent step. Default: sigmoid (`sigmoid`).
If you pass `None`, no activation is applied (ie. "linear"
activation: `a(x) = x`).
:param kernel_initializer: Initializer for the `kernel` weights
matrix, used for the linear transformation of the inputs.
:param recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix, used for the linear transformation of the
recurrent state.
:param direction: Scanning direction. `lt` (i.e., left top)
indicates the scanning from left top to right bottom, and
`rb` (i.e., right bottom) indicates the scanning from
right bottom to left top.
:param kwargs: Standard layer keyword arguments.
Examples:
>>> import matchzoo as mz
>>> layer = mz.contrib.layers.SpatialGRU(units=10,
... direction='lt')
>>> num_batch, channel, left_len, right_len = 5, 5, 3, 2
>>> layer.build([num_batch, channel, left_len, right_len])
"""
def __init__(
self,
units: int = 10,
activation: str = 'tanh',
recurrent_activation: str = 'sigmoid',
kernel_initializer: str = 'glorot_uniform',
recurrent_initializer: str = 'orthogonal',
direction: str = 'lt',
**kwargs
):
""":class:`SpatialGRU` constructor."""
super().__init__(**kwargs)
self._units = units
self._activation = activations.get(activation)
self._recurrent_activation = activations.get(recurrent_activation)
self._kernel_initializer = initializers.get(kernel_initializer)
self._recurrent_initializer = initializers.get(recurrent_initializer)
self._direction = direction
def build(self, input_shape: typing.Any):
"""
Build the layer.
:param input_shape: the shapes of the input tensors.
"""
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# L = `input_left` sequence length
# R = `input_right` sequence length
# C = number of channels
# U = number of units
# input_shape = [B, C, L, R]
self._batch_size = input_shape[0]
self._channel = input_shape[1]
self._input_dim = self._channel + 3 * self._units
self._text1_maxlen = input_shape[2]
self._text2_maxlen = input_shape[3]
self._recurrent_step = self._text1_maxlen * self._text2_maxlen
# W = [3*U+C, 7*U]
self._W = self.add_weight(
name='W',
shape=(self._input_dim, self._units * 7),
initializer=self._kernel_initializer,
trainable=True
)
# U = [3*U, U]
self._U = self.add_weight(
name='U',
shape=(self._units * 3, self._units),
initializer=self._recurrent_initializer,
trainable=True
)
# bias = [8*U,]
self._bias = self.add_weight(
name='bias',
shape=(self._units * 8,),
initializer='zeros',
trainable=True
)
# w_rl, w_rt, w_rd = [B, 3*U]
self._wr = self._W[:, :self._units * 3]
# b_rl, b_rt, b_rd = [B, 3*U]
self._br = self._bias[:self._units * 3]
# w_zi, w_zl, w_zt, w_zd = [B, 4*U]
self._wz = self._W[:, self._units * 3: self._units * 7]
# b_zi, b_zl, b_zt, b_zd = [B, 4*U]
self._bz = self._bias[self._units * 3: self._units * 7]
# w_ij = [C, U]
self._w_ij = self.add_weight(
name='W_ij',
shape=(self._channel, self._units),
initializer=self._recurrent_initializer,
trainable=True
)
# b_ij = [7*U]
self._b_ij = self._bias[self._units * 7:]
super(SpatialGRU, self).build(input_shape)
def softmax_by_row(self, z: typing.Any) -> tuple:
"""Conduct softmax on each dimension across the four gates."""
# z_transform: [B, U, 4]
z_transform = Permute((2, 1))(Reshape((4, self._units))(z))
size = [-1, 1, -1]
# Perform softmax on each slice
for i in range(0, self._units):
begin = [0, i, 0]
# z_slice: [B, 1, 4]
z_slice = tf.slice(z_transform, begin, size)
if i == 0:
z_s = tf.nn.softmax(z_slice)
else:
z_s = tf.concat([z_s, tf.nn.softmax(z_slice)], 1)
# zi, zl, zt, zd: [B, U]
zi, zl, zt, zd = tf.unstack(z_s, axis=2)
return zi, zl, zt, zd
def calculate_recurrent_unit(
self,
inputs: typing.Any,
states: typing.Any,
step: int,
h: typing.Any,
) -> tuple:
"""
Calculate recurrent unit.
:param inputs: A TensorArray which contains interaction
between left text and right text.
:param states: A TensorArray which stores the hidden state
of every step.
:param step: Recurrent step.
:param h: Hidden state from last operation.
"""
# Get index i, j
i = tf.math.floordiv(step, tf.constant(self._text2_maxlen))
j = tf.math.mod(step, tf.constant(self._text2_maxlen))
# Get hidden state h_diag, h_top, h_left
# h_diag, h_top, h_left = [B, U]
h_diag = states.read(i * (self._text2_maxlen + 1) + j)
h_top = states.read(i * (self._text2_maxlen + 1) + j + 1)
h_left = states.read((i + 1) * (self._text2_maxlen + 1) + j)
# Get interaction between word i, j: s_ij
# s_ij = [B, C]
s_ij = inputs.read(step)
# Concatenate h_top, h_left, h_diag, s_ij
# q = [B, 3*U+C]
q = tf.concat([tf.concat([h_top, h_left], 1),
tf.concat([h_diag, s_ij], 1)], 1)
# Calculate reset gate
# r = [B, 3*U]
r = self._recurrent_activation(
self._time_distributed_dense(self._wr, q, self._br))
# Calculate updating gate
# z: [B, 4*U]
z = self._time_distributed_dense(self._wz, q, self._bz)
# Perform softmax
# zi, zl, zt, zd: [B, U]
zi, zl, zt, zd = self.softmax_by_row(z)
# Get h_ij_
# h_ij_ = [B, U]
h_ij_l = self._time_distributed_dense(self._w_ij, s_ij, self._b_ij)
h_ij_r = K.dot(r * (tf.concat([h_left, h_top, h_diag], 1)), self._U)
h_ij_ = self._activation(h_ij_l + h_ij_r)
# Calculate h_ij
# h_ij = [B, U]
h_ij = zl * h_left + zt * h_top + zd * h_diag + zi * h_ij_
# Write h_ij to states
states = states.write(((i + 1) * (self._text2_maxlen + 1) + j + 1),
h_ij)
h_ij.set_shape(h_top.get_shape())
return inputs, states, step + 1, h_ij
def call(self, inputs: list, **kwargs) -> typing.Any:
"""
The computation logic of SpatialGRU.
:param inputs: input tensors.
"""
batch_size = tf.shape(inputs)[0]
# h0 = [B, U]
self._bounder_state_h0 = tf.zeros([batch_size, self._units])
# input_x = [L, R, B, C]
input_x = tf.transpose(inputs, [2, 3, 0, 1])
if self._direction == 'rb':
            # input_x: [L, R, B, C], reversed along the L and R axes
input_x = tf.reverse(input_x, [0, 1])
elif self._direction != 'lt':
raise ValueError(f"Invalid direction. "
f"`{self._direction}` received. "
f"Must be in `lt`, `rb`.")
# input_x = [L*R*B, C]
input_x = tf.reshape(input_x, [-1, self._channel])
# input_x = L*R * [B, C]
input_x = tf.split(
axis=0,
num_or_size_splits=self._text1_maxlen * self._text2_maxlen,
value=input_x
)
# inputs = L*R * [B, C]
inputs = tf.TensorArray(
dtype=tf.float32,
size=self._text1_maxlen * self._text2_maxlen,
name='inputs'
)
inputs = inputs.unstack(input_x)
# states = (L+1)*(R+1) * [B, U]
states = tf.TensorArray(
dtype=tf.float32,
size=(self._text1_maxlen + 1) * (self._text2_maxlen + 1),
name='states',
clear_after_read=False
)
# Initialize states
for i in range(self._text2_maxlen + 1):
states = states.write(i, self._bounder_state_h0)
for i in range(1, self._text1_maxlen + 1):
states = states.write(i * (self._text2_maxlen + 1),
self._bounder_state_h0)
# Calculate h_ij
# h_ij = [B, U]
_, _, _, h_ij = tf.while_loop(
cond=lambda _0, _1, i, _3: tf.less(i, self._recurrent_step),
body=self.calculate_recurrent_unit,
loop_vars=(
inputs,
states,
tf.constant(0, dtype=tf.int32),
self._bounder_state_h0
),
parallel_iterations=1,
swap_memory=True
)
return h_ij
def compute_output_shape(self, input_shape: typing.Any) -> tuple:
"""
Calculate the layer output shape.
:param input_shape: the shapes of the input tensors.
"""
output_shape = [input_shape[0], self._units]
return tuple(output_shape)
@classmethod
def _time_distributed_dense(cls, w, x, b):
x = K.dot(x, w)
x = K.bias_add(x, b)
return x
```
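A minimal end-to-end sketch, assuming the TF1-era Keras stack the imports above target; the input follows the `[B, C, L, R]` convention from the docstring and the data is random:

```python
import numpy as np
from keras import layers, models

num_batch, channel, left_len, right_len = 4, 5, 3, 2
matching = layers.Input(shape=(channel, left_len, right_len))
h = SpatialGRU(units=10, direction='lt')(matching)
model = models.Model(inputs=matching, outputs=h)
out = model.predict(np.random.rand(num_batch, channel, left_len, right_len))
print(out.shape)  # expected: (4, 10)
```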
|
{
"source": "jerrycgc/vnpy",
"score": 2
}
|
#### File: examples/aorder/dash_plot.py
```python
from __future__ import division
import dash
import dash_core_components as dcc
import dash_html_components as html
"""
展示如何执行策略回测。
"""
from vnpy.trader.app.ctaStrategy.ctaBacktesting import BacktestingEngine, MINUTE_DB_NAME
import pandas as pd
from utils import plot_candles, plot_trade
import talib
import numpy as np
def generate_graph(df):
INCREASING_COLOR = '#17BECF'
DECREASING_COLOR = '#7F7F7F'
data = [dict(
type='candlestick',
open=df.Open,
high=df.High,
low=df.Low,
close=df.Close,
x=df.index,
yaxis='y2',
name='GS',
increasing=dict(line=dict(color=INCREASING_COLOR)),
decreasing=dict(line=dict(color=DECREASING_COLOR)),
)]
layout = dict()
fig = dict(data=data, layout=layout)
fig['layout'] = dict()
fig['layout']['plot_bgcolor'] = 'rgb(250, 250, 250)'
fig['layout']['xaxis'] = dict(rangeselector=dict(visible=True))
fig['layout']['yaxis'] = dict(domain=[0, 0.2], showticklabels=False)
fig['layout']['yaxis2'] = dict(domain=[0.2, 0.8])
fig['layout']['legend'] = dict(orientation='h', y=0.9, x=0.3, yanchor='bottom')
fig['layout']['margin'] = dict(t=40, b=40, r=40, l=40)
rangeselector = dict(
        visible=True,
x=0, y=0.9,
bgcolor='rgba(150, 200, 250, 0.4)',
font=dict(size=13),
buttons=list([
dict(count=1,
label='reset',
step='all'),
dict(count=1,
label='1yr',
step='year',
stepmode='backward'),
dict(count=3,
label='3 mo',
step='month',
stepmode='backward'),
dict(count=1,
label='1 mo',
step='month',
stepmode='backward'),
dict(step='all')
]))
fig['layout']['xaxis']['rangeselector'] = rangeselector
def movingaverage(interval, window_size=10):
window = np.ones(int(window_size)) / float(window_size)
return np.convolve(interval, window, 'same')
mv_y = movingaverage(df.Close)
mv_x = list(df.index)
# Clip the ends
mv_x = mv_x[5:-5]
mv_y = mv_y[5:-5]
fig['data'].append(dict(x=mv_x, y=mv_y, type='scatter', mode='lines',
line=dict(width=1),
marker=dict(color='#E377C2'),
yaxis='y2', name='Moving Average'))
colors = []
for i in range(len(df.Close)):
if i != 0:
if df.Close[i] > df.Close[i - 1]:
colors.append(INCREASING_COLOR)
else:
colors.append(DECREASING_COLOR)
else:
colors.append(DECREASING_COLOR)
fig['data'].append(dict(x=df.index, y=df.Volume,
marker=dict(color=colors),
type='bar', yaxis='y', name='Volume'))
def bbands(price, window_size=10, num_of_std=5):
rolling_mean = price.rolling(window=window_size).mean()
rolling_std = price.rolling(window=window_size).std()
upper_band = rolling_mean + (rolling_std * num_of_std)
lower_band = rolling_mean - (rolling_std * num_of_std)
return rolling_mean, upper_band, lower_band
bb_avg, bb_upper, bb_lower = bbands(df.Close)
fig['data'].append(dict(x=df.index, y=bb_upper, type='scatter', yaxis='y2',
line=dict(width=1),
marker=dict(color='#ccc'), hoverinfo='none',
legendgroup='Bollinger Bands', name='Bollinger Bands'))
fig['data'].append(dict(x=df.index, y=bb_lower, type='scatter', yaxis='y2',
line=dict(width=1),
marker=dict(color='#ccc'), hoverinfo='none',
legendgroup='Bollinger Bands', showlegend=False))
return fig
if __name__ == '__main__':
from vnpy.trader.app.ctaStrategy.strategy.strategyAtrRsi import AtrRsiStrategy
    # Create the backtesting engine
    engine = BacktestingEngine()
    # Set the engine to bar (K-line) backtesting mode
    engine.setBacktestingMode(engine.BAR_MODE)
    # Set the start date of the backtest data
    engine.setStartDate('20170601')
    # Set instrument parameters
    engine.setSlippage(0.2)  # one tick of the index future
    engine.setRate(0.3 / 10000)  # commission of 0.3 per 10000
    engine.setSize(300)  # contract size of the index future
    engine.setPriceTick(0.2)  # minimum price tick of the index future
    # Set the historical database to use
    engine.setDatabase(MINUTE_DB_NAME, 'rb0000')
    # Create the strategy object in the engine
    d = {'rsiLength': 4, 'atrLength': 25}
    engine.initStrategy(AtrRsiStrategy, d)
    engine.loadHistoryData()
    # Run the backtest
    engine.runBacktesting()
    # Show the backtest results
    engine.showBacktestingResult()
    # analysis: reload history data (the db cursor is consumed by the backtest run)
    engine.loadHistoryData()
orders = pd.DataFrame([i.__dict__ for i in engine.calculateBacktestingResult()['resultList']])
df = pd.DataFrame(list(engine.dbCursor)).rename(
columns={'open': 'Open', 'high': 'High', 'low': 'Low', 'close': 'Close', 'volume': 'Volume'}).set_index('datetime')
# df.index = map(lambda x: x.strftime("%Y%m%d %H:%M:%S"), df.datetime)
app = dash.Dash()
app.layout = html.Div(children=[
html.H1(children='Hello Dash'),
html.Div(children='''
Dash: A web application framework for Python.
'''),
dcc.Graph(
id='example-graph',
figure=generate_graph(df[:1000])
)
])
app.run_server(debug=True)
```
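The Bollinger band helper is easy to exercise on its own; a sketch on synthetic prices (hypothetical data, the real `df` comes from the backtest engine):

```python
import numpy as np
import pandas as pd

price = pd.Series(np.random.rand(100).cumsum() + 100)
mean = price.rolling(window=10).mean()
std = price.rolling(window=10).std()
upper, lower = mean + 5 * std, mean - 5 * std  # num_of_std=5, as in bbands()
print(upper.tail(3))
print(lower.tail(3))
```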
|
{
"source": "jerry-chang3300/sonic-mgmt",
"score": 3
}
|
#### File: platform/mellanox/test_check_sysfs.py
```python
import logging
from check_sysfs import check_sysfs
def test_check_hw_mgmt_sysfs(duthost):
"""This test case is to check the symbolic links under /var/run/hw-management
"""
check_sysfs(duthost)
def test_hw_mgmt_sysfs_mapped_to_pmon(duthost):
"""This test case is to verify that the /var/run/hw-management folder is mapped to pmon container
"""
logging.info("Verify that the /var/run/hw-management folder is mapped to the pmon container")
files_under_dut = set(duthost.command("find /var/run/hw-management")["stdout_lines"])
files_under_pmon = set(duthost.command("docker exec pmon find /var/run/hw-management")["stdout_lines"])
assert files_under_dut == files_under_pmon, "Folder /var/run/hw-management is not mapped to pmon"
```
#### File: tests/snmp/test_snmp_queue.py
```python
import pytest
from ansible_host import AnsibleHost
def test_snmp_queues(ansible_adhoc, duthost, creds, collect_techsupport):
lhost = AnsibleHost(ansible_adhoc, 'localhost', True)
hostip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host']
snmp_facts = lhost.snmp_facts(host=hostip, version="v2c", community=creds["snmp_rocommunity"])['ansible_facts']
for k, v in snmp_facts['snmp_interfaces'].items():
if "Ethernet" in v['description']:
            if 'queues' not in v:
pytest.fail("port %s does not have queue counters" % v['name'])
```
|
{
"source": "jerrychen110/python-aiml",
"score": 3
}
|
#### File: CN/xiaomayou/getweather.py
```python
import urllib
import json
import sys
reload(sys)
sys.setdefaultencoding('utf8')
ENCODING = 'utf-8'
# Example: weather for Wuhan
def queryLocation(term):
# term = term.encode(ENCODING) if type(term) == unicode else term
# print(term)
url = "http://toy1.weather.com.cn/search?cityname=" + term
resp = urllib.urlopen(url)
result = resp.read()[1:-1]
data = json.loads(result)
if not data:
print(u"找不到地点".encode(ENCODING))
for d in data:
ref = d["ref"]
ref_code = ref.split("~")
code = ref_code[0]
break
return code
def queryRealTimeWeatherInfo(code):
#url = "http://m.weather.com.cn/data/%s.html" % code
url = "http://www.weather.com.cn/data/sk/%s.html" % code
resp = urllib.urlopen(url)
data = json.load(resp)
if not data:
print(u"天气预报还没出来".encode(ENCODING))
return data['weatherinfo']
def showRealTimeWeatherInfo(info):
template = u"{city} {time} 天气实况: 气温{temp}℃, {WD}{WS}, 湿度{SD}"
print template.format(**info).encode(ENCODING)
def main():
assert len(sys.argv) >= 3
function = sys.argv[1]
term = ''.join(sys.argv[2:])
if function == 'realtime':
        # real-time weather
showRealTimeWeatherInfo(queryRealTimeWeatherInfo(queryLocation(term)))
if __name__ == '__main__':
main()
```
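A driver sketch chaining the three functions directly (Python 2, matching the `urllib.urlopen` usage above; the city name is illustrative and the search endpoint may expect Chinese names):

```python
code = queryLocation("Wuhan")
info = queryRealTimeWeatherInfo(code)
showRealTimeWeatherInfo(info)
```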
|
{
"source": "jerrychenhf/cloudtik",
"score": 2
}
|
#### File: core/_private/call_context.py
```python
import copy
from cloudtik.core._private.cli_logger import cli_logger
class CallContext:
def __init__(self, _cli_logger=cli_logger) -> None:
"""The call context object for controlling the context shared data.
"""
self._redirect_output = False # Whether to log command output to a temporary file
self._allow_interactive = True # whether to pass on stdin to running commands.
self._config = {"use_login_shells": True, "silent_rsync": True}
self._cli_logger = _cli_logger
def new_call_context(self):
new_context = CallContext(_cli_logger=self._cli_logger.new_logger())
new_context._redirect_output = self._redirect_output
new_context._allow_interactive = self._allow_interactive
new_context._config = copy.deepcopy(self._config)
return new_context
@property
def cli_logger(self):
return self._cli_logger
def is_output_redirected(self):
return self._redirect_output
def set_output_redirected(self, val: bool):
"""Choose between logging to a temporary file and to `sys.stdout`.
The default is to log to a file.
Args:
val (bool): If true, subprocess output will be redirected to
a temporary file.
"""
self._redirect_output = val
def does_allow_interactive(self):
return self._allow_interactive
def set_allow_interactive(self, val: bool):
"""Choose whether to pass on stdin to running commands.
The default is to pipe stdin and close it immediately.
Args:
val (bool): If true, stdin will be passed to command.
"""
self._allow_interactive = val
def is_rsync_silent(self):
return self._config["silent_rsync"]
def set_rsync_silent(self, val):
"""Choose whether to silence rsync output.
Most commands will want to list rsync'd files themselves rather than
print the default rsync spew.
"""
self._config["silent_rsync"] = val
def is_using_login_shells(self):
return self._config["use_login_shells"]
def set_using_login_shells(self, val):
"""Choose between login and non-interactive shells.
Non-interactive shells have the benefit of receiving less output from
subcommands (since progress bars and TTY control codes are not printed).
Sometimes this can be significant since e.g. `pip install` prints
hundreds of progress bar lines when downloading.
Login shells have the benefit of working very close to how a proper bash
session does, regarding how scripts execute and how the environment is
set up. This is also how all commands were run in the past. The only reason
to use login shells over non-interactive shells is if you need some weird
and non-robust tool to work.
Args:
val (bool): If true, login shells will be used to run all commands.
"""
self._config["use_login_shells"] = val
```
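A minimal sketch of the intended usage: a child context inherits the parent's settings by deep copy, so later mutations do not leak back:

```python
ctx = CallContext()
ctx.set_rsync_silent(False)
child = ctx.new_call_context()
child.set_output_redirected(True)
print(ctx.is_output_redirected(), child.is_output_redirected())  # False True
```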
#### File: core/_private/services.py
```python
import base64
import collections
import errno
import io
import json
import logging
import os
import random
import signal
import socket
import subprocess
import sys
import time
from typing import List
import uuid
from shlex import quote
import redis
import cloudtik
# Import psutil and colorama after cloudtik so the packaged version is used.
import psutil
import cloudtik.core._private.constants as constants
import cloudtik.core._private.utils as utils
from cloudtik.core import tags
from cloudtik.core._private.state.control_state import ControlState
resource = None
if sys.platform != "win32":
import resource
EXE_SUFFIX = ".exe" if sys.platform == "win32" else ""
CLOUDTIK_HOME = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), "../..")
CLOUDTIK_PATH = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
CLOUDTIK_CORE_PRIVATE_SERVICE = "core/_private/service"
# Location of the redis server.
CLOUDTIK_REDIS_EXECUTABLE = os.path.join(
CLOUDTIK_PATH, "core/thirdparty/redis/redis-server" + EXE_SUFFIX)
CLOUDTIK_JEMALLOC_LIB_PATH_ENV = "CLOUDTIK_JEMALLOC_LIB_PATH"
CLOUDTIK_JEMALLOC_CONF_ENV = "CLOUDTIK_JEMALLOC_CONF"
CLOUDTIK_JEMALLOC_PROFILE_ENV = "CLOUDTIK_JEMALLOC_PROFILE"
# Logger for this module. It should be configured at the entry point
# into the program. We provide a default configuration at
# entry points.
logger = logging.getLogger(__name__)
ProcessInfo = collections.namedtuple("ProcessInfo", [
"process",
"stdout_file",
"stderr_file",
"use_valgrind",
"use_gdb",
"use_valgrind_profiler",
"use_perftools_profiler",
"use_tmux",
])
def serialize_config(config):
return base64.b64encode(json.dumps(config).encode("utf-8")).decode("utf-8")
def propagate_jemalloc_env_var(*, jemalloc_path: str, jemalloc_conf: str,
jemalloc_comps: List[str], process_type: str):
"""Read the jemalloc memory profiling related
env var and return the dictionary that translates
them to proper jemalloc related env vars.
For example, if users specify `CLOUDTIK_JEMALLOC_LIB_PATH`,
it is translated into `LD_PRELOAD` which is needed to
run Jemalloc as a shared library.
Params:
jemalloc_path (str): The path to the jemalloc shared library.
jemalloc_conf (str): `,` separated string of jemalloc config.
jemalloc_comps List(str): The list of components
that we will profile.
process_type (str): The process type that needs jemalloc
env var for memory profiling. If it doesn't match one of
jemalloc_comps, the function will return an empty dict.
Returns:
dictionary of {env_var: value}
that are needed to jemalloc profiling. The caller can
call `dict.update(return_value_of_this_func)` to
update the dict of env vars. If the process_type doesn't
match jemalloc_comps, it will return an empty dict.
"""
assert isinstance(jemalloc_comps, list)
assert process_type is not None
process_type = process_type.lower()
if (not jemalloc_path or process_type not in jemalloc_comps):
return {}
env_vars = {
"LD_PRELOAD": jemalloc_path,
}
if jemalloc_conf:
env_vars.update({"MALLOC_CONF": jemalloc_conf})
return env_vars
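# Example (kept as comments so the module is unchanged at import time):
# with the hypothetical values
#   jemalloc_path="/usr/lib/libjemalloc.so", jemalloc_conf="prof:true",
#   jemalloc_comps=["redis_server"], process_type="redis_server"
# propagate_jemalloc_env_var(...) returns
#   {"LD_PRELOAD": "/usr/lib/libjemalloc.so", "MALLOC_CONF": "prof:true"}.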
class ConsolePopen(subprocess.Popen):
if sys.platform == "win32":
def terminate(self):
if isinstance(self.stdin, io.IOBase):
self.stdin.close()
if self._use_signals:
self.send_signal(signal.CTRL_BREAK_EVENT)
else:
super(ConsolePopen, self).terminate()
def __init__(self, *args, **kwargs):
# CREATE_NEW_PROCESS_GROUP is used to send Ctrl+C on Windows:
# https://docs.python.org/3/library/subprocess.html#subprocess.Popen.send_signal
new_pgroup = subprocess.CREATE_NEW_PROCESS_GROUP
flags_to_add = 0
if utils.detect_fate_sharing_support():
# If we don't have kernel-mode fate-sharing, then don't do this
# because our children need to be in out process group for
# the process reaper to properly terminate them.
flags_to_add = new_pgroup
flags_key = "creationflags"
if flags_to_add:
kwargs[flags_key] = (kwargs.get(flags_key) or 0) | flags_to_add
            self._use_signals = (kwargs.get(flags_key, 0) & new_pgroup)
super(ConsolePopen, self).__init__(*args, **kwargs)
def address(ip_address, port):
return ip_address + ":" + str(port)
def new_port(lower_bound=10000, upper_bound=65535, denylist=None):
if not denylist:
denylist = set()
port = random.randint(lower_bound, upper_bound)
retry = 0
while port in denylist:
if retry > 100:
break
port = random.randint(lower_bound, upper_bound)
retry += 1
if retry > 100:
raise ValueError("Failed to find a new port from the range "
f"{lower_bound}-{upper_bound}. Denylist: {denylist}")
return port
def find_redis_address(address=None):
"""
Attempts to find all valid redis addresses on this node.
Returns:
Set of detected Redis instances.
"""
# Currently, this extracts the deprecated --redis-address from the command
# that launched the service running on this node, if any.
pids = psutil.pids()
redis_addresses = set()
for pid in pids:
try:
proc = psutil.Process(pid)
# HACK: Workaround for UNIX idiosyncrasy
# Normally, cmdline() is supposed to return the argument list.
# But it in some cases (such as when setproctitle is called),
# an arbitrary string resembling a command-line is stored in
# the first argument.
# Explanation: https://unix.stackexchange.com/a/432681
# More info: https://github.com/giampaolo/psutil/issues/1179
cmdline = proc.cmdline()
# NOTE: To support Windows, we can't use
# `os.path.basename(cmdline[0]) == "abc"` here.
# TODO (haifeng): use the right way to detect the redis
if utils.find_name_in_command(cmdline, "cloudtik_cluster_controller"):
for arglist in cmdline:
# Given we're merely seeking --redis-address, we just split
# every argument on spaces for now.
for arg in arglist.split(" "):
# TODO(ekl): Find a robust solution for locating Redis.
if arg.startswith("--redis-address="):
proc_addr = arg.split("=")[1]
if address is not None and address != proc_addr:
continue
redis_addresses.add(proc_addr)
except psutil.AccessDenied:
pass
except psutil.NoSuchProcess:
pass
return redis_addresses
def get_address_to_use_or_die():
"""
Attempts to find an address for an existing cluster if it is not
already specified as an environment variable.
Returns:
A string to redis address
"""
return os.environ.get(constants.CLOUDTIK_ADDRESS_ENV,
find_redis_address_or_die())
def find_redis_address_or_die():
redis_addresses = find_redis_address()
if len(redis_addresses) > 1:
raise ConnectionError(
f"Found multiple active Redis instances: {redis_addresses}. "
"Please specify the one to connect to by setting `address`.")
elif not redis_addresses:
raise ConnectionError(
"Could not find any running Redis instance. "
"Please specify the one to connect to by setting `address`.")
return redis_addresses.pop()
def wait_for_node(redis_address,
node_ip_address,
redis_password=None,
timeout=30):
"""Wait until this node has appeared in the client table.
Args:
redis_address (str): The redis address.
node_ip_address (str): the node ip address to wait for
redis_password (str): the redis password.
timeout: The amount of time in seconds to wait before raising an
exception.
Raises:
TimeoutError: An exception is raised if the timeout expires before
the node appears in the client table.
"""
redis_ip_address, redis_port = redis_address.split(":")
wait_for_redis_to_start(redis_ip_address, redis_port, redis_password)
# TODO (haifeng): implement control state for node services
global_state = ControlState()
global_state.initialize_control_state(redis_address, redis_port, redis_password)
start_time = time.time()
while time.time() - start_time < timeout:
# FIXME It depends on the implementation of global_state_accessor to pass. Skip it temporarily.
# clients = global_state.node_table()
# node_ip_addresses = [
# client["node_ip_address"] for client in clients
# ]
node_ip_addresses = [node_ip_address]
if node_ip_address in node_ip_addresses:
return
else:
time.sleep(0.1)
raise TimeoutError("Timed out while waiting for node to startup.")
def validate_redis_address(address):
"""Validates address parameter.
Returns:
redis_address: string containing the full <host:port> address.
redis_ip: string representing the host portion of the address.
redis_port: integer representing the port portion of the address.
"""
if address == "auto":
address = find_redis_address_or_die()
redis_address = address_to_ip(address)
redis_address_parts = redis_address.split(":")
if len(redis_address_parts) != 2:
raise ValueError(f"Malformed address. Expected '<host>:<port>',"
f" but got {redis_address} from {address}.")
redis_ip = redis_address_parts[0]
try:
redis_port = int(redis_address_parts[1])
except ValueError:
raise ValueError("Malformed address port. Must be an integer.")
if redis_port < 1024 or redis_port > 65535:
raise ValueError("Invalid address port. Must "
"be between 1024 and 65535.")
return redis_address, redis_ip, redis_port
def address_to_ip(address):
"""Convert a hostname to a numerical IP addresses in an address.
This should be a no-op if address already contains an actual numerical IP
address.
Args:
address: This can be either a string containing a hostname (or an IP
address) and a port or it can be just an IP address.
Returns:
The same address but with the hostname replaced by a numerical IP
address.
"""
address_parts = address.split(":")
ip_address = socket.gethostbyname(address_parts[0])
# Make sure localhost isn't resolved to the loopback ip
if ip_address == "127.0.0.1":
ip_address = get_node_ip_address()
return ":".join([ip_address] + address_parts[1:])
def node_ip_address_from_perspective(address):
"""IP address by which the local node can be reached *from* the `address`.
Args:
address (str): The IP address and port of any known live service on the
network you care about.
Returns:
The IP address by which the local node can be reached from the address.
"""
ip_address, port = address.split(":")
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# This command will raise an exception if there is no internet
# connection.
s.connect((ip_address, int(port)))
node_ip_address = s.getsockname()[0]
except OSError as e:
node_ip_address = "127.0.0.1"
# [Errno 101] Network is unreachable
if e.errno == errno.ENETUNREACH:
try:
# try get node ip address from host name
host_name = socket.getfqdn(socket.gethostname())
node_ip_address = socket.gethostbyname(host_name)
except Exception:
pass
finally:
s.close()
return node_ip_address
def get_node_ip_address(address="8.8.8.8:53"):
if sys.platform == "darwin" or sys.platform == "win32":
# Due to the mac osx/windows firewall,
# we use loopback ip as the ip address
# to prevent security popups.
return "127.0.0.1"
return node_ip_address_from_perspective(address)
def create_redis_client(redis_address, password=None):
"""Create a Redis client.
Args:
The IP address, port, and password of the Redis server.
Returns:
A Redis client.
"""
if not hasattr(create_redis_client, "instances"):
create_redis_client.instances = {}
else:
cli = create_redis_client.instances.get(redis_address)
if cli is not None:
try:
cli.ping()
return cli
except Exception:
create_redis_client.instances.pop(redis_address)
_, redis_ip_address, redis_port = validate_redis_address(redis_address)
# For this command to work, some other client (on the same machine
# as Redis) must have run "CONFIG SET protected-mode no".
create_redis_client.instances[redis_address] = redis.StrictRedis(
host=redis_ip_address, port=int(redis_port), password=password)
return create_redis_client.instances[redis_address]
def start_cloudtik_process(command,
process_type,
fate_share,
env_updates=None,
cwd=None,
use_valgrind=False,
use_gdb=False,
use_valgrind_profiler=False,
use_perftools_profiler=False,
use_tmux=False,
stdout_file=None,
stderr_file=None,
pipe_stdin=False):
"""Start one of the service processes.
TODO(rkn): We need to figure out how these commands interact. For example,
it may only make sense to start a process in gdb if we also start it in
tmux. Similarly, certain combinations probably don't make sense, like
simultaneously running the process in valgrind and the profiler.
Args:
command (List[str]): The command to use to start the process.
process_type (str): The type of the process that is being started
fate_share: If true, the child will be killed if its parent (us) dies.
True must only be passed after detection of this functionality.
env_updates (dict): A dictionary of additional environment variables to
run the command with (in addition to the caller's environment
variables).
cwd (str): The directory to run the process in.
use_valgrind (bool): True if we should start the process in valgrind.
use_gdb (bool): True if we should start the process in gdb.
use_valgrind_profiler (bool): True if we should start the process in
the valgrind profiler.
use_perftools_profiler (bool): True if we should profile the process
using perftools.
use_tmux (bool): True if we should start the process in tmux.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
pipe_stdin: If true, subprocess.PIPE will be passed to the process as
stdin.
Returns:
Information about the process that was started including a handle to
the process that was started.
"""
# Detect which flags are set through environment variables.
valgrind_env_var = f"CLOUDTIK_{process_type.upper()}_VALGRIND"
if os.environ.get(valgrind_env_var) == "1":
logger.info("Detected environment variable '%s'.", valgrind_env_var)
use_valgrind = True
valgrind_profiler_env_var = f"CLOUDTIK_{process_type.upper()}_VALGRIND_PROFILER"
if os.environ.get(valgrind_profiler_env_var) == "1":
logger.info("Detected environment variable '%s'.",
valgrind_profiler_env_var)
use_valgrind_profiler = True
perftools_profiler_env_var = (f"CLOUDTIK_{process_type.upper()}"
"_PERFTOOLS_PROFILER")
if os.environ.get(perftools_profiler_env_var) == "1":
logger.info("Detected environment variable '%s'.",
perftools_profiler_env_var)
use_perftools_profiler = True
tmux_env_var = f"CLOUDTIK_{process_type.upper()}_TMUX"
if os.environ.get(tmux_env_var) == "1":
logger.info("Detected environment variable '%s'.", tmux_env_var)
use_tmux = True
gdb_env_var = f"CLOUDTIK_{process_type.upper()}_GDB"
if os.environ.get(gdb_env_var) == "1":
logger.info("Detected environment variable '%s'.", gdb_env_var)
use_gdb = True
# Jemalloc memory profiling.
jemalloc_lib_path = os.environ.get(CLOUDTIK_JEMALLOC_LIB_PATH_ENV)
jemalloc_conf = os.environ.get(CLOUDTIK_JEMALLOC_CONF_ENV)
jemalloc_comps = os.environ.get(CLOUDTIK_JEMALLOC_PROFILE_ENV)
jemalloc_comps = [] if not jemalloc_comps else jemalloc_comps.split(",")
jemalloc_env_vars = propagate_jemalloc_env_var(
jemalloc_path=jemalloc_lib_path,
jemalloc_conf=jemalloc_conf,
jemalloc_comps=jemalloc_comps,
process_type=process_type)
use_jemalloc_mem_profiler = len(jemalloc_env_vars) > 0
if sum([
use_gdb,
use_valgrind,
use_valgrind_profiler,
use_perftools_profiler,
use_jemalloc_mem_profiler,
]) > 1:
raise ValueError("At most one of the 'use_gdb', 'use_valgrind', "
"'use_valgrind_profiler', 'use_perftools_profiler', "
"and 'use_jemalloc_mem_profiler' flags can "
"be used at a time.")
if env_updates is None:
env_updates = {}
if not isinstance(env_updates, dict):
raise ValueError("The 'env_updates' argument must be a dictionary.")
modified_env = os.environ.copy()
modified_env.update(env_updates)
if use_gdb:
if not use_tmux:
raise ValueError(
"If 'use_gdb' is true, then 'use_tmux' must be true as well.")
# TODO(suquark): Any better temp file creation here?
gdb_init_path = os.path.join(utils.get_cloudtik_temp_dir(),
f"gdb_init_{process_type}_{time.time()}")
process_path = command[0]
process_args = command[1:]
run_args = " ".join(["'{}'".format(arg) for arg in process_args])
with open(gdb_init_path, "w") as gdb_init_file:
gdb_init_file.write(f"run {run_args}")
command = ["gdb", process_path, "-x", gdb_init_path]
if use_valgrind:
command = [
"valgrind",
"--track-origins=yes",
"--leak-check=full",
"--show-leak-kinds=all",
"--leak-check-heuristics=stdstring",
"--error-exitcode=1",
] + command
if use_valgrind_profiler:
command = ["valgrind", "--tool=callgrind"] + command
if use_perftools_profiler:
modified_env["LD_PRELOAD"] = os.environ["PERFTOOLS_PATH"]
modified_env["CPUPROFILE"] = os.environ["PERFTOOLS_LOGFILE"]
if use_jemalloc_mem_profiler:
logger.info(f"Jemalloc profiling will be used for {process_type}. "
f"env vars: {jemalloc_env_vars}")
modified_env.update(jemalloc_env_vars)
if use_tmux:
# The command has to be created exactly as below to ensure that it
# works on all versions of tmux. (Tested with tmux 1.8-5, travis'
# version, and tmux 2.1)
command = ["tmux", "new-session", "-d", f"{' '.join(command)}"]
if fate_share:
assert utils.detect_fate_sharing_support(), (
"kernel-level fate-sharing must only be specified if "
"detect_fate_sharing_support() has returned True")
def preexec_fn():
import signal
signal.pthread_sigmask(signal.SIG_BLOCK, {signal.SIGINT})
if fate_share and sys.platform.startswith("linux"):
utils.set_kill_on_parent_death_linux()
win32_fate_sharing = fate_share and sys.platform == "win32"
# With Windows fate-sharing, we need special care:
# The process must be added to the job before it is allowed to execute.
# Otherwise, there's a race condition: the process might spawn children
# before the process itself is assigned to the job.
# After that point, its children will not be added to the job anymore.
CREATE_SUSPENDED = 0x00000004 # from Windows headers
if sys.platform == "win32":
# CreateProcess, which underlies Popen, is limited to
# 32,767 characters, including the Unicode terminating null
# character
total_chrs = sum([len(x) for x in command])
if total_chrs > 31766:
raise ValueError(
f"command is limited to a total of 31767 characters, "
f"got {total_chrs}")
process = ConsolePopen(
command,
env=modified_env,
cwd=cwd,
stdout=stdout_file,
stderr=stderr_file,
stdin=subprocess.PIPE if pipe_stdin else None,
preexec_fn=preexec_fn if sys.platform != "win32" else None,
creationflags=CREATE_SUSPENDED if win32_fate_sharing else 0)
if win32_fate_sharing:
try:
utils.set_kill_child_on_death_win32(process)
psutil.Process(process.pid).resume()
except (psutil.Error, OSError):
process.kill()
raise
def _get_stream_name(stream):
if stream is not None:
try:
return stream.name
except AttributeError:
return str(stream)
return None
return ProcessInfo(
process=process,
stdout_file=_get_stream_name(stdout_file),
stderr_file=_get_stream_name(stderr_file),
use_valgrind=use_valgrind,
use_gdb=use_gdb,
use_valgrind_profiler=use_valgrind_profiler,
use_perftools_profiler=use_perftools_profiler,
use_tmux=use_tmux)
def wait_for_redis_to_start(redis_ip_address, redis_port, password=None):
"""Wait for a Redis server to be available.
This is accomplished by creating a Redis client and sending a random
command to the server until the command gets through.
Args:
redis_ip_address (str): The IP address of the redis server.
redis_port (int): The port of the redis server.
        password (str): The password of the Redis server.
Raises:
Exception: An exception is raised if we could not connect with Redis.
"""
redis_client = redis.StrictRedis(
host=redis_ip_address, port=redis_port, password=password)
# Wait for the Redis server to start.
num_retries = constants.CLOUDTIK_START_REDIS_WAIT_RETRIES
delay = 0.001
for i in range(num_retries):
try:
# Run some random command and see if it worked.
logger.debug(
"Waiting for redis server at {}:{} to respond...".format(
redis_ip_address, redis_port))
redis_client.client_list()
# If the Redis service is delayed getting set up for any reason, we may
# get a redis.ConnectionError: Error 111 connecting to host:port.
# Connection refused.
# Unfortunately, redis.ConnectionError is also the base class of
# redis.AuthenticationError. We *don't* want to obscure a
# redis.AuthenticationError, because that indicates the user provided a
# bad password. Thus a double except clause to ensure a
# redis.AuthenticationError isn't trapped here.
except redis.AuthenticationError as authEx:
raise RuntimeError("Unable to connect to Redis at {}:{}.".format(
redis_ip_address, redis_port)) from authEx
except redis.ConnectionError as connEx:
if i >= num_retries - 1:
raise RuntimeError(
f"Unable to connect to Redis at {redis_ip_address}:"
f"{redis_port} after {num_retries} retries. Check that "
f"{redis_ip_address}:{redis_port} is reachable from this "
"machine. If it is not, your firewall may be blocking "
"this port. If the problem is a flaky connection, try "
"setting the environment variable "
"`CLOUDTIK_START_REDIS_WAIT_RETRIES` to increase the number of"
" attempts to ping the Redis server.") from connEx
# Wait a little bit.
time.sleep(delay)
delay *= 2
else:
break
else:
raise RuntimeError(
f"Unable to connect to Redis (after {num_retries} retries). "
"If the Redis instance is on a different machine, check that "
"your firewall and relevant ports are configured properly. "
"You can also set the environment variable "
"`CLOUDTIK_START_REDIS_WAIT_RETRIES` to increase the number of "
"attempts to ping the Redis server.")
def _compute_version_info():
"""Compute the versions of Python, and CloudTik.
Returns:
A tuple containing the version information.
"""
cloudtik_version = cloudtik.__version__
python_version = ".".join(map(str, sys.version_info[:3]))
return cloudtik_version, python_version
def _put_version_info_in_redis(redis_client):
"""Store version information in Redis.
This will be used to detect if workers or drivers are started using
different versions of Python, or CloudTik.
Args:
redis_client: A client for the primary Redis shard.
"""
redis_client.set("VERSION_INFO", json.dumps(_compute_version_info()))
def check_version_info(redis_client):
"""Check if various version info of this process is correct.
This will be used to detect if workers or drivers are started using
different versions of Python, or CloudTik. If the version
information is not present in Redis, then no check is done.
Args:
redis_client: A client for the primary Redis shard.
Raises:
Exception: An exception is raised if there is a version mismatch.
"""
redis_reply = redis_client.get("VERSION_INFO")
# Don't do the check if there is no version information in Redis. This
# is to make it easier to do things like start the processes by hand.
if redis_reply is None:
return
true_version_info = tuple(
json.loads(utils.decode(redis_reply)))
version_info = _compute_version_info()
if version_info != true_version_info:
node_ip_address = get_node_ip_address()
error_message = ("Version mismatch: The cluster was started with:\n"
" CloudTik: " + true_version_info[0] + "\n"
" Python: " + true_version_info[1] + "\n"
"This process on node " + node_ip_address +
" was started with:" + "\n"
" CloudTik: " + version_info[0] + "\n"
" Python: " + version_info[1] + "\n")
if version_info[:2] != true_version_info[:2]:
raise RuntimeError(error_message)
else:
logger.warning(error_message)
def start_reaper(fate_share=None):
"""Start the reaper process.
This is a lightweight process that simply
waits for its parent process to die and then terminates its own
process group. This allows us to ensure that cloudtik processes are always
terminated properly so long as that process itself isn't SIGKILLed.
Returns:
ProcessInfo for the process that was started.
"""
# Make ourselves a process group leader so that the reaper can clean
# up other processes without killing the process group of the
# process that started us.
try:
if sys.platform != "win32":
os.setpgrp()
except OSError as e:
errcode = e.errno
if errcode == errno.EPERM and os.getpgrp() == os.getpid():
# Nothing to do; we're already a session leader.
pass
else:
logger.warning("setpgrp failed, processes may not be "
"cleaned up properly: {}.".format(e))
# Don't start the reaper in this case as it could result in killing
# other user processes.
return None
reaper_filepath = os.path.join(CLOUDTIK_PATH, CLOUDTIK_CORE_PRIVATE_SERVICE,
"cloudtik_process_reaper.py")
command = [sys.executable, "-u", reaper_filepath]
process_info = start_cloudtik_process(
command,
constants.PROCESS_TYPE_REAPER,
pipe_stdin=True,
fate_share=fate_share)
return process_info
def start_redis(node_ip_address,
redirect_files,
resource_spec,
session_dir_path,
port=None,
redis_shard_ports=None,
num_redis_shards=1,
redis_max_clients=None,
redirect_worker_output=False,
                password=None,
fate_share=None,
external_addresses=None,
port_denylist=None):
"""Start the Redis global state store.
Args:
node_ip_address: The IP address of the current node. This is only used
for recording the log filenames in Redis.
redirect_files: The list of (stdout, stderr) file pairs.
resource_spec (ResourceSpec): Resources for the node.
session_dir_path (str): Path to the session directory of
this cluster.
port (int): If provided, the primary Redis shard will be started on
this port.
redis_shard_ports: A list of the ports to use for the non-primary Redis
shards.
num_redis_shards (int): If provided, the number of Redis shards to
start, in addition to the primary one. The default value is one
shard.
redis_max_clients: If this is provided, we will attempt to configure
Redis with this maxclients number.
redirect_worker_output (bool): True if worker output should be
redirected to a file and false otherwise. Workers will have access
to this value when they start up.
password (str): Prevents external clients without the password
from connecting to Redis if provided.
port_denylist (set): A set of denylist ports that shouldn't
be used when allocating a new port.
Returns:
A tuple of the address for the primary Redis shard, a list of
addresses for the remaining shards, and the processes that were
started.
"""
processes = []
if external_addresses is not None:
primary_redis_address = external_addresses[0]
[primary_redis_ip, port] = primary_redis_address.split(":")
port = int(port)
redis_address = address(primary_redis_ip, port)
primary_redis_client = create_redis_client(
"%s:%s" % (primary_redis_ip, port), password=password)
else:
if len(redirect_files) != 1 + num_redis_shards:
raise ValueError(
"The number of redirect file pairs should be equal "
"to the number of redis shards (including the "
"primary shard) we will start.")
if redis_shard_ports is None:
redis_shard_ports = num_redis_shards * [None]
elif len(redis_shard_ports) != num_redis_shards:
raise RuntimeError(
"The number of Redis shard ports does not match "
"the number of Redis shards.")
redis_executable = CLOUDTIK_REDIS_EXECUTABLE
redis_stdout_file, redis_stderr_file = redirect_files[0]
# If no port is given, fallback to default Redis port for the primary
# shard.
if port is None:
port = constants.CLOUDTIK_DEFAULT_PORT
num_retries = 20
else:
num_retries = 1
# Start the primary Redis shard.
port, p = _start_redis_instance(
redis_executable,
session_dir_path,
bind_address=node_ip_address,
port=port,
password=password,
redis_max_clients=redis_max_clients,
num_retries=num_retries,
# Below we use None to indicate no limit on the memory of the
# primary Redis shard.
redis_max_memory=None,
stdout_file=redis_stdout_file,
stderr_file=redis_stderr_file,
fate_share=fate_share,
port_denylist=port_denylist,
listen_to_localhost_only=(node_ip_address == "127.0.0.1"))
processes.append(p)
redis_address = address(node_ip_address, port)
primary_redis_client = redis.StrictRedis(
host=node_ip_address, port=port, password=password)
# Register the number of Redis shards in the primary shard, so that clients
# know how many redis shards to expect under RedisShards.
primary_redis_client.set("NumRedisShards", str(num_redis_shards))
# Deleting the key to avoid duplicated rpush.
primary_redis_client.delete("RedisShards")
# Put the redirect_worker_output bool in the Redis shard so that workers
# can access it and know whether or not to redirect their output.
primary_redis_client.set("RedirectOutput", 1
if redirect_worker_output else 0)
# Init job counter to GCS.
primary_redis_client.set("JobCounter", 0)
# Store version information in the primary Redis shard.
_put_version_info_in_redis(primary_redis_client)
# Calculate the redis memory.
# TODO (haifeng): if not specified, calculate according to the node memory
assert resource_spec.resolved()
redis_max_memory = resource_spec.redis_max_memory
# Start other Redis shards. Each Redis shard logs to a separate file,
# prefixed by "redis-<shard number>".
redis_shards = []
# If Redis shard ports are not provided, start the port range of the
# other Redis shards at a high, random port.
last_shard_port = new_port(denylist=port_denylist) - 1
for i in range(num_redis_shards):
if external_addresses is not None:
shard_address = external_addresses[i + 1]
else:
redis_stdout_file, redis_stderr_file = redirect_files[i + 1]
redis_executable = CLOUDTIK_REDIS_EXECUTABLE
redis_shard_port = redis_shard_ports[i]
# If no shard port is given, try to start this shard's Redis
# instance on the port right after the last shard's port.
if redis_shard_port is None:
redis_shard_port = last_shard_port + 1
num_retries = 20
else:
num_retries = 1
redis_shard_port, p = _start_redis_instance(
redis_executable,
session_dir_path,
bind_address=node_ip_address,
port=redis_shard_port,
password=password,
redis_max_clients=redis_max_clients,
num_retries=num_retries,
redis_max_memory=redis_max_memory,
stdout_file=redis_stdout_file,
stderr_file=redis_stderr_file,
fate_share=fate_share,
port_denylist=port_denylist,
listen_to_localhost_only=(node_ip_address == "127.0.0.1"))
processes.append(p)
shard_address = address(node_ip_address, redis_shard_port)
last_shard_port = redis_shard_port
redis_shards.append(shard_address)
# Store redis shard information in the primary redis shard.
primary_redis_client.rpush("RedisShards", shard_address)
return redis_address, redis_shards, processes
def _start_redis_instance(executable,
session_dir_path,
bind_address,
port,
redis_max_clients=None,
num_retries=20,
stdout_file=None,
stderr_file=None,
password=<PASSWORD>,
redis_max_memory=None,
fate_share=None,
port_denylist=None,
listen_to_localhost_only=False):
"""Start a single Redis server.
Notes:
We will initially try to start the Redis instance at the given port,
and then try at most `num_retries - 1` times to start the Redis
instance at successive random ports.
Args:
executable (str): Full path of the redis-server executable.
session_dir_path (str): Path to the session directory of
this cluster.
        bind_address: The address to bind to. None to bind to all interfaces.
port (int): Try to start a Redis server at this port.
redis_max_clients: If this is provided, we will attempt to configure
Redis with this maxclients number.
num_retries (int): The number of times to attempt to start Redis at
successive ports.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
password (str): Prevents external clients without the password
from connecting to Redis if provided.
redis_max_memory: The max amount of memory (in bytes) to allow redis
to use, or None for no limit. Once the limit is exceeded, redis
will start LRU eviction of entries.
port_denylist (set): A set of denylist ports that shouldn't
be used when allocating a new port.
listen_to_localhost_only (bool): Redis server only listens to
localhost (127.0.0.1) if it's true,
otherwise it listens to all network interfaces.
Returns:
A tuple of the port used by Redis and ProcessInfo for the process that
was started. If a port is passed in, then the returned port value
is the same.
Raises:
Exception: An exception is raised if Redis could not be started.
"""
assert os.path.isfile(executable)
counter = 0
while counter < num_retries:
# Construct the command to start the Redis server.
command = [executable]
if password:
if " " in password:
raise ValueError("Spaces not permitted in redis password.")
command += ["--requirepass", password]
command += (["--port", str(port), "--loglevel", "warning"])
command += (["--dir", session_dir_path])
if listen_to_localhost_only:
command += ["--bind", "127.0.0.1"]
elif bind_address is not None:
command += ["--bind", "127.0.0.1", bind_address]
pidfile = os.path.join(session_dir_path,
"redis-" + uuid.uuid4().hex + ".pid")
command += ["--pidfile", pidfile]
process_info = start_cloudtik_process(
command,
constants.PROCESS_TYPE_REDIS_SERVER,
stdout_file=stdout_file,
stderr_file=stderr_file,
fate_share=fate_share)
try:
wait_for_redis_to_start("127.0.0.1", port, password=password)
except (redis.exceptions.ResponseError, RuntimeError):
# Connected to redis with the wrong password, or exceeded
# the number of retries. This means we got the wrong redis
# or there is some error in starting up redis.
# Try the next port by looping again.
pass
else:
r = redis.StrictRedis(
host="127.0.0.1", port=port, password=password)
# Check if Redis successfully started and we connected
# to the right server.
if r.config_get("pidfile")["pidfile"] == pidfile:
break
port = new_port(denylist=port_denylist)
counter += 1
if counter == num_retries:
raise RuntimeError("Couldn't start Redis. "
"Check log files: {} {}".format(
stdout_file.name if stdout_file is not None else
"<stdout>", stderr_file.name
                               if stderr_file is not None else "<stderr>"))
# Create a Redis client just for configuring Redis.
redis_client = redis.StrictRedis(
host="127.0.0.1", port=port, password=password)
# Wait for the Redis server to start.
wait_for_redis_to_start("127.0.0.1", port, password=password)
# Configure Redis to generate keyspace notifications. TODO(rkn): Change
# this to only generate notifications for the export keys.
redis_client.config_set("notify-keyspace-events", "Kl")
# Configure Redis to not run in protected mode so that processes on other
# hosts can connect to it. TODO(rkn): Do this in a more secure way.
redis_client.config_set("protected-mode", "no")
# Discard old task and object metadata.
if redis_max_memory is not None:
redis_client.config_set("maxmemory", str(redis_max_memory))
redis_client.config_set("maxmemory-policy", "allkeys-lru")
redis_client.config_set("maxmemory-samples", "10")
logger.debug("Starting Redis shard with {} GB max memory.".format(
round(redis_max_memory / 1e9, 2)))
# If redis_max_clients is provided, attempt to raise the number of maximum
# number of Redis clients.
if redis_max_clients is not None:
redis_client.config_set("maxclients", str(redis_max_clients))
elif resource is not None:
# If redis_max_clients is not provided, determine the current ulimit.
# We will use this to attempt to raise the maximum number of Redis
# clients.
current_max_clients = int(
redis_client.config_get("maxclients")["maxclients"])
# The below command should be the same as doing ulimit -n.
ulimit_n = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
# The quantity redis_client_buffer appears to be the required buffer
# between the maximum number of redis clients and ulimit -n. That is,
# if ulimit -n returns 10000, then we can set maxclients to
# 10000 - redis_client_buffer.
redis_client_buffer = 32
if current_max_clients < ulimit_n - redis_client_buffer:
redis_client.config_set("maxclients",
ulimit_n - redis_client_buffer)
# Increase the hard and soft limits for the redis client pubsub buffer to
# 128MB. This is a hack to make it less likely for pubsub messages to be
# dropped and for pubsub connections to therefore be killed.
cur_config = (redis_client.config_get("client-output-buffer-limit")[
"client-output-buffer-limit"])
cur_config_list = cur_config.split()
assert len(cur_config_list) == 12
cur_config_list[8:] = ["pubsub", "134217728", "134217728", "60"]
redis_client.config_set("client-output-buffer-limit",
" ".join(cur_config_list))
# Put a time stamp in Redis to indicate when it was started.
redis_client.set("redis_start_time", time.time())
return port, process_info
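# A minimal usage sketch for _start_redis_instance (the executable path and
# session directory below are illustrative assumptions, not real paths):
#
#     port, proc_info = _start_redis_instance(
#         "/opt/cloudtik/redis-server", "/tmp/session", bind_address=None,
#         port=6379, num_retries=20, port_denylist=set())
#
# If port 6379 is already taken, the retry loop above picks fresh random
# ports, so the returned `port` may differ from the one requested.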
def start_log_monitor(redis_address,
logs_dir,
stdout_file=None,
stderr_file=None,
redis_password=<PASSWORD>,
fate_share=None,
max_bytes=0,
backup_count=0):
"""Start a log monitor process.
Args:
redis_address (str): The address of the Redis instance.
logs_dir (str): The directory of logging files.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
redis_password (str): The password of the redis server.
max_bytes (int): Log rotation parameter. Corresponding to
RotatingFileHandler's maxBytes.
backup_count (int): Log rotation parameter. Corresponding to
RotatingFileHandler's backupCount.
Returns:
ProcessInfo for the process that was started.
"""
log_monitor_filepath = os.path.join(CLOUDTIK_PATH, CLOUDTIK_CORE_PRIVATE_SERVICE,
"cloudtik_log_monitor.py")
command = [
sys.executable, "-u", log_monitor_filepath,
f"--redis-address={redis_address}", f"--logs-dir={logs_dir}",
f"--logging-rotate-bytes={max_bytes}",
f"--logging-rotate-backup-count={backup_count}",
]
if redis_password:
command += ["--redis-password", redis_password]
process_info = start_cloudtik_process(
command,
constants.PROCESS_TYPE_LOG_MONITOR,
stdout_file=stdout_file,
stderr_file=stderr_file,
fate_share=fate_share)
return process_info
def start_cluster_controller(redis_address,
logs_dir,
stdout_file=None,
stderr_file=None,
cluster_scaling_config=None,
redis_password=<PASSWORD>,
fate_share=None,
max_bytes=0,
backup_count=0,
controller_ip=None):
"""Run a process to control the cluster.
Args:
redis_address (str): The address that the Redis server is listening on.
logs_dir(str): The path to the log directory.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
cluster_scaling_config: path to autoscaling config file.
redis_password (str): The password of the redis server.
max_bytes (int): Log rotation parameter. Corresponding to
RotatingFileHandler's maxBytes.
backup_count (int): Log rotation parameter. Corresponding to
RotatingFileHandler's backupCount.
controller_ip (str): IP address of the machine that the controller will be
run on. Can be excluded, but required for scaler metrics.
Returns:
ProcessInfo for the process that was started.
"""
controller_path = os.path.join(CLOUDTIK_PATH, CLOUDTIK_CORE_PRIVATE_SERVICE, "cloudtik_cluster_controller.py")
command = [
sys.executable,
"-u",
controller_path,
f"--logs-dir={logs_dir}",
f"--redis-address={redis_address}",
f"--logging-rotate-bytes={max_bytes}",
f"--logging-rotate-backup-count={backup_count}",
]
if cluster_scaling_config:
command.append("--cluster-scaling-config=" + str(cluster_scaling_config))
if redis_password:
command.append("--redis-password=" + redis_password)
if controller_ip:
command.append("--controller-ip=" + controller_ip)
process_info = start_cloudtik_process(
command,
constants.PROCESS_TYPE_CLUSTER_CONTROLLER,
stdout_file=stdout_file,
stderr_file=stderr_file,
fate_share=fate_share)
return process_info
def start_node_controller(head, redis_address,
logs_dir,
resource_spec,
stdout_file=None,
stderr_file=None,
redis_password=None,
fate_share=None,
max_bytes=0,
backup_count=0,
controller_ip=None,
runtimes=None):
"""Run a process to controller the other processes.
Args:
head (bool): Whether to run this on head or worker
redis_address (str): The address that the Redis server is listening on.
logs_dir(str): The path to the log directory.
resource_spec (ResourceSpec): Resources for the node.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
        redis_password (str): The password of the redis server.
max_bytes (int): Log rotation parameter. Corresponding to
RotatingFileHandler's maxBytes.
backup_count (int): Log rotation parameter. Corresponding to
RotatingFileHandler's backupCount.
controller_ip (str): IP address of the machine that the controller will be
run on. Can be excluded, but required for scaler metrics.
runtimes (str): List of runtimes to pass to node controller
Returns:
ProcessInfo for the process that was started.
"""
controller_path = os.path.join(CLOUDTIK_PATH, CLOUDTIK_CORE_PRIVATE_SERVICE, "cloudtik_node_controller.py")
command = [
sys.executable,
"-u",
controller_path,
f"--logs-dir={logs_dir}",
f"--redis-address={redis_address}",
f"--logging-rotate-bytes={max_bytes}",
f"--logging-rotate-backup-count={backup_count}",
]
node_type = tags.NODE_KIND_HEAD if head else tags.NODE_KIND_WORKER
command.append("--node-type=" + node_type)
if redis_password:
command.append("--redis-password=" + redis_password)
if controller_ip:
command.append("--controller-ip=" + controller_ip)
assert resource_spec.resolved()
static_resources = resource_spec.to_resource_dict()
# Format the resource argument in a form like 'CPU,1.0,GPU,0,Custom,3'.
resource_argument = ",".join(
["{},{}".format(*kv) for kv in static_resources.items()])
command.append(f"--static_resource_list={resource_argument}")
if runtimes and len(runtimes) > 0:
command.append("--runtimes=" + quote(runtimes))
process_info = start_cloudtik_process(
command,
constants.PROCESS_TYPE_NODE_CONTROLLER,
stdout_file=stdout_file,
stderr_file=stderr_file,
fate_share=fate_share)
return process_info
```
#### File: _private/state/state_node_manager.py
```python
import logging
from cloudtik.core._private.state.state_table_store import StateTableStore
logger = logging.getLogger(__name__)
class StateNodeManager:
"""
Manager class for node information
"""
def __init__(self, state_table_store: StateTableStore):
self._state_table_store = state_table_store
def register_node(self, node_id, node_info):
self._state_table_store.get_node_table().put(node_id, node_info)
def drain_node(self, node_id):
# TODO: update node table info to DEAD instead of delete it.
self._state_table_store.get_node_table().delete(node_id)
def get_node_table(self):
        return self._state_table_store.get_node_table().get_all()
```
#### File: _private/state/store_client.py
```python
import logging
from cloudtik.core._private.state.redis_shards_client import \
RedisShardsClient, generate_match_pattern, generate_redis_key
from cloudtik.core._private.state.redis_shards_scanner import RedisShardsScanner
logger = logging.getLogger(__name__)
class StoreClient:
def __init__(self, redis_shards_client: RedisShardsClient):
self._redis_shards_client = redis_shards_client
def put(self, table_name, key, value):
redis_key = generate_redis_key(table_name, key)
redis_shard = self._redis_shards_client.get_shard(redis_key)
redis_shard.put(redis_key, value)
def get(self, table_name, key):
redis_key = generate_redis_key(table_name, key)
redis_shard = self._redis_shards_client.get_shard(redis_key)
return redis_shard.get(redis_key)
def delete(self, table_name, key):
redis_key = generate_redis_key(table_name, key)
redis_shard = self._redis_shards_client.get_shard(redis_key)
redis_shard.delete(redis_key)
def get_all(self, table_name):
match_pattern = generate_match_pattern(table_name)
scanner = RedisShardsScanner(self._redis_shards_client, table_name)
return scanner.scan_keys_and_values(match_pattern)
```
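The StoreClient above routes every table-scoped key to one Redis shard and operates on that shard alone. Below is a minimal self-contained sketch of the same routing pattern, with plain dictionaries standing in for shards (the `table:key` format and hash-based shard pick here are illustrative assumptions, not the library's actual scheme):
```python
# Sketch: table-scoped keys fanned out over N in-memory "shards".
class DictShardsClient:
    def __init__(self, num_shards=3):
        self.shards = [{} for _ in range(num_shards)]

    def get_shard(self, redis_key):
        # Route by hash so the same key always lands on the same shard.
        return self.shards[hash(redis_key) % len(self.shards)]

def generate_key(table_name, key):
    return f"{table_name}:{key}"  # assumed key format, for illustration only

client = DictShardsClient()
redis_key = generate_key("node_table", "node-1")
client.get_shard(redis_key)[redis_key] = b"node-info"
assert client.get_shard(redis_key)[redis_key] == b"node-info"
```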
#### File: _private/_azure/utils.py
```python
from typing import Any, Dict
def get_azure_config(provider_config, node_type_config: Dict[str, Any], node_id: str):
config_dict = {}
azure_storage_type = provider_config.get("azure_cloud_storage", {}).get("azure.storage.type")
if azure_storage_type:
config_dict["AZURE_STORAGE_TYPE"] = azure_storage_type
azure_storage_account = provider_config.get("azure_cloud_storage", {}).get("azure.storage.account")
if azure_storage_account:
config_dict["AZURE_STORAGE_ACCOUNT"] = azure_storage_account
azure_container = provider_config.get("azure_cloud_storage", {}).get(
"azure.container")
if azure_container:
config_dict["AZURE_CONTAINER"] = azure_container
azure_account_key = provider_config.get("azure_cloud_storage", {}).get(
"azure.account.key")
if azure_account_key:
config_dict["AZURE_ACCOUNT_KEY"] = azure_account_key
if node_type_config is not None:
user_assigned_identity_client_id = node_type_config.get(
"azure.user.assigned.identity.client.id")
if user_assigned_identity_client_id:
config_dict["AZURE_MANAGED_IDENTITY_CLIENT_ID"] = user_assigned_identity_client_id
user_assigned_identity_tenant_id = node_type_config.get(
"azure.user.assigned.identity.tenant.id")
if user_assigned_identity_tenant_id:
config_dict["AZURE_MANAGED_IDENTITY_TENANT_ID"] = user_assigned_identity_tenant_id
return config_dict
def _get_node_info(node):
node_info = {"node_id": node["name"].split("-")[-1],
"instance_type": node["vm_size"],
"private_ip": node["internal_ip"],
"public_ip": node["external_ip"],
"instance_status": node["status"]}
node_info.update(node["tags"])
return node_info
```
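A quick usage sketch for get_azure_config above. The sample values are invented, but with the function in scope the mapping from provider config keys to environment-style keys follows directly from the code:
```python
provider_config = {
    "azure_cloud_storage": {
        "azure.storage.type": "blob",
        "azure.storage.account": "myaccount",
        "azure.container": "mycontainer",
    }
}
node_type_config = {"azure.user.assigned.identity.client.id": "client-123"}

config = get_azure_config(provider_config, node_type_config, node_id="node-0")
# config == {
#     "AZURE_STORAGE_TYPE": "blob",
#     "AZURE_STORAGE_ACCOUNT": "myaccount",
#     "AZURE_CONTAINER": "mycontainer",
#     "AZURE_MANAGED_IDENTITY_CLIENT_ID": "client-123",
# }
```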
#### File: runtime/hdfs/runtime.py
```python
import logging
from typing import Any, Dict
from cloudtik.core.node_provider import NodeProvider
from cloudtik.core.runtime import Runtime
from cloudtik.runtime.hdfs.utils import _config_runtime_resources, _with_runtime_environment_variables, \
_is_runtime_scripts, _get_runnable_command, _get_runtime_processes, _validate_config, \
_verify_config, _get_runtime_logs, _get_runtime_commands, \
_get_defaults_config, _get_useful_urls, publish_service_uri
logger = logging.getLogger(__name__)
class HDFSRuntime(Runtime):
"""Implementation for HDFS Runtime"""
def __init__(self, runtime_config: Dict[str, Any]) -> None:
Runtime.__init__(self, runtime_config)
def prepare_config(self, cluster_config: Dict[str, Any]) -> Dict[str, Any]:
"""Prepare runtime specific configurations"""
return _config_runtime_resources(cluster_config)
def validate_config(self, cluster_config: Dict[str, Any], provider: NodeProvider):
"""Validate cluster configuration from runtime perspective."""
_validate_config(cluster_config, provider)
def verify_config(self, cluster_config: Dict[str, Any], provider: NodeProvider):
"""Verify cluster configuration at the last stage of bootstrap.
The verification may mean a slow process to check with a server"""
_verify_config(cluster_config, provider)
def with_environment_variables(
self, config: Dict[str, Any], provider: NodeProvider,
node_id: str) -> Dict[str, Any]:
"""Export necessary runtime environment variables for running node commands.
For example: {"ENV_NAME": value}
"""
return _with_runtime_environment_variables(
self.runtime_config, config=config, provider=provider, node_id=node_id)
def cluster_booting_completed(
self, cluster_config: Dict[str, Any], head_node_id: str) -> None:
publish_service_uri(cluster_config, head_node_id)
def get_runnable_command(self, target: str):
"""Return the runnable command for the target script.
For example: ["bash", target]
"""
if not _is_runtime_scripts(target):
return None
return _get_runnable_command(target)
def get_runtime_commands(self, cluster_config: Dict[str, Any]) -> Dict[str, Any]:
"""Returns a copy of runtime commands to run at different stages"""
return _get_runtime_commands(self.runtime_config, cluster_config)
def get_defaults_config(self, cluster_config: Dict[str, Any]) -> Dict[str, Any]:
"""Returns a copy of runtime config"""
return _get_defaults_config(self.runtime_config, cluster_config)
def get_useful_urls(self, cluster_head_ip: str):
return _get_useful_urls(cluster_head_ip)
@staticmethod
def get_logs() -> Dict[str, str]:
"""Return a dictionary of name to log paths.
For example {"server-a": "/tmp/server-a/logs"}
"""
return _get_runtime_logs()
@staticmethod
def get_processes():
"""Return a list of processes for this runtime.
Format:
#1 Keyword to filter,
#2 filter by command (True)/filter by args (False)
#3 The third element is the process name.
        #4 The fourth element: if "node", the process should run on all nodes; if "head", only on the head node.
For example
["cloudtik_cluster_controller.py", False, "ClusterController", "head"],
"""
return _get_runtime_processes()
```
#### File: runtime/hdfs/utils.py
```python
import os
from typing import Any, Dict
from cloudtik.core._private.utils import merge_rooted_config_hierarchy, _get_runtime_config_object
from cloudtik.core._private.workspace.workspace_operator import _get_workspace_provider
from cloudtik.core._private.providers import _get_node_provider
RUNTIME_PROCESSES = [
# The first element is the substring to filter.
# The second element, if True, is to filter ps results by command name.
# The third element is the process name.
    # The fourth element: if "node", the process should run on all nodes; if "head", only on the head node.
["proc_namenode", False, "NameNode", "head"],
["proc_datanode", False, "DataNode", "worker"],
]
RUNTIME_ROOT_PATH = os.path.abspath(os.path.dirname(__file__))
def _config_runtime_resources(cluster_config: Dict[str, Any]) -> Dict[str, Any]:
return cluster_config
def publish_service_uri(cluster_config: Dict[str, Any], head_node_id: str) -> None:
workspace_name = cluster_config["workspace_name"]
if workspace_name is None:
return
provider = _get_node_provider(cluster_config["provider"], cluster_config["cluster_name"])
head_internal_ip = provider.internal_ip(head_node_id)
service_uris = {"hdfs-namenode-uri": "hdfs://{}:9000".format(head_internal_ip)}
workspace_provider = _get_workspace_provider(cluster_config["provider"], workspace_name)
workspace_provider.publish_global_variables(cluster_config, service_uris)
def _get_runtime_processes():
return RUNTIME_PROCESSES
def _is_runtime_scripts(script_file):
return False
def _get_runnable_command(target):
return None
def _with_runtime_environment_variables(runtime_config, config, provider, node_id: str):
runtime_envs = {"HDFS_ENABLED": True}
return runtime_envs
def _get_runtime_logs():
hadoop_logs_dir = os.path.join(os.getenv("HADOOP_HOME"), "logs")
all_logs = {"hadoop": hadoop_logs_dir}
return all_logs
def _validate_config(config: Dict[str, Any], provider):
pass
def _verify_config(config: Dict[str, Any], provider):
pass
def _get_config_object(cluster_config: Dict[str, Any], object_name: str) -> Dict[str, Any]:
config_root = os.path.join(RUNTIME_ROOT_PATH, "config")
runtime_commands = _get_runtime_config_object(config_root, cluster_config["provider"], object_name)
return merge_rooted_config_hierarchy(config_root, runtime_commands, object_name)
def _get_runtime_commands(runtime_config: Dict[str, Any],
cluster_config: Dict[str, Any]) -> Dict[str, Any]:
return _get_config_object(cluster_config, "commands")
def _get_defaults_config(runtime_config: Dict[str, Any],
cluster_config: Dict[str, Any]) -> Dict[str, Any]:
return _get_config_object(cluster_config, "defaults")
def _get_useful_urls(cluster_head_ip):
urls = [
{"name": "HDFS Web UI", "url": "http://{}:9870".format(cluster_head_ip)},
]
return urls
```
|
{
"source": "jerrychens/PowerOutagePredictor",
"score": 2
}
|
#### File: PowerOutagePredictor/SVM/test_SVMClassifier.py
```python
from SVM.SVMClassifier import *
import numpy as np
from sklearn.externals import joblib
from nose.tools import with_setup
import math
# Setup
scaler = joblib.load("scaler.pkl")
zero = np.array([[12,70,80,8,5,10,0.]])
one = np.array([[9,50,70,20,15,30,0.5]])
two = np.array([[8,30,60,50,30,50,0.5]])
zero = zero.reshape(1, -1)
one = one.reshape(1, -1)
two = two.reshape(1, -1)
"""
Test probability outputs of model.
"""
def test_predictOutageProba():
zeroPredict = predictOutageProba(zero)
assert math.isclose(0.96614198, zeroPredict[0,0], rel_tol=1e-05)
assert math.isclose(0.03266029, zeroPredict[0,1], rel_tol=1e-05)
assert math.isclose(0.00119773, zeroPredict[0,2], rel_tol=1e-05)
onePredict = predictOutageProba(one)
assert math.isclose(0.88164684, onePredict[0,0], rel_tol=1e-05)
assert math.isclose(0.11012898, onePredict[0,1], rel_tol=1e-05)
assert math.isclose(0.00822418, onePredict[0,2], rel_tol=1e-05)
twoPredict = predictOutageProba(two)
assert math.isclose(0.04806885, twoPredict[0,0], rel_tol=1e-05)
assert math.isclose(0.3045513, twoPredict[0,1], rel_tol=1e-05)
assert math.isclose(0.64737985, twoPredict[0,2], rel_tol=1e-05)
"""
Test classification outputs of model
"""
def test_predictOutage():
assert predictOutage(zero) == 0
assert predictOutage(one) == 1
assert predictOutage(two) == 2
return
```
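The reshape(1, -1) calls above exist because scikit-learn estimators expect 2-D input of shape (n_samples, n_features), so a single 7-feature sample has to become a 1x7 row first:
```python
import numpy as np

sample = np.array([12, 70, 80, 8, 5, 10, 0.])
row = sample.reshape(1, -1)  # one sample, seven features
assert row.shape == (1, 7)
```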
|
{
"source": "JerryCui/MF1_SDK",
"score": 3
}
|
#### File: ui/pic/merge_kfpkg.py
```python
import sys, os
import json, zipfile, struct, hashlib
import tempfile
def mergeBinProccess(files, fileSaveName):
files.sort(key=lambda file:file[1])
bin = b''
aesFlag = b'\x00'
startAddrLast = files[0][1]
fileSizeLast = 0
if files[0][2]: # firmware
name = files[0][0]
size = os.path.getsize(name)
f = open(name, "rb")
firmware = f.read()
f.close()
bin += aesFlag # add aes key flag
bin += struct.pack('I', size) # add firmware length
bin += firmware # add firmware content
sha256Hash = hashlib.sha256(bin).digest()
bin += sha256Hash # add parity
startAddrLast = 0
fileSizeLast = len(bin)
files.remove(files[0])
for file, addr, firmware, enable in files:
if not enable:
continue
fillLen = addr - (startAddrLast + fileSizeLast)
if fillLen > 0: # fill 0xFF
fill = bytearray([0xFF for i in range(fillLen)])
bin += fill
with open(file, "rb") as f: # add bin file content
bin += f.read()
startAddrLast = addr
fileSizeLast = os.path.getsize(file)
with open(fileSaveName, "wb") as f:
f.write(bin)
print("Save merged bin file success")
def getBurnFilesInfoFromKfpkg(kfpkg):
tempDir = tempfile.gettempdir()
listFileName = "flash-list.json"
try:
zip = zipfile.ZipFile(kfpkg, mode="r")
zip.extract(listFileName, tempDir)
with open(tempDir+"/"+listFileName) as f:
info = json.load(f)
filesInfo = {}
for fileInfo in info["files"]:
filesInfo[fileInfo["bin"]] = [fileInfo["address"], fileInfo["sha256Prefix"]]
print(filesInfo, zip.namelist())
binFiles = zip.namelist()
binFiles.remove(listFileName)
zipTempFiles = []
for file in binFiles:
zip.extract(file, tempDir)
zipTempFiles.append( (tempDir + "/" + file, filesInfo[file][0], filesInfo[file][1], True ) )
zip.close()
except Exception as e:
return (None, str(e))
return (zipTempFiles,"get file info ok")
def checkFilesAddrValid(fileType, files):
if fileType == "bin":
files.sort(key=lambda file:file[1])
startAddr = -1
fileSize = 0
fileShortLast = ""
count = 0
for file, addr, firmware, enable in files:
if not enable:
continue
fileShort = ".../"+"/".join(file.split("/")[-2:])
if startAddr + fileSize > addr:
return (False, ("File address error")+": {} {} 0x{:X}, {} {} {} [0x{:X},0x{:X}]".format(fileShort, ("start from"), addr, ("but file"), fileShortLast, ("address range is"), startAddr, startAddr+fileSize) )
fileSize = os.path.getsize(file)
startAddr = addr
fileShortLast = fileShort
count += 1
if count == 0:
return (False, ("No file selected"))
return (True, "FilesAddrValid")
def mergeBin(file):
tablename = os.path.splitext(file)[0]
fileType = "bin"
files, msg = getBurnFilesInfoFromKfpkg(file)
print(msg)
if not files:
print("error at get kfpkg file info")
return
ok, msg = checkFilesAddrValid(fileType, files)
print(msg)
if not ok:
print("file addr error")
return
# pack and save
mergeBinProccess(files, tablename +".bin")
mergeBin(sys.argv[1])
```
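The heart of mergeBinProccess above is laying each blob down at its flash address and padding the gaps with 0xFF (erased flash reads back as 0xFF). A stripped-down sketch of just that padding step, with made-up addresses:
```python
def merge_at_addresses(parts):
    """parts: list of (start_addr, bytes) pairs."""
    out = b''
    cursor = min(addr for addr, _ in parts)
    for addr, blob in sorted(parts, key=lambda p: p[0]):
        gap = addr - cursor
        if gap > 0:
            out += b'\xFF' * gap  # fill unused flash with 0xFF
        out += blob
        cursor = addr + len(blob)
    return out

merged = merge_at_addresses([(0x0000, b'\x01\x02'), (0x0005, b'\xAA')])
assert merged == b'\x01\x02\xff\xff\xff\xaa'
```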
#### File: ui/pic/pack_kfpkg.py
```python
import sys,os,json,zipfile,tempfile
class KFPKG():
def __init__(self):
self.fileInfo = {"version": "0.1.0", "files": []}
self.filePath = {}
self.burnAddr = []
def addFile(self, addr, path, prefix=False):
if not os.path.exists(path):
raise ValueError(("FilePathError"))
if addr in self.burnAddr:
raise ValueError(("Burn dddr duplicate")+":0x%06x" %(addr))
f = {}
f_name = os.path.split(path)[1]
f["address"] = int(addr)
f["bin"] = f_name
f["sha256Prefix"] = prefix
self.fileInfo["files"].append(f)
self.filePath[f_name] = path
self.burnAddr.append(addr)
def listDumps(self):
kfpkg_json = json.dumps(self.fileInfo, indent=4)
return kfpkg_json
def listDump(self, path):
with open(path, "w") as f:
f.write(json.dumps(self.fileInfo, indent=4))
def listLoads(self, kfpkgJson):
self.fileInfo = json.loads(kfpkgJson)
    def listLoad(self, path):
with open(path) as f:
self.fileInfo = json.load(f)
def save(self, path):
listName = os.path.join(tempfile.gettempdir(), "kflash_gui_tmp_list.json")
self.listDump(listName)
try:
with zipfile.ZipFile(path, "w") as zip:
for name,path in self.filePath.items():
zip.write(path, arcname=name, compress_type=zipfile.ZIP_DEFLATED)
zip.write(listName, arcname="flash-list.json", compress_type=zipfile.ZIP_DEFLATED)
zip.close()
except Exception as e:
os.remove(listName)
raise e
os.remove(listName)
kfpkg = KFPKG()
kfpkg.addFile(sys.argv[1],sys.argv[2])
kfpkg.save(sys.argv[3])
```
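A quick sketch of the KFPKG workflow above (file names are placeholders; addFile raises ValueError when the path does not exist, and save zips the binaries together with flash-list.json):
```python
kfpkg = KFPKG()
kfpkg.addFile(0, "firmware.bin")  # placeholder path; must exist on disk
kfpkg.save("firmware.kfpkg")      # writes the zip next to your script
```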
|
{
"source": "jerrydark/antenna-tracking",
"score": 3
}
|
#### File: antenna-tracking/Actuator/abstract_servo.py
```python
from Actuator import PID
import Adafruit_PCA9685
from Utility.abstract_process import processAbstract
# temporary solution for path
#sys.path.append(os.getcwd() + "/../")
import time
class AbstractServo(processAbstract):
def __init__(self, antenna_shared_data, setpoint_shared_data, pin_number):
processAbstract.__init__(self)
self.pid = PID.PID()
self.pid.setSetPoint(0)
self.antenna_data = antenna_shared_data
self.setpoint_data = setpoint_shared_data
self.pin_number = pin_number
# self.servo = PWM.Servo()
self.servo = Adafruit_PCA9685.PCA9685()
self.servo.set_pwm_freq(60)
def process(self):
#self.servo = PWM.Servo()
# time.sleep(1)
while self.kill_pill.empty():
# schedule.run_pending()
self.job()
time.sleep(0.01)
def job(self):
""" abstract method"""
raise
    def set_servo_pulse(self, channel, pulse):
pulse_length = 1000000 # 1,000,000 us per second
pulse_length //= 60 # 60 Hz
#print('{0}us per period'.format(pulse_length))
pulse_length //= 4096 # 12 bits of resolution
#print('{0}us per bit'.format(pulse_length))
pulse *= 1000
pulse //= pulse_length
self.servo.set_pwm(channel, 0, pulse)
"""
Example:
def job(self):
self.pid.setSetPoint(setpoint_shared_data.getYaw())
up = self.pid.update(self.antenna_data.getYaw())
pulse_width = self.pid.pidscale(up)
self.servo.set_servo(self.pin_number, pulse_width)
"""
# usage
# if __name__ == "__main__":
# ant = antenna_shared_data()
# point = setpoint_shared_data()
# s = ServoYaw(ant, point, [pin], [min_angle], [max_angle])
# s.start()
# print(scale(200, (0,360), (-180,180)))
```
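The arithmetic in set_servo_pulse converts a pulse width in milliseconds into a 12-bit on-time tick count at the 60 Hz PWM frequency set in __init__. Here is the same math as a standalone function, with a worked value (a 1.5 ms centre pulse comes out at 375 ticks):
```python
def pulse_ms_to_ticks(pulse_ms, freq_hz=60, resolution=4096):
    period_us = 1_000_000 // freq_hz       # ~16666 us per PWM period at 60 Hz
    us_per_tick = period_us // resolution  # ~4 us per tick at 12-bit resolution
    return int(pulse_ms * 1000) // us_per_tick

assert pulse_ms_to_ticks(1.5) == 375  # typical servo centre position
```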
#### File: antenna-tracking/Control/antennaControl.py
```python
import GeneralSettings
from Utility.abstract_process import processAbstract
from Actuator.pitch_servo import PitchServo
from Actuator.yaw_servo import YawServo
from Sensors.imuYaw import imuYaw
from Sensors.imuPitch import imuPitch
from Vehicle.uavInteraction import mavlinkHandler
#from debugger import Debugger
from Sensors.gps import GPSClient
import time
import math
import sys
from multiprocessing import Value
class antennaControl(processAbstract):
def __init__(self, antenna_data, uav_data, actuator_setpoint):
processAbstract.__init__(self)
# classes for shared data across multiprocess
# compre from utility.multiprocessDataType
self.antenna_data = antenna_data
self.uav_data = uav_data
self.actuator_setpoint = actuator_setpoint
# self.period = 0.2 # 200 ms of period might not be optimal
self.antenna_data.setYawOffset(math.radians(
GeneralSettings.MAGNETIC_DECLINATION))
self.yaw = YawServo(
self.antenna_data,
self.actuator_setpoint,
GeneralSettings.servo_yaw_pin,
0, 0)
self.pitch = PitchServo( # processAbstract):
self.antenna_data,
self.actuator_setpoint,
GeneralSettings.servo_yaw_pin,
math.radians(10), math.radians(70))
self.imuYaw = imuYaw(self.antenna_data)
self.imuPitch = imuPitch(self.antenna_data)
self.uav = mavlinkHandler(self.uav_data)
self.antenna_data.setLon(GeneralSettings.default_lon)
self.antenna_data.setLat(GeneralSettings.default_lat)
self.antenna_data.setAlt(GeneralSettings.default_rel_alt)
#self.debugger = Debugger(self.antenna_data, self.uav_data, self.actuator_setpoint)
self.gps = GPSClient(self.antenna_data)
self.running = False
def process(self):
if not self.running:
self.gps.start()
self.imuYaw.start()
self.imuPitch.start()
self.uav.start()
self.yaw.start()
self.pitch.start()
self.running = True
while self.running:
self.actuator_setpoint.setPitch(self._calculate_pitch(
self.antenna_data.getLat(),
self.antenna_data.getLon(),
self.antenna_data.getAlt(),
self.uav_data.getLat(),
self.uav_data.getLon(),
self.uav_data.getAlt()))
_bearing = self._calculate_bearing(
self.antenna_data.getLat(),
self.antenna_data.getLon(),
self.uav_data.getLat(),
self.uav_data.getLon())
self.actuator_setpoint.setYaw(
_bearing + self.antenna_data.getYawOffset())
time.sleep(0.1)
def stop(self):
self.running = False
def soft_stop_everything(self):
self.running = False
self.yaw.soft_process_stop()
self.pitch.soft_process_stop()
self.gps.soft_process_stop()
self.imuYaw.soft_process_stop()
self.imuPitch.soft_process_stop()
self.uav.soft_process_stop()
# time.sleep(1)
# self.start()
def _calculate_pitch(self, lat_sat, long_sat, alt_sat, lat_drone, long_drone, alt_drone):
""" Calculate the pitch using haversine formula """
R = 6371000
lat_sat = math.radians(lat_sat)
lat_drone = math.radians(lat_drone)
long_sat = math.radians(long_sat)
long_drone = math.radians(long_drone)
delta_long = long_drone - long_sat
delta_lat = lat_drone - lat_sat
delta_alt = alt_drone - alt_sat
a = math.pow(math.sin(delta_lat / 2), 2) + math.cos(lat_sat) * \
math.cos(lat_drone) * math.pow(math.sin(delta_long / 2), 2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
d = R * c
pitch_angle = math.atan2(delta_alt, d)
return pitch_angle
def _calculate_bearing(self, lat_sat, long_sat, lat_drone, long_drone):
""" Calculate the bearing based on antenna and uav gps coordinates"""
lat_sat = math.radians(lat_sat)
lat_drone = math.radians(lat_drone)
long_sat = math.radians(long_sat)
long_drone = math.radians(long_drone)
delta_long = long_drone - long_sat
delta_lat = lat_drone - lat_sat
y = math.sin(delta_long) * math.cos(lat_drone)
x = math.cos(lat_sat) * math.sin(lat_drone) - \
math.sin(lat_sat) * math.cos(lat_drone) * math.cos(delta_long)
bearing_initial = math.atan2(y, x)
return bearing_initial
```
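_calculate_bearing above is the standard initial great-circle bearing formula. As a standalone sanity check with made-up coordinates, a target due east of the antenna along the equator should come out at exactly pi/2:
```python
import math

def initial_bearing(lat1, lon1, lat2, lon2):
    """Initial great-circle bearing from point 1 to point 2, in radians."""
    lat1, lat2 = math.radians(lat1), math.radians(lat2)
    delta_lon = math.radians(lon2 - lon1)
    y = math.sin(delta_lon) * math.cos(lat2)
    x = math.cos(lat1) * math.sin(lat2) - \
        math.sin(lat1) * math.cos(lat2) * math.cos(delta_lon)
    return math.atan2(y, x)

assert math.isclose(initial_bearing(0, 0, 0, 1), math.pi / 2)
```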
#### File: jerrydark/antenna-tracking/flasky.py
```python
import time
from flask import Flask, Response, request
from Utility.abstract_process import processAbstract
from tabulate import tabulate
import math
#import time
#from Utility.MultiprocessDataType import antenna_shared_data, uav_shared_data, setpoint_shared_data
#from multiprocessing import Value
from Control.antennaControl import antennaControl
class Flasky(processAbstract):
def __init__(self, antenna_data, uav_data, actuator_setpoint):
processAbstract.__init__(self)
self.antenna_data = antenna_data
self.uav_data = uav_data
self.actuator_setpoint = actuator_setpoint
self.antenna_control = antennaControl(
self.antenna_data,
self.uav_data,
self.actuator_setpoint)
self.head = [' Pitch',
'P_goal',
' Yaw',
'Y_goal',
'D_Lat',
'D_Lon',
'D_Alt',
'A_Lat',
'A_Lon',
'A_Alt', ]
self.header = tabulate([[]], headers=self.head, tablefmt="orgtbl")
self.app = Flask(__name__)
self.running = False
self.refreshing = False
self.input_template = '<form action="/" method="POST"><p><input type="{}" name="{}" value="{}"></p></form>'
self.refresher_state = ['REFRESHER ON', 'REFRESHER OFF']
self.ctrl_state = ['START', 'STOP']
self.page = ''
@self.app.route("/", methods=['POST', 'GET'])
def index():
if request.method == "POST":
if list(request.form)[0] == 'switch':
print('refresh')
if request.form['switch'] == self.refresher_state[1] and self.refreshing:
self.refreshing = False
# time.sleep(0.1)
if request.form['switch'] == self.refresher_state[0] and not self.refreshing:
# self.page += refresher
self.refreshing = True
elif list(request.form)[0] == 'antenna_ctrl':
print('ctrl')
if request.form['antenna_ctrl'] == 'START' and not self.running:
try:
self.antenna_control.start()
except:
self.antenna_control.soft_stop_everything()
self.antenna_control = antennaControl(
self.antenna_data, self.uav_data, self.actuator_setpoint)
self.antenna_control.start()
self.running = True
if request.form['antenna_ctrl'] == 'STOP' and self.running:
self.antenna_control.soft_stop_everything()
self.running = False
elif list(request.form)[0] == 'reset':
print('reset')
self.antenna_control.soft_stop_everything()
self.running = False
self.antenna_control = antennaControl(
self.antenna_data, self.uav_data, self.actuator_setpoint)
try:
self.antenna_control.start()
self.running = True
except:
pass
else:
print('offset')
try:
off_deg = float(request.form['offset'])
except:
print('Error on offset input')
off_deg = math.degrees(self.antenna_data.getYawOffset())
if off_deg >= 90:
off_deg = 90
if off_deg <= -90:
off_deg = -90
off = math.radians(off_deg)
self.antenna_data.setYawOffset(off)
def inner():
self.page = ''
body = [math.degrees(self.antenna_data.getPitch()),
math.degrees(self.actuator_setpoint.getPitch()),
math.degrees(self.antenna_data.getYaw()),
math.degrees(self.actuator_setpoint.getYaw()),
self.uav_data.getLat(),
self.uav_data.getLon(),
self.uav_data.getAlt(),
self.antenna_data.getLat(),
self.antenna_data.getLon(),
self.antenna_data.getAlt()
]
values = tabulate(
[map(str, body)],
headers=self.head,
tablefmt="orgtbl",
floatfmt=".2f"
).split('|\n')[-1]
# refresher switch
self.page += self.input_template.format(
'submit',
'switch',
self.refresher_state[self.refreshing]
)
# antenna Control
self.page += self.input_template.format(
'submit',
'antenna_ctrl',
self.ctrl_state[self.running]
)
# offset
self.page += 'Offset:'
self.page += self.input_template.format(
'text',
'offset',
''
)
# table
self.page += "<textarea cols='120' rows='15'>"
self.page += self.header + '\n' + values + ' \n'
# list
self.page += '\n'.join(
list(map(lambda x: '\t:\t'.join(map(str, x)), zip(self.head, body))))
self.page += "</textarea>"
# Offset value
self.page += '\n<p>Offset: ' + \
str(round(math.degrees(self.antenna_data.getYawOffset()), 2)) + '</p>\n'
# refresher
if self.refreshing:
self.page += '\n<meta http-equiv="refresh" content=1>\n'
self.page += 'running:' + str(self.running)
# reset
self.page += self.input_template.format(
'submit',
'reset',
'RESET'
)
return self.page
return Response(inner(), mimetype='text/html')
def process(self):
self.app.run(host='0.0.0.0')
# for local testing
"""
antenna_data = antenna_shared_data()
uav_data = uav_shared_data()
setpoint_data = setpoint_shared_data()
flasky = Flasky(antenna_data, uav_data, setpoint_data)
flasky.start()
"""
```
|
{
"source": "JerryDog/horizon-f-road",
"score": 2
}
|
#### File: test/tests/middleware.py
```python
from mock import patch
import django
from django.conf import settings
from django.core.exceptions import MiddlewareNotUsed
from django.http import HttpResponseRedirect # noqa
from django.test.utils import override_settings
from django.utils import timezone
from horizon import exceptions
from horizon import middleware
from horizon.test import helpers as test
class MiddlewareTests(test.TestCase):
def setUp(self):
self._timezone_backup = timezone.get_current_timezone_name()
return super(MiddlewareTests, self).setUp()
def tearDown(self):
timezone.activate(self._timezone_backup)
return super(MiddlewareTests, self).tearDown()
def test_redirect_login_fail_to_login(self):
url = settings.LOGIN_URL
request = self.factory.post(url)
mw = middleware.HorizonMiddleware()
resp = mw.process_exception(request, exceptions.NotAuthorized())
resp.client = self.client
if django.VERSION >= (1, 9):
self.assertRedirects(resp, settings.TESTSERVER + url)
else:
self.assertRedirects(resp, url)
def test_process_response_redirect_on_ajax_request(self):
url = settings.LOGIN_URL
mw = middleware.HorizonMiddleware()
request = self.factory.post(url,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
request.META['HTTP_X_REQUESTED_WITH'] = 'XMLHttpRequest'
request.horizon = {'async_messages':
[('error', 'error_msg', 'extra_tag')]}
response = HttpResponseRedirect(url)
response.client = self.client
resp = mw.process_response(request, response)
self.assertEqual(200, resp.status_code)
self.assertEqual(url, resp['X-Horizon-Location'])
def test_timezone_awareness(self):
url = settings.LOGIN_REDIRECT_URL
mw = middleware.HorizonMiddleware()
request = self.factory.get(url)
request.session['django_timezone'] = 'America/Chicago'
mw.process_request(request)
self.assertEqual(
timezone.get_current_timezone_name(), 'America/Chicago')
request.session['django_timezone'] = 'Europe/Paris'
mw.process_request(request)
self.assertEqual(timezone.get_current_timezone_name(), 'Europe/Paris')
request.session['django_timezone'] = 'UTC'
mw.process_request(request)
self.assertEqual(timezone.get_current_timezone_name(), 'UTC')
class OperationLogMiddlewareTest(test.TestCase):
http_host = u'test_host'
http_referer = u'/dashboard/test_http_referer'
def test_middleware_not_used(self):
with self.assertRaises(MiddlewareNotUsed):
middleware.OperationLogMiddleware()
def _test_ready_for_post(self):
url = settings.LOGIN_URL
request = self.factory.post(url)
request.META['HTTP_HOST'] = self.http_host
request.META['HTTP_REFERER'] = self.http_referer
request.POST = {
"username": u"admin",
"password": u"<PASSWORD>"
}
request.user.username = u'test_user_name'
response = HttpResponseRedirect(url)
response.client = self.client
return request, response
def _test_ready_for_get(self):
url = '/dashboard/project/?start=2016-03-01&end=2016-03-11'
request = self.factory.get(url)
request.META['HTTP_HOST'] = self.http_host
request.META['HTTP_REFERER'] = self.http_referer
request.user.username = u'test_user_name'
response = HttpResponseRedirect(url)
response.client = self.client
return request, response
@override_settings(OPERATION_LOG_ENABLED=True)
@patch(('horizon.middleware.operation_log.OperationLogMiddleware.'
'OPERATION_LOG'))
def test_process_response_for_post(self, mock_logger):
olm = middleware.OperationLogMiddleware()
request, response = self._test_ready_for_post()
resp = olm.process_response(request, response)
self.assertTrue(mock_logger.info.called)
self.assertEqual(302, resp.status_code)
log_args = mock_logger.info.call_args[0]
logging_str = log_args[0] % log_args[1]
self.assertTrue(request.user.username in logging_str)
self.assertTrue(self.http_referer in logging_str)
self.assertTrue(settings.LOGIN_URL in logging_str)
self.assertTrue('POST' in logging_str)
self.assertTrue('302' in logging_str)
post_data = ['"username": "admin"', '"password": "********"']
for data in post_data:
self.assertTrue(data in logging_str)
@override_settings(OPERATION_LOG_ENABLED=True)
@override_settings(OPERATION_LOG_OPTIONS={'target_methods': ['GET']})
@patch(('horizon.middleware.operation_log.OperationLogMiddleware.'
'OPERATION_LOG'))
def test_process_response_for_get(self, mock_logger):
olm = middleware.OperationLogMiddleware()
request, response = self._test_ready_for_get()
resp = olm.process_response(request, response)
self.assertTrue(mock_logger.info.called)
self.assertEqual(302, resp.status_code)
log_args = mock_logger.info.call_args[0]
logging_str = log_args[0] % log_args[1]
self.assertTrue(request.user.username in logging_str)
self.assertTrue(self.http_referer in logging_str)
self.assertTrue(request.path in logging_str)
self.assertTrue('GET' in logging_str)
self.assertTrue('302' in logging_str)
@override_settings(OPERATION_LOG_ENABLED=True)
@patch(('horizon.middleware.operation_log.OperationLogMiddleware.'
'OPERATION_LOG'))
def test_process_response_for_get_no_target(self, mock_logger):
"""In default setting, Get method is not logged"""
olm = middleware.OperationLogMiddleware()
request, response = self._test_ready_for_get()
resp = olm.process_response(request, response)
self.assertEqual(0, mock_logger.info.call_count)
self.assertEqual(302, resp.status_code)
@override_settings(OPERATION_LOG_ENABLED=True)
@patch(('horizon.middleware.operation_log.OperationLogMiddleware.'
'OPERATION_LOG'))
def test_process_exception(self, mock_logger):
olm = middleware.OperationLogMiddleware()
request, response = self._test_ready_for_post()
exception = Exception("Unexpected error occurred.")
olm.process_exception(request, exception)
log_args = mock_logger.info.call_args[0]
logging_str = log_args[0] % log_args[1]
self.assertTrue(mock_logger.info.called)
self.assertTrue(request.user.username in logging_str)
self.assertTrue(self.http_referer in logging_str)
self.assertTrue(settings.LOGIN_URL in logging_str)
self.assertTrue('Unexpected error occurred.' in logging_str)
post_data = ['"username": "admin"', '"password": "********"']
for data in post_data:
self.assertTrue(data in logging_str)
```
|
{
"source": "JerryDot/advent-2021-py",
"score": 4
}
|
#### File: fast/day02/day02.py
```python
from typing import List, Tuple
"""
----------> (1,0)
|
|
|
|
v
(0,1)
"""
def parse_input() -> List[Tuple[int, int]]:
with open('day02.txt', 'rb') as f:
INPUT = map(lambda x: x.strip(), map(lambda x: x.decode("utf-8"), f.readlines()))
moves = []
for entry in INPUT:
direction, size = entry.split()[0], int(entry.split()[1])
if direction == "forward":
moves.append((size, 0))
elif direction == "backward":
moves.append((-size, 0))
elif direction == "down":
moves.append((0, size))
elif direction == "up":
moves.append((0, -size))
else:
raise Exception("This should not occur")
return moves
def part_one(p_input: List[Tuple[int, int]]) -> int:
position = [0, 0]
for move in p_input:
position[0] += move[0]
position[1] += move[1]
return position[0] * position[1]
def part_two(p_input: List[Tuple[int, int]]) -> int:
position = [0, 0]
aim = 0
for move in p_input:
position[0] += move[0]
position[1] += move[0] * aim
aim += move[1]
return position[0] * position[1]
if __name__ == "__main__":
print(part_one(parse_input()))
print(part_two(parse_input()))
```
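Part two reinterprets down/up as changing aim rather than depth, and a forward move then adds aim times distance to depth. With part_one and part_two from above in scope, the puzzle's published six-move sample checks out:
```python
# forward 5, down 5, forward 8, up 3, down 8, forward 2 in this file's encoding
sample = [(5, 0), (0, 5), (8, 0), (0, -3), (0, 8), (2, 0)]
assert part_one(sample) == 150
assert part_two(sample) == 900
```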
#### File: fast/day05/day05.py
```python
from typing import List, Tuple
def parse_input() -> List[str]:
with open('day05.txt', 'rb') as f:
INPUT = f.readlines()
def parse_line(line: bytes) -> Tuple[int, int, int, int]:
coords = line.split(b' -> ')
x_zero, y_zero = map(int, coords[0].split(b','))
x_one, y_one = map(int, coords[1].split(b','))
return x_zero, y_zero, x_one, y_one
return list(map(parse_line, INPUT))
def part_one(input: list) -> int:
world = [ [ 0 for i in range(1000) ] for j in range(1000) ]
for line in input:
if line[0] == line[2]:
lower = min(line[1], line[3])
larger = max(line[1], line[3])
for y in range(lower, larger + 1):
world[line[0]][y] += 1
if line[1] == line[3]:
lower = min(line[0], line[2])
larger = max(line[0], line[2])
for x in range(lower, larger + 1):
world[x][line[1]] += 1
total = 0
    for row in world:
        for cell in row:
            if cell >= 2:
                total += 1
return total
def part_two(input: list) -> int:
world = [ [ 0 for i in range(1000) ] for j in range(1000) ]
for line in input:
if line[0] == line[2]:
lower = min(line[1], line[3])
larger = max(line[1], line[3])
for y in range(lower, larger + 1):
world[line[0]][y] += 1
elif line[1] == line[3]:
lower = min(line[0], line[2])
larger = max(line[0], line[2])
for x in range(lower, larger + 1):
world[x][line[1]] += 1
else:
if line[0] < line[2]:
xrange = range(line[0], line[2] + 1)
else:
xrange = range(line[0], line[2] - 1, -1)
if line[1] < line[3]:
yrange = range(line[1], line[3] + 1)
else:
yrange = range(line[1], line[3] - 1, -1)
for x,y in zip(xrange, yrange):
world[x][y] += 1
total = 0
    for row in world:
        for cell in row:
            if cell >= 2:
                total += 1
return total
if __name__ == "__main__":
print(part_one(parse_input()))
print(part_two(parse_input()))
print(int('010000101111', 2) * int('111001111010', 2))
```
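The diagonal branch of part_two builds one range per axis, reversing it when the endpoint is smaller, and zips them so every step moves one cell in both x and y. A minimal sketch of that stepping logic:
```python
def diagonal_steps(x0, y0, x1, y1):
    """Cells on a 45-degree segment from (x0, y0) to (x1, y1), inclusive."""
    xs = range(x0, x1 + 1) if x0 < x1 else range(x0, x1 - 1, -1)
    ys = range(y0, y1 + 1) if y0 < y1 else range(y0, y1 - 1, -1)
    return list(zip(xs, ys))

assert diagonal_steps(1, 1, 3, 3) == [(1, 1), (2, 2), (3, 3)]
assert diagonal_steps(9, 7, 7, 9) == [(9, 7), (8, 8), (7, 9)]
```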
|
{
"source": "Jerrydotpy/Disgames",
"score": 3
}
|
#### File: Disgames/disgames/errors.py
```python
import discord
from discord.ext import commands
class PathNotFound(commands.CommandError):
"""Error raised when it can't find the path to stockfish_20011801_32bit.exe and or the path provided is wrong"""
def __init__(self):
super().__init__(
"Couldn't find the path to your stockfish_20011801_32bit.exe\n\nPlease head to https://www.dropbox.com/sh/75gzfgu7qo94pvh/AADMl6xkjU9qdx-Q5xeUJMxba/Stockfish%2011?dl=0&subfolder_nav_tracking=1 to download stockfish"
)
```
#### File: Disgames/disgames/__init__.py
```python
from .cog import *
from .constants import *
from discord.ext import commands
from .errors import PathNotFound
from .mixins import TicTacToe, TicTacToeButtons, TicTacToeReactions
from typing import NamedTuple
buttons = False
try:
from discord.ui import Button
from discord.ui import View
buttons = True
except ImportError:
pass
__title__ = 'disgames'
__author__ = 'andrewthederp'
__license__ = 'Apache License 2.0'
__copyright__ = 'Copyright 2021-2022 Andrewthederp and MarzaElise'
__version__ = '2.3.0'
def register_commands(
    bot, *, ignore=None, stockfish_path=None, ttt_reactions=False, button_commands=True
):
    ignore = list(ignore) if ignore else []  # avoid mutating a shared default or the caller's list
if button_commands:
ignore.extend(BUTTON_GAMES if not buttons else NON_BUTTON_GAMES)
else:
ignore.extend(BUTTON_GAMES)
games = []
if ttt_reactions:
ignore.append(TicTacToe)
ignore.append(TicTacToeButtons)
games.append(TicTacToeReactions)
games += [game for game in ALL_GAMES if game not in ignore]
class Games(*games):
def __init__(self, bot):
for cls in games:
cls.__init__(self, bot)
g = Games(bot)
if stockfish_path:
g.stockfish_path = stockfish_path
bot.add_cog(g)
class VersionInfo(NamedTuple):
major: int
minor: int
micro: int
```
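A minimal usage sketch for register_commands above. The token is a placeholder, and the exact bot setup (intents, async add_cog) varies with the discord.py version; this assumes the 1.x-style synchronous API that the function itself uses:
```python
import discord
from discord.ext import commands
from disgames import register_commands

bot = commands.Bot(command_prefix="!", intents=discord.Intents.default())
register_commands(bot, ttt_reactions=True)  # swaps TicTacToe for the reaction variant
bot.run("YOUR_BOT_TOKEN")  # placeholder token
```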
#### File: disgames/mixins/chess.py
```python
import discord
from discord.ext import commands
import chess
import os
from stockfish import Stockfish
from pathlib import Path
from ..errors import PathNotFound
class Chess(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.stockfish_path = None
try:
path = os.getcwd().split("\\")[2]
except IndexError:
pass
else:
stockfish_path = sorted(
Path(f"C:\\Users\\{path}").rglob("stockfish_20011801_32bit.exe")
)
if stockfish_path:
self.stockfish_path = stockfish_path[0]
def has_won_chess(self, board, member):
"""Checks if game is over"""
value = None
results = board.result()
if board.is_checkmate():
value = f"Checkmate, Winner: {member.mention} | Score: `{results}`"
elif board.is_stalemate():
value = f"Stalemate | Score: `{results}`"
elif board.is_insufficient_material():
value = (
f"Insufficient material left to continue the game | Score: `{results}`"
)
elif board.is_seventyfive_moves():
value = f"75-moves rule | Score: `{results}`"
elif board.is_fivefold_repetition():
value = f"Five-fold repitition. | Score: `{results}`"
return value
def create_chess_board(self, board, turn, member):
"""Creates the chess embed"""
url = f"http://www.fen-to-image.com/image/64/double/coords/{board.board_fen()}"
e = discord.Embed(
title="Chess",
description="To move a piece get it's current coordinates and the coordinates of where you want it to be, eg: `a2a4`",
color=discord.Color.blurple(),
)
e.add_field(name="Turn", value=turn.mention, inline=False)
e.add_field(
name=f"Legal moves",
value=", ".join([f"`{str(i)}`" for i in board.legal_moves]) or 'No legal moves',
inline=False,
)
e.add_field(name="Check", value=board.is_check(), inline=False)
if board.halfmove_clock >= 45:
e.add_field(name="Half move clock", value=board.halfmove_clock)
gameOver = self.has_won_chess(board, member)
if gameOver:
e.description = "GAME OVER"
e.add_field(name="Winner", value=gameOver)
e.set_image(url=url)
e.set_footer(
text='Send "end"/"stop"/"cancel" to stop the game | "back" to go back a step | "re"/"re-send"/"resend" to send a new embed'
)
return e
def get_best_chess_move(self, stockfish, board):
"""Gets the best move using the stockfish_20011801_32bit.exe"""
stockfish.set_fen_position(board.fen())
return stockfish.get_best_move()
@commands.command("chess")
async def chess(self, ctx, member: discord.Member = None):
"""a board game of strategic skill for two players, played on a chequered board on which each playing piece is moved according to precise rules. The object is to put the opponent's king under a direct attack from which escape is impossible"""
if member == None:
if not self.stockfish_path:
raise PathNotFound
await ctx.send("Please enter a a difficulty level from 0-20")
smort_level = await self.bot.wait_for(
"message",
check=lambda m: m.author == ctx.author and m.channel == ctx.channel,
)
try:
smort_level = int(smort_level.content)
except ValueError:
return await ctx.send("That's not a number")
else:
if smort_level not in range(21):
return await ctx.send("difficulty needs to be in 0-20")
try:
stockfish = Stockfish(
str(self.stockfish_path), parameters={"Skill Level": smort_level}
)
except (AttributeError, FileNotFoundError):
raise PathNotFound
board = chess.Board()
turn = ctx.author
e = self.create_chess_board(
board, turn, member if turn == ctx.author else ctx.author
)
msg = await ctx.send(embed=e)
while True:
if turn == ctx.author:
def check(m):
try:
if board.parse_uci(m.content.lower()):
return m.author == turn and m.channel == ctx.channel
else:
return False
except ValueError:
return m.content.lower() in ['end','stop','cancel','re','re-send','resend','back']
inp = await self.bot.wait_for("message",check=check)
if inp.content.lower() in ["stop", "cancel", "end"]:
return await ctx.send("Game ended", delete_after=5)
elif inp.content.lower() == "back":
try:
board.pop()
turn = member if turn == ctx.author else ctx.author
continue
except IndexError:
await ctx.send("Can't go back", delete_after=5)
continue
elif inp.content.lower() in ['re','re-send','resend']:
e = self.create_chess_board(board, turn, member if turn == ctx.author else ctx.author)
msg = await ctx.send(embed=e)
continue
else:
board.push_uci(inp.content.lower())
try:
await inp.delete()
except discord.Forbidden:
pass
else:
move = self.get_best_chess_move(stockfish, board)
move = chess.Move.from_uci(str(move))
board.push(move)
turn = ctx.bot.user if turn == ctx.author else ctx.author
won = self.has_won_chess(
board, ctx.bot.user if turn == ctx.author else ctx.author
)
if won:
e = self.create_chess_board(
board, turn, ctx.bot.user if turn == ctx.author else ctx.author
)
return await ctx.send(embed=e)
e = self.create_chess_board(
board, turn, ctx.bot.user if turn == ctx.author else ctx.author
)
await msg.edit(embed=e)
elif member.bot or member == ctx.author:
return await ctx.send(
f"Invalid Syntax: Can't play against {member.display_name}"
)
else:
board = chess.Board()
turn = ctx.author
e = self.create_chess_board(
board, turn, member if turn == ctx.author else ctx.author
)
msg = await ctx.send(embed=e)
while True:
def check(m):
try:
if board.parse_uci(m.content.lower()) or m.content.lower() in ['end','stop','cancel','re','re-send','resend','back']:
return m.author == turn and m.channel == ctx.channel
else:
return False
except ValueError:
return m.content.lower() in ['end','stop','cancel','re','re-send','resend','back']
inp = await ctx.bot.wait_for(
"message",
check=check
)
if inp.content.lower() in ["stop", "end", "cancel"]:
return await ctx.send("Game ended", delete_after=5)
elif inp.content.lower() == "back":
try:
board.pop()
except IndexError:
await ctx.send("Can't go back", delete_after=5)
continue
elif inp.content.lower() in ['re','re-send','resend']:
e = self.create_chess_board(board, turn, member if turn == ctx.author else ctx.author)
msg = await ctx.send(embed=e)
continue
else:
if inp.author == turn:
board.push_uci(inp.content.lower())
try:
await inp.delete()
except discord.Forbidden:
pass
else:
continue
turn = member if turn == ctx.author else ctx.author
won = self.has_won_chess(
board, member if turn == ctx.author else ctx.author
)
if won:
e = self.create_chess_board(
board, turn, member if turn == ctx.author else ctx.author
)
return await ctx.send(embed=e)
e = self.create_chess_board(
board, turn, member if turn == ctx.author else ctx.author
)
await msg.edit(embed=e)
```
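The move handling above leans on python-chess: parse_uci validates a UCI string against the current position, push/push_uci apply a move, and pop undoes one, which is what backs the "back" command. A quick standalone illustration:
```python
import chess

board = chess.Board()
move = board.parse_uci("e2e4")  # raises ValueError for illegal or garbled input
board.push(move)
assert board.fen().startswith("rnbqkbnr/pppppppp/8/8/4P3")
board.pop()  # undo the move, as the "back" command does
assert board.fen() == chess.Board().fen()
```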
#### File: disgames/mixins/snl.py
```python
import random
import discord
from discord.ext import commands
class SNL(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.snakes_and_ladders = {"s": [(7,2), (6,6), (4,8), (3, 1), (1, 8), (0, 5)], 'l':[(9,9), (8, 4), (7, 0), (6, 5), (5, 3), (2, 0)]}
def format_snl_board(self, board):
dct = {' ':'⬛', 's':'🐍', 'l':'🪜', 'p1':'🔴', 'p2':'🟡', 'p3':'🟢', 'p4':'🔵'}
lst = []
for row in board:
lst.append(''.join([dct[column] for column in row]))
return '\n'.join(lst)
def create_board(self):
board = [[' ' for _ in range(10)] for _ in range(10)]
for key in self.snakes_and_ladders:
for indx in self.snakes_and_ladders[key]:
board[indx[0]][indx[1]] = key
board[9][0] = "p1"
return board
@commands.command()
async def snl(self, ctx, players: commands.Greedy[discord.Member]=[]):
        players = [player for player in players if player != ctx.author]
if not players:
players.append(self.bot.user)
players.append(ctx.author)
if len(players) > 4:
return await ctx.send("Can't have more than 4 people playing")
tokens = {'p1':'🔴', 'p2':'🟡', 'p3':'🟢', 'p4':'🔵'}
indexes = {}
for player in players:
indexes[player] = [9,0]
board = self.create_board()
player_string = f' '.join([f"{player.mention}: {tokens['p'+str(num)]}" for num, player in enumerate(players, start=1)])
embed = discord.Embed(title='Snakes and Ladders', description=f"React to '🎲' to roll your dice\n\n{player_string}\n{self.format_snl_board(board)}", color=discord.Color.blurple())
msg = await ctx.send(embed=embed)
await msg.add_reaction('🎲')
await msg.add_reaction('🏳️')
current_player = 0
leaderboard = []
while True:
if len(players) == 1:
leaderboard.append(players[0])
break
player = players[current_player]
index = indexes[player]
number = random.randint(1,6)
await msg.edit(embed = discord.Embed(title='Snakes and Ladders', description=f"React to '🎲' to roll your dice\n\n{player_string}\nturn: `{player.display_name}`\n{self.format_snl_board(board)}", color=discord.Color.blurple()))
if not player.bot:
reaction, user = await self.bot.wait_for('reaction_add', check = lambda r, u: str(r) in ['🎲','🏳️'] and r.message == msg and u in players)
try:
await msg.remove_reaction(str(reaction), user)
except discord.Forbidden:
pass
if str(reaction) == '🏳️':
players.remove(user)
await ctx.send(f"{user.mention} leaves")
else:
if user != player:
continue
await ctx.send(f'{player.mention} rolled a {number}', delete_after=5)
board[index[0]][index[1]] = ' '
past_number = index[1]
if index[0]%2:
index[1] += number
else:
if index[0] == 0 and (index[1] - number) < 0:
pass
else:
index[1] -= number
if (index[1]) > 9 or (index[1]) < 0 and index[1] != 0:
index[0] -= 1
if index[0]%2:
index[1] = (number-past_number)-1
else:
index[1] = 10-((past_number+number)-9)
dct = {'72':[9, 1],'66':[8, 5], '48':[7, 9], '31':[5, 2], '18':[3, 7], '05':[2, 6], '99':[6, 7], '84':[6, 3], '70':[5, 0], '65':[4, 6], '53':[2, 4], '20':[0, 1]}
for key in self.snakes_and_ladders:
for indx in self.snakes_and_ladders[key]:
board[indx[0]][indx[1]] = key
if str(index[0])+str(index[1]) in dct:
await ctx.send(f"{player.mention} has {'hit a snake' if tuple(index) in self.snakes_and_ladders['s'] else 'went up a ladder'}", delete_after=5)
indexes[player] = dct[str(index[0])+str(index[1])]
index = indexes[player]
elif index == [0, 0]:
await ctx.send(f"{player.mention} won!!!")
players.remove(player)
leaderboard.append(player)
current_player += 1
if current_player == len(players):
current_player = 0
for num, player in enumerate(players, start=1):
board[indexes[player][0]][indexes[player][1]] = 'p'+str(num)
winning_string = ''
for num, player in enumerate(leaderboard, start=1):
medal = None
if num == 1:
medal = '🥇'
elif num == len(leaderboard):
medal = 'Loser'
elif num == 2:
medal = '🥈'
elif num == 3:
medal = '🥉'
winning_string += f'\n{player.display_name}: {medal}'
await ctx.send(winning_string)
```
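The movement arithmetic in `snl` above walks a serpentine 10x10 board: the token starts at `[9, 0]`, each row is traversed in the direction opposite to the row below it, and the finish is `[0, 0]`. A minimal sketch of the same coordinate mapping in closed form, assuming squares are counted 0-99 from the start cell (the function name is illustrative, not part of the cog):

```python
def square_to_coords(n):
    """Map a serpentine square count (0-99) to (row, col) on a 10x10 board.

    Assumes the numbering implied by the cog: start at [9, 0], finish at
    [0, 0], with each row traversed opposite to the row below it.
    """
    row_offset, pos = divmod(n, 10)
    row = 9 - row_offset
    col = pos if row_offset % 2 == 0 else 9 - pos
    return row, col

assert square_to_coords(0) == (9, 0)    # start cell
assert square_to_coords(10) == (8, 9)   # direction flips on each new row
assert square_to_coords(99) == (0, 0)   # finish cell
```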
#### File: disgames/mixins/uno.py
```python
import discord
from discord.ext import commands
import random
colors = ['red','yellow','blue','green','']
numbers = ['draw2','reverse','skip','wild','draw4'] + [str(i) for i in range(1,10)] # Names could be deceiving
class Card:
def __init__(self, color, number):
if number in ['wild','draw4']:
self.color = ''
else:
self.color = color
self.col = color
self.number = number
self.name = self.color+number
self.type = 'normal' if number.isdecimal() else number
class Player:
def __init__(self):
self.cards = []
for _ in range(5):
self.cards.append(Card(random.choice(colors), random.choice(numbers)))
self.inv = [card.name for card in self.cards]
self.play = True
class Uno(commands.Cog):
def __init__(self, bot):
self.bot = bot
def convert_to_card(self, string):
try:
if int(string[-1]) in range(1, 10) and string[:-1] in colors:
return Card(string[:-1], string[-1])
else:
raise ValueError
except ValueError:
for special in ['draw2', 'reverse', 'skip','wild','draw4']:
if string.endswith(special) and string[:-len(special)] in colors:
return Card(string[:-len(special)], special)
def cycle_uno_turns(self, lst):
lst.append(lst[0])
lst.pop(0)
return lst, lst[0]
def has_won_uno(self, player):
return not len(player.cards)
@commands.command()
async def uno(self, ctx, players:commands.Greedy[discord.Member]):
# Keep each eligible member once, skipping bots and the author.
deduped = []
for player in players:
if not player.bot and player != ctx.author and player not in deduped:
deduped.append(player)
players = deduped
if len(players) > 4:
return await ctx.send("A maximum of 5 people that can play at the same time")
elif len(players) <= 0:
return await ctx.send("You need people to play with")
decks = {}
players.insert(0, ctx.author)
turn = players[0]
for player in players:
decks[player] = Player()
top_card = Card(random.choice(colors), random.choice(numbers))
while top_card.number in ['draw2','reverse','skip','wild','draw4']:
top_card = Card(random.choice(colors), random.choice(numbers))
send = True
while True:
if len(players) <= 1:
return await ctx.send("Gave over!")
# Iterate over a copy so removing the winner is safe.
for player in players[:]:
if self.has_won_uno(decks[player]):
for other in players:
await other.send(f"{player.display_name} won uno!")
await ctx.send(f"{player.mention} won uno!")
players.remove(player)
if not decks[turn].play:
for player in players:
await player.send(f"{turn.display_name}'s turn has been skipped")
decks[turn].play = True
players, turn = self.cycle_uno_turns(players)
continue
if send:
embed = discord.Embed(title=f'{turn.display_name} Inventory', description='\n'.join(decks[turn].inv), color=discord.Color.blurple())
for player in players:
try:
await player.send(f'turn: {turn.display_name}\nThe top card is {top_card.name}', embed=embed if player == turn else None)
except discord.Forbidden:
return await ctx.send(f"I was unable to dm {player.mention}")
send = False
inp = await self.bot.wait_for('message', check = lambda m: m.author in players and not m.guild)
if inp.content.lower() in ['inv','cards','deck']:
embed = discord.Embed(title=f'{inp.author.display_name} Inventory', description='\n'.join(decks[inp.author].inv), color=discord.Color.blurple())
await inp.author.send(embed=embed)
continue
elif inp.content.lower() in ['end','cancel','stop']:
players.remove(inp.author)
for player in players:
await player.send(f"{inp.author.display_name} leaves the game")
continue
else:
if inp.author != turn:
continue
if inp.content.lower() == 'draw':
card_drew = Card(random.choice(colors), random.choice(numbers))
await inp.author.send(f"You drew a {card_drew.name}")
decks[turn].inv.append(card_drew.name)
decks[turn].cards.append(card_drew)
elif inp.content.lower() not in decks[turn].inv:
continue
else:
card = self.convert_to_card(inp.content.lower())
if not card:
await inp.author.send("That's an invalid card name")
continue
if card.type == 'normal':
if card.color == top_card.color or card.number == top_card.number:
top_card = card
send = True
for thing in decks[turn].cards:
if thing.name == card.name:
decks[turn].inv.remove(thing.name)
decks[turn].cards.remove(thing)
break
for player in players:
await player.send(f"{turn.display_name} played a {card.name}")
players, turn = self.cycle_uno_turns(players)
else:
await inp.author.send("That card cannot be played")
elif card.type == 'draw2':
if card.color == top_card.color or card.number == top_card.number:
top_card = card
send = True
next_turn = players[1]
for _ in range(2):
card_drew = Card(random.choice(colors), random.choice(numbers))
decks[next_turn].inv.append(card_drew.name)
decks[next_turn].cards.append(card_drew)
await next_turn.send(f'You drew a {card_drew.name}')
decks[next_turn].play = False
for thing in decks[turn].cards:
if thing.name == card.name:
decks[turn].inv.remove(thing.name)
decks[turn].cards.remove(thing)
break
for player in players:
await player.send(f"{turn.display_name} played a {card.name}")
players, turn = self.cycle_uno_turns(players)
else:
await inp.author.send("That card cannot be played")
elif card.type == 'reverse':
if card.color == top_card.color or card.number == top_card.number:
top_card = card
send = True
for thing in decks[turn].cards:
if thing.name == card.name:
decks[turn].inv.remove(thing.name)
decks[turn].cards.remove(thing)
break
turn = players[-1]
players = players[::-1]
for player in players:
await player.send(f'{inp.author.display_name} has reversed the play order')
else:
await inp.author.send("That card cannot be played")
elif card.type == 'skip':
if card.color == top_card.color or card.number == top_card.number:
top_card = card
send = True
next_turn = players[1]
decks[next_turn].play = False
for thing in decks[turn].cards:
if thing.name == card.name:
decks[turn].inv.remove(thing.name)
decks[turn].cards.remove(thing)
break
for player in players:
await player.send(f"{turn.display_name} played a {card.name}")
players, turn = self.cycle_uno_turns(players)
else:
await inp.author.send("That card cannot be played")
elif card.type == 'wild':
await inp.author.send("Send the color to change to")
inp = await self.bot.wait_for('message', check=lambda m: m.author == inp.author and m.guild is None)
while inp.content.lower() not in colors:
await inp.author.send("Invalid, send the color to change to")
inp = await self.bot.wait_for('message', check=lambda m: m.author == inp.author and m.guild is None)
top_card = Card(inp.content.lower(), '')
send = True
for thing in decks[turn].cards:
if thing.name == card.name:
decks[turn].inv.remove(thing.name)
decks[turn].cards.remove(thing)
break
for player in players:
await player.send(f"{turn.display_name} played a {card.name}")
players, turn = self.cycle_uno_turns(players)
elif card.type == 'draw4':
await inp.author.send("Send the color to change to")
inp = await self.bot.wait_for('message', check=lambda m: m.author == inp.author and m.guild is None)
while inp.content.lower() not in colors:
await inp.author.send("Invalid, send the color to change to")
inp = await self.bot.wait_for('message', check=lambda m: m.author == inp.author and m.guild is None)
top_card = Card(inp.content.lower(), '')
send = True
next_turn = players[1]
for _ in range(4):
card_drew = Card(random.choice(colors), random.choice(numbers))
decks[next_turn].inv.append(card_drew.name)
decks[next_turn].cards.append(card_drew)
await next_turn.send(f'You drew a {card_drew.name}')
decks[next_turn].play = False
for thing in decks[turn].cards:
if thing.name == card.name:
decks[turn].inv.remove(thing.name)
decks[turn].cards.remove(thing)
break
for player in players:
await player.send(f"{turn.display_name} played a {card.name}")
players, turn = self.cycle_uno_turns(players)
```
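The only pure logic in the cog above is the card-name parsing and the turn rotation. A minimal standalone sketch of the rotation and the reverse-card behaviour (the names here are illustrative, not part of the cog):

```python
# cycle_uno_turns moves the current player to the back of the list;
# the new head of the list becomes the active turn.
def cycle(players):
    players.append(players.pop(0))
    return players, players[0]

order = ['alice', 'bob', 'carol']
order, turn = cycle(order)
assert order == ['bob', 'carol', 'alice'] and turn == 'bob'

# A reverse card flips the order and hands the turn to what was
# previously the last player, mirroring the 'reverse' branch above.
turn = order[-1]
order = order[::-1]
assert order == ['alice', 'carol', 'bob'] and turn == 'alice'
```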
|
{
"source": "Jerrydotpy/EpikCord.py",
"score": 2
}
|
#### File: EpikCord.py/examples/message.py
```python
from EpikCord import Client, Intents, Messageable, Embed
intents = Intents().guilds.guild_members.guild_messages.direct_messages
client = Client("your_token", intents)
@client.event
async def message_create(message):
if message.author.id == client.user.id:
return
if message.content == "example test":
message.channel = Messageable(client, message.channel_id)
await message.channel.send(content="hello, chat testing")
client.login()
```
|
{
"source": "jerryduan07/gametime",
"score": 3
}
|
#### File: gametime/src/analyzer.py
```python
the source distribution and
at http://verifun.eecs.berkeley.edu/gametime/about/LICENSE,
for details on the GameTime license and authors.
"""
import bz2
import os
import pickle
import random
import shutil
import time
from copy import deepcopy
from numpy import dot, exp, eye, genfromtxt, savetxt
from numpy.linalg import det, inv, slogdet
import cilHelper
import inliner
import loopHandler
import merger
import nxHelper
import phoenixHelper
import pulpHelper
import networkx as nx
from defaults import config, logger
from fileHelper import createDir, removeAllExcept, removeFile
from gametimeError import GameTimeError
from nxHelper import Dag
from path import Path
from pathGenerator import PathGenerator
from smt.query import readQueryFromFile, Satisfiability
class Analyzer(object):
"""Maintains information about the code being analyzed, such as
the name of the file that contains the code being analyzed
and the basis paths of the code.
Attributes:
projectConfig:
:class:`~gametime.projectConfiguration.ProjectConfiguration`
object that represents the configuration of a GameTime project.
"""
def __init__(self, projectConfig):
### CONFIGURATIONS ###
#: :class:`~gametime.projectConfiguration.ProjectConfiguration` object
#: that represents the configuration of a GameTime project.
self.projectConfig = projectConfig
### GRAPH INFORMATION ###
#: Data structure for the DAG of the code being analyzed.
self.dag = Dag()
### PATHS INFORMATION ###
#: Dimension of the vector representing each path.
self.pathDimension = 0
#: Basis matrix.
self.basisMatrix = None
#: Set whose elements are lists of edges that must not be taken
#: together along any path through the DAG. For example, the element
#: [e1, e2] means "if you take e1, you cannot take e2" and
#: "if you take e2, you cannot take e1".
self.pathExclusiveConstraints = []
#: List whose elements are lists of edges that must be taken together,
#: if at least one is taken along a path through the DAG. For example,
#: the element [e1, e2] means "if you take e1, then you take e2".
self.pathBundledConstraints = []
# Number of `bad' rows in the basis matrix.
self.numBadRows = 0
# List of the Path objects associated with all basis paths
# generated so far.
self.basisPaths = []
# List of lists, each of which is a list of IDs of the nodes in
# the DAG along each basis path. Each ID is a string. The lists are
# arranged in the same order as the Path objects associated with
# the basis paths are arranged in the `basisPaths' list.
# This list is maintained for efficiency purposes.
self.basisPathsNodes = []
# Specify default parameters for the values used with
# --ob_extraction flag. The values are outputted only
# when the flag is used.
# Value of mu_max computed for the observed measurements
self.inferredMuMax = 0
# The error in predictions is 2 * inferredMuMax * errorScaleFactor.
self.errorScaleFactor = 0
# Finally, preprocess the file before analysis.
self._preprocess()
def _preprocess(self):
"""Preprocesses the file before analysis. The preprocessing steps are:
1. Create a temporary directory that will contain the files
generated during analysis.
2. Copy the source file being analyzed into this temporary directory.
3. Run CIL on the copied source file to perform, for example, loop
unrolling and function inlining.
"""
# Check if the file to be analyzed exists.
origFile = self.projectConfig.locationOrigFile
projectTempDir = self.projectConfig.locationTempDir
if not os.path.exists(origFile):
shutil.rmtree(projectTempDir)
errMsg = "File to analyze not found: %s" % origFile
raise GameTimeError(errMsg)
# Remove any temporary directory created during a previous run
# of the same GameTime project, and create a fresh new
# temporary directory.
if os.path.exists(projectTempDir):
if self.projectConfig.UNROLL_LOOPS:
# If a previous run of the same GameTime project produced
# a loop configuration file, and the current run involves
# unrolling the loops that are configured in the file,
# do not remove the file.
removeAllExcept([config.TEMP_LOOP_CONFIG], projectTempDir)
else:
removeAllExcept([], projectTempDir)
else:
os.mkdir(projectTempDir)
# Make a temporary copy of the original file to preprocess.
preprocessedFile = self.projectConfig.locationTempFile
shutil.copyfile(origFile, preprocessedFile)
# Preprocessing pass: merge other source files.
if len(self.projectConfig.merged) > 0:
self._runMerger()
# Preprocessing pass: unroll loops.
if self.projectConfig.UNROLL_LOOPS:
self._runLoopUnroller()
# Preprocessing pass: inline functions.
if len(self.projectConfig.inlined) > 0:
self._runInliner()
# Preprocessing pass: run the file through CIL once more,
# to reduce the C file to the subset of constructs used by CIL
# for ease of analysis.
self._runCil()
# We are done with the preprocessing.
logger.info("Preprocessing complete.")
logger.info("")
### PREPROCESSING HELPER FUNCTIONS ###
def _runMerger(self):
"""As part of preprocessing, runs CIL on the source file under
analysis to merge other source files. A copy of the file that
results from the CIL preprocessing is made and renamed for use by
other preprocessing phases, and the file itself is renamed and
stored for later perusal.
"""
preprocessedFile = self.projectConfig.locationTempFile
# Infer the name of the file that results from the CIL preprocessing.
cilFile = "%s.cil.c" % self.projectConfig.locationTempNoExtension
logger.info("Preprocessing the file: merging other source files...")
if merger.runMerger(self.projectConfig):
errMsg = "Error running the merger."
raise GameTimeError(errMsg)
else:
shutil.copyfile(cilFile, preprocessedFile)
shutil.move(cilFile,
"%s%s.c" % (self.projectConfig.locationTempNoExtension,
config.TEMP_SUFFIX_MERGED))
if not self.projectConfig.debugConfig.KEEP_CIL_TEMPS:
cilHelper.removeTempCilFiles(self.projectConfig)
logger.info("")
logger.info("Other source files merged.")
def _runLoopUnroller(self):
"""As part of preprocessing, runs CIL on the source file under
analysis to unroll loops. A copy of the file that results from
the CIL preprocessing is made and renamed for use by other
preprocessing phases, and the file itself is renamed and
stored for later perusal.
"""
preprocessedFile = self.projectConfig.locationTempFile
# Infer the name of the file that results from the CIL preprocessing.
cilFile = "%s.cil.c" % self.projectConfig.locationTempNoExtension
logger.info("Preprocessing the file: unrolling loops in the code...")
if loopHandler.runUnroller(self.projectConfig):
errMsg = "Error running the loop unroller."
raise GameTimeError(errMsg)
else:
shutil.copyfile(cilFile, preprocessedFile)
shutil.move(cilFile,
"%s%s.c" % (self.projectConfig.locationTempNoExtension,
config.TEMP_SUFFIX_UNROLLED))
if not self.projectConfig.debugConfig.KEEP_CIL_TEMPS:
cilHelper.removeTempCilFiles(self.projectConfig)
logger.info("")
logger.info("Loops in the code have been unrolled.")
def _runInliner(self):
"""As part of preprocessing, runs CIL on the source file under
analysis to inline functions. A copy of the file that results from
the CIL preprocessing is made and renamed for use by other
preprocessing phases, and the file itself is renamed and
stored for later perusal.
"""
preprocessedFile = self.projectConfig.locationTempFile
# Infer the name of the file that results from the CIL preprocessing.
cilFile = "%s.cil.c" % self.projectConfig.locationTempNoExtension
logger.info("Preprocessing the file: inlining...")
if inliner.runInliner(self.projectConfig):
errMsg = "Error running the inliner."
raise GameTimeError(errMsg)
else:
shutil.copyfile(cilFile, preprocessedFile)
shutil.move(cilFile,
"%s%s.c" % (self.projectConfig.locationTempNoExtension,
config.TEMP_SUFFIX_INLINED))
if not self.projectConfig.debugConfig.KEEP_CIL_TEMPS:
cilHelper.removeTempCilFiles(self.projectConfig)
logger.info("")
logger.info("Inlining complete.")
def _runCil(self):
"""As part of preprocessing, runs CIL on the source file under
analysis to reduce the C file to the subset of constructs
used by CIL for ease of analysis. The file that results from
the CIL preprocessing is renamed for use by the rest of
the GameTime toolflow. Another copy, with preprocessor directives
that maintain the line numbers from the original source file
(and other merged source files), is also made.
"""
preprocessedFile = self.projectConfig.locationTempFile
# Infer the name of the file that results from the CIL preprocessing.
cilFile = "%s.cil.c" % self.projectConfig.locationTempNoExtension
logger.info("Preprocessing the file: running CIL to produce code "
"simplified for analysis...")
if cilHelper.runCil(self.projectConfig, keepLineNumbers=True):
errMsg = "Error running CIL in the final preprocessing phase."
raise GameTimeError(errMsg)
else:
shutil.move(cilFile,
"%s%s.c" % (self.projectConfig.locationTempNoExtension,
config.TEMP_SUFFIX_LINE_NUMS))
if not self.projectConfig.debugConfig.KEEP_CIL_TEMPS:
cilHelper.removeTempCilFiles(self.projectConfig)
if cilHelper.runCil(self.projectConfig):
errMsg = "Error running CIL in the final preprocessing phase."
raise GameTimeError(errMsg)
else:
shutil.move(cilFile, preprocessedFile)
if not self.projectConfig.debugConfig.KEEP_CIL_TEMPS:
cilHelper.removeTempCilFiles(self.projectConfig)
logger.info("")
logger.info("Final preprocessing phase complete.")
### BASIS MATRIX FUNCTIONS ###
def _initBasisMatrix(self):
"""Initializes the basis matrix."""
self.basisMatrix = eye(self.pathDimension)
if self.projectConfig.RANDOMIZE_INITIAL_BASIS:
self._randomizeBasisMatrix()
def _randomizeBasisMatrix(self):
"""Randomizes the rows of the basis matrix using
a Fisher-Yates shuffle.
Precondition: The basis matrix has been initialized.
"""
for i in xrange(self.pathDimension, 0, -1):
j = random.randrange(i)
self._swapBasisMatrixRows(i-1, j)
def _swapBasisMatrixRows(self, i, j):
"""Swaps two rows of the basis matrix.
@param i Index of one row to swap.
@param j Index of other row to swap.
"""
rowToSwapOut = self.basisMatrix[j]
rowToSwapIn = self.basisMatrix[i]
rowLen = len(rowToSwapOut)
tempRowToSwapOut = [0] * rowLen
for k in xrange(rowLen):
tempRowToSwapOut[k] = rowToSwapOut[k]
for k in xrange(rowLen):
rowToSwapOut[k] = rowToSwapIn[k]
rowToSwapIn[k] = tempRowToSwapOut[k]
def saveBasisMatrix(self, location=None):
"""Saves the basis matrix to a file for future analysis.
@param location Location of the file. If this is not provided,
the basis matrix will be stored in a temporary file located in
the temporary directory used by GameTime for its analysis.
"""
location = location or os.path.join(self.projectConfig.locationTempDir,
config.TEMP_BASIS_MATRIX)
try:
savetxt(location, self.basisMatrix, fmt="%01.1f")
except EnvironmentError as e:
errMsg = "Error saving the basis matrix: %s" % e
raise GameTimeError(errMsg)
def loadBasisMatrix(self, location=None):
"""Loads the basis matrix from a file.
@param location Location of the file. If this is not provided,
the basis file will be loaded from a temporary file located in
the temporary directory used by GameTime for its analysis.
"""
location = location or os.path.join(self.projectConfig.locationTempDir,
config.TEMP_BASIS_MATRIX)
try:
self.basisMatrix = genfromtxt(location, delimiter=" ")
except EnvironmentError as e:
errMsg = "Error loading the basis matrix: %s" % e
raise GameTimeError(errMsg)
### GRAPH FUNCTIONS ###
def createDag(self):
"""Creates the DAG corresponding to the code being analyzed
and dumps the DAG, in DOT format, to a temporary file for further
analysis. This method also stores a local copy in a data
structure that represents the DAG.
"""
logger.info("Generating the DAG and associated information...")
if phoenixHelper.createDag(self.projectConfig):
errMsg = "Error running the Phoenix program analyzer."
raise GameTimeError(errMsg)
location = os.path.join(self.projectConfig.locationTempDir,
config.TEMP_DAG)
self.loadDagFromDotFile(location)
numEdgesReduced = len(self.dag.edgesReduced)
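# The path dimension is the cyclomatic number of the control-flow graph: |E| - |N| + 2.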
self.pathDimension = self.dag.numEdges - self.dag.numNodes + 2
if numEdgesReduced != self.pathDimension:
errMsg = ("The number of non-special edges is different "
"from the dimension of the path.")
raise GameTimeError(errMsg)
logger.info("DAG generated.")
if nxHelper.hasCycles(self.dag):
logger.warn("The control-flow graph has cycles.")
self._runLoopDetector()
else:
logger.info("The control-flow graph has %d nodes and %d edges, "
"with at most %d possible paths." %
(self.dag.numNodes, self.dag.numEdges,
self.dag.numPaths))
logger.info("There are at most %d possible basis paths." %
self.pathDimension)
logger.info("")
def loadDagFromDotFile(self, location):
"""Loads the DAG that corresponds to the code being analyzed
from a DOT file.
@param location Location of the file.
"""
self.dag = nxHelper.constructDag(location)
# Reset variables of this "Analyzer" object.
self.resetPathExclusiveConstraints()
self.resetPathBundledConstraints()
def writeDagToDotFile(self, location=None, annotateEdges=False,
highlightedPath=None, highlightColor="red"):
"""Writes the DAG that corresponds to the code being analyzed
to a DOT file.
@param location Location of the file. If this is not provided,
the basis matrix will be stored in a temporary file located in
the temporary directory used by GameTime for its analysis.
@param annotateEdges Whether each edge should be annotated with
its weight, when the file is processed by a visualization tool.
@param highlightedPath "Path" object whose corresponding edges
will be highlighted when the DOT file is processed by
a visualization tool. If this argument is not provided, no edges
will be highlighted.
@param highlightColor Color of the highlighted edges. This argument
can be any value that is legal in the DOT format; by default, its value
is "red". If the "highlightedPath" argument is not provided,
this argument is ignored.
"""
location = location or os.path.join(self.projectConfig.locationTempDir,
config.TEMP_DAG_WEIGHTS)
edgeWeights = [("%g" % edgeWeight) for edgeWeight
in self.dag.edgeWeights]
edgesToWeights = (dict(zip(self.dag.allEdges, edgeWeights))
if annotateEdges else None)
nxHelper.writeDagToDotFile(self.dag, location,
self.projectConfig.func, edgesToWeights,
(Dag.getEdges(highlightedPath.nodes)
if highlightedPath else None),
highlightColor)
def _runLoopDetector(self):
"""Runs the loop detector on the code under analysis."""
logger.info("Detecting loops in the code...")
if loopHandler.runDetector(self.projectConfig):
errMsg = "Error running the loop detector."
raise GameTimeError(errMsg)
else:
if not self.projectConfig.debugConfig.KEEP_CIL_TEMPS:
cilHelper.removeTempCilFiles(self.projectConfig)
logger.info("")
logger.info("Loops in the code have been detected.")
logger.info("Before proceeding, please modify the loop configuration "
"file in the temporary directory generated by GameTime "
"for this analysis, and then run the loop unroller "
"to unroll these loops.")
def _compressPath(self, pathEdges):
"""Compresses the path provided: this method converts
the provided path to a 0-1 vector that is 1 if a
'non-special' edge is along the path, and 0 otherwise.
@param pathEdges Edges along the path to represent with
'non-special' edges.
@retval 0-1 vector that is 1 if a `non-special' edge is along
the path, and 0 otherwise.
"""
return [(1.0 if edge in pathEdges else 0.0)
for edge in self.dag.edgesReduced]
### PATH GENERATION FUNCTIONS ###
def addPathExclusiveConstraint(self, edges):
"""Adds the edges provided to the list of path-exclusive
constraints, if not already present. These edges must not
be taken together along any path through the DAG.
@param edges List of edges to add to the list of
path-exclusive constraints.
"""
if edges not in self.pathExclusiveConstraints:
self.pathExclusiveConstraints.append(edges)
def addPathBundledConstraint(self, edges):
"""Adds the edges provided to the list of path-bundled
constraints, if not already present. These edges must
be taken together if at least one of them is taken along
a path through the DAG.
@param edges List of edges to add to the list of path-bundled
constraints.
"""
if edges not in self.pathBundledConstraints:
self.pathBundledConstraints.append(edges)
def resetPathExclusiveConstraints(self):
"""Resets the path-exclusive constraints."""
self.pathExclusiveConstraints = []
def resetPathBundledConstraints(self):
"""Resets the path-bundled constraints."""
self.pathBundledConstraints = []
def generateOvercompleteBasis(self, k):
"""Generates an overcomplete basis so that each feasible path can be
written as a linear combination of the paths in the basis such that the
L1 norm is at most 'k'. This method is for testing purposes
only, as it exhaustively generates all paths in the graph! Use the
function below for a scalable version.
"""
logger.info("Generating all paths")
paths = nx.all_simple_paths(self.dag, self.dag.source, self.dag.sink)
feasible = list(paths)
logger.info("Find minimal overcomplete basis")
pulpHelper.findMinimalOvercompleteBasis(self, feasible, k)
def iterativelyFindOvercompleteBasis(self, initialPaths, k):
"""Generates overcomplete basis such the the lenth of the longest
feasible path is at most 'k'. The basis is computed by iteratively
extending the basis with the longest path. Parameter 'initialPaths'
specifies the set of paths the iterative algorithm begins with. This
can be any set of paths, in practice we use the paths generated by
the standard algorithm.
"""
infeasible = []
edgeNodePaths = initialPaths
optimalBound = 1
startTime = time.clock()
while True:
beforeTime = time.clock()
length, path, ilpProblem = \
pulpHelper.findWorstExpressiblePath(self, self.basisPaths, 0)
afterTime = time.clock()
logger.info("Found a candidate path of length %.2f in %d seconds" %
(length, afterTime - beforeTime))
optimalBound = length
# if the length of the longest path is within the given bound, stop
if (length <= k): break
candidatePathNodes = path
candidatePathEdges = Dag.getEdges(candidatePathNodes)
logger.info("Checking if the found path is feasible...")
resultPath = self.checkFeasibility(candidatePathNodes,
ilpProblem)
querySatisfiability = resultPath.smtQuery.satisfiability
if querySatisfiability == Satisfiability.SAT:
logger.info("Path is feasible.")
self.basisPaths.append(resultPath)
edgeNodePaths.append(candidatePathEdges)
elif querySatisfiability == Satisfiability.UNSAT:
logger.info("Path is infeasible.")
logger.info("Finding the edges to exclude...")
infeasible.append(candidatePathEdges)
unsatCore = resultPath.smtQuery.unsatCore
excludeEdges = resultPath.getEdgesForConditions(unsatCore)
logger.info("Edges to be excluded found.")
logger.info("Adding a constraint to exclude "
"these edges...")
if len(excludeEdges) > 0:
self.addPathExclusiveConstraint(excludeEdges)
else:
self.addPathExclusiveConstraint(candidatePathEdges)
logger.info("Constraint added.")
logger.info("Found overcomplete basis of size %d, yielding bound %.2f" %
(len(edgeNodePaths), optimalBound))
self.basisPathsNodes = [path.nodes for path in self.basisPaths]
return self.basisPaths
def generateBasisPaths(self):
"""Generates a list of "Path" objects, each of which represents
a basis path of the code being analyzed. The basis "Path" objects
are regenerated each time this method is called.
@retval List of basis paths of the code being analyzed, each
represented by an object of the "Path" class.
"""
basisPaths = []
if nxHelper.hasCycles(self.dag):
logger.warn("Loops in the code have been detected.")
logger.warn("No basis paths have been generated.")
return []
logger.info("Generating the basis paths...")
logger.info("")
startTime = time.clock()
logger.info("Initializing the basis matrix...")
self._initBasisMatrix()
logger.info("Basis matrix initialized to")
logger.info(self.basisMatrix)
logger.info("")
logger.info("There are a maximum of %d possible basis paths." %
self.pathDimension)
logger.info("")
def onExit(startTime, infeasible):
"""Helper function that is called when this method is about to
return the basis Path objects, and performs the appropriate
pre-exit cleanup. This inner function will be used in two
places below, and is defined once to keep the code neat,
to prevent deeper indentation, and to reduce confusion.
@param startTime Time when the generation of basis Path objects
was started.
@retval List of basis paths of the code being analyzed, each
represented by an object of the Path class.
"""
self.basisPaths = basisPaths
self.basisPathsNodes = [path.nodes for path in basisPaths]
#self.resetPathExclusiveConstraints()
logger.info("Time taken to generate paths: %.2f seconds." %
(time.clock() - startTime))
logger.info("Basis paths generated.")
# If we are computing an overcomplete basis, use the computed set as
# the initial set of paths in the iterative algorithm.
if self.projectConfig.OVER_COMPLETE_BASIS:
logger.info("Iteratively improving the basis")
for path in infeasible:
self.addPathExclusiveConstraint(path)
edgePaths = \
[Dag.getEdges(path.nodes) for path in self.basisPaths]
result = self.iterativelyFindOvercompleteBasis(
edgePaths, self.projectConfig.MAXIMUM_ERROR_SCALE_FACTOR)
logger.info("Number of paths generated: %d" % len(result))
logger.info("Time taken to generate paths: %.2f seconds." %
(time.clock() - startTime))
return result
else:
return self.basisPaths
if self.pathDimension == 1:
warnMsg = ("Basis matrix has dimensions 1x1. "
"There is only one path through the function "
"under analysis, which is the only basis path.")
logger.warn(warnMsg)
# Collects all infeasible paths discovered during the computation
infeasible = []
currentRow, numPathsUnsat = 0, 0
while currentRow < (self.pathDimension - self.numBadRows):
logger.info("Currently at row %d..." % (currentRow+1))
logger.info("So far, the bottom %d rows of the basis "
"matrix are `bad'." % self.numBadRows)
logger.info("So far, %d candidate paths were found to be "
"unsatisfiable." % numPathsUnsat)
logger.info("Basis matrix is")
logger.info(self.basisMatrix)
logger.info("")
logger.info("Calculating subdeterminants...")
if numPathsUnsat == 0:
# Calculate the subdeterminants only if the replacement
# of this row has not yet been attempted.
self.dag.resetEdgeWeights()
self.dag.edgeWeights = self._calculateSubdets(currentRow)
logger.info("Calculation complete.")
logger.info("Finding a candidate path using an integer "
"linear program...")
logger.info("")
candidatePathNodes, ilpProblem = pulpHelper.findExtremePath(self)
logger.info("")
if ilpProblem.objVal is None:
logger.info("Unable to find a candidate path to "
"replace row %d." % (currentRow+1))
logger.info("Moving the bad row to the bottom "
"of the basis matrix.")
for k in xrange((currentRow+1), self.pathDimension):
self._swapBasisMatrixRows(k-1, k)
self.numBadRows += 1
numPathsUnsat = 0
continue
logger.info("Candidate path found.")
candidatePathEdges = Dag.getEdges(candidatePathNodes)
compressedPath = self._compressPath(candidatePathEdges)
# Temporarily replace the row in the basis matrix
# to calculate the new determinant.
prevMatrixRow = self.basisMatrix[currentRow].copy()
self.basisMatrix[currentRow] = compressedPath
sign, newBasisMatrixLogDet = slogdet(self.basisMatrix)
newBasisMatrixDet = exp(newBasisMatrixLogDet)
logger.info("Absolute value of the new determinant: %g" %
newBasisMatrixDet)
logger.info("")
DETERMINANT_THRESHOLD = self.projectConfig.DETERMINANT_THRESHOLD
MAX_INFEASIBLE_PATHS = self.projectConfig.MAX_INFEASIBLE_PATHS
if ((sign == 0 and newBasisMatrixLogDet == float("-inf")) or
newBasisMatrixDet < DETERMINANT_THRESHOLD or
numPathsUnsat >= MAX_INFEASIBLE_PATHS):
if (newBasisMatrixDet < DETERMINANT_THRESHOLD and
not (sign == 0 and newBasisMatrixLogDet == float("-inf"))):
logger.info("Determinant is too small.")
else:
logger.info("Unable to find a path that makes "
"the determinant non-zero.")
logger.info("Moving the bad row to the bottom "
"of the basis matrix.")
self.basisMatrix[currentRow] = prevMatrixRow
for k in xrange((currentRow+1), self.pathDimension):
self._swapBasisMatrixRows(k-1, k)
self.numBadRows += 1
numPathsUnsat = 0
else:
logger.info("Possible replacement for row found.")
logger.info("Checking if replacement is feasible...")
logger.info("")
resultPath = self.checkFeasibility(candidatePathNodes,
ilpProblem)
querySatisfiability = resultPath.smtQuery.satisfiability
if querySatisfiability == Satisfiability.SAT:
# Sanity check:
# A row should not be replaced if it replaces a good
# row and decreases the determinant. However,
# replacing a bad row and decreasing the determinant
# is okay. (TODO: Are we actually doing this?)
logger.info("Replacement is feasible.")
logger.info("Row %d replaced." % (currentRow+1))
basisPaths.append(resultPath)
currentRow += 1
numPathsUnsat = 0
elif querySatisfiability == Satisfiability.UNSAT:
logger.info("Replacement is infeasible.")
logger.info("Finding the edges to exclude...")
unsatCore = resultPath.smtQuery.unsatCore
excludeEdges = resultPath.getEdgesForConditions(unsatCore)
logger.info("Edges to be excluded found.")
logger.info("Adding a constraint to exclude "
"these edges...")
if len(excludeEdges) > 0:
self.addPathExclusiveConstraint(excludeEdges)
infeasible.append(excludeEdges)
else:
self.addPathExclusiveConstraint(candidatePathEdges)
infeasible.append(candidatePathEdges)
logger.info("Constraint added.")
self.basisMatrix[currentRow] = prevMatrixRow
numPathsUnsat += 1
logger.info("")
logger.info("")
if self.projectConfig.PREVENT_BASIS_REFINEMENT:
return onExit(startTime, infeasible)
logger.info("Refining the basis into a 2-barycentric spanner...")
logger.info("")
isTwoBarycentric = False
refinementRound = 0
while not isTwoBarycentric:
logger.info("Currently in round %d of refinement..." %
(refinementRound+1))
logger.info("")
isTwoBarycentric = True
currentRow, numPathsUnsat = 0, 0
goodRows = (self.pathDimension - self.numBadRows)
while currentRow < goodRows:
logger.info("Currently at row %d out of %d..." %
(currentRow+1, goodRows))
logger.info("So far, %d candidate paths were found to be "
"unsatisfiable." % numPathsUnsat)
logger.info("Basis matrix is")
logger.info(self.basisMatrix)
logger.info("")
logger.info("Calculating subdeterminants...")
if numPathsUnsat == 0:
# Calculate the subdeterminants only if the replacement
# of this row has not yet been attempted.
self.dag.resetEdgeWeights()
self.dag.edgeWeights = self._calculateSubdets(currentRow)
logger.info("Calculation complete.")
logger.info("Finding a candidate path using an integer "
"linear program...")
logger.info("")
candidatePathNodes, ilpProblem = \
pulpHelper.findExtremePath(self)
logger.info("")
if ilpProblem.objVal is None:
logger.info("Unable to find a candidate path to "
"replace row %d." % (currentRow+1))
currentRow += 1
numPathsUnsat = 0
continue
logger.info("Candidate path found.")
candidatePathEdges = Dag.getEdges(candidatePathNodes)
compressedPath = self._compressPath(candidatePathEdges)
sign, oldBasisMatrixLogDet = slogdet(self.basisMatrix)
oldBasisMatrixDet = exp(oldBasisMatrixLogDet)
logger.info("Absolute value of the old determinant: %g" %
oldBasisMatrixDet)
# Temporarily replace the row in the basis matrix
# to calculate the new determinant.
prevMatrixRow = self.basisMatrix[currentRow].copy()
self.basisMatrix[currentRow] = compressedPath
sign, newBasisMatrixLogDet = slogdet(self.basisMatrix)
newBasisMatrixDet = exp(newBasisMatrixLogDet)
logger.info("Absolute value of the new determinant: %g" %
newBasisMatrixDet)
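# Replace the row only if it more than doubles |det|: this is the
# 2-barycentric spanner refinement criterion.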
if newBasisMatrixDet > 2 * oldBasisMatrixDet:
logger.info("Possible replacement for row found.")
logger.info("Checking if replacement is feasible...")
logger.info("")
resultPath = self.checkFeasibility(candidatePathNodes,
ilpProblem)
querySatisfiability = resultPath.smtQuery.satisfiability
if querySatisfiability == Satisfiability.SAT:
logger.info("Replacement is feasible.")
isTwoBarycentric = False
basisPaths[currentRow] = resultPath
logger.info("Row %d replaced." % (currentRow+1))
currentRow += 1
numPathsUnsat = 0
elif querySatisfiability == Satisfiability.UNSAT:
logger.info("Replacement is infeasible.")
logger.info("Finding the edges to exclude...")
unsatCore = resultPath.smtQuery.unsatCore
excludeEdges = \
resultPath.getEdgesForConditions(unsatCore)
logger.info("Edges to be excluded found.")
logger.info("Adding a constraint to exclude "
"these edges...")
if len(excludeEdges) > 0:
self.addPathExclusiveConstraint(excludeEdges)
infeasible.append(excludeEdges)
else:
self.addPathExclusiveConstraint(candidatePathEdges)
infeasible.append(candidatePathEdges)
logger.info("Constraint added.")
self.basisMatrix[currentRow] = prevMatrixRow
numPathsUnsat += 1
else:
logger.info("No replacement for row %d found." %
(currentRow+1))
self.basisMatrix[currentRow] = prevMatrixRow
currentRow += 1
numPathsUnsat = 0
logger.info("")
logger.info("")
refinementRound += 1
logger.info("")
logger.info("Basis refined.")
return onExit(startTime, infeasible)
# Methods imported from the "PathGenerator" class.
def generatePaths(self, *args, **kwargs):
return PathGenerator.generatePaths(self, *args, **kwargs)
### PATH GENERATION HELPER FUNCTIONS ###
def _calculateSubdets(self, row):
"""Returns a list of weights, where weight i is assigned to
edge i. The weights assigned to the `non-special' edges are
subdeterminants of the basis matrix without row i and column j:
column j corresponds to the `non-special' edge j.
@param row Row to ignore.
@retval List of weights as specified above.
"""
edgesReduced = self.dag.edgesReduced
edgesReducedIndices = self.dag.edgesReducedIndices
edgeWeightList = [0] * self.dag.numEdges
rowList = range(self.pathDimension)
rowList.remove(row)
for j in xrange(self.pathDimension):
colList = range(self.pathDimension)
colList.remove(j)
subMatrix = self.basisMatrix[rowList][:, colList]
if subMatrix.size != 0:
# Compute the subdeterminant of this submatrix.
subdet = det(subMatrix)
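# Apply the cofactor sign (-1)**(row+j) to the subdeterminant.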
if ((row+j) % 2) == 1:
edgeWeight = -1 * subdet
else:
edgeWeight = subdet
else:
# Special case of a 1x1 matrix, or of code under analysis
# with only one path that goes through.
edgeWeight = 1
# Assign this edge weight to the proper `non-special' edge.
edgeWeightList[edgesReducedIndices[edgesReduced[j]]] = edgeWeight
return edgeWeightList
def checkFeasibility(self, pathNodes, ilpProblem):
"""Determines the feasibility of the provided path in the DAG;
the feasibility is checked with an SMT solver. This method
returns a Path object that contains, at least, a Query object
that represents the SMT query that contains the conditions along
the path provided; the feasibility of the path is the same as the
satisfiability of this Query object. If the path is feasible,
then the Path object also contains satisfying assignments.
@param pathNodes Path whose feasibility should be checked, given
as a list of nodes along the path.
@param ilpProblem Integer linear programming problem that, when solved,
produced this path, represented as an IlpProblem object.
@retval Path object as described above.
"""
# First, check if the candidate path is already a basis path.
# This allows us to prevent unnecessary work.
# It is also a hack around a problem in Z3, where the same query
# can result in different models when checked more than once in
# the same execution.
# (See http://stackoverflow.com/q/15731179/1834042 for more details.)
logger.info("Checking if the candidate path is already "
"a basis path...")
try:
basisPathIndex = self.basisPathsNodes.index(pathNodes)
logger.info("Candidate path is a basis path.")
# Create a copy of the Path object that represents the basis path:
# we do not want to modify the IlpProblem object associated with
# the basis Path object.
pathCopy = deepcopy(self.basisPaths[basisPathIndex])
pathCopy.ilpProblem = ilpProblem
return pathCopy
except ValueError as e:
logger.info("Candidate path is not a basis path.")
# Write the candidate path to a file for further analysis
# by the Phoenix backend.
logger.info("Writing nodes along candidate path to file...")
nodesFile = os.path.join(self.projectConfig.locationTempDir,
config.TEMP_PATH_NODES)
try:
nodesFileHandler = open(nodesFile, "w")
except EnvironmentError as e:
errMsg = "Error writing nodes along candidate path: %s" % e
raise GameTimeError(errMsg)
else:
with nodesFileHandler:
nodesFileHandler.write(" ".join(pathNodes))
logger.info("Writing complete.")
logger.info("Running the Phoenix program analyzer...")
logger.info("")
if phoenixHelper.findConditions(self.projectConfig):
errMsg = "Error running the Phoenix program analyzer."
raise GameTimeError(errMsg)
logger.info("Phoenix program analysis complete.")
logger.info("")
logger.info("Reading the line numbers of statements "
"along the path...")
lineNumbersFile = os.path.join(self.projectConfig.locationTempDir,
config.TEMP_PATH_LINE_NUMBERS)
lineNumbers = Path.readLineNumbersFromFile(lineNumbersFile)
logger.info("Line numbers of the statements along "
"the path read and processed.")
logger.info("Reading the conditions along the path...")
conditionsFile = os.path.join(self.projectConfig.locationTempDir,
config.TEMP_PATH_CONDITIONS)
conditions = Path.readConditionsFromFile(conditionsFile)
logger.info("Path conditions read and processed.")
logger.info("Reading the edges that are associated with "
"the conditions along the path...")
conditionEdgesFile = os.path.join(self.projectConfig.locationTempDir,
config.TEMP_PATH_CONDITION_EDGES)
conditionEdges = Path.readConditionEdgesFromFile(conditionEdgesFile)
logger.info("Edges read and processed.")
logger.info("Reading the line numbers and truth values "
"of conditional points...")
conditionTruthsFile = os.path.join(self.projectConfig.locationTempDir,
config.TEMP_PATH_CONDITION_TRUTHS)
conditionTruths = Path.readConditionTruthsFromFile(conditionTruthsFile)
logger.info("Path condition truths read and processed.")
logger.info("Reading information about array accesses...")
arrayAccessesFile = os.path.join(self.projectConfig.locationTempDir,
config.TEMP_PATH_ARRAY_ACCESSES)
arrayAccesses = Path.readArrayAccessesFromFile(arrayAccessesFile)
logger.info("Array accesses information read and processed.")
logger.info("Reading information about the expressions "
"for aggregate accesses...")
aggIndexExprsFile = os.path.join(self.projectConfig.locationTempDir,
config.TEMP_PATH_AGG_INDEX_EXPRS)
aggIndexExprs = Path.readAggIndexExprsFromFile(aggIndexExprsFile)
logger.info("Aggregate accesses information read and processed.")
logger.info("Reading the SMT query generated by the "
"Phoenix program analyzer...")
smtQueryFile = os.path.join(self.projectConfig.locationTempDir,
"%s.smt" % config.TEMP_PATH_QUERY)
smtQuery = readQueryFromFile(smtQueryFile)
logger.info("SMT query read.")
assignments = {}
logger.info("Checking the satisfiability of the SMT query...")
smtSolver = self.projectConfig.smtSolver
smtSolver.checkSat(smtQuery)
logger.info("Satisfiability checked.")
if smtQuery.satisfiability == Satisfiability.SAT:
logger.info("Candidate path is FEASIBLE.")
logger.info("Generating assignments...")
smtModelParser = self.projectConfig.smtModelParser
assignments = smtModelParser.parseModel(smtQuery.model,
arrayAccesses,
aggIndexExprs,
self.projectConfig)
logger.info("Assignments generated.")
elif smtQuery.satisfiability == Satisfiability.UNSAT:
logger.info("Candidate path is INFEASIBLE.")
elif smtQuery.satisfiability == Satisfiability.UNKNOWN:
errMsg = "Candidate path has UNKNOWN satisfiability."
raise GameTimeError(errMsg)
if self.projectConfig.debugConfig.DUMP_ALL_QUERIES:
try:
allQueriesFile = \
os.path.join(self.projectConfig.locationTempDir,
config.TEMP_PATH_QUERY_ALL)
allQueriesFileHandler = open(allQueriesFile, "a")
except EnvironmentError as e:
errMsg = "Error writing the candidate SMT query: %s" % e
raise GameTimeError(errMsg)
else:
with allQueriesFileHandler:
allQueriesFileHandler.write("*** CANDIDATE QUERY ***\n")
allQueriesFileHandler.write("%s\n\n" % smtQuery)
logger.info("Removing temporary path information files...")
self._removeTempPathFiles()
logger.info("Temporary path information files removed.")
logger.info("")
return Path(ilpProblem, pathNodes, lineNumbers,
conditions, conditionEdges, conditionTruths,
arrayAccesses, aggIndexExprs,
smtQuery, assignments)
def estimateEdgeWeights(self):
"""Estimates the weights on the edges of the DAG, using the values
of the basis "Path" objects. The result is stored in the instance
variable "edgeWeights".
Precondition: The basis paths have been generated and have values.
"""
self.dag.resetEdgeWeights()
basisValues = [basisPath.measuredValue for basisPath
in self.basisPaths]
# By default, we assume a value of 0 for each of the rows in
# the basis matrix that no replacement could be found for
# (the `bad' rows in the basis matrix).
basisValues += [0] * (self.pathDimension - len(basisValues))
# Estimate the weights on the `non-special' edges of the graph.
logger.info("Estimating the weights on the `non-special' edges...")
reducedEdgeWeights = dot(inv(self.basisMatrix), basisValues)
logger.info("Weights estimated.")
# Generate the list of edge weights that the integer linear
# programming problem will use.
logger.info("Generating the list of weights on all edges...")
for reducedEdgeIndex, reducedEdge in enumerate(self.dag.edgesReduced):
self.dag.edgeWeights[self.dag.edgesReducedIndices[reducedEdge]] = \
reducedEdgeWeights[reducedEdgeIndex]
logger.info("List generated.")
def _removeTempPathFiles(self):
"""Removes the temporary path information files that are
generated when the feasibility of a path is determined.
"""
nodesFile = os.path.join(self.projectConfig.locationTempDir,
config.TEMP_PATH_NODES)
removeFile(nodesFile)
lineNumbersFile = os.path.join(self.projectConfig.locationTempDir,
config.TEMP_PATH_LINE_NUMBERS)
removeFile(lineNumbersFile)
conditionsFile = os.path.join(self.projectConfig.locationTempDir,
config.TEMP_PATH_CONDITIONS)
removeFile(conditionsFile)
conditionEdgesFile = os.path.join(self.projectConfig.locationTempDir,
config.TEMP_PATH_CONDITION_EDGES)
removeFile(conditionEdgesFile)
conditionTruthsFile = os.path.join(self.projectConfig.locationTempDir,
config.TEMP_PATH_CONDITION_TRUTHS)
removeFile(conditionTruthsFile)
arrayAccessesFile = os.path.join(self.projectConfig.locationTempDir,
config.TEMP_PATH_ARRAY_ACCESSES)
removeFile(arrayAccessesFile)
aggIndexExprsFile = os.path.join(self.projectConfig.locationTempDir,
config.TEMP_PATH_AGG_INDEX_EXPRS)
removeFile(aggIndexExprsFile)
smtQueryFile = os.path.join(self.projectConfig.locationTempDir,
"%s.smt" % config.TEMP_PATH_QUERY)
removeFile(smtQueryFile)
### PATH VALUE FUNCTIONS ###
def writeBasisValuesToFile(self, location, measured=False):
"""Convenience wrapper around the "writePathValuesToFile" method
that writes the values of the "Path" objects that represent
the feasible basis paths of the code being analyzed to a file.
Arguments:
location:
Location of the file.
measured:
`True` if, and only if, the values that will be written to
the file are the measured values of the feasible basis paths.
"""
Analyzer.writePathValuesToFile(self.basisPaths, location, measured)
def writeTemplateBasisValuesFile(self, location):
"""Creates a template file, at the location provided, which can
be used as input to the "loadBasisValuesFromFile" method.
The template file contains instructions on how to specify
the measured values to be associated with the feasible basis
"Path" objects, and follows the grammar described in
the documentation of the "loadBasisValuesFromFile" method.
@param location Location of the file.
"""
try:
templateBasisValuesFileHandler = open(location, "w")
except EnvironmentError as e:
errMsg = ("Error writing the template file to load values "
"for the basis Path objects: %s") % e
raise GameTimeError(errMsg)
else:
with templateBasisValuesFileHandler:
projectConfig = self.projectConfig
templateHeader = \
"""# This template was generated by GameTime during the analysis of
# the function %s in the file located at
# %s.
# Below, supply the values to be associated with the Path objects
# that represent the basis paths.
""" % (projectConfig.func, projectConfig.locationOrigFile)
contents = []
contents.append(templateHeader)
for position in xrange(len(self.basisPaths)):
contents.append("# Append the value for basis path %d "
"to the line below." % (position+1))
contents.append("%d\t" % (position + 1))
contents.append("")
templateBasisValuesFileHandler.write("\n".join(contents))
def loadBasisValuesFromFile(self, location):
"""Loads the measured values of the "Path" objects that represent
the feasible basis paths of the code being analyzed from a file.
Each line of the file should have a pair of numbers separated by
whitespace: the former is the (one-based) number of a basis
"Path" object, which is also its (one-based) position in the list
of basis "Path" objects maintained by this "Analyzer" object, while
the latter is the value to be associated with the "Path" object.
Lines that start with a "#" character are assumed to be comments,
and are thus ignored. For a template file, refer to the
"writeTemplateBasisValuesFile" method.
Precondition: The basis paths have been generated.
@param location Location of the file.
"""
try:
basisValuesFileHandler = open(location, "r")
except EnvironmentError as e:
errMsg = "Error loading the values of the basis paths: %s" % e
raise GameTimeError(errMsg)
else:
with basisValuesFileHandler:
basisValuesLines = basisValuesFileHandler.readlines()
basisValuesLines = [line.strip() for line in basisValuesLines]
basisValuesLines = [line for line in basisValuesLines
if line != "" and not line.startswith("#")]
basisValuesLines = [line.split() for line in basisValuesLines]
self.loadBasisValues([(int(position), int(value))
for position, value in basisValuesLines])
def loadBasisValues(self, basisValues):
"""Loads the measured values of the "Path" objects that represent
the feasible basis paths of the code being analyzed from the list of
tuples provided. Each tuple has two elements: the first element is
the (one-based) position of a basis "Path" object in the list of
basis "Path" objects maintained by this "Analyzer" object, and
the second element is the measured value to be associated with
the "Path" object.
Precondition: The basis paths have been generated.
@param basisValues List of tuples, as described.
"""
numBasisPaths, numBasisValues = len(self.basisPaths), len(basisValues)
if numBasisPaths != numBasisValues:
errMsg = ("There are %d basis paths, but %d values "
"were provided.") % (numBasisPaths, numBasisValues)
raise GameTimeError(errMsg)
for position, value in basisValues:
self.basisPaths[position-1].setMeasuredValue(value)
@staticmethod
def writePathValuesToFile(paths, location, measured=False):
"""Writes the values of each of the :class:`~gametime.path.Path`
objects in the list provided to a file.
Each line of the file is a pair of numbers separated by whitespace:
the former is the (one-based) number of
a :class:`~gametime.path.Path` object, which is also its (one-based)
position in the list provided, while the latter is a value of
the :class:`~gametime.path.Path` object.
Arguments:
paths:
List of :class:`~gametime.path.Path` objects whose values
are to be written to a file.
location:
Location of the file.
measured:
`True` if, and only if, the values that will be written to
the file are the measured values of the feasible paths.
"""
try:
pathValuesFileHandler = open(location, "w")
except EnvironmentError as e:
errMsg = "Error writing the values of the paths: %s" % e
raise GameTimeError(errMsg)
else:
with pathValuesFileHandler:
for position, path in enumerate(paths):
pathValue = (path.measuredValue if measured else
path.predictedValue)
pathValuesFileHandler.write("%d\t%d\n" %
(position+1, pathValue))
@staticmethod
def writeValueToFile(value, location):
"""Write the given `value` into file `location`. `value` is a floating
point. It is written with 2 decimal points. For compatibility
purposes, if the `value` is an int, it is written without any decimal
points
"""
try:
valuesFileHandler = open(location, "w")
except EnvironmentError as e:
errMsg = "Error writing the value: %s" % e
raise GameTimeError(errMsg)
else:
with valuesFileHandler:
if (int(value) == value):
valuesFileHandler.write("%d\n" % value)
else:
valuesFileHandler.write("%.2f\n" % value)
### SERIALIZATION FUNCTIONS ###
def saveToFile(self, location):
"""Saves the current state of this Analyzer object to a file.
@param location Location of the file to save the current state
of this Analyzer object to.
"""
try:
logger.info("Saving the Analyzer object to a file...")
analyzerFileHandler = bz2.BZ2File(location, "w")
except EnvironmentError as e:
errMsg = "Error saving the Analyzer object to a file: %s" % e
raise GameTimeError(errMsg)
else:
with analyzerFileHandler:
pickle.dump(self, analyzerFileHandler)
logger.info("Analyzer object saved.")
@staticmethod
def loadFromFile(location):
"""Loads an Analyzer object from the file whose location is provided.
@param location Location of the file.
@return Analyzer object, loaded from the file whose location
is provided.
"""
try:
logger.info("Loading an Analyzer object from a file...")
analyzerFileHandler = bz2.BZ2File(location, "r")
except EnvironmentError as e:
errMsg = "Error loading an Analyzer object: %s" % e
raise GameTimeError(errMsg)
else:
with analyzerFileHandler:
analyzer = pickle.load(analyzerFileHandler)
logger.info("Analyzer object loaded.")
return analyzer
def writePathsToFiles(self, paths, writePerPath=False, rootDir=None):
"""Utility method that writes information available within the Path
objects in the list provided to different files.
All of the files are stored within directories. The hierarchy of these
directories and files is determined by the "writePerPath" argument.
If the "writePerPath" argument is True, each directory corresponds
to one Path object. The contents of each directory are files, one
for each type of information available within the Path object.
For example, the conditions of the first Path object in the list will
be written in the file "[config.TEMP_PATH_CONDITIONS]", located in
the directory "[config.TEMP_CASE]-1". "config" is the Configuration
object of this analysis.
If the "writePerPath" argument is False, which is the default value,
this hierarchy is `rotated'. Each directory instead corresponds to
one type of information. The contents of each directory are files,
one for each Path object. For example, the conditions of the first
Path object in the list will be written in the file
"[config.TEMP_PATH_CONDITIONS]-1", located in the directory
"[config.TEMP_PATH_CONDITIONS]".
The "rootDir" argument specifies where the directories should be
created; if None is provided, which is the default value, they are
created in the temporary directory created by GameTime for
this analysis. Directories from a previous execution will be
overwritten.
@param paths List of Path objects to write to files.
@param writePerPath Boolean flag, as described. True if each directory
created corresponds to one Path object; False if each directory
created corresponds to one type of information available within
a Path object.
@param rootDir Location of a root directory, as described.
"""
rootDir = rootDir or self.projectConfig.locationTempDir
def generateLocation(infoType):
"""Helper function that returns the location of the file where
the provided type of information (about a Path object) will be
written.
@param infoType Type of information (about a Path object) that
will be written, provided as a string.
@retval Location of the file where the information will be written.
"""
infoDir = os.path.join(rootDir,
("%s-%s" % (config.TEMP_CASE, pathNum + 1))
if writePerPath else infoType)
createDir(infoDir)
infoFile = os.path.join(infoDir,
"%s%s" % (infoType,
("" if writePerPath
else ("-%s" % (pathNum + 1)))))
return infoFile
for pathNum, path in enumerate(paths):
ilpProblemFile = generateLocation(config.TEMP_PATH_ILP_PROBLEM)
path.writeIlpProblemToLpFile(ilpProblemFile)
nodeFile = generateLocation(config.TEMP_PATH_NODES)
path.writeNodesToFile(nodeFile)
lineNumbersFile = generateLocation(config.TEMP_PATH_LINE_NUMBERS)
path.writeLineNumbersToFile(lineNumbersFile)
conditionsFile = generateLocation(config.TEMP_PATH_CONDITIONS)
path.writeConditionsToFile(conditionsFile)
conditionEdgesFile = \
generateLocation(config.TEMP_PATH_CONDITION_EDGES)
path.writeConditionEdgesToFile(conditionEdgesFile)
conditionTruthsFile = \
generateLocation(config.TEMP_PATH_CONDITION_TRUTHS)
path.writeConditionTruthsToFile(conditionTruthsFile)
arrayAccessesFile = \
generateLocation(config.TEMP_PATH_ARRAY_ACCESSES)
path.writeArrayAccessesToFile(arrayAccessesFile)
aggIndexExprsFile = \
generateLocation(config.TEMP_PATH_AGG_INDEX_EXPRS)
path.writeAggIndexExprsToFile(aggIndexExprsFile)
smtQueryFile = "%s.smt" % generateLocation(config.TEMP_PATH_QUERY)
path.smtQuery.writeSmtQueryToFile(smtQueryFile)
smtModelFile = generateLocation(config.TEMP_SMT_MODEL)
path.smtQuery.writeModelToFile(smtModelFile)
caseFile = generateLocation(config.TEMP_CASE)
path.writeAssignmentsToFile(caseFile)
predictedValueFile = \
generateLocation(config.TEMP_PATH_PREDICTED_VALUE)
path.writePredictedValueToFile(predictedValueFile)
measuredValueFile = \
generateLocation(config.TEMP_PATH_MEASURED_VALUE)
path.writeMeasuredValueToFile(measuredValueFile)
```
#### File: src/cli/__init__.py
```python
the source distribution and
at http://verifun.eecs.berkeley.edu/gametime/about/LICENSE,
for details on the GameTime license and authors.
"""
import os
import subprocess
import sys
import webbrowser
from gametime.defaults import config, logger, sourceDir
from gametime.updateChecker import isUpdateAvailable
def startCli():
"""Prepares and starts the command-line interface to GameTime."""
logger.info("Welcome to GameTime!")
logger.info("")
logger.info("Checking for any available updates...")
updateAvailable, latestVersionInfo = isUpdateAvailable()
if updateAvailable:
version = latestVersionInfo["version"]
infoUrl = latestVersionInfo["info_url"]
logger.info("An updated version of GameTime (%s) is available. "
"The current version is %s." % (version, config.VERSION))
choice = raw_input("Would you like to download and install "
"this version? Please enter `[Y]es` "
"or `[N]o`: ").lower()
while choice not in ["y", "yes", "n", "no"]:
choice = raw_input("Please enter `[Y]es` or `[N]o`: ").lower()
if choice in ["y", "yes"]:
logger.info("Exiting GameTime...")
try:
webbrowser.open(infoUrl)
sys.exit(0)
except webbrowser.Error as e:
logger.warning("Unable to open a web browser to display "
"information about the updated version: %s " % e)
logger.warning("Please visit %s to download and install "
"the updated version." % infoUrl)
else:
logger.info("Update not installed.")
logger.info("Please visit %s to download and install "
"an updated version of GameTime." % infoUrl)
elif not latestVersionInfo:
logger.warning("Unable to obtain information about available updates.")
logger.warning("Please check %s for the latest version of GameTime "
"and for available updates." % config.WEBSITE_URL)
    else:
        logger.info("No updates to GameTime are available.")
logger.info("")
# Construct the location of the directory that contains
# the batch file that initializes the GameTime command-line interface.
cliInitBatchFile = os.path.join(sourceDir,
os.path.join("bin", "gametime-cli.bat"))
subprocess.call([cliInitBatchFile], shell=True)
```
#### File: src/gui/guiHelper.py
```python
the source distribution and
at http://verifun.eecs.berkeley.edu/gametime/about/LICENSE,
for details on the GameTime license and authors.
"""
import bz2
import os
import pickle
from PySide import QtGui
from PySide import QtCore
from PySide.QtCore import Qt
import gametime.pulpHelper as pulpHelper
from gametime import GameTime
from gametime import GameTimeError
from gametime import PathType
from gametime.defaults import config
from gametime.projectConfiguration import getFilePaths
from gametime.projectConfiguration import getFuncNames
from gametime.projectConfiguration import ProjectConfiguration
from gametime.histogram import writeHistogramToFile
# tempStartLabel = "TEMPSTART"
# tempEndLabel = "TEMPEND"
class Window(object):
""" Position of window to display information. """
# No view.
NONE = 0
# Left window.
LEFT = -1
# Right window.
RIGHT = 1
class FileItem(object):
def __init__(self, fileName, origLocation, data,
mainWin=None, assign=False):
"""A FileItem object is created for every file that has been opened
by the user or that has been generated by the Analyzer. This object
promotes easy file loading and line highlighting.
"""
self.mainWindow = mainWin
self.fileList = mainWin.fileSelectWidget.widget()
self.basisValuesFile = None
self.numBasisPaths = 0
self.basisPaths = []
self.worstPaths = []
self.bestPaths = []
self.randomPaths = []
self.allPaths = []
self.basisValues = []
self.preprocessedFileItem = None
#: True if an Analyzer object can be created for this file,
#: False otherwise. A file cannot be analyzed and highlighted
#: at the same time. The default value is True.
self.canAnalyze = True
#: Analyzer object associated with this file.
self.analyzer = None
self.highlightText = None
#: Path object associated with this file.
self.highlightPath = None
        #: Original location of the file on disk.
        self.originalName = origLocation
        #: Name of the file as displayed in the FileSelect list.
        self.displayName = fileName
#: Current viewing mode.
self.window = Window.NONE
#: Order in which a FileItem is placed in a FileSelect list.
self.displayIndex = None
#: Maintains the name of the file being analyzed. This should
#: never be changed after creating this FileItem instance.
self.fileName = fileName
        #: Original location of the file on disk.
self.origLocation = origLocation
#: Text that should be displayed in the text viewing window.
#: This will not contain the highlights.
self.displayText = data
self.assign = assign
# """
# Add line numbers to the display text
# if the display text is not assignments
# """
# if not assign:
# lines = self.displayText.split("\n")
# for i in range(len(lines)):
# lines[i]=str(i)+": "+lines[i]
# self.displayText = "\n".join(lines)
#: None if this FileItem contains the original file;
#: otherwise, the FileItem that this is a path through.
self.parentFile = None
#: List of the FileItem objects that contain paths through
#: this FileItem object.
self.children = []
self.startLine = ""
self.startFile = ""
self.endLine = ""
self.endFile = ""
def addToMainWindow(self):
"""
Add this object to the dictionary of FileItem objects stored
in the main window.
"""
self.mainWindow.openItems[self.displayName] = self
def setAnalyze(self, analyzeBool):
"""Sets the ``canAnalyze`` flag, which determines if a file
can be analyzed.
Arguments:
analyzeBool:
New truth value for the ``canAnalyze`` flag.
"""
self.canAnalyze = analyzeBool
def makeAnalyzer(self):
"""Make the ``Analyzer`` object that corresponds to the file
currently being displayed, if possible.
"""
self.analyzer = GameTime.analyze(self.projectConfig)
def setHighlightPath(self, highlightPath):
self.highlightPath = highlightPath
def getHighlightPath(self):
return self.highlightPath
def setPreprocessedFileItem(self, fileItem):
self.preprocessedFileItem = fileItem
def getAnalyzeItem(self):
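        # Walk up the chain of parent FileItem objects until one that can
        # be analyzed is found; with the None check below, this returns
        # None if no ancestor qualifies.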
        analyzeItem = self
        while analyzeItem is not None and not analyzeItem.canAnalyze:
            analyzeItem = analyzeItem.getParent()
        return analyzeItem
def importBasisValues(self, fileName, values):
"""Imports basis values from a file."""
self.basisValuesFile = fileName
self.basisValues = values
for i in range(len(self.basisPaths)):
path = self.basisPaths[i]
lines = path.displayText.split("\n")
valIndex = lines.index("Value:")+1
lines[valIndex] = str(values[i])
path.displayText = "\n".join(lines)
def appendToFileSelect(self):
"""
Appends the display name of the current file to the end of the fileList.
"""
        if self.parentFile is None:
self.fileList.addItem(self.displayName)
self.displayIndex = self.fileList.count() - 1
else:
#Note: This is called after adding the child to the parentFile, so
# if the parent is at index 0, the first child will be at 1.
self.displayIndex = self.parentFile.displayIndex + \
self.parentFile.getChildren().index(self)+1
self.fileList.insertItem(self.displayIndex, self.displayName)
def setAsLeftView(self):
"""Associates this FileItem instance with the left display window."""
self.window = Window.LEFT
# self.fileList.replaceItem(self.origLocation, self.displayIndex)
self.mainWindow.leftTextEdit.setItem(self)
def setAsRightView(self):
"""Associates this FileItem instance with the right display window."""
self.window = Window.RIGHT
# self.fileList.replaceItem(self.origLocation, self.displayIndex)
self.mainWindow.rightTextEdit.setItem(self)
def removeFromView(self):
"""
Removes this FileItem instance from either the left or the right
display window. Sets display name back to normal.
"""
if self.window == Window.RIGHT:
self.mainWindow.rightTextEdit.setItem(None)
else:
self.mainWindow.leftTextEdit.setItem(None)
self.window = Window.NONE
def getHighlightText(self):
# Still looking for a better way to do this function.
"""Make the highlightText from the displayText.
What this function does right now is reopen the original (non-Path())
file, read each line, and check to see if it is a highlightable
line. If it is, add the appropriate HTML tag (i.e. highlight the line).
Then add the line to the highlightText.
The bug here is that the current file may not have the same data as
the original file. TODO(lisa): Still working on a way to do this.
"""
def sanitize(text):
            text = text.replace("&", "&amp;")
            return text.replace(
                "<",
                "&lt;"
            ).replace(
                ">",
                "&gt;"
            ).replace(
                "\"",
                "&quot;"
            ).replace(
                "'",
                "&#39;"
            )
if not self.highlightText:
pathLineNumbers = self.highlightPath.lineNumbers
self.highlightText = "<pre>"
with open(self.origLocation, "r") as origFileHandler:
origFileLines = origFileHandler.readlines()
for lineNum, line in enumerate(origFileLines):
self.highlightText += (
"%s%s%s" %
(("<span style='background-color: #8DB6CD'>"
if lineNum + 1 in pathLineNumbers else ""),
sanitize(line),
("</span>" if lineNum + 1 in pathLineNumbers else ""))
)
self.highlightText = "%s</pre>" % self.highlightText
return self.highlightText
def getFileSelectIndex(self):
"""Get the index of this FileItem instance in the fileList."""
return self.displayIndex
def getFileName(self):
"""
Get the (unchanged by display) fileName of this FileItem instance.
@retval (unchanged by display) fileName of this FileItem instance.
"""
return self.fileName
def getDisplayText(self):
"""Get the (unchanged by highlighting) display text."""
path = self.highlightPath
if path:
self.displayText = ("Assignments:\n%s\n\nPredicted Value:\n%s\n\n"
"Measured Value:\n%s" %
(path.getAssignments(),
path.getPredictedValue(),
path.getMeasuredValue()))
return self.displayText
def getChildren(self):
"""Get the children of this FileItem."""
return self.children
def addChild(self, newChild):
"""Add a child FileItem."""
self.children.append(newChild)
def removeChild(self, child):
"""Remove a child FileItem."""
self.children.remove(child)
if child in self.basisPaths:
self.basisPaths.remove(child)
elif child in self.worstPaths:
self.worstPaths.remove(child)
elif child in self.bestPaths:
self.bestPaths.remove(child)
elif child in self.randomPaths:
self.randomPaths.remove(child)
elif child in self.allPaths:
self.allPaths.remove(child)
if child.origLocation == " (Preprocessed)":
self.preprocessedFileItem = None
def setParent(self, sourceFileItem):
"""Set the parent of this FileItem."""
oldName = self.displayName
if self.assign:
self.parentFile = sourceFileItem
self.parentFile.addChild(self)
else:
self.displayName = "- %s" % self.displayName
self.displayName = "%s %s" % (sourceFileItem.displayName,
self.displayName)
if oldName in self.mainWindow.openItems:
self.mainWindow.openItems[self.displayName] = self
self.mainWindow.openItems.pop(oldName)
def getParent(self):
"""Get the parent of this FileItem."""
return self.parentFile
def getBasisPaths(self):
"""Return the basis paths stored for this FileItem."""
return self.basisPaths
def getWorstPaths(self):
return self.worstPaths
def getBestPaths(self):
"""Return the best paths stored for this FileItem."""
return self.bestPaths
def setProjectConfig(self, projectConfig):
self.projectConfig = projectConfig
class FileSelectList(QtGui.QListWidget):
"""
A FileSelectList is the list widget associated with the list of fileNames
located to the left of the main text displays. This class provides
operations to maintain the list.
"""
def __init__(self, mainWin):
super(FileSelectList, self).__init__()
self.mainWindow = mainWin
self.addItem("No files currently loaded.")
self.activeLeft = self.activeRight = None
# Double-clicking an item loads its contents in the main display.
self.itemDoubleClicked.connect(self.loadFileToDisplay)
def keyPressEvent(self, event):
# Pressing the Return key, the Enter key (from the keypad) or
# the spacebar on an item loads its contents in the main display.
keyPressed = event.key()
if keyPressed in [Qt.Key_Return, Qt.Key_Enter, Qt.Key_Space]:
self.loadFileToDisplay(self.currentItem())
elif keyPressed == Qt.Key_Up:
self.setCurrentRow(max(0, self.currentRow() - 1))
elif keyPressed == Qt.Key_Down:
self.setCurrentRow(min(self.currentRow() + 1, self.count() - 1))
def addFileName(self, fileItemToAdd, window):
"""
Add or change a fileName in this fileSelect instance. If the FileItem
has not been added to the fileSelect yet, call appendToFileSelect.
Otherwise, the position of the FileItem's fileName should not change
in the list. Either way, the FileItem's display text will be brought
        to the main text displays via the window argument (described below).
        @param fileItemToAdd {FileItem} FileItem to add, even if it has
            already been added.
        @param window {int} Window.LEFT (-1): left display; Window.RIGHT (1):
            right display; Window.NONE (0): no display.
@retval {bool} True if there is a display change, False if the FileItem
was already displayed
"""
if fileItemToAdd in [self.activeLeft, self.activeRight]:
return False
# If nothing has been added yet, clear default text.
# TODO(jkotker): What if you open multiple files?
if not self.activeLeft:
self.takeItem(0)
if fileItemToAdd.getFileSelectIndex() is None:
fileItemToAdd.appendToFileSelect()
#newDisplayValue = None
if window is Window.LEFT:
# if self.activeLeft:
# self.activeLeft.removeFromView()
fileItemToAdd.setAsLeftView()
if self.activeRight is not None and \
self.activeRight.getParent() != fileItemToAdd:
self.activeRight.removeFromView()
self.activeRight = None
self.activeLeft = fileItemToAdd
# newDisplayValue = False
else:
return True
def loadFileToDisplay(self, item):
"""Slot called when a double-click signal has been triggered o
on an item. If the item selected is already displayed, print
a simple note to the status bar.
"""
displayText = str(item.text())
fileItemClicked = self.mainWindow.openItems.get(displayText, None)
if fileItemClicked:
if "+" not in fileItemClicked.displayName:
fileItemClicked.setAsLeftView()
self.activeLeft = fileItemClicked
if self.activeRight is not None:
self.activeRight.removeFromView()
self.activeRight = None
else:
fileItemClicked.setAsRightView()
self.activeRight = fileItemClicked
self.mainWindow.slotShowHighlights()
self.mainWindow.printToConsole("Loaded %s." % displayText)
def removeItem(self, item):
"""Removes an item in the FileSelectList."""
self.takeItem(item.displayIndex)
for i in range(item.displayIndex, self.count()):
itemName = str(self.item(i).text())
itemName = itemName # Keep pyflakes happy.
self.mainWindow.openItems[itemName].displayIndex -= 1
def insertItem(self, index, item):
for i in range(index, self.count()):
itemName = str(self.item(i).text())
itemName = itemName # Keep pyflakes happy.
self.mainWindow.openItems[itemName].displayIndex += 1
QtGui.QListWidget.insertItem(self, index, item)
def removeGroup(self, parent):
        # Have to update the display indices of later items.
        # Some other problem as well...
grandParent = parent.getParent()
if grandParent:
return self.removeGroup(grandParent)
numToRemove = len(parent.getChildren())+1
startIndex = parent.displayIndex
for i in range(startIndex, startIndex+numToRemove):
item = self.takeItem(startIndex)
self.mainWindow.openItems.pop(str(item.text()))
for i in range(startIndex, self.count()):
itemName = str(self.item(i).text())
itemName = itemName # Keep pyflakes happy.
self.mainWindow.openItems[itemName].displayIndex -= numToRemove
if self.count() == 0:
self.addItem("No files currently loaded")
self.mainWindow.leftTextEdit.setItem(None)
self.mainWindow.rightTextEdit.setItem(None)
self.activeLeft = self.activeRight = None
elif startIndex == self.count():
# Load the previous index to view.
self.loadFileToDisplay(self.item(startIndex-1))
else:
self.loadFileToDisplay(self.item(startIndex))
def wheelEvent(self, event):
modifiers = QtGui.QApplication.keyboardModifiers()
if modifiers == Qt.ControlModifier:
mFont = self.font()
if event.delta() > 0:
diff = 2
else:
diff = -2
if mFont.pointSize()+diff <= 0:
return
mFont.setPointSize(mFont.pointSize()+diff)
self.setFont(mFont)
else:
super(FileSelectList, self).wheelEvent(event)
class TextEditObject(QtGui.QTextEdit):
def __init__(self, args):
"""
A TextEditObject instance is one of the two main text displays of
the main window. In general, it has an associated FileItem instance,
and it displays the displayText of that FileItem. However, if
displayHighlights is called, it will display the highlightText of
the instance, if available.
"""
super(TextEditObject, self).__init__(args)
self.setReadOnly(True)
self.setWindowTitle("No file loaded.")
self.analyzer = None
self.fileItemObject = None
self.setFont(QtGui.QFont("Courier", 10))
self.setTextCursor(QtGui.QTextCursor())
self.setTextInteractionFlags(Qt.TextSelectableByMouse |
Qt.TextSelectableByKeyboard)
def setMainWindow(self, mainWin):
self.mainWindow = mainWin
def mousePressEvent(self, e):
"""
When the mouse is clicked somewhere inside this TextEditObject instance,
the statusBar of the main window will display the windowTitle
of the TextEditObject instance, which will be the fileName of the
associated FileItem object.
"""
self.mainWindow.statusBar().showMessage(self.windowTitle())
def setItem(self, fileItemToSet):
"""Change FileItem instances and set displayText to new instance.
Also change windowTitle.
"""
if fileItemToSet is None:
self.fileItemObject = None
self.setText("")
self.setWindowTitle("No file loaded.")
else:
self.fileItemObject = fileItemToSet
self.setText(fileItemToSet.getDisplayText())
self.setWindowTitle(self.fileItemObject.getFileName())
def displayHighlights(self):
"""
Try accessing the highlightText of the fileItemObject instance, if it
exists. If it exists, set highlightText as display text. Otherwise,
do nothing but print out a console message.
"""
if not self.fileItemObject:
msg = "No path has been selected to be highlighted."
self.mainWindow.printToConsole(msg)
else:
openFileItems = self.mainWindow.openItems.get(
self.fileItemObject.getParent().displayName,
None
)
if openFileItems:
self.mainWindow.addToWindow(openFileItems, Window.LEFT)
if self.mainWindow.highlightsEnabled:
self.mainWindow.leftTextEdit.setHtml(
self.fileItemObject.getHighlightText()
)
else:
parentFile = self.fileItemObject.parentFile
self.mainWindow.leftTextEdit.setText(
parentFile.preprocessedFileItem.displayText
)
def clear(self):
self.setText("")
self.setWindowTitle("No file loaded.")
self.analyzer = None
self.fileItemObject = None
class GenericDialog(QtGui.QDialog):
"""Covers all dialogs that pop up when the user wants to generate feasible
paths for a particular file.
The title is displayed according to the enumCommand supplied. In the dialog
itself, the user must choose between two options: left or right display.
The dialog returns True if the left is chosen, and False otherwise.
Small sidenote: exec_() disables access to all other windows once this
dialog is created until it is destroyed. When it is destroyed, exec_()
returns the value of done(), which signals the end of a dialog.
"""
def __init__(self, enumCommand, caller, showfileNamesFlag,
fileItemToDisplay=None):
"""
@param enumCommand {int} Depending on the integer supplied, the title
of the dialog will vary.
@param caller {Object} The caller of this class.
@param showfileNamesFlag {bool} If True, show fileNames next to radio
buttons. If False, do not show fileNames.
@param fileItemToDisplay {FileItem} If available, display the FileItem.
"""
QtGui.QDialog.__init__(self)
self.caller = caller
self.resize(508, 300)
self.layout = QtGui.QVBoxLayout(self)
# Set title of dialog.
title = "Default Choice Dialog"
if enumCommand == 0:
title = "Generate basis paths."
elif enumCommand == 1:
title = "Generate worst-case feasible paths."
elif enumCommand == 2:
title = "Generate best-case feasible paths."
elif enumCommand == 3:
title = "Highlight line numbers for analyzed file."
elif enumCommand == 4:
title = "Select display to save."
self.taskTitleLabel = QtGui.QLabel(title, self)
self.layout.addWidget(self.taskTitleLabel)
if fileItemToDisplay:
# Show fileName of FileItem to display.
fileName = "File selected: %s" % fileItemToDisplay.getFileName()
self.layout.addWidget(QtGui.QLabel(fileName, self))
# Create radio buttons so that the user can only select either left
# or right (no other choices). The window title of each display is
# given only if the showfileNamesFlag is on.
selectView = QtGui.QGroupBox("Which file?", self)
vboxFileSelect = QtGui.QVBoxLayout(selectView)
self.leftView = QtGui.QRadioButton("Left")
self.rightView = QtGui.QRadioButton("Right")
if showfileNamesFlag:
self.leftView.setText(
"Left: %s" %
caller.mainWindow.leftTextEdit.windowTitle()
)
self.rightView.setText(
"Right: %s" %
caller.mainWindow.rightTextEdit.windowTitle()
)
self.leftView.setChecked(1)
vboxFileSelect.addWidget(self.leftView)
vboxFileSelect.addWidget(self.rightView)
vboxFileSelect.addStretch()
self.layout.addWidget(selectView)
# OK/Cancel buttons. "OK" is linked to the on_accept slot when
# pressed, and Cancel to the on_reject slot.
buttonBox = QtGui.QDialogButtonBox(Qt.Horizontal, self)
buttonBox.setStandardButtons(
QtGui.QDialogButtonBox.Cancel |
QtGui.QDialogButtonBox.Ok
)
buttonBox.accepted.connect(self.on_accept)
buttonBox.rejected.connect(self.on_reject)
self.layout.addWidget(buttonBox)
def leftViewChecked(self):
"""After execution, will be set to the result of
the radio selection.
"""
return self.leftView.isChecked()
def on_accept(self):
"""If OK is pressed, return a True/False as described above."""
self.accept()
    def on_reject(self):
        """If Cancel is pressed, print to the console that no file
        was selected.
        """
        self.caller.mainWindow.printToConsole("No file selected.")
        self.reject()
class NumPathsDialog(QtGui.QDialog):
    # If the parameter `requireOvercompleteBasis` is True, the dialog
    # does not give the user the choice of the OB_EXTRACTION flag, but
    # sets the flag to True by default.
def __init__(self, caller, winType, requireOvercompleteBasis=False):
QtGui.QDialog.__init__(self)
self.caller = caller
self.caller.useObExtraction = requireOvercompleteBasis
self.numPathsEdit = QtGui.QLineEdit()
self.numPathsEdit.setText(str(self.caller.numPaths))
        # Row at which to show the OK/Cancel buttons.
standardRow = 2
self.setWindowTitle("Number of paths")
layout = QtGui.QGridLayout()
layout.setSpacing(5)
layout.addWidget(self.numPathsEdit, 1, 0)
if winType == "Worst":
numPathsTitle = QtGui.QLabel("How many worst-case feasible paths "
"would you like to generate?")
elif winType == "Best":
numPathsTitle = QtGui.QLabel("How many best-case feasible paths "
"would you like to generate?")
elif winType == "Random":
numPathsTitle = QtGui.QLabel("How many random feasible paths "
"would you like to generate?")
if winType == "Worst" or winType == "Best":
if (requireOvercompleteBasis):
layout.addWidget(QtGui.QLabel(
"Overcomplete basis generated, using overcomplete-basis "
"extraction"))
else:
self.newExtraction = \
QtGui.QCheckBox("Use Overcomplete-Basis Extraction")
layout.addWidget(self.newExtraction, 2, 0)
# If interested in the longest path, offer both path extraction
# algorithms and move standard buttons one row lower
standardRow = 3
layout.addWidget(numPathsTitle, 0, 0, 1, 3)
fileSelectWidget = caller.mainWindow.fileSelectWidget
currentFile = fileSelectWidget.widget().activeLeft.getAnalyzeItem()
if currentFile.analyzer is not None:
maxPaths = QtGui.QLabel("Maximum of %i" %
currentFile.analyzer.dag.numPaths)
layout.addWidget(maxPaths, 1, 1)
self.layout = layout
self.setLayout(self.layout)
#Add standard buttons here at the bottom
okButton = QtGui.QPushButton("OK")
cancelButton = QtGui.QPushButton("Cancel")
self.layout.addWidget(okButton, standardRow, 1)
self.layout.addWidget(cancelButton, standardRow, 2)
okButton.clicked.connect(self.on_accept)
cancelButton.clicked.connect(self.on_reject)
self.show()
def on_accept(self):
try:
self.caller.numPaths = int(str(self.numPathsEdit.text()))
if (hasattr(self, 'newExtraction')):
self.caller.useObExtraction = self.newExtraction.isChecked()
except ValueError:
self.caller.mainWindow.printToConsole("Please enter a valid "
"integer value.")
return
self.accept()
def on_reject(self):
self.caller.mainWindow.printToConsole("Generation of worst "
"cases cancelled.")
self.reject()
class AllPathsDialog(QtGui.QDialog):
    # If the parameter `requireOvercompleteBasis` is True, the dialog
    # does not give the user the choice of the OB_EXTRACTION flag, but
    # sets the flag to True by default.
def __init__(self, caller, requireOvercompleteBasis=False):
QtGui.QDialog.__init__(self)
self.caller = caller
self.caller.useObExtraction = requireOvercompleteBasis
self.setWindowTitle("Generating all feasible paths")
layout = QtGui.QGridLayout()
layout.setSpacing(5)
fileSelectWidget = caller.mainWindow.fileSelectWidget
currentFile = fileSelectWidget.widget().activeLeft.getAnalyzeItem()
layout.addWidget(QtGui.QLabel("WARNING: This may take a long time, as "
"there are %i possible paths that can be "
"generated.\n Are you sure you wish to "
"continue?"
% currentFile.analyzer.dag.numPaths),
0, 0)
if (requireOvercompleteBasis):
layout.addWidget(QtGui.QLabel(
"Overcomplete basis generated, using overcomplete-basis "
"extraction"))
else:
self.newExtraction = \
QtGui.QCheckBox("Use Overcomplete-Basis Extraction")
layout.addWidget(self.newExtraction)
self.layout = layout
self.setLayout(self.layout)
#Add standard buttons here at the bottom
okButton = QtGui.QPushButton("OK")
cancelButton = QtGui.QPushButton("Cancel")
self.layout.addWidget(okButton, 2, 1)
self.layout.addWidget(cancelButton, 2, 2)
okButton.clicked.connect(self.on_accept)
cancelButton.clicked.connect(self.on_reject)
self.show()
def on_accept(self):
try:
if (hasattr(self, 'newExtraction')):
self.caller.useObExtraction = self.newExtraction.isChecked()
except ValueError:
return
self.accept()
def on_reject(self):
self.caller.mainWindow.printToConsole(
"Generation of all feasible paths cancelled.")
self.reject()
class HistogramDialog(QtGui.QDialog):
def __init__(self, caller):
QtGui.QDialog.__init__(self)
self.mainWindow = caller
self.setWindowTitle("Generate Histogram")
layout = QtGui.QGridLayout()
layout.setSpacing(5)
self.shouldChangeLowerBound = True
self.shouldChangeUpperBound = True
#bin size (label: text edit)
#lower bound (label: text edit)
#upper bound (label: text edit)
#paths to use (label: dropdown)
numBinsTitle = QtGui.QLabel("Number of bins")
self.numBinsEdit = QtGui.QLineEdit()
layout.addWidget(numBinsTitle, 0, 0)
layout.addWidget(self.numBinsEdit, 0, 1)
self.numBinsEdit.setFixedWidth(50)
lowerBoundTitle = QtGui.QLabel("Lower bound")
self.lowerBoundEdit = QtGui.QLineEdit()
layout.addWidget(lowerBoundTitle, 1, 0)
layout.addWidget(self.lowerBoundEdit, 1, 1)
self.lowerBoundEdit.setFixedWidth(50)
QtCore.QObject.connect(self.lowerBoundEdit,
QtCore.SIGNAL("textChanged(QString)"),
self.lowerBoundChanged)
upperBoundTitle = QtGui.QLabel("Upper bound")
self.upperBoundEdit = QtGui.QLineEdit()
layout.addWidget(upperBoundTitle, 1, 2)
layout.addWidget(self.upperBoundEdit, 1, 3)
self.upperBoundEdit.setFixedWidth(50)
QtCore.QObject.connect(self.upperBoundEdit,
QtCore.SIGNAL("textChanged(QString)"),
self.upperBoundChanged)
saveLocationTitle = QtGui.QLabel("Save location")
self.saveLocationEdit = QtGui.QLineEdit()
layout.addWidget(saveLocationTitle, 3, 0)
layout.addWidget(self.saveLocationEdit, 3, 1, 1, 2)
browseButton = QtGui.QPushButton("Browse...")
layout.addWidget(browseButton, 3, 3)
browseButton.clicked.connect(self.browseFile)
pathTitle = QtGui.QLabel("Paths to use")
self.pathEdit = QtGui.QComboBox()
self.pathItems = self.getPathNamesAndItems()
for i, name in enumerate(self.pathItems.keys()):
self.pathEdit.insertItem(i, name)
QtCore.QObject.connect(self.pathEdit,
QtCore.SIGNAL("currentIndexChanged(QString)"),
self.changeBounds)
layout.addWidget(pathTitle, 4, 0)
layout.addWidget(self.pathEdit, 4, 1, 1, 3)
valueText1 = QtGui.QLabel("Use")
self.valueType = QtGui.QComboBox()
self.valueType.insertItem(0, "measured")
self.valueType.insertItem(1, "predicted")
QtCore.QObject.connect(self.valueType,
QtCore.SIGNAL("currentIndexChanged(QString)"),
self.changeBounds)
valueText2 = QtGui.QLabel("values.")
layout.addWidget(valueText1, 5, 0)
layout.addWidget(self.valueType, 5, 1, 1, 2)
layout.addWidget(valueText2, 5, 3)
self.layout = layout
self.setLayout(self.layout)
#Add standard buttons here at the bottom
okButton = QtGui.QPushButton("OK")
cancelButton = QtGui.QPushButton("Cancel")
self.layout.addWidget(okButton, 6, 2)
self.layout.addWidget(cancelButton, 6, 3)
okButton.clicked.connect(self.on_accept)
cancelButton.clicked.connect(self.on_reject)
self.changeBounds(None)
self.show()
def lowerBoundChanged(self, _):
self.shouldChangeLowerBound = False
def upperBoundChanged(self, _):
self.shouldChangeUpperBound = False
def changeBounds(self, _):
if self.valueType.currentText() == "measured":
measured = True
else:
measured = False
pathItems = self.pathItems[str(self.pathEdit.currentText())]
if measured:
pathValues = [item.measuredValue for item in pathItems]
else:
pathValues = [item.predictedValue for item in pathItems]
if self.shouldChangeLowerBound:
self.lowerBoundEdit.setText(str(min(pathValues)))
self.shouldChangeLowerBound = True
if self.shouldChangeUpperBound:
self.upperBoundEdit.setText(str(max(pathValues)))
self.shouldChangeUpperBound = True
def on_accept(self):
try:
errorMessage = "Invalid number of bins."
numBins = int(str(self.numBinsEdit.text()))
if self.valueType.currentText() == "measured":
measured = True
else:
measured = False
errorMessage = "Invalid paths chosen."
pathItems = self.pathItems[str(self.pathEdit.currentText())]
if measured:
pathValues = [item.measuredValue for item in pathItems]
else:
pathValues = [item.predictedValue for item in pathItems]
errorMessage = "Invalid lower bound."
lowerString = str(self.lowerBoundEdit.text())
if lowerString == "":
lower = min(pathValues)
else:
lower = float(lowerString)
errorMessage = "Invalid upper bound."
upperString = str(self.upperBoundEdit.text())
if upperString == "":
upper = max(pathValues)
else:
upper = float(upperString)
errorMessage = "Save location cannot be empty."
histogramLocation = str(self.saveLocationEdit.text())
if histogramLocation == "":
raise Exception("Bad save location")
except Exception:
self.mainWindow.printToConsole(errorMessage)
return
        if max(pathValues) > upper or min(pathValues) < lower:
            if not ConfirmationDialog(
                "Some path values are outside of the specified range, and "
                "will not be included in the histogram.\n"
                "Are you sure you want to continue?"
            ).exec_():
                return
histogramRange = (lower, upper)
writeHistogramToFile(histogramLocation,
pathItems,
numBins,
histogramRange,
measured)
self.accept()
def on_reject(self):
self.mainWindow.printToConsole("Histogram generation canceled.")
self.reject()
def browseFile(self):
fileDialog = QtGui.QFileDialog()
fileName, _ = fileDialog.getSaveFileName(self, "Save File", ".")
if not fileName:
return
self.saveLocationEdit.setText(fileName)
def getPathNamesAndItems(self):
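        # The returned dictionary maps a human-readable group title to the
        # list of Path objects in that group; an illustrative shape (file
        # and path names hypothetical):
        #
        #     {"modexp.c: Basis paths (4)": [path1, path2, ...],
        #      "modexp.c: Worst-case paths (2)": [path5, path6]}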
openFileItems = self.mainWindow.openItems.values()
pathNamesAndValues = {}
for item in openFileItems:
if len(item.children) > 0:
if len(item.basisPaths) > 0:
pathTitle = ("%s: Basis paths (%d)" %
(item.displayName, len(item.basisPaths)))
pathValues = [i.getHighlightPath() for i in item.basisPaths]
pathNamesAndValues[pathTitle] = pathValues
if len(item.worstPaths) > 0:
pathTitle = ("%s: Worst-case paths (%d)" %
(item.displayName, len(item.worstPaths)))
pathValues = [i.getHighlightPath() for i in item.worstPaths]
pathNamesAndValues[pathTitle] = pathValues
if len(item.bestPaths) > 0:
pathTitle = ("%s: Best-case paths (%d)" %
(item.displayName, len(item.bestPaths)))
pathValues = [i.getHighlightPath() for i in item.bestPaths]
pathNamesAndValues[pathTitle] = pathValues
if len(item.randomPaths) > 0:
pathTitle = ("%s: Random paths (%d)" %
(item.displayName, len(item.randomPaths)))
pathValues = [i.getHighlightPath() for i
in item.randomPaths]
pathNamesAndValues[pathTitle] = pathValues
if len(item.allPaths) > 0:
pathTitle = ("%s: All paths (%d)" %
(item.displayName, len(item.allPaths)))
pathValues = [i.getHighlightPath() for i in item.allPaths]
pathNamesAndValues[pathTitle] = pathValues
return pathNamesAndValues
class XmlFileDialog(QtGui.QDialog):
def __init__(self, caller, projectConfig):
super(XmlFileDialog, self).__init__()
self.setWindowTitle("Project Configuration")
self.mainWindow = caller
# TODO: May change/make scrollable later.
# self.resize(300, 500)
self.layout = QtGui.QGridLayout(self)
self.layout.setSpacing(5)
textFieldNames = [
"location",
"analysis-function",
"start-label",
"end-label",
"include",
"merge",
"inline",
"ilp-solver",
"smt-solver",
]
textFieldNamesToTitles = {
"location": "Location of C file",
"analysis-function": "Function to analyze",
"start-label": "Label to start analysis at",
"end-label": "Label to end analysis at",
"include": "Paths of directories to include",
"merge": "Paths of files to merge",
"inline": "Names of functions to inline",
"ilp-solver": "ILP solver",
"smt-solver": "SMT solver",
}
        booleanFieldNames = []  # ["unroll-loops"]
booleanFieldNamesToTitles = {
# "detect-loops",
# "unroll-loops": "Unroll loops in the code",
# "draw-cfg",
# "dump-instruction-trace",
# "dump-ir",
# "dump-path",
# "dump-z3",
# "keep-cil-temps"
}
self.textFields = textFields = {}
self.booleanFields = booleanFields = {}
self.projectConfig = projectConfig
fieldVals = {
"location": projectConfig.locationOrigFile,
"analysis-function": projectConfig.func,
"start-label": projectConfig.startLabel,
"end-label": projectConfig.endLabel,
"smt-solver": str(projectConfig.smtSolver),
"ilp-solver": pulpHelper.getIlpSolverName(projectConfig.ilpSolver),
"include": " ".join(projectConfig.included),
"inline": " ".join(projectConfig.inlined),
"unroll-loops":projectConfig.UNROLL_LOOPS
}
for textFieldNum, textFieldName in enumerate(textFieldNames):
fieldTitle = QtGui.QLabel(textFieldNamesToTitles[textFieldName])
valueText = fieldVals.get(textFieldName, "")
if textFieldName == "smt-solver":
fieldEdit = QtGui.QComboBox()
entries = [
"z3",
"boolector",
"boolector-lingeling",
"boolector-minisat",
"boolector-picosat"
]
for entryNum, entry in enumerate(entries):
fieldEdit.insertItem(entryNum, entry)
if valueText == entry:
fieldEdit.setCurrentIndex(entryNum)
elif textFieldName == "ilp-solver":
fieldEdit = QtGui.QComboBox()
entries = [
"",
"cbc",
"cbc-pulp",
"cplex",
"glpk",
"gurobi",
"xpress"
]
for entryNum, entry in enumerate(entries):
fieldEdit.insertItem(entryNum, entry)
if valueText == entry:
fieldEdit.setCurrentIndex(entryNum)
else:
fieldEdit = QtGui.QLineEdit(valueText)
textFields[textFieldName] = fieldEdit
self.layout.addWidget(fieldTitle, textFieldNum + 1, 0)
self.layout.addWidget(fieldEdit, textFieldNum + 1, 1)
numTextFields = len(textFieldNamesToTitles)
for booleanFieldNum, booleanFieldName in enumerate(booleanFieldNames):
fieldTitle = QtGui.QLabel(
booleanFieldNamesToTitles[booleanFieldName]
)
fieldEdit = QtGui.QCheckBox()
fieldEdit.setChecked(fieldVals.get(booleanFieldName, False))
booleanFields[booleanFieldName] = fieldEdit
self.layout.addWidget(
fieldTitle,
numTextFields + booleanFieldNum + 1,
0
)
self.layout.addWidget(
fieldEdit,
numTextFields + booleanFieldNum + 1,
1
)
numBooleanFields = len(booleanFieldNamesToTitles)
# fieldTitle = QtGui.QLabel("Location of project configuration "
# "XML file")
# valueText = self.xmlLocation if isXmlFile else ""
# self.xmlNameEdit = QtGui.QLineEdit(valueText)
# self.layout.addWidget(
# fieldTitle,
# numTextFields + numBooleanFields + 1,
# 0
# )
# self.layout.addWidget(
# self.xmlNameEdit,
# numTextFields + numBooleanFields + 1,
# 1
# )
# browseButton = QtGui.QPushButton("Browse...")
# self.layout.addWidget(
# browseButton,
# numTextFields + numBooleanFields + 2,
# 1
# )
# browseButton.clicked.connect(self.browseFile)
# OK/Cancel buttons. OK is linked to the on_accept slot when
# pressed, and Cancel to the on_reject slot.
buttonBox = QtGui.QDialogButtonBox(Qt.Horizontal, self)
buttonBox.setStandardButtons(
QtGui.QDialogButtonBox.Cancel | QtGui.QDialogButtonBox.Ok
)
buttonBox.accepted.connect(self.on_accept)
buttonBox.rejected.connect(self.on_reject)
self.layout.addWidget(
buttonBox,
numTextFields + numBooleanFields + 3,
0
)
def browseFile(self):
# DEPRECATED.
baseDirectory = os.path.dirname(self.xmlLocation)
fileName, _ = QtGui.QFileDialog.getSaveFileName(
self,
"Save File",
baseDirectory
)
if not fileName:
return
self.xmlNameEdit.setText(fileName)
def on_accept(self):
textFields = self.textFields
for textField in textFields:
if textField in ["smt-solver", "ilp-solver"]:
fieldVal = str(textFields[textField].currentText())
else:
fieldVal = str(textFields[textField].text())
if textField == "location":
locationFile = fieldVal
elif textField == "analysis-function":
func = fieldVal
elif textField == "start-label":
startLabel = fieldVal
elif textField == "end-label":
endLabel = fieldVal
elif textField == "smt-solver":
smtSolverName = fieldVal
elif textField == "ilp-solver":
ilpSolverName = fieldVal
elif textField == "include":
if fieldVal != "":
# TODO (jkotker): Be smarter about relative locations.
# projectConfigDir = \
# os.path.dirname(os.path.abspath(self.xmlLocation))
included = getFilePaths(fieldVal) # , projectConfigDir)
else:
included = []
elif textField == "inline":
inlined = (getFuncNames(fieldVal) if fieldVal != "" else [])
# booleanFields = self.booleanFields
# for booleanField in booleanFields:
# fieldVal = booleanFields[booleanField].isChecked()
# if booleanField == "unroll-loops":
# unrollLoops = fieldVal
self.projectConfig = ProjectConfiguration(
locationFile=locationFile,
func=func,
smtSolverName=smtSolverName,
startLabel=startLabel,
endLabel=endLabel,
included=included,
inlined=inlined,
# unrollLoops=unrollLoops,
ilpSolverName=ilpSolverName
)
# self.fileName = str(self.xmlNameEdit.text())
# if self.fileName == "":
# self.mainWindow.printToConsole("Please provide a location "
# "where this project will be saved.")
# return
# self.projectConfig.writeToXmlFile(self.fileName)
self.accept()
def on_reject(self):
self.reject()
class LoopBoundsDialog(QtGui.QDialog):
"""Assumes that loops have been detected and
the loop config file has been generated."""
def __init__(self, caller):
QtGui.QDialog.__init__(self)
self.setWindowTitle("Loop Bounds")
self.mainWindow = caller
self.resize(300, 200)
self.layout = QtGui.QGridLayout(self)
self.layout.setSpacing(5)
fileSelectWidget = self.mainWindow.fileSelectWidget
self.currentFile = fileSelectWidget.widget().activeLeft.getAnalyzeItem()
self.projectConfig = self.currentFile.projectConfig
self.loopConfigFile = os.path.join(self.projectConfig.locationTempDir,
config.TEMP_LOOP_CONFIG)
#Do we want an import for this as well?
boundDirective = "Fill in the loop bounds below"
boundDirectiveLabel = QtGui.QLabel(boundDirective, self)
self.layout.addWidget(boundDirectiveLabel, 0, 0)
buttonBox = QtGui.QDialogButtonBox(Qt.Horizontal, self)
buttonBox.setStandardButtons(
QtGui.QDialogButtonBox.Cancel | QtGui.QDialogButtonBox.Ok
)
buttonBox.accepted.connect(self.on_accept)
buttonBox.rejected.connect(self.on_reject)
self.layout.addWidget(buttonBox, 1, 0)
self.loopInfo = self.readLoopConfig()
self.boundEdits = []
for i, info in enumerate(self.loopInfo):
# fileName, lineNumber, currentValue, loopHeader = info
fileName, lineNumber, currentValue = info
loopFile = QtGui.QLabel(fileName)
lineLabel = QtGui.QLabel(lineNumber)
boundEdit = QtGui.QLineEdit(str(currentValue))
# headerLabel = QtGui.QLabel(loopHeader)
self.boundEdits.append(boundEdit)
self.layout.addWidget(loopFile, i+2, 0)
self.layout.addWidget(lineLabel, i+2, 1)
self.layout.addWidget(boundEdit, i+2, 2)
# self.layout.addWidget(headerLabel, 2*i+3, 0)
def on_accept(self):
#Check that loop bounds have been filled in and write them to the file
#then re-run the analyzer
loopBounds = []
try:
for boundEdit in self.boundEdits:
loopBounds.append(int(str(boundEdit.text())))
except ValueError:
self.mainWindow.printToConsole("Please input integer "
"values only.")
return
self.setLoopBounds(loopBounds)
if self.writeLoopConfig():
self.projectConfig.UNROLL_LOOPS = True
self.accept()
else:
self.mainWindow.printToConsole("There was a problem creating "
"the loop configuration file.")
def on_reject(self):
self.reject()
def setLoopBounds(self, newBounds):
newLoopInfo = []
for i, oldInfo in enumerate(self.loopInfo):
newInfo = (oldInfo[0], oldInfo[1], newBounds[i])
newLoopInfo.append(newInfo)
self.loopInfo = newLoopInfo
def readLoopConfig(self):
# currentLoop = []
loopInfo = []
with open(self.loopConfigFile, "r") as loopConfigReader:
for line in loopConfigReader:
# if line[0] == "#":
# currentLoop.append(line[1:])
# else:
fileName, lineNumber, value = line.split(",")
intValue = int(value)
loopInfo.append((fileName, str(int(lineNumber)), intValue))
return loopInfo
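    # The loop configuration file read above and written below consists of
    # comma-separated records, one per loop, of the form
    # "fileName,lineNumber,bound", e.g. (values illustrative):
    #
    #     modexp.c,42,16
    #     modexp.c,57,8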
def writeLoopConfig(self):
loopOutput = ""
for info in self.loopInfo:
# fileName, lineNumber, currentValue, loopHeader = info
loopOutput += ("%s,%s,%i\n"%tuple(info))
# "#%s\n" %tuple(info))
try:
with open(self.loopConfigFile, "w") as loopConfigWriter:
loopConfigWriter.write(loopOutput)
except Exception:
return False
return True
class BasisValuesDialog(QtGui.QDialog):
"""
@param caller {Object} The caller of this class.
"""
def __init__(self, caller):
QtGui.QDialog.__init__(self)
self.setWindowTitle("Basis Values")
self.mainWindow = caller
self.resize(300, 200)
self.layout = QtGui.QGridLayout(self)
self.layout.setSpacing(5)
fileSelectWidget = self.mainWindow.fileSelectWidget
self.currentFile = fileSelectWidget.widget().activeLeft.getAnalyzeItem()
# Set title of dialog.
# title = "Basis Values"
# self.taskTitleLabel = QtGui.QLabel(title, self)
# self.layout.addWidget(self.taskTitleLabel, 0, 1)
importDirective = "Fill in the basis values below, or"
importDirectiveLabel = QtGui.QLabel(importDirective, self)
self.layout.addWidget(importDirectiveLabel, 0, 0)
importPathsButton = QtGui.QPushButton("Import from file...")
importPathsButton.clicked.connect(self.importBasisDialog)
self.layout.addWidget(importPathsButton, 0, 1)
# OK/Cancel buttons. OK is linked to the on_accept slot when
# pressed, and Cancel to the on_reject slot.
buttonBox = QtGui.QDialogButtonBox(Qt.Horizontal, self)
buttonBox.setStandardButtons(
QtGui.QDialogButtonBox.Cancel | QtGui.QDialogButtonBox.Ok
)
buttonBox.accepted.connect(self.on_accept)
buttonBox.rejected.connect(self.on_reject)
self.layout.addWidget(buttonBox, 1, 0, 1, 2)
# pathTitles = []
self.pathEdits = []
self.numPaths = self.currentFile.numBasisPaths
if self.numPaths == 0:
self.mainWindow.printToConsole(
"You need to generate the basis paths first."
)
return
for i in range(self.numPaths):
pathTitle = QtGui.QLabel("Path %d" % (i+1))
valueText = ""
if self.currentFile.basisValues != []:
valueText = str(self.currentFile.basisValues[i])
pathEdit = QtGui.QLineEdit(valueText)
self.pathEdits.append(pathEdit)
self.layout.addWidget(pathTitle, i+2, 0)
self.layout.addWidget(pathEdit, i+2, 1)
def importBasisDialog(self):
analyzer = self.currentFile.analyzer
projectConfig = analyzer.projectConfig
fileDialog = QtGui.QFileDialog()
fileDialog.setFileMode(QtGui.QFileDialog.ExistingFile)
valueFile, _ = fileDialog.getOpenFileName(
self,
"Open File",
projectConfig.locationOrigDir
)
if not valueFile:
self.mainWindow.printToConsole("No file selected.")
return
try:
analyzer.loadBasisValuesFromFile(valueFile)
basisPaths = analyzer.basisPaths
for i, basisPath in enumerate(basisPaths):
self.pathEdits[i].setText(unicode(basisPath.getMeasuredValue()))
except GameTimeError:
self.mainWindow.printToConsole("Basis values file is not "
"properly formatted.")
def on_accept(self):
basisValues = []
try:
for value in self.pathEdits:
basisValues.append(float(str(value.text())))
except ValueError:
self.mainWindow.printToConsole("Please input numerical "
"values only.")
return
# analyzer = self.currentFile.analyzer
for i, value in enumerate(basisValues):
self.currentFile.analyzer.basisPaths[i].setMeasuredValue(value)
# # Write basis values to file
# basisValuesFile = \
# self.currentFile.analyzer.writeBasisValuesToFile(basisValues)
# self.currentFile.importBasisValues(basisValuesFile, basisValues)
# if self.mainWindow.fileSelectWidget.widget().activeRight:
# self.mainWindow.fileSelectWidget.widget().
# activeRight.setAsRightView()
self.accept()
def on_reject(self):
self.reject()
class BasisGenerationDialog(QtGui.QDialog):
def __init__(self, caller, maximumErrorScaleFactor):
QtGui.QDialog.__init__(self)
self.caller = caller
self.overcompleteBasisEdit = QtGui.QCheckBox("Generate overcomplete basis")
self.overcompleteBasisEdit.setChecked(False)
self.errorScaleFactorEdit = QtGui.QLineEdit()
self.errorScaleFactorEdit.setText(str(maximumErrorScaleFactor))
self.errorScaleFactorEdit.setReadOnly(True)
self.setWindowTitle("Generate Basis")
layout = QtGui.QGridLayout()
layout.setSpacing(5)
layout.addWidget(self.overcompleteBasisEdit, 0, 0, 1, 1)
layout.addWidget(self.errorScaleFactorEdit, 1, 1)
descriptionLabel = QtGui.QLabel("Maximum Error Scale Factor:")
layout.addWidget(descriptionLabel, 1, 0)
self.layout = layout
self.setLayout(self.layout)
self.overcompleteBasisEdit.clicked.connect(self.toggleActive)
#Add standard buttons here at the bottom
okButton = QtGui.QPushButton("OK")
cancelButton = QtGui.QPushButton("Cancel")
self.layout.addWidget(okButton, 2, 0)
self.layout.addWidget(cancelButton, 2, 1)
okButton.clicked.connect(self.on_accept)
cancelButton.clicked.connect(self.on_reject)
self.show()
def toggleActive(self):
self.errorScaleFactorEdit.setReadOnly(
not self.overcompleteBasisEdit.isChecked())
def on_accept(self):
try:
if float(str(self.errorScaleFactorEdit.text())) < 1:
raise ValueError
self.caller.maximumErrorScaleFactor = \
float(str(self.errorScaleFactorEdit.text()))
self.caller.generateOvercompleteBasis = \
self.overcompleteBasisEdit.isChecked()
except ValueError:
self.caller.mainWindow.printToConsole(
"Please enter a valid "
"floating-point value at least 1.0.")
return
self.accept()
def on_reject(self):
self.caller.mainWindow.printToConsole("Generation of basis "
"cancelled.")
self.reject()
class GenericAnalyzer(object):
"""Function called when a slot to find a feasible path is called.
GenericAnalyzer takes an enumCommand and creates a GenericDialog.
After determining which file to analyze from the GenericDialog,
GenericAnalyzer tries to create an Analyzer object for the associated
instance. If creating the Analyzer object succeeds, GenericAnalyzer
calls the function callEnumCommand. callEnumCommand then creates
the files for each Path object the Analyzer object generates.
"""
def __init__(self, enumCommand, mainWin):
self.mainWindow = mainWin
self.enumCommand = enumCommand
self.numPaths = 1
self.useObExtraction = False
def exec_(self):
leftTextEdit = self.mainWindow.leftTextEdit
itemToAnalyze = leftTextEdit.fileItemObject.getAnalyzeItem()
if not itemToAnalyze or not itemToAnalyze.canAnalyze:
# No file has been selected or the file can not be analyzed.
self.mainWindow.printToConsole("Not a valid file to analyze.")
else:
if ((itemToAnalyze.analyzer is None or
self.enumCommand == 0)):
try:
# Create a new analyzer every time the user wants
# to generate the basis paths.
itemToAnalyze.makeAnalyzer()
                except Exception:
self.mainWindow.printToConsole("Not a valid file "
"to analyze.")
itemToAnalyze.setAnalyze(False)
return self.callEnumCommand(itemToAnalyze)
def callEnumCommand(self, itemToAnalyze):
"""After an Analyzer object has been created for a given
itemToAnalyze, create the derived files using the generating
function specified by enumCommand.
Arguments:
itemToAnalyze:
FileItem object for which the Path-derived files are
being created.
"""
paths = None
if itemToAnalyze.analyzer is None:
itemToAnalyze.makeAnalyzer()
if self.enumCommand == 0:
# currentName = itemToAnalyze.getFileName()
# if currentName[-11:] == "-gtTEMP.xml":
# itemToAnalyze.fileName = "%s.xml" % currentName[:-11]
# self.writeLabels(itemToAnalyze)
# Add inlining/including/unroll XML updates here.
paths = itemToAnalyze.analyzer.generateBasisPaths()
# if currentName[-11:] == "-gtTEMP.xml":
# itemToAnalyze.fileName = "%s.xml" % currentName[:-11]
elif self.enumCommand == 1:
paths = itemToAnalyze.analyzer.generatePaths(
self.numPaths, PathType.BEST_CASE, None, self.useObExtraction
)
elif self.enumCommand == 2:
paths = itemToAnalyze.analyzer.generatePaths(
self.numPaths, PathType.WORST_CASE, None, self.useObExtraction
)
elif self.enumCommand == 3:
paths = itemToAnalyze.analyzer.generatePaths(
self.numPaths, PathType.RANDOM
)
elif self.enumCommand == 4:
paths = itemToAnalyze.analyzer.generatePaths(
self.numPaths, PathType.ALL_DECREASING, None,
self.useObExtraction
)
elif self.enumCommand == 5:
paths = itemToAnalyze.analyzer.generatePaths(
self.numPaths, PathType.ALL_INCREASING, None,
self.useObExtraction
)
else:
return
# The rest is handled in WorkerThread in gui.py.
return paths
class Highlighter(object):
"""
    Wrapper class that calls GenericDialog with enumCommand 3.
    This wrapper is needed because all callers of GenericDialog must have
    a mainWindow attribute; since the caller of GenericDialog in the
    highlighting case is the mainWindow itself, a wrapper class must be
    created.
"""
def __init__(self, mainWindow):
self.mainWindow = mainWindow
def exec_(self):
self.mainWindow.rightTextEdit.displayHighlights()
class Saver(object):
def __init__(self, mainWindow, fileItemToSave):
self.mainWindow = mainWindow
self.fileItemToSave = fileItemToSave
def exec_(self):
projectConfig = self.fileItemToSave.projectConfig
fileDialog = QtGui.QFileDialog()
choice, _ = fileDialog.getSaveFileName(
self.mainWindow,
"Save GameTime GUI state to...",
projectConfig.locationOrigDir,
"GameTime GUI state files (*.gtg)"
)
if not choice:
self.mainWindow.printToConsole("No file was selected.")
return False
with bz2.BZ2File(choice, "w") as fileHandler:
# Save the fileList and mainWindow, which are not serializable,
# to be restored later.
tempWin = self.fileItemToSave.mainWindow
tempList = self.fileItemToSave.fileList
tempIndex = self.fileItemToSave.displayIndex
self.fileItemToSave.mainWindow = None
self.fileItemToSave.fileList = None
self.fileItemToSave.displayIndex = None
childIndexes = []
for child in self.fileItemToSave.children:
childIndexes.append(child.displayIndex)
child.displayIndex = None
child.mainWindow = None
child.fileList = None
# Dump the FileItem.
pickle.dump(self.fileItemToSave, fileHandler)
originalName = self.fileItemToSave.originalName
self.mainWindow.printToConsole(
"Current state of the analysis of the file "
"located at %s was saved to %s." % (originalName, choice)
)
# Restore the mainWindow and fileList.
self.fileItemToSave.mainWindow = tempWin
self.fileItemToSave.fileList = tempList
self.fileItemToSave.displayIndex = tempIndex
for i, child in enumerate(self.fileItemToSave.children):
child.displayIndex = childIndexes[i]
child.mainWindow = tempWin
child.fileList = tempList
return True
class Loader(object):
def __init__(self, mainWindow):
self.mainWindow = mainWindow
def exec_(self):
fileDialog = QtGui.QFileDialog()
fileDialog.setFileMode(QtGui.QFileDialog.ExistingFile)
choice, _ = fileDialog.getOpenFileName(
self.mainWindow,
"Load GameTime GUI state from...",
".",
"GameTime GUI state files (*.gtg)"
)
if not choice:
self.mainWindow.printToConsole("No file was selected.")
return False
with bz2.BZ2File(choice, "r") as fileHandler:
try:
fileItemToLoad = pickle.load(fileHandler)
except Exception:
self.mainWindow.printToConsole("Invalid state file: %s" %
choice)
return False
else:
fileSelect = self.mainWindow.fileSelectWidget.widget()
toLoadDisplayName = fileItemToLoad.displayName
if toLoadDisplayName in self.mainWindow.openItems:
                    if not ConfirmationDialog(
                        "There is currently an analysis for the file "
                        "named %s loaded in the GUI.\nDo you want to "
                        "overwrite that analysis?" % toLoadDisplayName
                    ).exec_():
return False
else:
fileSelect.removeGroup(
self.mainWindow.openItems[toLoadDisplayName]
)
fileItemToLoad.mainWindow = self.mainWindow
fileItemToLoad.fileList = fileSelect
fileItemToLoad.addToMainWindow()
self.mainWindow.addToWindow(fileItemToLoad, Window.LEFT)
for child in fileItemToLoad.children:
child.mainWindow = self.mainWindow
child.fileList = fileItemToLoad.fileList
child.addToMainWindow()
self.mainWindow.addToWindow(child, Window.RIGHT)
self.mainWindow.printToConsole(
"State of the analysis of the file "
"located at %s was loaded from %s." %
(fileItemToLoad.originalName, choice)
)
return True
class ConfirmationDialog(QtGui.QDialog):
def __init__(self, text):
QtGui.QDialog.__init__(self)
self.setWindowTitle("Confirmation")
# self.resize(300, 200)
self.layout = QtGui.QGridLayout(self)
self.layout.setSpacing(5)
confirmText = QtGui.QLabel(text)
self.layout.addWidget(confirmText, 0, 0)
buttonBox = QtGui.QDialogButtonBox(Qt.Horizontal, self)
buttonBox.setStandardButtons(
QtGui.QDialogButtonBox.Cancel | QtGui.QDialogButtonBox.Ok
)
buttonBox.accepted.connect(self.accept)
buttonBox.rejected.connect(self.reject)
self.layout.addWidget(buttonBox, 1, 0)
class MessageDialog(QtGui.QDialog):
def __init__(self, text, title="Message"):
super(MessageDialog, self).__init__()
self.setWindowTitle(title)
# self.resize(300, 200)
self.layout = QtGui.QGridLayout(self)
self.layout.setSpacing(5)
confirmText = QtGui.QLabel(text)
self.layout.addWidget(confirmText, 0, 0, 1, 2)
buttonBox = QtGui.QDialogButtonBox(Qt.Horizontal, self)
buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Ok)
buttonBox.accepted.connect(self.accept)
self.layout.addWidget(buttonBox, 1, 0)
class BasisMessageDialog(MessageDialog):
def __init__(self, text, mainWindow, title="Message"):
self.mainWindow = mainWindow
super(BasisMessageDialog, self).__init__(text, title)
enterValuesButton = QtGui.QPushButton("Enter basis values...")
enterValuesButton.clicked.connect(self.basisValues)
self.layout.addWidget(enterValuesButton, 1, 1)
def basisValues(self):
self.accept()
self.mainWindow.slotBasisValuesDialog()
class ExceptionMessageBox(QtGui.QMessageBox):
def __init__(self, message, detailedMessage):
super(ExceptionMessageBox, self).__init__(
QtGui.QMessageBox.Critical,
unicode("Error"),
unicode("An exception has occurred.")
)
self.setInformativeText(message)
self.setDetailedText(detailedMessage)
```
#### File: src/gui/gui.py
```python
the source distribution and
at http://verifun.eecs.berkeley.edu/gametime/about/LICENSE,
for details on the GameTime license and authors.
"""
import os
import subprocess
import sys
import threading
import webbrowser
from PySide import QtCore
from PySide import QtGui
from PySide.QtCore import Qt
from PySide.QtCore import Signal
import gametime.gui.guiHelper
from gametime.gui.guiHelper import BasisValuesDialog
from gametime.gui.guiHelper import LoopBoundsDialog
from gametime.gui.guiHelper import ConfirmationDialog
from gametime.gui.guiHelper import MessageDialog
from gametime.gui.guiHelper import BasisMessageDialog
from gametime.gui.guiHelper import BasisGenerationDialog
from gametime.gui.guiHelper import ExceptionMessageBox
from gametime.gui.guiHelper import HistogramDialog
from gametime.gui.guiHelper import FileItem
from gametime.gui.guiHelper import FileSelectList
from gametime.gui.guiHelper import GenericAnalyzer
from gametime.gui.guiHelper import Highlighter
from gametime.gui.guiHelper import Loader
from gametime.gui.guiHelper import NumPathsDialog
from gametime.gui.guiHelper import AllPathsDialog
from gametime.gui.guiHelper import Saver
from gametime.gui.guiHelper import TextEditObject
from gametime.gui.guiHelper import Window
from gametime.gui.guiHelper import XmlFileDialog
from gametime.projectConfiguration import ProjectConfiguration
from gametime.projectConfiguration import readProjectConfigFile
from gametime.updateChecker import isUpdateAvailable
class GameTimeGui(QtGui.QMainWindow):
"""
The GUI main window. Inherits QtGui.QMainWindow. Maintains any actions
dealing with the menubar, console, and adding/deleting widgets.
"""
def __init__(self):
"""
Initializes the main window and creates all the necessary widgets in
the main window: a menubar, a file selection sidebar, a status console,
two text displays, and a status bar.
A quick note: All main windows have a centralWidget, a menuBar
(accessible through self.menuBar()), and optional dockWidgets.
DockWidgets must be created as dockWidgets with empty content, and
widgets like QtGui.QTextEdit and QListView are substituted in for
content.
"""
super(GameTimeGui, self).__init__()
self.setWindowTitle("GameTime")
self.showFullScreen()
self.showMaximized()
self.tempFiles = set([])
self.guiThread = threading.currentThread()
sys.excepthook = self.handleException
### PARAMETERS ###
#: Left and right text display widgets, respectively.
self.leftTextEdit = None
self.rightTextEdit = None
#: File select widget.
self.fileSelectWidget = None
#: Console dock widget.
self.consoleWidget = None
#: Queue of functions to analyze. This allows slots to run
#: prerequisite slots while still allowing the user to have
#: control over the GUI.
self.funcQueue = []
#: Cache of all currently open FileItem objects. These will all be
#: displayed in the leftmost display.
#: Key: {string} Name of a unique file.
#: Value: {FileItem} FileItem object that corresponds to a unique file.
self.openItems = {}
#: List of actions that should be disabled while analysis is running.
self.analysisActions = []
#: List of actions that can be performed if overcomplete basis was
#: generated
self.overcompleteSupportedActions = []
#: Specifies whether an overcomplete basis has been generated, so that
#: we show only menu items that can handle overcomplete basis
self.generatedOvercompleteBasis = False
### SETUP ###
#: Set up text displays.
self.setupCenterLayout()
#: Set up file select widget, console widget and menubar widget.
self.setupFileSelect()
self.setupConsole()
self.setupMenubar()
#: Show status bar. In this case, the status bar serves more as an
#: informational bar about certain actions, such as menubar action
#: descriptions.
self.statusBar().showMessage("Ready")
#: Toggle for highlights (default on).
self.highlightsEnabled = True
self.analysisThread = WorkerThread(self, None)
threadSignals = self.analysisThread.signals
threadSignals.doneAnalyzing.connect(self.slotFinishAnalysis)
threadSignals.showLoopDialog.connect(self.slotShowLoopDialog)
threadSignals.printToConsole.connect(self.printToConsole)
threadSignals.showException.connect(self.showException)
self.showExceptionSignal = threadSignals.showException
self._checkForAvailableUpdates()
def handleException(self, eType, eInstance, tb):
import traceback
className = "%s: " % eType.__name__
message = eInstance.message
stackTrace = "".join(traceback.format_tb(tb))
detailedTrace = "%s%s%s" % (stackTrace, className, message)
if self.guiThread == threading.currentThread():
self.showException(message, detailedTrace)
else:
self.showExceptionSignal.emit(message, detailedTrace)
def showException(self, message, detailedTrace):
ExceptionMessageBox(message,
("Traceback (most recent call last):\n%s" %
detailedTrace)).exec_()
def setupCenterLayout(self):
"""
The centralWidget here is actually a generic Widget with
a QtGui.QHBoxLayout (horizontal layout) of two TextEditObjects
placed side-by-side. Documentation and code for TextEditObject
is available in guiHelper.py.
Initializes leftTextEdit and rightTextEdit.
"""
centerWindow = QtGui.QWidget()
centerLayout = QtGui.QHBoxLayout()
self.leftTextEdit = TextEditObject("")
self.leftTextEdit.setMainWindow(self)
self.leftTextEdit.setLineWrapMode(QtGui.QTextEdit.NoWrap)
self.rightTextEdit = TextEditObject("")
self.rightTextEdit.setMainWindow(self)
self.rightTextEdit.setLineWrapMode(QtGui.QTextEdit.NoWrap)
# Splitter allows for windows to be resized.
centerSplitter = QtGui.QSplitter()
centerSplitter.addWidget(self.leftTextEdit)
centerSplitter.addWidget(self.rightTextEdit)
centerSplitter.setChildrenCollapsible(False)
centerLayout.addWidget(centerSplitter)
centerWindow.setLayout(centerLayout)
centerWindow.show()
self.setCentralWidget(centerWindow)
def setupFileSelect(self):
"""
Sets up fileSelectWidget, the leftmost dockWidget.
This widget displays all the files that have been opened or created
druing execution, such as those opened by the user or created
through GameTime-related actions. Documentation and code
for FileSelectList is available in guiHelper.py.
"""
self.fileSelectWidget = QtGui.QDockWidget("Currently open")
fileSelect = FileSelectList(self)
self.fileSelectWidget.setWidget(fileSelect)
self.addDockWidget(Qt.DockWidgetArea(Qt.LeftDockWidgetArea),
self.fileSelectWidget)
def setupConsole(self):
"""
Set up consoleWidget, the dock widget at the bottom of the main window.
consoleWidget contains a read-only console, which displays errors,
file analysis statuses, and so on. Creates self.consoleWidget.
"""
self.consoleWidget = QtGui.QDockWidget("Console")
console = QtGui.QTextEdit()
console.setReadOnly(True)
self.consoleWidget.setWidget(console)
self.addDockWidget(Qt.DockWidgetArea(Qt.BottomDockWidgetArea),
self.consoleWidget)
def setupMenubar(self):
"""Creates the menu bar."""
menubar = self.menuBar()
fileMenu = menubar.addMenu("&File")
openAction = QtGui.QAction("Open project...", self)
openAction.setShortcut("Ctrl+O")
openAction.setStatusTip("Open a C file or open an XML file to "
"configure a project for GameTime analysis.")
openAction.triggered.connect(self.slotOpenProjectDialog)
loadAction = QtGui.QAction("Load state...", self)
loadAction.setShortcut("Ctrl+L")
loadAction.setStatusTip("Load the saved state of the GameTime GUI for "
"a GameTime analysis from a previous session.")
loadAction.triggered.connect(self.slotLoadStateDialog)
saveAction = QtGui.QAction("Save state...", self)
saveAction.setShortcut("Ctrl+S")
saveAction.setStatusTip("Save the state of the GameTime GUI for "
"the current GameTime analysis.")
saveAction.triggered.connect(self.slotSaveStateDialog)
resetAction = QtGui.QAction("Reset state", self)
resetAction.setStatusTip("Resets the state of the GameTime GUI for "
"the current GameTime analysis.")
resetAction.triggered.connect(self.slotResetAction)
self.analysisActions.append(resetAction)
self.overcompleteSupportedActions.append(resetAction)
changeConfigAction = QtGui.QAction("Change configuration...", self)
# changeConfigAction.setShortcut("Ctrl+C")
changeConfigAction.setStatusTip("Change the configuration of "
"the current project.")
changeConfigAction.triggered.connect(self.slotChangeConfigDialog)
saveConfigAction = QtGui.QAction("Save configuration...", self)
# saveConfigAction.setShortcut("Ctrl+S")
saveConfigAction.setStatusTip("Save the configuration of the current "
"project to an XML file.")
saveConfigAction.triggered.connect(self.slotSaveConfigDialog)
closeAction = QtGui.QAction("Close project", self)
closeAction.setStatusTip("Close the current project.")
closeAction.triggered.connect(self.slotCloseAction)
self.analysisActions.append(closeAction)
self.overcompleteSupportedActions.append(closeAction)
exitAction = QtGui.QAction("Exit", self)
exitAction.setShortcut("Ctrl+Q")
exitAction.setStatusTip("Exit GameTime.")
exitAction.triggered.connect(QtGui.qApp.quit)
fileMenu.addAction(openAction)
fileMenu.addSeparator()
fileMenu.addAction(loadAction)
fileMenu.addAction(saveAction)
fileMenu.addAction(resetAction)
fileMenu.addSeparator()
fileMenu.addAction(changeConfigAction)
fileMenu.addAction(saveConfigAction)
fileMenu.addSeparator()
fileMenu.addAction(closeAction)
fileMenu.addAction(exitAction)
# runMenu: Generate basis paths and feasible paths.
runMenu = menubar.addMenu("&Run")
basisPathsAction = QtGui.QAction("Generate basis paths", self)
basisPathsAction.setShortcut("Ctrl+B")
# basisPathsAction.setStatusTip()
basisPathsAction.triggered.connect(self.slotFindBasisPaths)
worstCasesAction = QtGui.QAction("Generate worst-case feasible paths",
self)
worstCasesAction.setShortcut("Ctrl+W")
# worstCasesAction.setStatusTip()
worstCasesAction.triggered.connect(self.slotLongestCases)
bestCasesAction = QtGui.QAction("Generate best-case feasible paths",
self)
bestCasesAction.setShortcut("Ctrl+V")
# bestCasesAction.setStatusTip()
bestCasesAction.triggered.connect(self.slotShortestCases)
randomPathsAction = QtGui.QAction("Generate random feasible paths",
self)
randomPathsAction.setShortcut("Ctrl+R")
randomPathsAction.triggered.connect(self.slotFindRandomPaths)
allDecPathsAction = QtGui.QAction("Generate all feasible paths "
"(decreasing order)",
self)
allDecPathsAction.setShortcut("Ctrl+D")
allDecPathsAction.triggered.connect(self.slotAllPathsDec)
allIncPathsAction = QtGui.QAction("Generate all feasible paths "
"(increasing order)",
self)
allIncPathsAction.setShortcut("Ctrl+I")
allIncPathsAction.triggered.connect(self.slotAllPathsInc)
# writeWorstAction = QtGui.QAction("Write worst paths to files", self)
# writeWorstAction.triggered.connect(self.slotWriteWorst)
# writeBestAction = QtGui.QAction("Write best paths to files", self)
# writeBestAction.triggered.connect(self.slotWriteBest)
# writeRandomAction = QtGui.QAction("Write random paths to files", self)
# writeRandomAction.triggered.connect(self.slotWriteRandom)
# writeAllIncAction = QtGui.QAction("Write all feasible paths to files "
# "(increasing order)", self)
# writeAllIncAction.triggered.connect(self.slotWriteAllInc)
# writeAllDecAction = QtGui.QAction("Write all feasible paths to files "
# "(decreasing order)", self)
# writeAllDecAction.triggered.connect(self.slotWriteAllDec)
histogramAction = QtGui.QAction("Generate histogram", self)
histogramAction.triggered.connect(self.slotGenerateHistogram)
self.cancelAction = QtGui.QAction("Cancel current analysis", self)
self.cancelAction.triggered.connect(self.slotCancelAction)
self.analysisActions.append(basisPathsAction)
self.analysisActions.append(worstCasesAction)
self.analysisActions.append(bestCasesAction)
self.analysisActions.append(randomPathsAction)
self.analysisActions.append(allDecPathsAction)
self.analysisActions.append(allIncPathsAction)
self.overcompleteSupportedActions.append(basisPathsAction)
self.overcompleteSupportedActions.append(worstCasesAction)
self.overcompleteSupportedActions.append(bestCasesAction)
self.overcompleteSupportedActions.append(allDecPathsAction)
self.overcompleteSupportedActions.append(allIncPathsAction)
self.overcompleteSupportedActions.append(randomPathsAction)
# self.analysisActions.append(writeWorstAction)
# self.analysisActions.append(writeBestAction)
self.analysisActions.append(histogramAction)
runMenu.addAction(basisPathsAction)
runMenu.addSeparator()
runMenu.addAction(worstCasesAction)
runMenu.addAction(bestCasesAction)
runMenu.addSeparator()
runMenu.addAction(randomPathsAction)
runMenu.addAction(allDecPathsAction)
runMenu.addAction(allIncPathsAction)
runMenu.addSeparator()
runMenu.addAction(histogramAction)
runMenu.addSeparator()
# runMenu.addAction(writeWorstAction)
# runMenu.addAction(writeBestAction)
# runMenu.addSeparator()
runMenu.addAction(self.cancelAction)
editMenu = menubar.addMenu("&Edit")
basisValuesAction = QtGui.QAction("Enter basis values...", self)
basisValuesAction.setStatusTip(
"Manually enter values for the basis paths or import "
"a file that contains the values."
)
basisValuesAction.triggered.connect(self.slotBasisValuesDialog)
# cutAction = QtGui.QAction("Add labels...", self)
# cutAction.setStatusTip("Select a smaller section of the code "
# "to analyze.")
# cutAction.triggered.connect(self.slotCutDialog)
saveBasisValuesAction = QtGui.QAction("Save basis values...", self)
saveBasisValuesAction.setStatusTip("Save the basis values to a file.")
saveBasisValuesAction.triggered.connect(self.slotSaveBasisValues)
self.analysisActions.append(basisValuesAction)
self.overcompleteSupportedActions.append(basisValuesAction)
# self.analysisActions.append(cutAction)
editMenu.addAction(basisValuesAction)
# editMenu.addAction(cutAction)
# editMenu.addAction(saveBasisValuesAction) # TODO: Buggy. This too.
# viewMenu: File Select checkable, Console checkable
viewMenu = menubar.addMenu("&View")
highlightPath = QtGui.QAction("&Highlight path", self)
highlightPath.triggered.connect(self.slotTogglePathHighlight)
highlightPath.setCheckable(True)
highlightPath.setChecked(True)
zoomIn = QtGui.QAction("Increase font size", self)
zoomIn.triggered.connect(self.zoomIn)
zoomIn.setShortcut("Ctrl++")
zoomOut = QtGui.QAction("Decrease font size", self)
zoomOut.triggered.connect(self.zoomOut)
zoomOut.setShortcut("Ctrl+-")
viewMenu.addAction(self.fileSelectWidget.toggleViewAction())
viewMenu.addAction(self.consoleWidget.toggleViewAction())
viewMenu.addAction(highlightPath)
viewMenu.addAction(zoomIn)
viewMenu.addAction(zoomOut)
def _checkForAvailableUpdates(self):
from gametime.defaults import config
updateAvailable, latestVersionInfo = isUpdateAvailable()
if updateAvailable:
version = latestVersionInfo["version"]
infoUrl = latestVersionInfo["info_url"]
updateAvailableMsg = ("An updated version of GameTime (%s) is "
"available. The current version is %s." %
(version, config.VERSION))
updateAvailableMsgBox = QtGui.QMessageBox(
QtGui.QMessageBox.Information,
unicode("Update available"),
updateAvailableMsg
)
updateAvailableInfoMsg = ("Would you like to download and install "
"this version?")
updateAvailableMsgBox.setInformativeText(updateAvailableInfoMsg)
buttons = QtGui.QMessageBox.Ok
buttons |= QtGui.QMessageBox.Cancel
updateAvailableMsgBox.setStandardButtons(buttons)
updateAvailableMsgBox.setDefaultButton(QtGui.QMessageBox.Ok)
choice = updateAvailableMsgBox.exec_()
if choice == QtGui.QMessageBox.Ok:
try:
webbrowser.open(infoUrl)
sys.exit(0)
except Exception:
browserNotOpenMsg = ("Unable to open a web browser "
"to display information about "
"the updated version.")
# TODO (jkotker): Display the Exception information.
browserNotOpenMsgBox = QtGui.QMessageBox(
QtGui.QMessageBox.Warning,
unicode("Unable to open web browser"),
browserNotOpenMsg
)
browserNotOpenMsgBox.setInformativeText(
"Please visit %s to download and install "
"the updated version." % infoUrl
)
buttons = QtGui.QMessageBox.Ok
browserNotOpenMsgBox.setStandardButtons(buttons)
browserNotOpenMsgBox.setDefaultButton(QtGui.QMessageBox.Ok)
choice = browserNotOpenMsgBox.exec_()
elif choice == QtGui.QMessageBox.Cancel:
downloadLaterMsg = "Update not installed."
downloadLaterMsgBox = QtGui.QMessageBox(
QtGui.QMessageBox.Warning,
unicode("Update not installed"),
downloadLaterMsg
)
downloadLaterMsgBox.setInformativeText(
"Please visit %s to download and install "
"an updated version of GameTime." % infoUrl
)
buttons = QtGui.QMessageBox.Ok
downloadLaterMsgBox.setStandardButtons(buttons)
downloadLaterMsgBox.setDefaultButton(QtGui.QMessageBox.Ok)
choice = downloadLaterMsgBox.exec_()
elif not latestVersionInfo:
self.printToConsole("Unable to obtain information about "
"available updates.")
self.printToConsole("Please check %s for the latest version of "
"GameTime and for available updates." %
config.WEBSITE_URL)
else:
self.printToConsole("No updates to GameTime are available.")
### HELPER FUNCTIONS ###
def zoomIn(self):
self.changeFontSize(2)
def zoomOut(self):
self.changeFontSize(-2)
def changeFontSize(self, amount):
widgets = [
QtGui.QApplication,
self.rightTextEdit,
self.leftTextEdit,
self.consoleWidget.widget(),
self.fileSelectWidget.widget()
]
for widget in widgets:
currentFont = widget.font()
currentSize = currentFont.pointSize()
if currentSize + amount > 0:
currentFont.setPointSize(currentSize + amount)
widget.setFont(currentFont)
    def printToConsole(self, message):
        """Prints the provided message to the console and keeps the view
        scrolled to the bottom if it was already at the bottom before
        the message was appended.
        Arguments:
            message:
                Message to print.
        """
        console = self.consoleWidget.widget()
        vertBar = console.verticalScrollBar()
        # Capture the position before append() changes the scroll range;
        # comparing after the append made the scroll a no-op.
        wasAtBottom = vertBar.value() == vertBar.maximum()
        console.append(str(message))
        if wasAtBottom:
            vertBar.setValue(vertBar.maximum())
def reset(self):
"""Resets the GUI to initial conditions. This is used when
a new file is loaded.
"""
self.fileSelectWidget.widget().clear()
self.leftTextEdit.clear()
self.rightTextEdit.clear()
### SLOTS ###
def slotResetAction(self):
fileSelect = self.fileSelectWidget.widget()
currentFileItem = fileSelect.activeLeft.getAnalyzeItem()
for _ in range(len(currentFileItem.children)):
child = currentFileItem.children[0]
currentFileItem.removeChild(child)
fileSelect.removeItem(child)
def slotOpenProjectDialog(self):
"""Creates a QFileDialog and obtains a file name, which it
then passes to openFile.
"""
fileDialog = QtGui.QFileDialog()
fileDialog.setFileMode(QtGui.QFileDialog.ExistingFile)
fileTypes = "XML files (*.xml);;C files (*.c)"
fileName, _ = fileDialog.getOpenFileName(
self,
"Open File",
".",
fileTypes
)
if not fileName:
self.printToConsole("No file was selected to be opened.")
return
self.openFile(fileName)
def openFile(self, fileName):
"""If the file name provided does not belong to a FileItem
that was previously created, make a new FileItem. Then,
set the new/existing FileItem to the left text display
using addToWindow.
"""
self.fileName = fileName
_, fileNameExt = os.path.splitext(fileName)
if fileNameExt in [".c", ".xml"]:
if fileNameExt == ".xml":
projectConfig = readProjectConfigFile(fileName)
else:
projectConfig = ProjectConfiguration(fileName, "", "z3")
xmlDialog = XmlFileDialog(self, projectConfig)
returnValue = xmlDialog.exec_()
if returnValue == 0:
self.printToConsole("GameTime analysis was cancelled "
"for %s." % fileName)
return
else:
self.printToConsole("GameTime project created and configured "
"from %s." % fileName)
projectConfig = xmlDialog.projectConfig
else:
self.printToConsole("Either a C file or a project configuration "
"XML file was expected.")
return
self._loadFromProjectConfig(projectConfig)
def _loadFromProjectConfig(self, projectConfig):
fileItemToAdd = None
locationOrigFile = projectConfig.locationOrigFile
displayName = projectConfig.nameOrigFile
# If fileName is already in fileSelect,
# bypass loading from disk.
if displayName in self.openItems:
fileItemToAdd = self.openItems[displayName]
else:
# If the fileName has not been encountered yet, create a new
# FileItem object and load the proper data from disk.
            try:
                origFileHandler = open(locationOrigFile, "r")
            except EnvironmentError as err:
                errno, strerror = err
                self.printToConsole(
                    "I/O error({0}): {1}.".format(errno, strerror)
                )
                return
            except Exception as err:
                self.printToConsole("Error({0})".format(err))
                return
            # Read only after a successful open; the old `finally` block
            # touched the handler even when open() had failed.
            with origFileHandler:
                fileText = origFileHandler.read()
fileItemToAdd = FileItem(displayName,
locationOrigFile,
fileText,
self)
fileItemToAdd.addToMainWindow()
fileItemToAdd.setProjectConfig(projectConfig)
self.addToWindow(fileItemToAdd, Window.LEFT)
def slotLoadStateDialog(self):
loadSelect = Loader(self)
if not loadSelect.exec_():
self.printToConsole("No new GameTime GUI state was loaded.")
def slotSaveStateDialog(self):
"""Opens a QFileDialog that specifies the name to which
to save the file.
"""
currentFile = self.fileSelectWidget.widget().activeLeft.getAnalyzeItem()
saveSelect = Saver(self, currentFile)
if not saveSelect.exec_():
self.printToConsole("No GameTime GUI state was saved.")
def slotChangeConfigDialog(self):
currentFile = self.fileSelectWidget.widget().activeLeft.getAnalyzeItem()
xmlDialog = XmlFileDialog(self, currentFile.projectConfig)
val = xmlDialog.exec_()
if val == 0:
self.printToConsole("No change made to the configuration of "
"the project for the file located at %s." %
currentFile.originalName)
else:
currentFile.setProjectConfig(xmlDialog.projectConfig)
self.printToConsole("Changes successfully made to "
"the configuration of the project for "
"the file located at %s." %
currentFile.originalName)
def slotSaveConfigDialog(self):
"""Creates a QFileDialog and obtains a file location, where
it then saves an XML file that contains the configuration of
the current project.
"""
projectConfig = self.leftTextEdit.fileItemObject.projectConfig
fileDialog = QtGui.QFileDialog()
fileName, _ = fileDialog.getSaveFileName(
self,
"Save project configuration to...",
projectConfig.locationOrigDir,
"XML files (*.xml)"
)
if not fileName:
self.printToConsole("No file was selected to save "
"the configuration of the current project to.")
return
projectConfig.writeToXmlFile(fileName)
self.printToConsole("Configuration of the current project was "
"saved to %s." % fileName)
def slotCloseAction(self):
"""Closes the file currently in the left text display and
any paths or subsections of it.
"""
fileSelect = self.fileSelectWidget.widget()
fileSelect.removeGroup(fileSelect.activeLeft)
def slotFindBasisPaths(self):
"""Starts the generation of basis paths in a new thread,
if possible. This allows the user to interact with the GUI
while the GameTime analysis is running.
"""
if self.leftTextEdit.fileItemObject is None:
self.printToConsole("There is currently no file for which "
"GameTime can generate basis paths.")
return
itemToAnalyze = self.leftTextEdit.fileItemObject.getAnalyzeItem()
if len(itemToAnalyze.children) > 0:
val = ConfirmationDialog("This will delete all paths currently \n"
"generated for this file. Are you sure \n"
"you want to continue?").exec_()
if val == 0:
return
basisAnalyzer = GenericAnalyzer(0, self)
        basisDialog = BasisGenerationDialog(
            basisAnalyzer,
            itemToAnalyze.projectConfig.MAXIMUM_ERROR_SCALE_FACTOR
        )
        if basisDialog.exec_() == 1:
            itemToAnalyze.projectConfig.OVER_COMPLETE_BASIS = \
                basisAnalyzer.generateOvercompleteBasis
            itemToAnalyze.projectConfig.MAXIMUM_ERROR_SCALE_FACTOR = \
                basisAnalyzer.maximumErrorScaleFactor
            self.generatedOvercompleteBasis = \
                basisAnalyzer.generateOvercompleteBasis
        else:
            return
for _ in range(len(itemToAnalyze.children)):
child = itemToAnalyze.children[0]
itemToAnalyze.removeChild(child)
itemToAnalyze.fileList.removeItem(child)
self.disableAnalysis()
self.analysisThread.setAnalyzer(basisAnalyzer)
self.analysisThread.setItem(itemToAnalyze)
self.analysisThread.setFunc(self.findBasisPathsHelper, [basisAnalyzer])
self.analysisThread.start()
def findBasisPathsHelper(self, basisAnalyzer):
"""Function, which is run in the new thread, that handles running
the analyzer to find basis paths.
"""
return basisAnalyzer.exec_()
def slotFindRandomPaths(self):
"""Begins generation of random feasible paths in a new thread,
if possible. This allows the user to interact with the GUI while
analysis is running. The only files that can have random paths are
valid .c files that have been opened by the user.
"""
if self.leftTextEdit.fileItemObject is None:
self.printToConsole("There is currently no file for which GameTime "
"can generate random feasible paths.")
return
itemToAnalyze = self.leftTextEdit.fileItemObject.getAnalyzeItem()
if itemToAnalyze.numBasisPaths == 0:
self.printToConsole("Basis paths have not been generated for "
"this file. Generating the paths now...")
self.slotFindBasisPaths()
self.funcQueue.append(self.slotFindRandomPaths)
return
randomAnalyzer = GenericAnalyzer(3, self)
if NumPathsDialog(randomAnalyzer, "Random").exec_() == 1:
for _ in range(len(itemToAnalyze.randomPaths)):
path = itemToAnalyze.randomPaths[0]
itemToAnalyze.removeChild(path)
itemToAnalyze.fileList.removeItem(path)
self.disableAnalysis()
self.analysisThread.setAnalyzer(randomAnalyzer)
self.analysisThread.setItem(itemToAnalyze)
self.analysisThread.setFunc(self.findPathsHelper, [randomAnalyzer])
self.analysisThread.start()
def slotAllPathsInc(self):
"""Begins generation of all paths, in order of increasing value,
in a new thread, if possible. This allows the user to interact with
the GUI while analysis is running. The only files that can have paths
are valid .c files that have been opened by the user.
"""
if self.leftTextEdit.fileItemObject is None:
self.printToConsole("There is currently no file for which GameTime "
"can generate all feasible paths in order of "
"increasing value.")
return
itemToAnalyze = self.leftTextEdit.fileItemObject.getAnalyzeItem()
if itemToAnalyze.numBasisPaths == 0:
self.printToConsole("Basis paths have not been generated for "
"this file. Generating them now...")
self.slotFindBasisPaths()
self.funcQueue.append(self.slotAllPathsInc)
return
allPathAnalyzer = GenericAnalyzer(5, self)
if AllPathsDialog(allPathAnalyzer,
itemToAnalyze.projectConfig.OVER_COMPLETE_BASIS).exec_() == 1:
for _ in range(len(itemToAnalyze.allPaths)):
path = itemToAnalyze.allPaths[0]
itemToAnalyze.removeChild(path)
itemToAnalyze.fileList.removeItem(path)
self.disableAnalysis()
self.analysisThread.setAnalyzer(allPathAnalyzer)
self.analysisThread.setItem(itemToAnalyze)
self.analysisThread.setFunc(self.findPathsHelper, [allPathAnalyzer])
self.analysisThread.start()
def slotAllPathsDec(self):
"""Begins generation of all paths, in order of decreasing value,
in a new thread, if possible. This allows the user to interact with
the GUI while analysis is running. The only files that can have paths
are valid .c files that have been opened by the user.
"""
if self.leftTextEdit.fileItemObject is None:
self.printToConsole("There is currently no file for which GameTime "
"can generate all feasible paths in order of "
"decreasing value.")
return
itemToAnalyze = self.leftTextEdit.fileItemObject.getAnalyzeItem()
if itemToAnalyze.numBasisPaths == 0:
self.printToConsole("Basis paths have not been generated for "
"this file. Generating them now...")
self.slotFindBasisPaths()
            self.funcQueue.append(self.slotAllPathsDec)
return
allPathAnalyzer = GenericAnalyzer(4, self)
if AllPathsDialog(allPathAnalyzer,
itemToAnalyze.projectConfig.OVER_COMPLETE_BASIS).exec_() == 1:
for _ in range(len(itemToAnalyze.allPaths)):
path = itemToAnalyze.allPaths[0]
itemToAnalyze.removeChild(path)
itemToAnalyze.fileList.removeItem(path)
self.disableAnalysis()
self.analysisThread.setAnalyzer(allPathAnalyzer)
self.analysisThread.setItem(itemToAnalyze)
self.analysisThread.setFunc(self.findPathsHelper, [allPathAnalyzer])
self.analysisThread.start()
def slotWriteRandom(self):
"""
        Checks if random feasible paths have already been generated for
        the currently active file.
        If they have been, then it just writes them to a file.
        If they have not been, then it prompts the user for how many
        to generate, and generates them before writing them to a file.
"""
currentFile = self.fileSelectWidget.widget().activeLeft.getAnalyzeItem()
if not currentFile:
self.printToConsole("Not a valid file to analyze")
return
if currentFile.getBasisPaths() == []:
self.printToConsole("No basis paths have been generated "
"yet. Generating them now...")
self.slotFindBasisPaths()
self.funcQueue.append(self.slotWriteRandom)
return
if currentFile.getRandomPaths() == []:
self.printToConsole("No random feasible paths have been generated "
"yet. Generating them now...")
self.slotFindRandomPaths()
self.funcQueue.append(self.slotWriteRandom)
return
fileDialog = QtGui.QFileDialog()
        fileNameChoice, _ = fileDialog.getSaveFileName(
self,
"Save File",
"."
)
pathText = ""
for pathItem in currentFile.getRandomPaths():
path = pathItem.getHighlightPath()
for var, val in path.assignments.items():
pathText += "%s=%s," % (var, val)
pathText += "%s\n" % path.value
if not fileNameChoice:
self.printToConsole("No file was selected to save values to.")
return
fileWriter = open(fileNameChoice, "w")
fileWriter.write(pathText)
fileWriter.close()
self.printToConsole("Random paths saved to %s" % fileNameChoice)
def slotWriteAllInc(self):
"""
        Checks if all feasible paths, in order of increasing value, have
        already been generated for the currently active file.
        If they have been, then it just writes them to a file.
        If they have not been, then it generates them before writing them
        to a file.
"""
currentFile = self.fileSelectWidget.widget().activeLeft.getAnalyzeItem()
if not currentFile:
self.printToConsole("Not a valid file to analyze.")
return
if currentFile.getBasisPaths() == []:
self.printToConsole("No basis paths have been generated "
"yet. Generating them now...")
self.slotFindBasisPaths()
self.funcQueue.append(self.slotWriteAllInc)
return
if currentFile.getAllPaths() == []:
self.printToConsole("All feasible paths have not been generated "
"yet. Generating them now...")
            self.slotAllPathsInc()
            self.funcQueue.append(self.slotWriteAllInc)
return
fileDialog = QtGui.QFileDialog()
        fileNameChoice, _ = fileDialog.getSaveFileName(
self,
"Save File",
"."
)
pathText = ""
for pathItem in currentFile.getAllPaths():
path = pathItem.getHighlightPath()
for var, val in path.assignments.items():
pathText += "%s=%s," % (var, val)
pathText += "%s\n" % path.value
if not fileNameChoice:
self.printToConsole("No file was selected to save values to.")
return
fileWriter = open(fileNameChoice, "w")
fileWriter.write(pathText)
fileWriter.close()
self.printToConsole("All paths saved to %s" %fileNameChoice)
def slotWriteAllDec(self):
"""
        Checks if all feasible paths, in order of decreasing value, have
        already been generated for the currently active file.
        If they have been, then it just writes them to a file.
        If they have not been, then it generates them before writing them
        to a file.
"""
currentFile = self.fileSelectWidget.widget().activeLeft.getAnalyzeItem()
if not currentFile:
self.printToConsole("Not a valid file to analyze.")
return
if currentFile.getBasisPaths() == []:
self.printToConsole("No basis paths have been generated "
"yet. Generating them now...")
self.slotFindBasisPaths()
self.funcQueue.append(self.slotWriteAllDec)
return
if currentFile.getAllPaths() == []:
self.printToConsole("All feasible paths, in order of decreasing "
"value, have not been generated yet. "
"Generating them now...")
self.slotAllPathsDec()
self.funcQueue.append(self.slotWriteAllDec)
return
fileDialog = QtGui.QFileDialog()
        fileNameChoice, _ = fileDialog.getSaveFileName(
self,
"Save File",
"."
)
pathText = ""
for pathItem in currentFile.getAllPaths():
path = pathItem.getHighlightPath()
for var, val in path.assignments.items():
pathText += "%s=%s," % (var, val)
pathText += "%s\n" % path.value
if not fileNameChoice:
self.printToConsole("No file was selected to save values to.")
return
fileWriter = open(fileNameChoice, "w")
fileWriter.write(pathText)
fileWriter.close()
self.printToConsole("All feasible paths saved to %s." % fileNameChoice)
def findPathsHelper(self, analyzer):
"""Function, which is run in the new thread, that handles running
the analyzer to find paths.
"""
return analyzer.exec_()
def slotCancelAction(self):
if self.analysisThread.isRunning():
if self.analysisThread.analyzer.enumCommand == 0:
self.analysisThread.itemToAnalyze.numBasisPaths = 0
self.analysisThread.terminate()
self.printToConsole("Current analysis has been cancelled.")
else:
self.printToConsole("There was no analysis to cancel.")
self.enableAnalysis()
self.funcQueue = []
def slotWriteBest(self):
"""Checks if the best case feasible paths have already been
generated for the currently active file. If they have been generated,
then it just writes them to files. If they have not been
generated, then it prompts the user how many they would
like to generate and generates them before writing them to files.
"""
currentFile = self.fileSelectWidget.widget().activeLeft.getAnalyzeItem()
if not currentFile:
self.printToConsole("Not a valid file to analyze.")
return
if currentFile.getBasisPaths() == []:
self.printToConsole("No basis paths have been generated "
"yet. Generating them now...")
self.slotFindBasisPaths()
self.funcQueue.append(self.slotWriteBest)
return
if currentFile.getBestPaths() == []:
self.printToConsole("No best-case feasible paths have been "
"generated yet. Generating them now...")
self.slotShortestCases()
self.funcQueue.append(self.slotWriteBest)
return
# bestPaths = [path.getHighlightPath() for path
# in currentFile.getBestPaths()]
        # Open the file dialog to choose a new file to save to.
        # Write the paths out in CSV form: the assignments, then the value.
fileDialog = QtGui.QFileDialog()
        fileNameChoice, _ = fileDialog.getSaveFileName(
self,
"Save File",
"."
)
pathText = ""
for pathItem in currentFile.getBestPaths():
path = pathItem.getHighlightPath()
for var, val in path.assignments.items():
pathText += "%s=%s," % (var, val)
pathText += "%s\n" % path.value
if not fileNameChoice:
self.printToConsole("No file was selected to save values to.")
return
fileWriter = open(fileNameChoice, "w")
fileWriter.write(pathText)
fileWriter.close()
self.printToConsole("Best paths saved to %s" % fileNameChoice)
def slotWriteWorst(self):
"""
        Checks if the worst-case feasible paths have already been generated
        for the currently active file.
        If they have been, then it just writes them to a file.
        If they have not been, then it prompts the user for how many
        to generate, and generates them before writing them to a file.
"""
currentFile = self.fileSelectWidget.widget().activeLeft.getAnalyzeItem()
if not currentFile:
self.printToConsole("Not a valid file to analyze.")
return
if currentFile.getBasisPaths() == []:
self.printToConsole("No basis paths have been generated "
"yet. Generating them now...")
self.slotFindBasisPaths()
self.funcQueue.append(self.slotWriteWorst)
return
if currentFile.getWorstPaths() == []:
self.printToConsole("No worst-case feasible paths have been "
"generated yet. Generating them now...")
self.slotLongestCases()
self.funcQueue.append(self.slotWriteWorst)
return
fileDialog = QtGui.QFileDialog()
        fileNameChoice, _ = fileDialog.getSaveFileName(
self,
"Save File",
"."
)
pathText = ""
for pathItem in currentFile.getWorstPaths():
path = pathItem.getHighlightPath()
for var, val in path.assignments.items():
pathText += "%s=%s," % (var, val)
pathText += "%s\n" % path.value
if not fileNameChoice:
self.printToConsole("No file was selected to save values to.")
return
fileWriter = open(fileNameChoice, "w")
fileWriter.write(pathText)
fileWriter.close()
self.printToConsole("Worst paths saved to %s" %fileNameChoice)
def slotBasisValuesDialog(self):
"""
Brings up a dialog that allows the user to either manually
        enter values for each basis path or select a properly
        formatted file (-pathNum- -pathValue-\n) that contains the values.
"""
currentFile = self.fileSelectWidget.widget().activeLeft.getAnalyzeItem()
if not currentFile:
self.printToConsole("No file loaded to add basis values for.")
return
elif currentFile.numBasisPaths == 0:
self.printToConsole("No basis paths have been generated yet. "
"Generating them now.")
self.slotFindBasisPaths()
# self.funcQueue.append(self.slotBasisValuesDialog)
return
value = BasisValuesDialog(self).exec_()
if value == 1:
self.printToConsole("Basis values entered.")
else:
self.printToConsole("No new basis values entered.")
def slotSaveBasisValues(self):
fileDialog = QtGui.QFileDialog()
fileName, _ = fileDialog.getSaveFileName(self, "Save File", ".")
# Check that values have been entered
# if not valuesEntered:
# self.printToConsole("No values to save")
# return
if not fileName:
self.printToConsole("No file was selected to save values to.")
return
# Write values in correct format.
currentFile = self.fileSelectWidget.widget().activeLeft.getAnalyzeItem()
basisValues = currentFile.basisValues
self.saveBasisValues(basisValues, fileName)
def saveBasisValues(self, values, fileName):
analyzer = self.analysisThread.analyzer
for pathNum, basisPath in enumerate(analyzer.basisPaths):
basisPath.value = values[pathNum]
analyzer.writeBasisValuesToFile(fileName)
self.printToConsole("Basis Values saved")
def addToWindow(self, fileItemToAdd, window):
"""
Add FileItem provided to left or right window, depending on which
side is to be loaded next.
Arguments:
fileItemToAdd:
FileItem to add to the window.
window:
Window to add the file item to.
"""
# If fileItemToAdd was not previously displayed, print that
# it was loaded. Otherwise, print nothing.
if self.fileSelectWidget.widget().addFileName(fileItemToAdd, window):
if not fileItemToAdd.getParent():
self.printToConsole("Loaded %s." % fileItemToAdd.displayName)
def slotShortestCases(self):
"""
The action associated with accessing the menu bar item Run->Find Best
Paths. Begins generation of best paths in a new thread if possible.
This allows the user to interact with the GUI while analysis
is running. The only files that can have best paths are valid .c files
that have been opened by the user.
"""
if self.leftTextEdit.fileItemObject is None:
self.printToConsole("There is currently no file for which GameTime "
"can generate best-case feasible paths.")
return
itemToAnalyze = self.leftTextEdit.fileItemObject.getAnalyzeItem()
if itemToAnalyze.numBasisPaths == 0:
self.printToConsole("Basis paths have not been generated "
"for this file. Generating them now...")
self.slotFindBasisPaths()
self.funcQueue.append(self.slotShortestCases)
return
shortAnalyzer = GenericAnalyzer(1, self)
if NumPathsDialog(shortAnalyzer, "Best",
itemToAnalyze.projectConfig.OVER_COMPLETE_BASIS).exec_() == 1:
for _ in range(len(itemToAnalyze.bestPaths)):
path = itemToAnalyze.bestPaths[0]
itemToAnalyze.removeChild(path)
itemToAnalyze.fileList.removeItem(path)
self.disableAnalysis()
self.analysisThread.setAnalyzer(shortAnalyzer)
self.analysisThread.setItem(itemToAnalyze)
self.analysisThread.setFunc(self.bestCasesHelper,
[shortAnalyzer])
self.analysisThread.start()
def bestCasesHelper(self, shortAnalyzer):
"""
This is the function that is run in the new thread that handles running
the analyzer to find best paths.
"""
return shortAnalyzer.exec_()
def slotLongestCases(self):
"""
The action associated with accessing the menu bar item Run->Find Worst
Paths. Begins generation of worst paths in a new thread if possible.
This allows the user to interact with the GUI while analysis
is running. The only files that can have worst paths are valid .c files
that have been opened by the user.
"""
if self.leftTextEdit.fileItemObject is None:
self.printToConsole("There is currently no file for which GameTime "
"can generate worst-case feasible paths.")
return
itemToAnalyze = self.leftTextEdit.fileItemObject.getAnalyzeItem()
if itemToAnalyze.numBasisPaths == 0:
self.printToConsole("Basis paths have not been generated for "
"this file. Generating them now...")
self.slotFindBasisPaths()
self.funcQueue.append(self.slotLongestCases)
return
longAnalyzer = GenericAnalyzer(2, self)
if NumPathsDialog(longAnalyzer, "Worst",
itemToAnalyze.projectConfig.OVER_COMPLETE_BASIS).exec_() == 1:
for _ in range(len(itemToAnalyze.worstPaths)):
path = itemToAnalyze.worstPaths[0]
itemToAnalyze.removeChild(path)
itemToAnalyze.fileList.removeItem(path)
self.disableAnalysis()
self.analysisThread.setAnalyzer(longAnalyzer)
self.analysisThread.setItem(itemToAnalyze)
self.analysisThread.setFunc(self.worstCasesHelper,
[longAnalyzer])
self.analysisThread.start()
def worstCasesHelper(self, longAnalyzer):
"""
This is the function that is run in the new thread that handles running
the analyzer to find worst paths.
"""
return longAnalyzer.exec_()
def slotGenerateHistogram(self):
return HistogramDialog(self).exec_()
def slotTogglePathHighlight(self):
"""
The action associated with accessing the menu bar item View->Show
Highlights. Highlights a displayed file, if possible.
The only files that can be highlighted are those generated from
executing GameTime; these are files that are derivatives of a
parent file and that have Path objects associated with them.
"""
self.highlightsEnabled = not self.highlightsEnabled
fileSelect = self.fileSelectWidget.widget()
if fileSelect.activeRight is not None:
self.slotShowHighlights()
def slotShowHighlights(self):
"""Highlight the currently selected path."""
highlighter = Highlighter(self)
return highlighter.exec_()
def closeEvent(self, e):
while self.tempFiles != set([]):
temp = self.tempFiles.pop()
os.remove(temp)
super(GameTimeGui, self).closeEvent(e)
def slotUpdateGui(self, path):
path.addToMainWindow()
self.addToWindow(path, Window.RIGHT)
def slotFinishAnalysis(self):
self.enableAnalysis()
if len(self.funcQueue) > 0:
nextFunc = self.funcQueue.pop()
nextFunc()
def slotProgress(self, currPathNum):
self.printToConsole("Path %i has been computed" % currPathNum)
def disableAnalysis(self):
self.cancelAction.setDisabled(False)
print "Disable analysis"
for action in self.analysisActions:
action.setDisabled(True)
def enableAnalysis(self):
self.cancelAction.setDisabled(True)
print "enableAnalysis"
for action in self.analysisActions:
if self.generatedOvercompleteBasis and \
not (action in self.overcompleteSupportedActions): continue
action.setDisabled(False)
def slotShowLoopDialog(self):
if ConfirmationDialog("Loops in the code have been detected. "
"To analyze the code these loops must be "
"unrolled. Please specify bounds for each "
"loop.").exec_():
loopDialog = LoopBoundsDialog(self)
if loopDialog.exec_() == 0:
self.enableAnalysis()
self.funcQueue = []
self.printToConsole("Current analysis was cancelled.")
else:
self.analysisThread.start()
else:
self.enableAnalysis()
self.funcQueue = []
self.printToConsole("Current analysis was cancelled.")
def showMessageDialog(self, message, basis=False, title="Message"):
if basis:
BasisMessageDialog(message, self, title).exec_()
else:
MessageDialog(message, title).exec_()
class WorkerThreadSignals(QtCore.QObject):
updateGui = Signal(FileItem)
doneAnalyzing = Signal()
showLoopDialog = Signal()
showMessage = Signal(str, bool)
printToConsole = Signal(str)
showException = Signal(str, str)
class WorkerThread(QtCore.QThread):
def __init__(self, gui, func, args=None):
super(WorkerThread, self).__init__()
self.gui = gui
self.func = func
self.args = args
self.signals = WorkerThreadSignals()
        self.signals.showMessage.connect(self.gui.showMessageDialog)
        # Connect once here: connecting inside run() for every emitted item
        # created duplicate signal-slot connections and repeated GUI updates.
        self.signals.updateGui.connect(self.gui.slotUpdateGui)
def run(self):
if self.analyzer.enumCommand == 0:
self.signals.printToConsole.emit("Generating basis paths...")
elif self.analyzer.enumCommand == 1:
self.signals.printToConsole.emit(
"Generating %d best-case feasible path%s..." %
(
self.analyzer.numPaths,
"s" if self.analyzer.numPaths > 1 else ""
)
)
elif self.analyzer.enumCommand == 2:
self.signals.printToConsole.emit(
"Generating %d worst-case feasible path%s..." %
(
self.analyzer.numPaths,
"s" if self.analyzer.numPaths > 1 else ""
)
)
elif self.analyzer.enumCommand == 3:
self.signals.printToConsole.emit(
"Generating %d random feasible path%s..." %
(
self.analyzer.numPaths,
"s" if self.analyzer.numPaths > 1 else ""
)
)
elif self.analyzer.enumCommand == 4:
self.signals.printToConsole.emit(
"Generating all feasible paths in decreasing order of value..."
)
elif self.analyzer.enumCommand == 5:
self.signals.printToConsole.emit(
"Generating all feasible paths in increasing order of value..."
)
paths = self.func(*self.args)
        # paths may be None when the analysis fails or is interrupted.
        numPaths = len(paths) if paths else 0
if numPaths > 0 or self.analyzer.enumCommand == 0:
if paths == []:
self.signals.printToConsole.emit(
"Loops were detected in the code."
)
self.signals.showLoopDialog.emit()
return
elif not paths:
paths = []
else:
itemToAnalyze = self.itemToAnalyze
if itemToAnalyze.preprocessedFileItem is None:
projectConfig = itemToAnalyze.projectConfig
preprocessedFile = projectConfig.locationTempFile
fileText = ""
with open(preprocessedFile) as preprocessedReader:
fileText = preprocessedReader.read()
preprocessedFileItem = FileItem(
" (Preprocessed)",
preprocessedFile,
fileText,
self.analyzer.mainWindow,
assign=True
)
preprocessedFileItem.originalName = \
itemToAnalyze.origLocation
preprocessedFileItem.setParent(self.itemToAnalyze)
preprocessedFileItem.setAnalyze(False)
itemToAnalyze.setPreprocessedFileItem(
preprocessedFileItem
)
                    self.signals.updateGui.emit(preprocessedFileItem)
text = ""
numPaths = self.analyzer.numPaths
isBasis = False
if self.analyzer.enumCommand == 0:
text = "Basis paths have been generated."
isBasis = True
elif self.analyzer.enumCommand == 1:
text = ("%d best-case feasible path%s been generated." %
(numPaths, "s have" if numPaths > 1 else " has"))
elif self.analyzer.enumCommand == 2:
text = ("%d worst-case feasible path%s been generated." %
(numPaths, "s have" if numPaths > 1 else " has"))
elif self.analyzer.enumCommand == 3:
text = ("%d random feasible path%s been generated." %
(numPaths, "s have" if numPaths > 1 else " has"))
elif self.analyzer.enumCommand == 4:
text = ("All feasible paths have been generated in "
"decreasing order of value.")
elif self.analyzer.enumCommand == 5:
text = ("All feasible paths have been generated in "
"increasing order of value.")
self.signals.printToConsole.emit(text)
self.signals.showMessage.emit(text, isBasis)
caseNumber = 1
toWrite = []
if self.analyzer.enumCommand == 0:
self.itemToAnalyze.numBasisPaths = 0
pathList = self.itemToAnalyze.basisPaths
label = "+ Basis Path "
elif self.analyzer.enumCommand == 1:
pathList = self.itemToAnalyze.bestPaths
label = "+ Best Path "
elif self.analyzer.enumCommand == 2:
pathList = self.itemToAnalyze.worstPaths
label = "+ Worst Path "
elif self.analyzer.enumCommand == 3:
pathList = self.itemToAnalyze.randomPaths
label = "+ Random Path "
elif self.analyzer.enumCommand == 4 or self.analyzer.enumCommand == 5:
pathList = self.itemToAnalyze.allPaths
label = "+ Path "
for path in paths:
if ((caseNumber > self.analyzer.numPaths and
self.analyzer.enumCommand in [1, 2, 3])):
break
if self.analyzer.enumCommand == 0:
self.itemToAnalyze.numBasisPaths += 1
toWrite.append(path)
caseData = ("Assignments:\n%s\n\nPredicted Value:\n%s\n\n"
"Measured Value:\n%s" %
(path.getAssignments(),
path.getPredictedValue(),
path.getMeasuredValue()))
preprocessedFileItem = self.itemToAnalyze.preprocessedFileItem
pathItem = FileItem(
"%s%d" % (label, caseNumber),
preprocessedFileItem.origLocation,
caseData,
self.analyzer.mainWindow,
assign=True
)
pathItem.originalName = self.itemToAnalyze.origLocation
pathItem.setParent(self.itemToAnalyze)
pathList.append(pathItem)
pathItem.setHighlightPath(path)
pathItem.setAnalyze(False)
# self.signals.progress.emit(caseNumber)
caseNumber += 1
            self.signals.updateGui.emit(pathItem)
# if self.analyzer.enumCommand == 0:
# self.itemToAnalyze.analyzer.writeBasisPathsToFiles(toWrite)
self.signals.doneAnalyzing.emit()
def setFunc(self, func, args=None):
self.func = func
self.args = args or []
def setAnalyzer(self, analyzer):
self.analyzer = analyzer
def setItem(self, item):
self.itemToAnalyze = item
def showMainWindow():
"""Creates the application for the GUI and shows the main window."""
from gametime.defaults import logger
logger.info("Starting up the GameTime GUI...")
# One application created for the GUI.
# TODO (jokotker): What is happening here?
# Maintains the main window.
app_gametime = QtGui.QApplication(sys.argv)
# Start an instance of the GUI.
gui_instance = GameTimeGui()
gui_instance.show()
# Execute the application.
sys.exit(app_gametime.exec_())
logger.info("GameTime GUI closed.")
def startGui():
# Construct the location of the directory that contains
# the batch file that prepares and starts
# the GameTime graphical user interface.
from gametime.defaults import sourceDir
guiInitBatchFile = os.path.join(sourceDir,
os.path.join("bin", "gametime-gui.bat"))
subprocess.call([guiInitBatchFile], shell=True)
if __name__ == "__main__":
showMainWindow()
```
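A note on the threading design in gui.py: `WorkerThread` runs the analysis off the GUI thread and reports back only through Qt signals, which Qt delivers as queued events on the thread that owns the connected slots. A condensed, self-contained sketch of that pattern (QtCore only, no GameTime dependencies; all names here are illustrative):
```python
# Minimal sketch of the worker-thread/signal pattern used by WorkerThread.
import sys
from PySide import QtCore
from PySide.QtCore import Signal

class WorkerSignals(QtCore.QObject):
    progress = Signal(str)
    done = Signal()

class Worker(QtCore.QThread):
    def __init__(self):
        super(Worker, self).__init__()
        self.signals = WorkerSignals()

    def run(self):
        # Runs in the worker thread; emitted signals are queued and
        # delivered in the thread that owns the connected slots.
        for step in range(3):
            self.signals.progress.emit("Step %d finished" % step)
        self.signals.done.emit()

app = QtCore.QCoreApplication(sys.argv)
worker = Worker()
worker.signals.progress.connect(lambda msg: sys.stdout.write(msg + "\n"))
worker.signals.done.connect(app.quit)
worker.start()
sys.exit(app.exec_())
```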
#### File: gametime/src/__init__.py
```python
the source distribution and
at http://verifun.eecs.berkeley.edu/gametime/about/LICENSE,
for details on the GameTime license and authors.
"""
import analyzer
import cilHelper
import configuration
import defaults
import fileHelper
import gametimeError
import indexExpression
import inliner
import loopHandler
import merger
import nxHelper
import path
import phoenixHelper
import projectConfiguration
import pulpHelper
import simulators
import smt
import updateChecker
from analyzer import Analyzer
from defaults import logger
from gametimeError import GameTimeError
from pathGenerator import PathType
class GameTime(object):
"""Contains methods and variables that allow a user to import
GameTime as a module.
"""
@staticmethod
def analyze(projectConfig):
"""
Arguments:
projectConfig:
:class:`~gametime.projectConfiguration.ProjectConfiguration`
object that represents the configuration of a GameTime project.
Returns:
:class:`~gametime.analyzer.Analyzer` object for the project
configuration provided.
"""
try:
analyzer = Analyzer(projectConfig)
analyzer.createDag()
return analyzer
except GameTimeError as e:
logger.error(str(e))
raise e
```
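As the docstring suggests, the module-level entry point takes a `ProjectConfiguration` and returns an `Analyzer`. A hedged usage sketch, where `project.xml` is a placeholder for a project file like the ones the GUI consumes:
```python
# Hypothetical library usage; "project.xml" is a placeholder path.
from gametime import GameTime
from gametime.projectConfiguration import readProjectConfigFile

projectConfig = readProjectConfigFile("project.xml")
analyzer = GameTime.analyze(projectConfig)  # builds the DAG, returns an Analyzer
```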
#### File: gametime/src/loggingHelper.py
```python
the source distribution and
at http://verifun.eecs.berkeley.edu/gametime/about/LICENSE,
for details on the GameTime license and authors.
"""
import logging
import sys
def initialize(logger):
"""Initializes the logger provided with
:class:`~logging.Formatter` and :class:`~logging.StreamHandler`
objects appropriate for GameTime.
Arguments:
logger:
Logger to initialize.
"""
logger.setLevel(logging.DEBUG)
logger.propagate = False
formatter = logging.Formatter("%(message)s")
stdoutHandler = logging.StreamHandler(sys.stdout)
stdoutHandler.setLevel(logging.INFO)
stdoutHandler.setFormatter(formatter)
logger.addHandler(stdoutHandler)
stderrHandler = logging.StreamHandler(sys.stderr)
stderrHandler.setLevel(logging.ERROR)
stderrHandler.setFormatter(formatter)
logger.addHandler(stderrHandler)
```
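Note the routing that `initialize` sets up: the stdout handler accepts INFO and above while the stderr handler accepts ERROR and above, so an ERROR record is emitted by both handlers. A small usage sketch (the import path is an assumption):
```python
# Usage sketch for initialize(); the import below assumes the module is
# importable as gametime.loggingHelper.
import logging
from gametime import loggingHelper

logger = logging.getLogger("gametime-demo")
loggingHelper.initialize(logger)
logger.info("Preprocessing the file...")   # stdout only
logger.error("Unable to find the file.")   # stdout and stderr
```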
#### File: gametime/src/loopHandler.py
```python
the source distribution and
at http://verifun.eecs.berkeley.edu/gametime/about/LICENSE,
for details on the GameTime license and authors.
"""
import os
import subprocess
from defaults import config, sourceDir
class HandlerMode(object):
"""Represents the mode that the loop handler works in."""
#: Detect loops.
DETECTOR = 0
#: Unroll loops.
UNROLLER = 1
def _generateHandlerCommand(projectConfig, handlerMode):
"""Generates the system call that runs the loop handler
with appropriate inputs.
Arguments:
projectConfig:
:class:`~gametime.projectConfiguration.ProjectConfiguration`
object that represents the configuration of a GameTime project.
handlerMode:
Mode that the loop handler should run in.
Returns:
Appropriate system call as a list that contains the program
to be run and the proper arguments.
"""
# Set the environment variable that allows the Cilly driver to find
# the path to the configuration file for the Findlib OCaml module.
os.environ["OCAMLFIND_CONF"] = os.path.join(sourceDir,
"ocaml/conf/findlib.conf")
# Set the environment variable that allows the Cilly driver to find
# the path to the folder that contains the compiled OCaml files.
os.environ["OCAMLPATH"] = os.path.join(sourceDir, "ocaml/lib")
# Set the environment variable that configures the Cilly driver to load
# the features that will be needed for the loop handler.
os.environ["CIL_FEATURES"] = "cil.default-features,loopHandler.loopHandler"
command = []
command.append(os.path.join(config.TOOL_CIL, "bin/cilly.bat"))
command.append("--doloopHandler")
command.append("--loopHandler-detect"
if handlerMode is HandlerMode.DETECTOR
else "--loopHandler-unroll")
command.append("--loopHandler-analyze=%s" % projectConfig.func)
loopConfigFile = os.path.join(projectConfig.locationTempDir,
config.TEMP_LOOP_CONFIG)
command.append("--loopHandler-config='%s'" % loopConfigFile)
for inlineName in projectConfig.inlined:
command.append("--inline='%s'" % inlineName)
analysisFile = ("%s%s.c" % (projectConfig.locationTempNoExtension,
config.TEMP_SUFFIX_LINE_NUMS)
if handlerMode is HandlerMode.DETECTOR
else projectConfig.locationTempFile)
command.append(analysisFile)
command.append("-I'%s'" % projectConfig.locationOrigDir)
command.append("--save-temps='%s'" % projectConfig.locationTempDir)
command.append("-c")
command.append("-o")
command.append("'%s.out'" % projectConfig.locationTempNoExtension)
return command
def runDetector(projectConfig):
"""Conducts the sequence of system calls that will detect loops
for the function currently being analyzed. The output of the
detector will be placed in a loop configuration file that the
user has to modify: this file contains the line numbers of each
    loop header, and the user has to specify bounds for each loop
by changing the number beside the line numbers, which is set to
1 by default.
Arguments:
projectConfig:
:class:`~gametime.projectConfiguration.ProjectConfiguration`
object that represents the configuration of a GameTime project.
Returns:
        Zero if the loop detection was successful; a non-zero value otherwise.
"""
command = _generateHandlerCommand(projectConfig, HandlerMode.DETECTOR)
proc = subprocess.call(command, shell=True)
return proc
def runUnroller(projectConfig):
"""Conducts the sequence of system calls that will unroll loops
in the function currently being analyzed. The output of the
detector will be a temporary file for GameTime analysis where
all of the loops have been unrolled using user-specified bounds.
Precondition: The loop detector has already been run, and the user
has already specified bounds for each loop in the loop configuration
file generated by the loop detector.
Arguments:
projectConfig:
:class:`~gametime.projectConfiguration.ProjectConfiguration`
object that represents the configuration of a GameTime project.
Returns:
        Zero if the loop unrolling was successful; a non-zero value otherwise.
"""
command = _generateHandlerCommand(projectConfig, HandlerMode.UNROLLER)
proc = subprocess.call(command, shell=True)
return proc
```
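Both `runDetector` and `runUnroller` reduce to the same pattern: configure the Cilly driver through environment variables, build a list-form command, and treat the return code of `subprocess.call` as the success flag. A stripped-down, self-contained sketch of that pattern (the variable and command below are harmless placeholders, not the real Cilly invocation):
```python
# Generic environment-plus-subprocess pattern, mirroring the code above.
import os
import subprocess

os.environ["DEMO_FEATURE_FLAGS"] = "feature-a,feature-b"  # placeholder variable
command = ["python", "--version"]  # stands in for the cilly.bat command list
returnCode = subprocess.call(command)
print "Succeeded" if returnCode == 0 else "Failed with code %d" % returnCode
```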
#### File: smt/parsers/modelLexer.py
```python
the source distribution and
at http://verifun.eecs.berkeley.edu/gametime/about/LICENSE,
for details on the GameTime license and authors.
"""
import ply.lex as lex
from gametime.defaults import config
from gametime.gametimeError import GameTimeError
class ModelLexer(object):
"""Performs the lexical analysis of the models generated by SMT solvers."""
def __init__(self):
#: Underlying lexer (object of the `Lexer` class).
self.plyLexer = None
self._addRulesFromConfig()
def _addRulesFromConfig(self):
"""Adds the rules for tokens that depend
on the GameTime configuration.
"""
ModelLexer.t_CONSTRAINT = config.IDENT_CONSTRAINT
ModelLexer.t_EFC = config.IDENT_EFC
ModelLexer.t_TEMPINDEX = config.IDENT_TEMPINDEX
# List of token names.
tokens = ("LANGLE", "RANGLE", "AT",
"CONSTRAINT", "EFC", "TEMPINDEX",
"NUMBER", "WORD")
# Rules for the tokens.
t_LANGLE = r"\<"
t_RANGLE = r"\>"
t_AT = r"@"
def t_NUMBER(self, t):
r"\d+"
t.value = int(t.value)
return t
t_WORD = r"\w+"
# Rule that allows us to track line numbers.
# From http://www.dabeaz.com/ply/ply.html.
def t_newline(self, t):
r"(\n|\r\n)+"
t.lexer.lineno += len(t.value)
# Rules to ignore characters.
t_ignore = " \t\f\v"
# Error handling rule.
def t_error(self, t):
errMsg = "Illegal character `%s'" % t.value[0]
raise GameTimeError(errMsg)
def build(self, **kwargs):
"""
Builds this lexer. This method is an interface to the `lex'
function of the `lex' module, which builds the underlying lexer.
The keyworded arguments accepted are thus the same as those
of the `lex' function.
@param kwargs Keyworded, variable-length argument list that will
be passed to the `lex' function.
"""
self._addRulesFromConfig()
self.plyLexer = lex.lex(module=self, **kwargs)
def input(self, data):
"""
Stores a new string in this lexer. This method is an interface to
the `input' function of the underlying lexer.
@param data New string to store in this lexer.
"""
self.plyLexer.input(data)
def token(self):
"""
Gets the new token from this lexer. This method is an interface to
the `token' function of the underlying lexer.
@retval New token from this lexer.
"""
return self.plyLexer.token()
def __getstate__(self):
"""
Returns the pickled representation of a ModelLexer object.
@retval Pickled representation of a ModelLexer object.
"""
objectDict = self.__dict__.copy()
if "plyLexer" in objectDict:
del objectDict["plyLexer"]
return objectDict
def __setstate__(self, pickled):
"""
Unpickles the provided pickled representation of a ModelLexer object.
@param pickled Pickled representation of a ModelLexer object.
"""
self.__dict__.update(pickled)
self.plyLexer = None
```
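For context on the PLY conventions used above: `lex.lex()` collects every `t_`-prefixed name, function rules (whose docstrings are their regexes) are tried in definition order before string rules, and `t_error` handles anything unmatched. A tiny standalone lexer that exercises the same conventions:
```python
# Standalone PLY sketch of the t_-rule conventions used by ModelLexer.
import ply.lex as lex

tokens = ("NUMBER", "WORD")

def t_NUMBER(t):
    r"\d+"
    t.value = int(t.value)  # convert the lexeme, as ModelLexer.t_NUMBER does
    return t

t_WORD = r"[a-zA-Z_]\w*"
t_ignore = " \t"

def t_error(t):
    raise ValueError("Illegal character %r" % t.value[0])

lexer = lex.lex()
lexer.input("constraint 42")
tok = lexer.token()
while tok:
    print tok.type, tok.value
    tok = lexer.token()
```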
#### File: smt/parsers/z3ModelLexer.py
```python
the source distribution and
at http://verifun.eecs.berkeley.edu/gametime/about/LICENSE,
for details on the GameTime license and authors.
"""
from gametime.smt.parsers.modelLexer import ModelLexer
class Z3ModelLexer(ModelLexer):
"""
This class performs the lexical analysis of the models
generated by Z3, the SMT solver from Microsoft.
"""
def __init__(self):
"""Constructor for the Z3ModelLexer class."""
super(Z3ModelLexer, self).__init__()
# List of token names.
tokens = ModelLexer.tokens + ("LPAREN", "RPAREN", "BANG", "EQUALS",
"DEFINEFUN", "BOOL", "BITVEC",
"ARRAY", "ASARRAY",
"TRUE", "BINNUMBER", "HEXNUMBER")
# Rules for the tokens.
t_LPAREN = r"\("
t_RPAREN = r"\)"
t_BANG = r"!"
t_EQUALS = r"="
t_DEFINEFUN = r"define-fun"
t_BOOL = r"Bool"
t_BITVEC = r"BitVec"
t_ARRAY = r"Array"
t_ASARRAY = "as-array"
t_TRUE = r"true"
def t_BINNUMBER(self, t):
r"\#b[01]+"
t.value = int(t.value[2:], 2)
return t
def t_HEXNUMBER(self, t):
r"\#x[\da-fA-F]+"
t.value = int(t.value[2:], 16)
return t
```
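Z3 prints bit-vector constants as `#b...` (binary) or `#x...` (hexadecimal); the two function rules above strip the two-character prefix and parse the rest in base 2 or 16. In isolation:
```python
# The conversions performed by t_BINNUMBER and t_HEXNUMBER.
print int("#b1010"[2:], 2)       # 10
print int("#xdeadbeef"[2:], 16)  # 3735928559
```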
#### File: smt/solvers/solver.py
```python
the source distribution and
at http://verifun.eecs.berkeley.edu/gametime/about/LICENSE,
for details on the GameTime license and authors.
"""
class Solver(object):
"""Maintains a representation of an SMT solver."""
def __init__(self, name=""):
"""
Constructor for the Solver class.
@param name Name of the SMT solver that this object represents.
"""
# Name of the SMT solver that this object represents.
self.name = name
def checkSat(self, query):
"""
Checks and updates the satisfiability of the SMT query
represented by the Query object provided. If the SMT query
is satisfiable, the Query object is updated with a satisfying
model; if the query is unsatisfiable, the Query object is
updated with an unsatisfiable core.
@param query Query object that represents an SMT query.
"""
errMsg = "Method has not yet been implemented."
raise NotImplementedError(errMsg)
def __str__(self):
"""
Returns a string representation of this Solver object.
@retval String representation of this Solver object.
"""
return self.name
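# Illustrative subclass sketch (not part of the original module): a concrete
# backend overrides `checkSat` to run the solver and update the Query object
# in place, e.g.
#     class EchoSolver(Solver):
#         def __init__(self):
#             super(EchoSolver, self).__init__(name="echo")
#         def checkSat(self, query):
#             # Run the underlying solver here, then attach either a
#             # satisfying model or an unsatisfiable core to `query`.
#             pass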
```
|
{
"source": "jerryedebua/python-backend",
"score": 2
}
|
#### File: flask/app/routes.py
```python
from app import App
@App.route('/')
@App.route('/index')
@App.route('/api')
@App.route('/api/index')
def index():
key = 'state'
return { key: 'Okay' }
@App.route('/user')
@App.route('/api/user')
def user():
return {
'id': 1, 'name': 'Harvey'
}
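# Illustrative check (not part of the original module, assuming `App` is the
# Flask instance created in app/__init__.py):
#     with App.test_client() as client:
#         client.get('/api/user').get_json()  # -> {'id': 1, 'name': 'Harvey'}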
```
|
{
"source": "jerryendrina/535Project",
"score": 4
}
|
#### File: 535Project/apps/trendlines.py
```python
"""
Created on Wed Nov 24 06:47:44 2021
@author: jeremiasendrinajr
"""
import streamlit as st #to create interactive dashboard
import requests #to query with covid-1 API
import pandas as pd #to work with dataframes
import plotly.express as px #to produce pretty plots
def app():
st.title('Trend Line')
st.markdown("""
    This is the trend line page. Plotting our time series data helps us see
    trends visually, which gives us an initial idea of what our future
    predictions might look like. The sidebar provides the variable and country
    options we want to see in the plot. Plotly is a great visualization
    package that lets us zoom in and inspect the plotted values closely.
""")
    ### function to generate data ####
    @st.cache
def countryData(country):
        url1 = "https://api.covid19api.com/dayone/country/"
        url = url1 + country
payload={}
headers = {'X-Access-Token': '<PASSWORD>'}
response = requests.request("GET", url, headers=headers, data=payload)
data = response.json()
df = pd.DataFrame(data)
return df
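    # Illustrative note (an assumption based on how the frame is used below):
    # the API returns one record per day since the first confirmed case, so
    # `df` carries the 'Country', 'Date', 'Confirmed', 'Deaths', 'Recovered'
    # and 'Active' columns that the trend-line options rely on.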
######################## generate data ###################################
    countriesCap = ["Brunei Darussalam", "Myanmar", "Cambodia", "Timor-Leste",
"Indonesia", "Malaysia", "Philippines", "Singapore",
"Thailand", "Viet Nam"]
countryLine = st.sidebar.selectbox('Which country would you like to plot?',
countriesCap, 6)
#text to show while loading data
data_load_state = st.text('Loading data and creating trend line...')
#query data from API
data = countryData(countryLine)
    data = data.iloc[:-1, :]
#notify data is loaded
data_load_state.text("Data loaded and trend line created!")
#Create daily cases
data['DailyCases'] = data['Confirmed'].diff()
data = data.dropna()
data['DailyCases'] = data['DailyCases'].astype(int)
#Create daily deaths
data['DailyDeaths'] = data['Deaths'].diff()
data = data.dropna()
data['DailyDeaths'] = data['DailyDeaths'].astype(int)
data['Date'] = pd.to_datetime(data['Date']).dt.strftime('%Y-%m-%d')
varsOptions = ['Confirmed','Deaths','Recovered','Active','DailyCases',
'DailyDeaths']
varSelected = st.sidebar.selectbox('Which variable would you like to plot?',
varsOptions, 4)
######################## Create Trend Lines ##############################
st.subheader(f"Trend Line of the Variable '{varSelected}' for {countryLine}")
dataScat = data[['Country', varSelected, 'Date']]
fig2 = px.line(dataScat, x="Date", y=varSelected)
st.write(fig2)
```
#### File: jerryendrina/535Project/multiapp.py
```python
import streamlit as st
# a simple object-oriented container for multiple Streamlit pages
class MultiApp:
def __init__(self):
self.apps = []
#a function to create a page
def add_app(self, title, func):
self.apps.append({
"title": title,
"function": func
})
    # a function to render the sidebar in the main page and run the selected page
def run(self):
app = st.sidebar.radio(
'Click the page to display:',
self.apps,
format_func=lambda app: app['title'])
app['function']()
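# Illustrative wiring sketch (not part of the original module), e.g. in a
# top-level script, assuming page modules expose an `app()` function like
# apps/trendlines.py above:
#     from apps import trendlines
#     multi = MultiApp()
#     multi.add_app("Trend Line", trendlines.app)
#     multi.run()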
```
|
{
"source": "Jerry-FaGe/nonebot_plugin_pixivrank_search",
"score": 2
}
|
#### File: nonebot_plugin_pixivrank_search/pixivrank_search/__init__.py
```python
from nonebot.typing import T_State
from nonebot.adapters.cqhttp import Bot, MessageEvent
from nonebot import on_command
from .util import UserExistLimiter, is_number
from .data_source import get_pixiv_urls, download_pixiv_imgs, search_pixiv_urls
import time
from nonebot.adapters.cqhttp.exception import NetworkError
from asyncio.exceptions import TimeoutError
from aiohttp.client_exceptions import ClientConnectorError
__plugin_name__ = 'P站'
__rank_usage__ = '''P站排行榜帮助:
可选参数:
类型:
1. 日排行
2. 周排行
3. 月排行
4. 原创排行
5. 新人排行
6. R18日排行
7. R18周排行
8. R18受男性欢迎排行
9. R18重口排行【慎重!】
【使用时选择参数序号即可,R18仅可私聊】
p站排行榜 [参数] [数量](可选) [日期](可选)
示例:
p站排行榜 (无参数默认为日榜)
p站排行榜 1
p站排行榜 1 5
p站排行榜 1 5 2018-4-25
【注意空格!!】【在线搜索会较慢】
'''
__search_usage__ = '''P站搜图帮助:
可选参数:
1.热度排序
2.时间排序
【使用时选择参数序号即可,R18仅可私聊】
搜图 [关键词] [数量](可选) [排序方式](可选) [r18](可选)
示例:
搜图 樱岛麻衣
搜图 樱岛麻衣 5 1
搜图 樱岛麻衣 5 2 r18
【默认为 热度排序】
【注意空格!!】【在线搜索会较慢】【数量可能不符】
'''
rank_dict = {
'1': 'day',
'2': 'week',
'3': 'month',
'4': 'week_original',
'5': 'week_rookie',
'6': 'day_r18',
'7': 'week_r18',
'8': 'day_male_r18',
'9': 'week_r18g'
}
_ulmt = UserExistLimiter()
pixiv_rank = on_command('p站排行', aliases={'p站排行榜', 'P站排行榜'}, priority=5, block=True)
pixiv_keyword = on_command('搜图', priority=5, block=True)
@pixiv_rank.handle()
async def _(bot: Bot, event: MessageEvent, state: T_State):
msg = str(event.get_message()).strip()
if msg in ['帮助']:
await pixiv_rank.finish(__rank_usage__)
msg = msg.split(' ')
msg = [m for m in msg if m]
if not msg:
msg = ['1']
if msg[0] in ['6', '7', '8', '9']:
if event.message_type == 'group':
await pixiv_rank.finish('羞羞脸!私聊里自己看!', at_sender=True)
# print(msg)
if _ulmt.check(event.user_id):
await pixiv_rank.finish("P站排行榜正在搜索噢,不要重复触发命令呀")
_ulmt.set_True(event.user_id)
if len(msg) == 0 or msg[0] == '':
text_list, urls, code = await get_pixiv_urls(rank_dict.get('1'))
elif len(msg) == 1:
if msg[0] not in ['1', '2', '3', '4', '5', '6', '7', '8', '9']:
_ulmt.set_False(event.user_id)
await pixiv_rank.finish("要好好输入要看什么类型的排行榜呀!", at_sender=True)
text_list, urls, code = await get_pixiv_urls(rank_dict.get(msg[0]))
elif len(msg) == 2:
text_list, urls, code = await get_pixiv_urls(rank_dict.get(msg[0]), int(msg[1]))
elif len(msg) == 3:
if not check_date(msg[2]):
_ulmt.set_False(event.user_id)
await pixiv_rank.finish('日期格式错误了', at_sender=True)
text_list, urls, code = await get_pixiv_urls(rank_dict.get(msg[0]), int(msg[1]), msg[2])
else:
_ulmt.set_False(event.user_id)
await pixiv_rank.finish('格式错了噢,看看帮助?', at_sender=True)
if code != 200:
_ulmt.set_False(event.user_id)
        await pixiv_rank.finish(text_list[0])
else:
if not text_list or not urls:
_ulmt.set_False(event.user_id)
await pixiv_rank.finish('没有找到啊,等等再试试吧~V', at_sender=True)
for i in range(len(text_list)):
try:
await pixiv_rank.send(text_list[i] + await download_pixiv_imgs(urls[i], event.user_id))
except (NetworkError, TimeoutError, ClientConnectorError):
                await pixiv_rank.send('这张图网络炸了!', at_sender=True)
_ulmt.set_False(event.user_id)
@pixiv_keyword.handle()
async def _(bot: Bot, event: MessageEvent, state: T_State):
msg = str(event.get_message()).strip()
if msg in ['帮助']:
        await pixiv_keyword.finish(__search_usage__)
if event.message_type == 'group':
if msg.find('r18') != -1:
await pixiv_keyword.finish('(脸红#) 你不会害羞的 八嘎!', at_sender=True)
if msg.find('r18') == -1:
r18 = 1
else:
r18 = 2
msg = msg.replace('r18', '').strip()
if _ulmt.check(event.user_id):
await pixiv_rank.finish("P站关键词正在搜索噢,不要重复触发命令呀")
_ulmt.set_True(event.user_id)
msg = msg.split(' ')
msg = [m for m in msg if m]
if len(msg) == 1:
keyword = msg[0].strip()
num = 5
order = 'popular'
elif len(msg) == 2:
keyword = msg[0].strip()
if not is_number(msg[1].strip()):
_ulmt.set_False(event.user_id)
await pixiv_keyword.finish('图片数量必须是数字!', at_sender=True)
num = int(msg[1].strip())
order = 'popular'
elif len(msg) == 3:
keyword = msg[0].strip()
if not is_number(msg[1].strip()):
_ulmt.set_False(event.user_id)
await pixiv_keyword.finish('图片数量必须是数字!', at_sender=True)
num = int(msg[1].strip())
if not is_number(msg[2].strip()):
_ulmt.set_False(event.user_id)
await pixiv_keyword.finish('排序方式必须是数字!', at_sender=True)
if msg[2].strip() == '1':
order = 'popular'
else:
order = 'xxx'
else:
_ulmt.set_False(event.user_id)
await pixiv_keyword.finish('参数不正确,一定要好好看看帮助啊!', at_sender=True)
text_list, urls, code = await search_pixiv_urls(keyword, num, order, r18)
if code != 200:
_ulmt.set_False(event.user_id)
await pixiv_keyword.finish(text_list[0])
else:
for i in range(len(text_list)):
try:
await pixiv_keyword.send(text_list[i] + await download_pixiv_imgs(urls[i], event.user_id))
except (NetworkError, TimeoutError, ClientConnectorError):
await pixiv_keyword.send('这张图网络炸了!', at_sender=True)
_ulmt.set_False(event.user_id)
def check_date(date):
try:
time.strptime(date, "%Y-%m-%d")
return True
    except ValueError:
return False
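# Illustrative: check_date("2018-4-25") -> True, while a string that does not
# match "%Y-%m-%d", e.g. check_date("25/04/2018"), -> False.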
```
|
{
"source": "Jerry-FaGe/pyCraft",
"score": 3
}
|
#### File: clientbound/play/combat_event_packet.py
```python
from abc import ABCMeta, abstractmethod
from minecraft import PRE
from minecraft.networking.packets import Packet
from minecraft.networking.types import (
VarInt, Integer, String, MutableRecord
)
# Note: this packet was removed in Minecraft 21w07a (protocol PRE|15)
# and replaced with the separate EnterCombatEvent, EndCombatEvent, and
# DeathCombatEvent packets. These are subclasses of CombatEventPacket, so
# that code written to listen for CombatEventPacket instances should in most
# cases continue to work without modification.
class CombatEventPacket(Packet):
@classmethod
def get_id(cls, context):
return cls.deprecated() if context.protocol_later_eq(PRE | 15) else \
0x31 if context.protocol_later_eq(741) else \
0x32 if context.protocol_later_eq(721) else \
0x33 if context.protocol_later_eq(550) else \
0x32 if context.protocol_later_eq(471) else \
0x30 if context.protocol_later_eq(451) else \
0x2F if context.protocol_later_eq(389) else \
0x2E if context.protocol_later_eq(345) else \
0x2D if context.protocol_later_eq(336) else \
0x2C if context.protocol_later_eq(332) else \
0x2D if context.protocol_later_eq(318) else \
0x2C if context.protocol_later_eq(86) else \
0x2D if context.protocol_later_eq(80) else \
0x2C if context.protocol_later_eq(67) else \
0x42
packet_name = 'combat event'
fields = 'event',
# The abstract type of the 'event' field of this packet.
class EventType(MutableRecord, metaclass=ABCMeta):
__slots__ = ()
type_from_id_dict = {}
# Read the fields of the event (not including the ID) from the file.
@abstractmethod
def read(self, file_object):
pass
# Write the fields of the event (not including the ID) to the buffer.
@abstractmethod
def write(self, packet_buffer):
pass
@classmethod
def type_from_id(cls, event_id):
subcls = cls.type_from_id_dict.get(event_id)
if subcls is None:
raise ValueError('Unknown combat event ID: %s.' % event_id)
return subcls
class EnterCombatEvent(EventType):
__slots__ = ()
id = 0
def read(self, file_object):
pass
def write(self, packet_buffer):
pass
EventType.type_from_id_dict[EnterCombatEvent.id] = EnterCombatEvent
class EndCombatEvent(EventType):
__slots__ = 'duration', 'entity_id'
id = 1
def read(self, file_object):
self.duration = VarInt.read(file_object)
self.entity_id = Integer.read(file_object)
def write(self, packet_buffer):
VarInt.send(self.duration, packet_buffer)
Integer.send(self.entity_id, packet_buffer)
EventType.type_from_id_dict[EndCombatEvent.id] = EndCombatEvent
class EntityDeadEvent(EventType):
__slots__ = 'player_id', 'entity_id', 'message'
id = 2
def read(self, file_object):
self.player_id = VarInt.read(file_object)
self.entity_id = Integer.read(file_object)
self.message = String.read(file_object)
def write(self, packet_buffer):
VarInt.send(self.player_id, packet_buffer)
Integer.send(self.entity_id, packet_buffer)
String.send(self.message, packet_buffer)
EventType.type_from_id_dict[EntityDeadEvent.id] = EntityDeadEvent
def read(self, file_object):
if self.context and self.context.protocol_later_eq(PRE | 15):
self.deprecated()
event_id = VarInt.read(file_object)
self.event = CombatEventPacket.EventType.type_from_id(event_id)()
self.event.read(file_object)
def write_fields(self, packet_buffer):
if self.context and self.context.protocol_later_eq(PRE | 15):
self.deprecated()
VarInt.send(self.event.id, packet_buffer)
self.event.write(packet_buffer)
@staticmethod
def deprecated():
raise NotImplementedError(
'`CombatEventPacket` was removed in Minecraft snapshot 21w07a '
'(protocol version 2**30 + 15). In this and later versions, one '
'of the subclasses '
+ repr(SpecialisedCombatEventPacket.__subclasses__()) + ' must be '
'used directly for usage like that which generates this message.')
# Contains the behaviour common to all concrete CombatEventPacket subclasses
class SpecialisedCombatEventPacket(CombatEventPacket):
def __init__(self, *args, **kwds):
super(SpecialisedCombatEventPacket, self).__init__(*args, **kwds)
# Prior to Minecraft 21w07a, instances of CombatEventPacket had a
# single 'event' field giving a 'MutableRecord' of one of three types
# corresponding to the type of combat event represented. For backward
# compatibility, we here present a similar interface, giving the packet
# object itself as the 'event', which should work identically in most
# use cases, since it is a virtual subclass of, and has attributes of
# the same names and contents as those of, the previous event records.
self.event = self
# The 'get_id', 'fields', 'read', and 'write_fields' attributes of the
# 'Packet' base class are all overridden in 'CombatEventPacket'. We desire
# the default behaviour of these attributes, so we restore them here:
get_id = Packet.__dict__['get_id']
fields = Packet.__dict__['fields']
read = Packet.__dict__['read']
write_fields = Packet.__dict__['write_fields']
@CombatEventPacket.EnterCombatEvent.register # virtual subclass
class EnterCombatEventPacket(SpecialisedCombatEventPacket):
packet_name = 'enter combat event'
id = 0x34
definition = []
@CombatEventPacket.EndCombatEvent.register # virtual subclass
class EndCombatEventPacket(SpecialisedCombatEventPacket):
packet_name = 'end combat event'
id = 0x33
definition = [
{'duration': VarInt},
{'entity_id': Integer}]
@CombatEventPacket.EntityDeadEvent.register # virtual subclass
class DeathCombatEventPacket(SpecialisedCombatEventPacket):
packet_name = 'death combat event'
id = 0x35
definition = [
{'player_id': VarInt},
{'entity_id': Integer},
{'message': String}]
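# Illustrative check (not part of the original module): the register() calls
# above make each specialised packet a virtual subclass of the matching event
# record type, e.g.
#     issubclass(EndCombatEventPacket, CombatEventPacket.EndCombatEvent)
#     # -> True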
```
|
{
"source": "jerrygaoLondon/jgtextrank",
"score": 2
}
|
#### File: jgtextrank/jgtextrank/decorators.py
```python
from __future__ import absolute_import
from functools import wraps
from jgtextrank.exceptions import MissingCorpusError
class cached_property(object):
"""A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property.
Credit to <NAME>, author of bottle.py.
"""
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None:
return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
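# Illustrative usage sketch (not part of the original module): the first
# access computes and stores the value; later accesses read the plain
# instance attribute, and `del doc.tokens` recomputes on the next access.
#     class Document(object):
#         def __init__(self, text):
#             self.text = text
#         @cached_property
#         def tokens(self):
#             return self.text.split()  # stands in for an expensive step
#     doc = Document("a b c")
#     doc.tokens  # computed once -> ['a', 'b', 'c']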
def requires_nltk_corpus(func):
"""Wraps a function that requires an NLTK corpus. If the corpus isn't found,
raise a :exc:`MissingCorpusError`.
"""
@wraps(func)
def decorated(*args, **kwargs):
try:
return func(*args, **kwargs)
except LookupError as err:
print(err)
raise MissingCorpusError()
return decorated
```
#### File: jgtextrank/jgtextrank/metrics.py
```python
import logging
import math
from typing import Tuple, Dict
import numpy as np
from jgtextrank.utility import MultiprocPool
_logger = logging.getLogger("jgtextrank.metrics")
__author__ = '<NAME> <<EMAIL>>'
__all__ = ["_get_max_score", "_get_average_score", "_get_sum_score", "_term_size_normalize",
"_log_normalise", "_probability_density", "_gaussian_normalise", "_get_plus_score",
"TermGraphValue", "GCValue"]
def _get_max_score(all_syntactic_units, all_vertices):
"""
get max term unit score (normalised by term unit frequency in MWTs)
:param all_syntactic_units:
:param all_vertices:
:return:
"""
# print("all_vertices: ", all_vertices)
# print("collapsed_term: ", collapsed_term)
# max_score = max([all_vertices[term_unit] / float(all_syntactic_units.count(term_unit)) for term_unit in collapsed_term.split(' ')])
max_score = max(
[all_vertices[term_unit] / float(all_syntactic_units.count(term_unit)) for term_unit in all_syntactic_units])
return max_score
def _get_average_score(all_syntactic_units, all_vertices, unit_size):
"""
get average score from single candidate term
:param all_syntactic_units: tokens of single candidate term
:param all_vertices: all the vertices used for computing combined weight
:param unit_size: size of multi-word candidate term
:return:
"""
avg_score = _get_sum_score(all_syntactic_units, all_vertices) / float(unit_size)
return avg_score
def _get_sum_score(all_syntactic_units, all_vertices):
return sum(
[all_vertices[term_unit] / float(all_syntactic_units.count(term_unit)) for term_unit in all_syntactic_units])
def _term_size_normalize(base_score, unit_size):
return base_score / float(unit_size)
def _log_normalise(base_score, mu, unit_size):
if unit_size > 1:
# print("_log_normalise with mu=", mu, " , unit_size:", unit_size)
base_score = base_score / math.log(unit_size, mu)
return base_score
def _probability_density(x_value, mu, sigma):
"""
probability density of the normal distribution
see also https://en.wikipedia.org/wiki/Normal_distribution
:param x_value:
:param mu:
:param sigma:
:return:
"""
pd = (1 / (sigma * np.sqrt(2 * math.pi))) * math.exp(- math.pow((x_value - mu), 2) / (2 * math.pow(sigma, 2)))
return pd
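# Illustrative sanity check: the density peaks at the mean, e.g.
# _probability_density(5, mu=5, sigma=1) == 1 / (1 * np.sqrt(2 * math.pi)),
# which is approximately 0.39894.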
def _gaussian_normalise(base_score, mu, sigma, unit_size):
"""
gaussian normalisation of 'base' weight
:param base_score: float, base weight of candidate terms
    :param mu: int, mean value used as the centre point (default 5) so that candidates near it are ranked higher
This param is only required for normalisation based MWT weighting method
:param sigma: float64, standard deviation of term length in MWTs
:param unit_size: int, size of MWTs
:return:float
"""
norm_value = 1 - _probability_density(unit_size, mu, sigma)
return base_score * float(norm_value)
def _get_plus_score(all_syntactic_units, boosted_term_size_range, boosted_word_length_range, combined_weight,
unit_size):
"""
    Experimental weighting method that adds a small extra fraction to the final score
    More weight can be given to longer terms
:type all_syntactic_units: list (of str)
:param all_syntactic_units: all the tokens of a candidate term(SWT or MWT)
:type boosted_term_size_range: (int, int) | None
:param boosted_term_size_range: range of token size of a candidate term that will be boosted with a small weight fraction
:type boosted_word_length_range: (int, int) | None
:param boosted_word_length_range: range of word length (number of character) that will be boosted with a small weight fraction
:type combined_weight: float
:param combined_weight: combined the weight (i.e., 'avg' or 'max') of current candidate term
This weight is important and used as base value for final boosted weight
:type unit_size: int
:param unit_size: token size of current candidate term
:return: a small weight fraction that can be added to the final weight
"""
all_syntactic_units_lengths = [len(term_unit) for term_unit in all_syntactic_units]
min_word_length = min(all_syntactic_units_lengths)
max_word_length = max(all_syntactic_units_lengths)
avg_word_length = sum(all_syntactic_units_lengths) / unit_size
plus_weight = combined_weight
if boosted_word_length_range is not None and boosted_term_size_range is not None \
and unit_size in boosted_term_size_range and min_word_length in boosted_word_length_range \
and max_word_length in boosted_word_length_range:
# add a small fraction to the final weight when all the syntactic unit length in in a normal range
plus_weight = combined_weight * math.log(avg_word_length, 2)
elif boosted_word_length_range is None and boosted_term_size_range is not None and unit_size in boosted_term_size_range:
plus_weight = combined_weight * math.log(avg_word_length, 2)
elif boosted_word_length_range is not None and boosted_term_size_range is None and \
min_word_length in boosted_word_length_range and max_word_length in boosted_word_length_range:
plus_weight = combined_weight * math.log(avg_word_length, 2)
return plus_weight
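# Illustrative worked example (hypothetical values): for the units
# ['linear', 'constraints'] (word lengths 6 and 11, average 8.5) with
# combined_weight=0.65, boosted_term_size_range=range(2, 4) and
# boosted_word_length_range=range(3, 12), all range checks pass, so
# plus_weight = 0.65 * math.log(8.5, 2), roughly 2.007.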
class TermGraphValue(object):
"""
    Metrics to weigh Multi-Word Terms (MWTs)
"""
def __init__(self, weight_comb="norm_max", mu=5, parallel_workers=1):
self._logger = logging.getLogger("jgtextrank.metrics")
self._logger.info(self.__class__.__name__)
self.parallel_workers = parallel_workers
self.weight_comb = weight_comb
self.mu = mu
@staticmethod
def g_value(collapsed_term, all_vertices, weight_comb="norm_sum", mu=5, **kwargs):
final_score = float(0)
log2a = 0
avg_score = 0
sum_score = 0
max_score = 0
sigma = 0
if "sigma" in kwargs:
sigma = kwargs["sigma"]
# compute term length (i.e.,number of words/tokens)
# all_syntactic_units = collapsed_term.split(' ')
all_syntactic_units = collapsed_term
unit_size = len(all_syntactic_units)
if "len_log" in weight_comb:
# log(a + 0.1) to smooth unigrams
log2a = math.log2(unit_size + 0.1)
if "avg" in weight_comb:
avg_score = _get_average_score(all_syntactic_units, all_vertices, unit_size)
if "sum" in weight_comb:
sum_score = _get_sum_score(all_syntactic_units, all_vertices)
if "max" in weight_comb:
max_score = _get_max_score(all_syntactic_units, all_vertices)
if weight_comb == "avg":
final_score = avg_score
elif weight_comb == "norm_avg":
final_score = _term_size_normalize(avg_score, unit_size)
elif weight_comb == "log_norm_avg":
final_score = _log_normalise(avg_score, mu, unit_size)
elif weight_comb == "gaussian_norm_avg":
final_score = _gaussian_normalise(avg_score, mu, sigma, unit_size)
elif weight_comb == "len_log_norm_avg":
final_score = log2a * avg_score
elif weight_comb == "sum":
final_score = sum_score
elif weight_comb == "norm_sum":
final_score = _term_size_normalize(sum_score, unit_size)
elif weight_comb == "log_norm_sum":
final_score = _log_normalise(sum_score, mu, unit_size)
elif weight_comb == "gaussian_norm_sum":
final_score = _gaussian_normalise(sum_score, mu, sigma, unit_size)
elif weight_comb == "len_log_norm_sum":
final_score = log2a * sum_score
elif weight_comb == "max":
final_score = max_score
elif weight_comb == "norm_max":
final_score = _term_size_normalize(max_score, unit_size)
elif weight_comb == "log_norm_max":
final_score = _log_normalise(max_score, mu, unit_size)
elif weight_comb == "gaussian_norm_max":
final_score = _gaussian_normalise(max_score, mu, sigma, unit_size)
elif weight_comb == "len_log_norm_max":
final_score = log2a * max_score
else:
            raise ValueError("Unsupported weight combination option: '%s'" % weight_comb)
return round(final_score, 5)
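    # Illustrative worked example (hypothetical scores): for the tokenised
    # term ['linear', 'constraints'] with all_vertices
    # {'linear': 0.8, 'constraints': 0.5}, the sum score is 1.3, so
    # weight_comb='sum' gives 1.3, 'norm_sum' gives 1.3 / 2 = 0.65,
    # 'max' gives 0.8 and 'norm_max' gives 0.4.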
def _is_top_t_vertices_connection(self, collapsed_term, top_t_vertices):
"""
        :type collapsed_term: list [of string]
        :param collapsed_term: tokens of a candidate term collapsed from the original context that form a single-word or multi-word term
        :param top_t_vertices: top T weighted vertices
        :return: True if the candidate contains any top T vertex
"""
return any(top_t_vertex[0] in collapsed_term for top_t_vertex in top_t_vertices)
def _concatenate_terms(self, weighted_candidates) -> Dict[str, float]:
return dict((" ".join(tokenised_term), score) for tokenised_term, score in weighted_candidates)
def _get_sigma_from_all_candidates(self, collapsed_terms):
"""
compute standard deviation of term length in MWTs
:param collapsed_terms: list, list of tokenised terms
        :rtype: numpy.float64
        :return: standard deviation of term sizes
"""
all_terms_size = [len(collapsed_term) for collapsed_term in collapsed_terms]
return np.std(all_terms_size)
def weighing(self, all_candidates, all_vertices, top_t_vertices) -> Dict[str, float]:
if all_candidates is None or len(all_candidates) == 0:
self._logger.info("No candidate found. Skip weighing.")
return {}
self._logger.info(" Total [%s] candidates to weigh...", len(all_candidates))
sigma = 0
if "norm" in self.weight_comb:
sigma = self._get_sigma_from_all_candidates(all_candidates)
with MultiprocPool(processes=int(self.parallel_workers)) as pool:
optional_params = dict()
optional_params["weight_comb"] = self.weight_comb
optional_params["mu"] = self.mu
if sigma != 0:
optional_params["sigma"] = sigma
weighted_all_candidates = pool.starmap(TermGraphValue.calculate,
[(candidate, all_candidates, all_vertices, optional_params) for
candidate
in all_candidates if
self._is_top_t_vertices_connection(candidate, top_t_vertices)])
return self._concatenate_terms(weighted_all_candidates)
@staticmethod
def calculate(candidate_term, all_candidates, all_vertices, optional_params=None) -> Tuple[str, float]:
if optional_params is None:
optional_params = dict()
weight_comb = "norm_max"
if "weight_comb" in optional_params:
weight_comb = optional_params["weight_comb"]
mu = 5
if "mu" in optional_params:
mu = optional_params["mu"]
sigma = 0
if "sigma" in optional_params:
sigma = optional_params["sigma"]
final_score = TermGraphValue.g_value(candidate_term, all_vertices,
weight_comb, mu, sigma=sigma)
return (candidate_term, final_score)
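# Illustrative usage sketch (hypothetical scores, single worker):
#     tgv = TermGraphValue(weight_comb="norm_sum")
#     tgv.weighing([["linear", "constraints"]],
#                  {"linear": 0.8, "constraints": 0.5},
#                  [("linear", 0.8)])
#     # -> {"linear constraints": 0.65}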
class GCValue(TermGraphValue):
"""
    Experimental metrics to weigh MWTs
"""
def __init__(self, weight_comb="len_log_norm_avg", mu=5, parallel_workers=1):
super().__init__(weight_comb, mu, parallel_workers)
@staticmethod
def _get_longer_terms(term, all_candidates):
"""
        find the candidate terms that contain the current term
        Simple term normalisation is applied. Could be extended with "solr_term_normaliser"
        params:
        term, current term tokens
        all_candidates: all candidate terms
        return: list of longer terms
"""
try:
return [longer_term for longer_term in all_candidates
if term != longer_term and set(term).issubset(set(longer_term))]
except AttributeError:
import traceback
_logger.error(traceback.format_exc())
_logger.error("AttributeError when processing candidate term [%s]", term)
return []
def weighing(self, all_candidates, all_vertices, top_t_vertices) -> Dict[str, float]:
if all_candidates is None or len(all_candidates) == 0:
self._logger.info("No candidate found. Skip weighing.")
return {}
self._logger.info(" Total [%s] candidates to weigh...", len(all_candidates))
with MultiprocPool(processes=int(self.parallel_workers)) as pool:
weighted_all_candidates = pool.starmap(GCValue.calculate,
[(candidate, all_candidates, all_vertices) for candidate
in all_candidates if
self._is_top_t_vertices_connection(candidate, top_t_vertices)])
self._logger.info(" all candidates gc-value computation is completed.")
return super()._concatenate_terms(weighted_all_candidates)
@staticmethod
def _sum_ga_candidates(candidate_list, all_vertices):
return sum([TermGraphValue.g_value(candidate, all_vertices, weight_comb="len_log_norm_avg") for candidate in
candidate_list])
@staticmethod
def calculate(candidate_term, all_candidates, all_vertices, optional_params=None) -> Tuple[str, float]:
if optional_params is None:
optional_params = dict()
longer_terms = GCValue._get_longer_terms(candidate_term, all_candidates)
a = len(candidate_term)
# log(a + 0.1) for unigrams smoothing
log2a = math.log(a + 0.1, 2)
g_a = TermGraphValue.g_value(candidate_term, all_vertices, weight_comb="len_log_norm_avg")
if longer_terms:
p_ta = len(longer_terms)
sum_gb = GCValue._sum_ga_candidates(longer_terms, all_vertices)
term_gcvalue = log2a * (g_a - (1 / p_ta) * sum_gb)
else:
term_gcvalue = log2a * g_a
return (candidate_term, round(term_gcvalue, 5))
```
#### File: jgtextrank/tests/test_textrank.py
```python
import unittest
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'jgtextrank'))
sys.path.append(os.path.join(os.path.dirname(__file__)))
import types
import warnings
from collections import Counter
import networkx as nx
import matplotlib as mpl
if os.environ.get('DISPLAY','') == '':
print('no display found. Using non-interactive Agg backend')
mpl.use('Agg')
import matplotlib.pyplot as plt
from jgtextrank.utility import sort_dict_by_value, flatten
from jgtextrank.core import preprocessing, preprocessing_tokenised_context, _syntactic_filter, \
_get_cooccurs_from_single_context, _get_cooccurs, build_cooccurrence_graph, \
_build_vertices_representations, keywords_extraction, _is_top_t_vertices_connection, \
_collapse_adjacent_keywords
from jgtextrank.core import GCValue
def ignore_warnings(test_func):
def do_test(self, *args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore", ResourceWarning)
test_func(self, *args, **kwargs)
return do_test
class TestTextRank(unittest.TestCase):
def test_syntactic_filtering(self):
tagged_abstract_context_list = [[('Compatibility', 'NN'), ('of', 'IN'), ('systems', 'NNS'), ('of', 'IN'),
('linear', 'JJ'), ('constraints', 'NNS'), ('over', 'IN'), ('the', 'DT'),
('set', 'NN'), ('of', 'IN'), ('natural', 'JJ'), ('numbers', 'NNS'),
('.', '.')], [('Criteria', 'NNP'), ('of', 'IN'), ('compatibility', 'NN'),
('of', 'IN'), ('a', 'DT'), ('system', 'NN'), ('of', 'IN'), ('linear', 'JJ'),
('Diophantine', 'NNP'), ('equations', 'NNS'), (',', ','), ('strict', 'JJ'),
('inequations', 'NNS'), (',', ','), ('and', 'CC'), ('nonstrict', 'JJ'),
('inequations', 'NNS'), ('are', 'VBP'), ('considered', 'VBN'), ('.', '.')]]
filtered_context_syntactic_units = _syntactic_filter(tagged_abstract_context_list)
assert isinstance(filtered_context_syntactic_units, types.GeneratorType)
all_filtered_context = []
for context_syntactic_units in filtered_context_syntactic_units:
assert isinstance(context_syntactic_units, list)
all_filtered_context.append(context_syntactic_units)
flattened_all_filtered_context = flatten(all_filtered_context)
assert len(flattened_all_filtered_context) == 17
assert ('of', 'IN') not in flattened_all_filtered_context
assert ('.', '.') not in flattened_all_filtered_context
assert ('a', 'DT') not in flattened_all_filtered_context
assert ('and', 'CC') not in flattened_all_filtered_context
assert ('Compatibility', 'NN') in flattened_all_filtered_context
assert ('linear', 'JJ') in flattened_all_filtered_context
assert ('considered', 'VBN') not in flattened_all_filtered_context
tagged_abstract_context_list2 = [[('Compatibility', 'NN'), ('of', 'IN'), ('systems', 'NNS'), ('of', 'IN'),
('linear', 'JJ'), ('constraints', 'NNS'), ('over', 'IN'), ('the', 'DT'),
('set', 'NN'), ('of', 'IN'), ('natural', 'JJ'), ('numbers', 'NNS'),
('.', '.')], [('Criteria', 'NNP'), ('of', 'IN'), ('compatibility', 'NN'),
('of', 'IN'), ('a', 'DT'), ('system', 'NN'), ('of', 'IN'), ('[', 'NN'), ('linear', 'JJ'),
('Diophantine', 'NNP'), ('equations', 'NNS'), (']', 'NN'), (',', ','), ('strict', 'JJ'),
('inequations', 'NNS'), (',', ','), ('and', 'CC'), ('nonstrict', 'JJ'),
('inequations', 'NNS'), ('are', 'VBP'), ('considered', 'VBN'), ('.', '.')]]
filtered_context_syntactic_units = _syntactic_filter(tagged_abstract_context_list2)
assert isinstance(filtered_context_syntactic_units, types.GeneratorType)
all_filtered_context = []
for context_syntactic_units in filtered_context_syntactic_units:
assert isinstance(context_syntactic_units, list)
all_filtered_context.append(context_syntactic_units)
flattened_all_filtered_context = flatten(all_filtered_context)
assert len(flattened_all_filtered_context) == 17, "punctuations should be removed from filtered context"
assert ('[', 'NN') not in flattened_all_filtered_context
assert (']', 'NN') not in flattened_all_filtered_context
def test_pre_processing(self):
example_abstract = "Compatibility of systems of linear constraints over the set of natural numbers. " \
"Criteria of compatibility of a system of linear Diophantine equations, strict inequations, " \
"and nonstrict inequations are considered. Upper bounds for components of a minimal set of " \
"solutions and algorithms of construction of minimal generating sets of solutions for all " \
"types of systems are given. These criteria and the corresponding algorithms for " \
"constructing a minimal supporting set of solutions can be used in solving all the " \
"considered types systems and systems of mixed types."
# original_tokenised_sentences, syntactic_units
syntactic_filtered_context = preprocessing(example_abstract)
assert isinstance(syntactic_filtered_context, types.GeneratorType)
all_tokenised_context = []
all_filtered_context = []
for tokenised_context, context_syntactic_units in syntactic_filtered_context:
assert isinstance(tokenised_context, list)
assert isinstance(context_syntactic_units, list)
assert len(tokenised_context) > 0
assert len(context_syntactic_units) > 0
assert isinstance(context_syntactic_units[0], tuple)
all_tokenised_context.append(tokenised_context)
all_filtered_context.append(context_syntactic_units)
assert len(all_tokenised_context) == 4, "Context size should be 4. The default context is sentence level."
assert len(all_filtered_context) == 4, "PoS filtered context should be 4. " \
"The default context is sentence level."
flatten_all_tokenised_context = flatten(all_tokenised_context)
assert len(flatten_all_tokenised_context) == 91, "total tokens are 91"
flatten_all_filtered_context = flatten(all_filtered_context)
assert len(flatten_all_filtered_context) == 41, "total size of filtered context tokens are 41"
check_filtered_context = [True if filtered_token[1] == 'NN' or filtered_token[1] == 'NNS'
or filtered_token[1] == 'JJ' or filtered_token[1] == 'NNP'
else False for filtered_token in flatten_all_filtered_context]
assert len(set(check_filtered_context)) == 1, "the default 'noun_adjective_filter' should be applied."
assert "." not in flatten_all_filtered_context
assert ('solutions', 'NNS') in flatten_all_filtered_context
assert ('minimal', 'JJ') in flatten_all_filtered_context
assert ('equations', 'NNS') in flatten_all_filtered_context
def test_get_cooccurs_from_single_context(self):
filtered_context = ['Compatibility', 'systems', 'linear', 'constraints', 'set', 'natural', 'numbers']
syntactic_unit_1 = 'systems'
cooccur_context_1_1 = _get_cooccurs_from_single_context(syntactic_unit_1,filtered_context)
assert len(cooccur_context_1_1) == 3, "the number of co-occur words of 'systems' in windows=2 context should be 3"
assert 'Compatibility' in cooccur_context_1_1, "Left side context window contains 'Compatibility'"
assert 'linear' in cooccur_context_1_1
assert 'constraints' in cooccur_context_1_1
cooccur_context_1_2 = _get_cooccurs_from_single_context(syntactic_unit_1,filtered_context, window_size=1)
assert len(cooccur_context_1_2) == 2, "the number of co-occur words of 'systems' in windows=1 context should be 2"
assert 'Compatibility' in cooccur_context_1_2, "Left side context window contains 'Compatibility'"
assert 'linear' in cooccur_context_1_2
syntactic_unit_2 = 'Compatibility'
cooccur_context_2_1 = _get_cooccurs_from_single_context(syntactic_unit_2, filtered_context, window_size=2)
assert len(cooccur_context_2_1) == 2, "the number of co-occur words of 'Compatibility' in windows=2 context should be 2"
assert 'systems' in cooccur_context_2_1
assert 'linear' in cooccur_context_2_1
syntactic_unit_3 = 'constraints'
cooccur_context_3_1 = _get_cooccurs_from_single_context(syntactic_unit_3, filtered_context)
assert len(cooccur_context_3_1) == 4
assert 'linear' in cooccur_context_3_1
assert 'systems' in cooccur_context_3_1
assert 'set' in cooccur_context_3_1
assert 'natural' in cooccur_context_3_1
cooccur_context_3_2 = _get_cooccurs_from_single_context(syntactic_unit_3, filtered_context, window_size=3)
assert len(cooccur_context_3_2) == 6
assert 'Compatibility' in cooccur_context_3_2
assert 'systems' in cooccur_context_3_2
assert 'linear' in cooccur_context_3_2
assert 'set' in cooccur_context_3_2
assert 'natural' in cooccur_context_3_2
assert 'numbers' in cooccur_context_3_2
cooccur_context_3_3 = _get_cooccurs_from_single_context(syntactic_unit_3, filtered_context, window_size=4)
assert len(cooccur_context_3_3) == 6
assert 'Compatibility' in cooccur_context_3_3
assert 'systems' in cooccur_context_3_3
assert 'linear' in cooccur_context_3_3
assert 'set' in cooccur_context_3_3
assert 'natural' in cooccur_context_3_3
assert 'numbers' in cooccur_context_3_3
syntactic_unit_4 = 'numbers'
cooccur_context_4_1 = _get_cooccurs_from_single_context(syntactic_unit_4,filtered_context)
assert len(cooccur_context_4_1) == 2
assert 'set' in cooccur_context_4_1
assert 'natural' in cooccur_context_4_1
def test_get_cooccurs(self):
filtered_context_corpus = [['Compatibility', 'systems', 'linear', 'constraints', 'set', 'natural', 'numbers'],
['criteria', 'corresponding', 'algorithms', 'minimal', 'supporting', 'set',
'solutions', 'solving', 'types', 'systems', 'systems', 'mixed', 'types']]
syntactic_unit_1 = 'systems'
all_cooccur_context_1_1 = _get_cooccurs(syntactic_unit_1, filtered_context_corpus)
assert len(all_cooccur_context_1_1) == 7
assert 'Compatibility' in all_cooccur_context_1_1
assert 'linear' in all_cooccur_context_1_1
assert 'constraints' in all_cooccur_context_1_1
assert 'solving' in all_cooccur_context_1_1
assert 'types' in all_cooccur_context_1_1
assert 'mixed' in all_cooccur_context_1_1
assert 'systems' in all_cooccur_context_1_1
syntactic_unit_2 = 'numbers'
all_cooccur_context_2_1 = _get_cooccurs(syntactic_unit_2, filtered_context_corpus)
assert len(all_cooccur_context_2_1) == 2
assert 'set' in all_cooccur_context_2_1
assert 'natural' in all_cooccur_context_2_1
syntactic_unit_3 = 'set'
all_cooccur_context_3_1 = _get_cooccurs(syntactic_unit_3, filtered_context_corpus, window_size=1)
assert len(all_cooccur_context_3_1) == 4
assert 'constraints' in all_cooccur_context_3_1
assert 'natural' in all_cooccur_context_3_1
assert 'supporting' in all_cooccur_context_3_1
assert 'solutions' in all_cooccur_context_3_1
all_cooccur_context_3_2 = _get_cooccurs(syntactic_unit_3, filtered_context_corpus, window_size=2)
assert len(all_cooccur_context_3_2) == 8
assert 'linear' in all_cooccur_context_3_2
assert 'constraints' in all_cooccur_context_3_2
assert 'natural' in all_cooccur_context_3_2
assert 'numbers' in all_cooccur_context_3_2
assert 'minimal' in all_cooccur_context_3_2
assert 'supporting' in all_cooccur_context_3_2
assert 'solutions' in all_cooccur_context_3_2
assert 'solving' in all_cooccur_context_3_2
syntactic_unit_4 = 'criteria'
all_cooccur_context_4_1 = _get_cooccurs(syntactic_unit_4, filtered_context_corpus)
assert len(all_cooccur_context_4_1) == 2
assert 'corresponding' in all_cooccur_context_4_1
assert 'algorithms' in all_cooccur_context_4_1
def test_get_cooccurs_with_raw_context(self):
all_tokenised_context=[['Upper', 'bounds', 'for', 'components', 'of', 'a', 'minimal', 'set', 'of',
'solutions', 'and', 'algorithms', 'of', 'construction', 'of', 'minimal',
'generating', 'sets', 'of', 'solutions', 'for', 'all', 'types', 'of', 'systems',
'are', 'given', '.']]
filtered_context_corpus = ['Upper', 'bounds', 'components', 'minimal', 'solutions', 'algorithms',
'construction', 'minimal', 'generating', 'sets', 'solutions', 'types',
'systems']
syntactic_unit_1 = 'components'
all_cooccur_context_1_1 = _get_cooccurs(syntactic_unit_1, all_tokenised_context,
all_filtered_context_tokens=filtered_context_corpus)
#print("'", syntactic_unit_1, "' cooccurs: ", all_cooccur_context_1_1)
assert len(all_cooccur_context_1_1) == 1
assert 'bounds' in all_cooccur_context_1_1
#example with two occurrences in one context
syntactic_unit_2 = 'solutions'
all_cooccur_context_2_1 = _get_cooccurs(syntactic_unit_2, all_tokenised_context,
all_filtered_context_tokens=filtered_context_corpus)
#print("'", syntactic_unit_2, "' cooccurs: ", all_cooccur_context_2_1)
assert len(all_cooccur_context_2_1) == 2, "'solutions' has two occcurrences in current context. " \
"It should have two co-occurred words in two places."
assert 'algorithms' in all_cooccur_context_2_1
assert 'sets' in all_cooccur_context_2_1
def test_build_vertices_representations(self):
#original_tokenised_text = ['Here', 'are', 'details', 'from', 'the', '13th', 'Rail', 'Steel',
# 'Campaign','.', 'I', 'have', 'checked', 'the', 'Hydrogen', 'values',
# 'reported', 'to', 'you', 'by', 'our', 'IBM', 'mainframe', 'messages', '.']
filtered_context = ['details', 'rail', 'steel', 'campaign', 'hydrogen',
'values', 'ibm', 'mainframe']
#cooccurrence window size
window_size = 2
vertices = _build_vertices_representations(filtered_context, conn_with_original_ctx=False, window_size=window_size)
assert 8 == len(vertices)
for i in range(0, len(vertices)):
vertex = vertices[i]
if 'rail' == vertex.word_type:
rail_vertex = vertex
if 'ibm' == vertex.word_type:
ibm_vertex = vertex
if 'mainframe' == vertex.word_type:
mainframe_vertex = vertex
if 'hydrogen' == vertex.word_type:
hydrogen_vertex = vertex
assert len(rail_vertex.co_occurs) == 3
assert 'details' in rail_vertex.co_occurs
assert 'steel' in rail_vertex.co_occurs
assert 'campaign' in rail_vertex.co_occurs
assert len(ibm_vertex.co_occurs) == 3
assert 'mainframe' in ibm_vertex.co_occurs
assert 'values' in ibm_vertex.co_occurs
assert 'hydrogen' in ibm_vertex.co_occurs
assert len(mainframe_vertex.co_occurs) == 2
assert 'values' in mainframe_vertex.co_occurs
assert 'ibm' in mainframe_vertex.co_occurs
assert len(hydrogen_vertex.co_occurs) == 4
assert 'steel' in hydrogen_vertex.co_occurs
assert 'ibm' in hydrogen_vertex.co_occurs
assert 'values' in hydrogen_vertex.co_occurs
assert 'ibm' in hydrogen_vertex.co_occurs
def test_build_cooccurrence_graph(self):
# example abstract taken from [Mihalcea04]
example_abstract = "Compatibility of systems of linear constraints over the set of natural numbers. " \
"Criteria of compatibility of a system of linear Diophantine equations, strict inequations, " \
"and nonstrict inequations are considered. Upper bounds for components of a minimal set of " \
"solutions and algorithms of construction of minimal generating sets of solutions for all " \
"types of systems are given. These criteria and the corresponding algorithms for " \
"constructing a minimal supporting set of solutions can be used in solving all the " \
"considered types systems and systems of mixed types."
syntactic_filtered_context = preprocessing(example_abstract)
cooccurrence_graph, original_tokenised_context = build_cooccurrence_graph(syntactic_filtered_context, conn_with_original_ctx=False)
#print("len(cooccurrence_graph.nodes()): ", len(cooccurrence_graph.nodes()))
assert 25 == len(cooccurrence_graph.nodes())
pr = nx.pagerank(cooccurrence_graph, tol=0.0001)
#import matplotlib.pyplot as plt
#nx.draw_networkx(cooccurrence_graph, pos=None, arrows=True, with_labels=True)
#plt.show()
pr_counter = Counter(pr)
top_t_vertices = pr_counter.most_common(10)
print("top t vertices: ", top_t_vertices)
assert 'set' == top_t_vertices[0][0]
assert 'minimal' == top_t_vertices[1][0]
assert 'solutions' == top_t_vertices[2][0]
assert 'linear' == top_t_vertices[3][0]
assert 'systems' == top_t_vertices[4][0]
assert 'algorithms' == top_t_vertices[5][0]
assert 'inequations' == top_t_vertices[6][0]
assert 'strict' == top_t_vertices[7][0]
assert 'types' == top_t_vertices[8][0]
assert 'equations' == top_t_vertices[9][0]
def test_syntactic_filtering_with_custom_filter(self):
tagged_abstract_tokens = [[('Compatibility', 'NN'), ('of', 'IN'), ('systems', 'NNS'), ('of', 'IN'),
('linear', 'JJ'), ('constraints', 'NNS'), ('over', 'IN'), ('the', 'DT'),
('set', 'NN'), ('of', 'IN'), ('natural', 'JJ'), ('numbers', 'NNS'),
('.', '.'), ('Criteria', 'NNP'), ('of', 'IN'), ('compatibility', 'NN'),
('of', 'IN'), ('a', 'DT'), ('system', 'NN'), ('of', 'IN'), ('linear', 'JJ'),
('Diophantine', 'NNP'), ('equations', 'NNS'), (',', ','), ('strict', 'JJ'),
('inequations', 'NNS'), (',', ','), ('and', 'CC'), ('nonstrict', 'JJ'),
('inequations', 'NNS'), ('are', 'VBP'), ('considered', 'VBN'), ('.', '.')]]
custom_filter = lambda t : filter(lambda a: a[1] == 'NNS' or a[1] == 'NNP' or a[1] == 'NN'
or a[1] == 'JJ' or a[1] == 'VBN', t)
syntactic_units = _syntactic_filter(tagged_abstract_tokens, pos_filter=custom_filter)
syntactic_units = list(syntactic_units)
assert len(syntactic_units) == 1
print("syntactic_units filtered with custom filter from pre-tagged text:")
print(syntactic_units)
print("len(syntactic_units): ", len(syntactic_units))
assert len(syntactic_units[0]) == 18, "filtered context token size should be 18."
assert ('of', 'IN') not in syntactic_units[0]
assert ('.', '.') not in syntactic_units[0]
assert ('a', 'DT') not in syntactic_units[0]
assert ('the', 'DT') not in syntactic_units[0]
assert ('and', 'CC') not in syntactic_units[0]
assert ('Compatibility', 'NN') in syntactic_units[0]
assert ('linear', 'JJ') in syntactic_units[0]
assert ('considered', 'VBN') in syntactic_units[0]
def test_term_betweeness_ranking_via_cooccur_graph(self):
example_abstract = "Compatibility of systems of linear constraints over the set of natural numbers. " \
"Criteria of compatibility of a system of linear Diophantine equations, strict inequations, " \
"and nonstrict inequations are considered. Upper bounds for components of a minimal set of " \
"solutions and algorithms of construction of minimal generating sets of solutions for all " \
"types of systems are given. These criteria and the corresponding algorithms for " \
"constructing a minimal supporting set of solutions can be used in solving all the " \
"considered types systems and systems of mixed types."
preprocessed_corpus_context = preprocessing(example_abstract)
cooccurrence_graph, original_tokenised_context = build_cooccurrence_graph(preprocessed_corpus_context)
betweenness = nx.betweenness_centrality(cooccurrence_graph)
#nx.draw_networkx(cooccurrence_graph, pos=None, arrows=True, with_labels=True)
#plt.show()
btweeness_ranked_terms = sort_dict_by_value(betweenness)
print("ranked terms via betweenness: ", btweeness_ranked_terms)
btweeness_ranked_terms = list(btweeness_ranked_terms)
assert "linear" == btweeness_ranked_terms[0]
assert "systems" == btweeness_ranked_terms[1]
assert "equations" == btweeness_ranked_terms[2]
assert "strict" == btweeness_ranked_terms[3]
assert "set" == btweeness_ranked_terms[4]
#assert "inequations" == btweeness_ranked_terms[5]
#assert "compatibility" == btweeness_ranked_terms[6]
def test_is_top_t_vertices_connection(self):
top_t_vertices = [('numbers', 1.46), ('inequations', 1.45), ('linear', 1.29),
('diophantine', 1.28), ('upper', 0.99), ('bounds', 0.99), ('strict', 0.77)]
term_candidate_1 = "linear constrains"
result_term_candidate_1 = _is_top_t_vertices_connection(term_candidate_1, top_t_vertices)
        assert result_term_candidate_1 is True, "'"+term_candidate_1+"' is a top T vertex connection"
term_candidate_2 = "linear diophantine equations"
result_term_candidate_2 = _is_top_t_vertices_connection(term_candidate_2, top_t_vertices)
        assert result_term_candidate_2 is True, "'"+term_candidate_2+"' is a top T vertex connection"
term_candidate_3 = "natural numbers"
result_term_candidate_3 = _is_top_t_vertices_connection(term_candidate_3, top_t_vertices)
        assert result_term_candidate_3 is True, "'"+term_candidate_3+"' is a top T vertex connection"
term_candidate_4 = "nonstrict inequations"
result_term_candidate_4 = _is_top_t_vertices_connection(term_candidate_4, top_t_vertices)
assert result_term_candidate_4 is True, "'"+term_candidate_4+"' is a top T vertex connection"
term_candidate_5 = "strict inequations"
result_term_candidate_5 = _is_top_t_vertices_connection(term_candidate_5, top_t_vertices)
assert result_term_candidate_5 is True, "'"+term_candidate_5+"' is a top T vertex connection"
term_candidate_6 = "upper bounds"
result_term_candidate_6 = _is_top_t_vertices_connection(term_candidate_6, top_t_vertices)
assert result_term_candidate_6 is True, "'"+term_candidate_6+"' is a top T vertex connection"
term_candidate_7 = "minimal generating sets"
result_term_candidate_7 = _is_top_t_vertices_connection(term_candidate_7, top_t_vertices)
assert result_term_candidate_7 is False, "'"+term_candidate_7+"' is NOT a top T vertex connection"
term_candidate_8 = "solutions"
result_term_candidate_8 = _is_top_t_vertices_connection(term_candidate_8, top_t_vertices)
assert result_term_candidate_8 is False, "'"+term_candidate_8+"' is NOT a top T vertex connection"
term_candidate_9 = "types systems"
result_term_candidate_9 = _is_top_t_vertices_connection(term_candidate_9, top_t_vertices)
assert result_term_candidate_9 is False, "'"+term_candidate_9+"' is NOT a top T vertex connection"
term_candidate_10 = "algorithms"
result_term_candidate_10 = _is_top_t_vertices_connection(term_candidate_10, top_t_vertices)
assert result_term_candidate_10 is False, "'"+term_candidate_10+"' is NOT a top T vertex connection"
def test_collapse_adjacent_keywords(self):
weighted_keywords = {'sets': 0.03472, 'supporting': 0.03448, 'compatibility': 0.04089,
'components': 0.00643, 'minimal': 0.06524, 'algorithms': 0.05472, 'inequations': 0.04641,
'corresponding': 0.02194, 'numbers': 0.02379, 'systems': 0.083597, 'constraints': 0.02148,
'linear': 0.08849, 'natural': 0.040847, 'diophantine': 0.0370565, 'mixed': 0.03591,
'equations': 0.054968, 'strict': 0.041742, 'set': 0.066734, 'construction': 0.03580,
'system': 0.02148, 'types': 0.03591, 'criteria': 0.02381, 'upper': 0.00643,
'nonstrict': 0.026167, 'solutions': 0.050879}
original_tokenised_text= ['compatibility', 'of', 'systems', 'of', 'linear', 'constraints', 'over',
'the', 'set', 'of', 'natural', 'numbers', '.', 'criteria', 'of', 'compatibility',
'of', 'a', 'system', 'of', 'linear', 'diophantine', 'equations', ',',
'strict', 'inequations', ',', 'and', 'nonstrict', 'inequations', 'are',
'considered', '.', 'upper', 'bounds', 'for', 'components', 'of', 'a',
'minimal', 'set', 'of', 'solutions', 'and', 'algorithms', 'of',
'construction', 'of', 'minimal', 'generating', 'sets', 'of', 'solutions',
'for', 'all', 'types', 'of', 'systems', 'are', 'given', '.', 'these',
'criteria', 'and', 'the', 'corresponding', 'algorithms', 'for', 'constructing',
'a', 'minimal', 'supporting', 'set', 'of', 'solutions', 'can', 'be', 'used',
'in', 'solving', 'all', 'the', 'considered', 'types', 'systems', 'and',
'systems', 'of', 'mixed', 'types', '.']
key_terms = _collapse_adjacent_keywords(weighted_keywords, original_tokenised_text)
print("key terms collapsed from context: ", key_terms)
assert len(key_terms) == 29
assert key_terms[0][0] == 'compatibility'
assert key_terms[1][0] == 'systems'
assert key_terms[2][0] == 'linear'
assert key_terms[2][1] == 'constraints'
assert key_terms[3][0] == 'set'
assert key_terms[4][0] == 'natural'
assert key_terms[4][1] == 'numbers'
assert key_terms[5][0] == 'criteria'
S0021999113005652_weighted_keywords = {'degradation': 0.03048, 'future': 0.004573, 'result': 0.004573,
'exchange': 0.03367, 'progress': 0.004573, 'important': 0.03048,
'modelling': 0.030487, 'extensive': 0.03048, 'reynolds': 0.02551,
'figure': 0.004573170731707318, 'datum': 0.004573, 'impact': 0.03048,
'study': 0.00457, 'function': 0.004573, 'environmental': 0.0304878,
'effect': 0.030487, 'air': 0.03070, 'flow': 0.016393,
'schmidt': 0.02551, 'fig': 0.030487, 'turbulent': 0.004573,
'rate': 0.024854, 'chemical': 0.03582, 'number': 0.036786,
'interface': 0.0045731, 'reaction': 0.047672, 'depict': 0.0304878,
'practical': 0.03048, 'interesting': 0.004573,
'investigation': 0.0304878, 'concentration': 0.0304878,
'worth': 0.0045731, 'increase': 0.04951, 'bulk': 0.00457,
'water': 0.055614, 'efficiency': 0.015095, 'equilibrium': 0.030487,
'product': 0.030487, 'aquarium': 0.0248545,
'by(24)cb⁎ =∫01〈cb⁎〉(z⁎)dz⁎': 0.030487, 'acidification': 0.016393,
'gas': 0.018886, 'information': 0.03048}
S0021999113005652_tokenised_text = ['it', 'be', 'interesting', 'to', 'quantify', 'the', 'effect',
'of', 'the', 'schmidt', 'number', 'and', 'the', 'chemical',
'reaction', 'rate', 'on', 'the', 'bulk', '-', 'mean', 'concentration',
'of', 'b', 'in', 'water', '.', 'the', 'datum', 'could', 'present',
'important', 'information', 'on', 'evaluate', 'the', 'environmental',
'impact', 'of', 'the', 'degradation', 'product', 'of', 'b', ',',
'as', 'well', 'as', 'acidification', 'of', 'water', 'by', 'the',
'chemical', 'reaction', '.', 'here', ',', 'the', 'bulk', '-',
'mean', 'concentration', 'of', 'b', 'be', 'define',
'by(24)cb⁎ =∫01〈cb⁎〉(z⁎)dz⁎', 'fig', '.', '15', 'depict', 'the',
'effect', 'of', 'the', 'schmidt', 'and', 'the', 'chemical',
'reaction', 'rate', 'on', 'the', 'bulk', '-', 'mean',
'concentration', 'cb⁎ .', 'it', 'be', 'worth', 'to', 'mention',
'here', 'that', 'the', 'bulk', '-', 'mean', 'concentration', 'of',
'b', 'reach', 'approximately', '0.6', 'as', 'the', 'chemical',
'reaction', 'rate', 'and', 'the', 'schmidt', 'number', 'increase',
'to', 'infinite', ',', 'and', 'the', 'concentration', 'be',
'small', 'than', 'the', 'equilibrium', 'concentration', 'of', 'a',
'at', 'the', 'interface', '.', 'this', 'figure', 'indicate',
'that', 'progress', 'of', 'the', 'chemical', 'reaction', 'be',
'somewhat', 'interfere', 'by', 'turbulent', 'mix', 'in', 'water',
',', 'and', 'the', 'efficiency', 'of', 'the', 'chemical',
'reaction', 'be', 'up', 'to', 'approximately', '60', '%', '.',
'the', 'efficiency', 'of', 'the', 'chemical', 'reaction', 'in',
'water', 'will', 'be', 'a', 'function', 'of', 'the', 'reynolds',
'number', 'of', 'the', 'water', 'flow', ',', 'and', 'the',
'efficiency', 'could', 'increase', 'as', 'the', 'reynolds',
'number', 'increase', '.', 'we', 'need', 'an', 'extensive',
'investigation', 'on', 'the', 'efficiency', 'of', 'the', 'aquarium',
'chemical', 'reaction', 'in', 'the', 'near', 'future', 'to', 'extend',
'the', 'result', 'of', 'this', 'study', 'further', 'to', 'establish',
'practical', 'modelling', 'for', 'the', 'gas', 'exchange',
'between', 'air', 'and', 'water', '.']
S0021999113005652_key_terms = _collapse_adjacent_keywords(S0021999113005652_weighted_keywords, S0021999113005652_tokenised_text)
print("S0021999113005652_key_terms: ", S0021999113005652_key_terms)
assert len(S0021999113005652_key_terms) == 57
assert S0021999113005652_key_terms[0][0] == "interesting"
assert S0021999113005652_key_terms[1][0] == "effect"
assert S0021999113005652_key_terms[2][0] == "schmidt"
assert S0021999113005652_key_terms[2][1] == "number"
assert S0021999113005652_key_terms[3][0] == "chemical"
assert S0021999113005652_key_terms[3][1] == "reaction"
assert S0021999113005652_key_terms[3][2] == "rate"
assert S0021999113005652_key_terms[4][0] == "bulk"
assert S0021999113005652_key_terms[5][0] == "concentration"
assert S0021999113005652_key_terms[6][0] == "water"
assert S0021999113005652_key_terms[7][0] == "datum"
assert S0021999113005652_key_terms[8][0] == "important"
assert S0021999113005652_key_terms[8][1] == "information"
assert S0021999113005652_key_terms[9][0] == "environmental"
assert S0021999113005652_key_terms[9][1] == "impact"
assert S0021999113005652_key_terms[16][0] == "by(24)cb⁎ =∫01〈cb⁎〉(z⁎)dz⁎"
assert S0021999113005652_key_terms[16][1] == "fig"
def test_get_longer_terms(self):
candidate_term1 = ["real", "time"]
candidate_term2 = ["floating", "point"]
longer_terms = [["real", "time", "clock"],
["real", "time", "expert", "system"],
["real", "time", "image", "generation"],
["real", "time", "output"],
["real", "time", "system"],
["floating", "point", "arithmetic"],
["floating", "point", "constant"],
["floating", "point", "operation"],
["floating", "point", "routine"]]
candidate_term1_longer_terms = GCValue._get_longer_terms(candidate_term1, longer_terms)
assert len(candidate_term1_longer_terms) == 5
assert candidate_term1_longer_terms == [['real', 'time', 'clock'],
['real', 'time', 'expert', 'system'],
['real', 'time', 'image', 'generation'],
['real', 'time', 'output'],
['real', 'time', 'system']]
candidate_term2_longer_terms = GCValue._get_longer_terms(candidate_term2, longer_terms)
assert len(candidate_term2_longer_terms) == 4
assert candidate_term2_longer_terms == [["floating", "point", "arithmetic"],
["floating", "point", "constant"],
["floating", "point", "operation"],
["floating", "point", "routine"]]
#gc_value = GCValue()
#gc_value.weighing({"real": 1.0, "time":1.2, "clock":2.1, "expert":3.1, "system":4.1, "image":1.12,
# "generation":1.4, "output":2.1, "floating":0.3, "point": 0.8, "arithmetic": 0.3},
# longer_terms)
@ignore_warnings
def test_keywords_extraction(self):
example_abstract = "Compatibility of systems of linear constraints over the set of natural numbers. " \
"Criteria of compatibility of a system of linear Diophantine equations, strict inequations, " \
"and nonstrict inequations are considered. Upper bounds for components of a minimal set of " \
"solutions and algorithms of construction of minimal generating sets of solutions for all " \
"types of systems are given. These criteria and the corresponding algorithms for " \
"constructing a minimal supporting set of solutions can be used in solving all the " \
"considered types systems and systems of mixed types."
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="sum")
print("extracted keywords:"+ str(results))
print("top_vertices: ", top_vertices)
assert 13 == len(results)
term_list = [term[0] for term in results]
assert "linear diophantine equations" == term_list[0]
assert "minimal supporting set" == term_list[1]
assert "minimal set" == term_list[2]
assert "types systems" == term_list[3]
assert "linear constraints" == term_list[4]
assert "strict inequations" == term_list[5]
assert "systems" == term_list[6]
assert "corresponding algorithms" == term_list[7]
assert "nonstrict inequations" == term_list[8]
assert "set" in term_list
assert "minimal" in term_list
assert "algorithms" in term_list
assert "solutions" in term_list
assert "natural numbers" not in term_list
assert 'linear' == top_vertices[0][0]
assert 'systems' == top_vertices[1][0]
assert 'set' == top_vertices[2][0]
assert 'minimal' == top_vertices[3][0]
assert 'equations' == top_vertices[4][0]
assert 'algorithms' == top_vertices[5][0]
assert 'solutions' == top_vertices[6][0]
assert 'inequations' == top_vertices[7][0]
print("after enabling lemmatization....")
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, lemma=True, weight_comb="sum")
assert 12 == len(results)
print("extracted keywords after lemmatization: ", results)
print("top_vertices after lemmatization: ", top_vertices)
term_list = [term[0] for term in results]
assert "minimal supporting set" == term_list[0]
assert "linear diophantine equation" == term_list[1]
assert "minimal set" == term_list[2]
assert "type system" == term_list[3]
assert "linear constraint" == term_list[4]
assert "strict inequations" == term_list[5]
assert "system" == term_list[6]
assert "corresponding algorithm" == term_list[7]
assert "nonstrict inequations" == term_list[8]
assert 'system' == top_vertices[0][0]
assert 'set' == top_vertices[1][0]
assert 'linear' == top_vertices[2][0]
assert 'algorithm' == top_vertices[3][0]
assert 'equation' == top_vertices[4][0]
assert 'minimal' == top_vertices[5][0]
assert 'inequations' == top_vertices[6][0]
def test_keywords_extraction2(self):
"""
        test keywords extraction with the example nodes (custom syntactic filters and stop word list) from the paper
"""
example_abstract = "Compatibility of systems of linear constraints over the set of natural numbers. " \
"Criteria of compatibility of a system of linear Diophantine equations, strict inequations, " \
"and nonstrict inequations are considered. Upper bounds for components of a minimal set of " \
"solutions and algorithms of construction of minimal generating sets of solutions for all " \
"types of systems are given. These criteria and the corresponding algorithms for " \
"constructing a minimal supporting set of solutions can be used in solving all the " \
"considered types systems and systems of mixed types."
custom_categories = {'NNS', 'NNP', 'NN', 'JJ', 'VBZ'}
        # manually filter a few nodes not appearing in the example given in the original paper
stop_words={'set', 'mixed', 'corresponding', 'supporting'}
ranked_terms, top_vertices = keywords_extraction(example_abstract, top_p = 1, top_t=None, directed=False,
syntactic_categories=custom_categories, stop_words=stop_words, weight_comb="sum")
print("ranked terms with custom filters 1: ", ranked_terms)
print("top_vertices with custom filters 1: ", top_vertices)
top_vertices_names = [top_vertex[0] for top_vertex in top_vertices]
assert 'supporting' not in top_vertices_names
assert 'corresponding' not in top_vertices_names
assert 'mixed' not in top_vertices_names
assert 'set' not in top_vertices_names
assert 'linear diophantine equations' == ranked_terms[0][0]
assert 'linear constraints' == ranked_terms[1][0]
assert 'types systems' == ranked_terms[2][0]
assert 'upper bounds' == ranked_terms[3][0]
assert 'strict inequations' == ranked_terms[4][0]
assert 'natural numbers' == ranked_terms[5][0]
assert 'systems' == ranked_terms[6][0]
assert 'nonstrict inequations' == ranked_terms[7][0]
assert 'compatibility' == ranked_terms[8][0]
assert 'construction' == ranked_terms[9][0] or 'minimal' == ranked_terms[9][0] \
or 'algorithms' == ranked_terms[9][0] or 'solutions' == ranked_terms[9][0] \
or 'sets' == ranked_terms[9][0]
# >>> [('linear diophantine equations', 0.19805), ('linear constraints', 0.12147),
# ('types systems', 0.10493), ('upper bounds', 0.10114), ('strict inequations', 0.09432),
# ('natural numbers', 0.09091), ('systems', 0.08092), ('nonstrict inequations', 0.07741),
# ('compatibility', 0.04666), ('algorithms', 0.04545), ('minimal', 0.04545),
# ('construction', 0.04545), ('sets', 0.04545), ('solutions', 0.04545),
# ('components', 0.03522), ('criteria', 0.02665), ('types', 0.02401), ('system', 0.02348)]
stop_words={'set', 'mixed', 'corresponding', 'supporting', "minimal"}
ranked_terms, top_vertices = keywords_extraction(example_abstract, top_p = 1, top_t=None, directed=False,
syntactic_categories=custom_categories, stop_words=stop_words)
print("ranked terms with custom filters 2: ", ranked_terms)
print("top_vertices with custom filters 2: ", top_vertices)
top_vertices_names = [top_vertex[0] for top_vertex in top_vertices]
assert 'minimal' not in top_vertices_names
assert 'supporting' not in top_vertices_names
assert 'corresponding' not in top_vertices_names
assert 'mixed' not in top_vertices_names
assert 'set' not in top_vertices_names
# [('linear diophantine equations', 0.20748), ('linear constraints', 0.12726), ('types systems', 0.10992),
# ('upper bounds', 0.10596), ('strict inequations', 0.09881), ('natural numbers', 0.09524),
# ('systems', 0.08477), ('nonstrict inequations', 0.0811), ('solutions', 0.06182), ('algorithms', 0.06182),
# ('compatibility', 0.04889), ('components', 0.0369), ('sets', 0.03342), ('construction', 0.03342),
# ('criteria', 0.02792), ('types', 0.02516), ('system', 0.02459)]
def test_keywords_extraction3(self):
"""
test with different pagerank algorithms
"""
example_abstract = "Compatibility of systems of linear constraints over the set of natural numbers. " \
"Criteria of compatibility of a system of linear Diophantine equations, strict inequations, " \
"and nonstrict inequations are considered. Upper bounds for components of a minimal set of " \
"solutions and algorithms of construction of minimal generating sets of solutions for all " \
"types of systems are given. These criteria and the corresponding algorithms for " \
"constructing a minimal supporting set of solutions can be used in solving all the " \
"considered types systems and systems of mixed types."
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="pagerank_numpy", weight_comb="sum")
print("ranked terms computed with 'pagerank_numpy': ", results)
print("top_vertices computed with 'pagerank_numpy': ", top_vertices)
assert len(results) == 13
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="pagerank_scipy", weight_comb="sum")
print("ranked terms computed with 'pagerank_scipy': ", results)
print("top_vertices computed with 'pagerank_scipy': ", top_vertices)
assert len(results) == 13
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="betweenness_centrality", weight_comb="sum")
print("ranked terms computed with 'betweenness_centrality': ", results)
print("top_vertices computed with 'betweenness_centrality': ", top_vertices)
assert len(results) == 11
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="degree_centrality", weight_comb="sum")
print("ranked terms computed with 'degree_centrality': ", results)
print("top_vertices computed with 'degree_centrality': ", top_vertices)
assert top_vertices[0][0] == 'systems'
assert top_vertices[1][0] == 'linear'
assert top_vertices[2][0] == 'minimal' or top_vertices[2][0] == 'set'
        # top 30% results are not stable for degree_centrality
# assert len(results) == 11 or len(results) == 12
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="hits", weight_comb="sum")
print("ranked terms computed with 'hits': ", results)
print("top_vertices computed with 'hits': ", top_vertices)
assert top_vertices[0][0] == 'systems'
assert top_vertices[1][0] == 'linear'
assert top_vertices[2][0] == 'mixed' or top_vertices[2][0] == 'types'
assert top_vertices[4][0] == 'equations'
assert len(results) == 7 or len(results) == 8
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="closeness_centrality", weight_comb="sum")
print("ranked terms computed with 'closeness_centrality': ", results)
print("top_vertices computed with 'closeness_centrality': ", top_vertices)
assert len(results) == 10 or len(results) == 11
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="edge_betweenness_centrality", weight_comb="sum")
print("ranked terms computed with 'edge_betweenness_centrality': ", results)
print("top_vertices computed with 'edge_betweenness_centrality': ", top_vertices)
assert len(results) == 8 or len(results) == 10
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="eigenvector_centrality", max_iter=1000, weight_comb="sum")
print("ranked terms computed with 'eigenvector_centrality': ", results)
print("top_vertices computed with 'eigenvector_centrality': ", top_vertices)
assert len(results) == 7 or len(results) == 8
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="katz_centrality", weight_comb="sum")
print("ranked terms computed with 'katz_centrality': ", results)
print("top_vertices computed with 'katz_centrality': ", top_vertices)
assert len(results) == 11
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="communicability_betweenness",
window=5, weighted=False, weight_comb="sum")
print("ranked terms computed with 'communicability_betweenness': ", results)
print("top_vertices computed with 'communicability_betweenness': ", top_vertices)
print(len(results))
assert results[0][0] == 'minimal supporting set'
assert results[1][0] == 'minimal set'
assert results[2][0] == 'linear diophantine equations'
assert len(results) == 12
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="current_flow_closeness",
weighted=False, weight_comb="sum")
print("ranked terms computed with 'current_flow_closeness': ", results)
print("top_vertices computed with 'current_flow_closeness': ", top_vertices)
print(len(results))
assert len(results) == 9
assert results[0][0] == 'minimal supporting set'
assert results[1][0] == 'minimal set'
assert top_vertices[0][0] == 'set'
assert top_vertices[1][0] == 'minimal'
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="current_flow_betweenness",
weighted=False, weight_comb="sum")
print("ranked terms computed with 'current_flow_betweenness': ", results)
print("top_vertices computed with 'current_flow_betweenness': ", top_vertices)
print(len(results))
assert len(results) == 11
assert top_vertices[0][0] == 'systems'
assert top_vertices[1][0] == 'linear'
assert top_vertices[2][0] == 'set'
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="edge_current_flow_betweenness",
weighted=False, weight_comb="sum")
print("ranked terms computed with 'edge_current_flow_betweenness': ", results)
print("top_vertices computed with 'edge_current_flow_betweenness': ", top_vertices)
print(len(results))
assert len(results) == 10 or len(results) == 11
assert top_vertices[0][0] == 'systems' or top_vertices[0][0] == 'linear'
assert top_vertices[1][0] == 'linear' or top_vertices[1][0] == 'systems'
assert top_vertices[2][0] == 'strict' or top_vertices[2][0] == 'equations'
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="load_centrality",
weighted=False, weight_comb="sum")
print("ranked terms computed with 'load_centrality': ", results)
print("top_vertices computed with 'load_centrality': ", top_vertices)
print(len(results))
assert len(results) == 11
assert results[0][0] == 'linear diophantine equations'
assert results[1][0] == 'linear constraints'
assert results[2][0] == 'systems' or results[2][0] == 'types systems'
assert top_vertices[0][0] == 'linear'
assert top_vertices[1][0] == 'systems'
assert top_vertices[2][0] == 'equations'
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="clustering_coefficient",
weighted=False, weight_comb="sum")
print("ranked terms computed with 'clustering_coefficient': ", results)
print("top_vertices computed with 'clustering_coefficient': ", top_vertices)
assert results[0][0] == 'mixed types'
assert results[1][0] == 'linear diophantine equations'
assert results[2][0] == 'minimal supporting set'
assert len(results) == 9
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, solver="TeRGraph",
weighted=False, weight_comb="sum")
print("ranked terms computed with 'TeRGraph': ", results)
print("top_vertices computed with 'TeRGraph': ", top_vertices)
assert len(results) == 7
assert results[0][0] == 'nonstrict inequations'
assert results[1][0] == 'natural numbers'
assert results[2][0] == 'corresponding algorithms'
coreness_results, coreness_top_vertices = keywords_extraction(example_abstract, top_p = 1, solver="coreness",
weighted=False, weight_comb="sum")
print("ranked terms computed with 'coreness': ", coreness_results)
print("top_vertices computed with 'coreness': ", coreness_top_vertices)
coreness_results_dict = {k:v for k, v in coreness_results}
coreness_top_vertices_dict = {k:v for k, v in coreness_top_vertices}
assert len(coreness_results) == 23
assert coreness_results_dict['minimal supporting set'] == 6
assert coreness_results_dict['linear diophantine equations'] == 6
assert coreness_results_dict['types systems'] == 4
assert coreness_results_dict['minimal set'] == 4
assert coreness_top_vertices_dict['minimal'] == 2
assert coreness_top_vertices_dict['sets'] == 2
assert coreness_top_vertices_dict['diophantine'] == 2
assert coreness_top_vertices_dict['equations'] == 2
assert coreness_top_vertices_dict['criteria'] == 1
assert coreness_top_vertices_dict['upper'] == 0
assert coreness_top_vertices_dict['components'] == 0
mean_coreness_results, coreness_top_vertices = keywords_extraction(example_abstract, top_p = 1, solver="coreness",
weighted=False, weight_comb="avg")
print("ranked term phrases computed with Mean coreness: ", mean_coreness_results)
mean_coreness_results_dict = {k:v for k, v in mean_coreness_results}
assert mean_coreness_results_dict['types'] == 2
assert mean_coreness_results_dict['minimal supporting set'] == 2
assert mean_coreness_results_dict['components'] == 0
assert mean_coreness_results_dict['linear diophantine equations'] == 2
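        # A worked reading (assumption about the "avg" combination): a multi-word
        # term scores the mean coreness of its tokens, e.g.
        # 'minimal supporting set' -> (2 + 2 + 2) / 3 = 2, matching the assert above.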
with self.assertRaises(ValueError) as context:
keywords_extraction(example_abstract, top_p = 0.3, solver="my_pagerank")
self.assertTrue("The node weighting solver supports only pagerank, "
"pagerank_numpy, pagerank_scipy, betweenness_centrality, "
"edge_betweenness_centrality, degree_centrality, closeness_centrality, hits, "
"eigenvector_centrality, katz_centrality, communicability_betweenness, "
"current_flow_closeness, current_flow_betweenness, edge_current_flow_betweenness, "
"load_centrality,clustering_coefficient,TeRGraph,coreness got 'my_pagerank'" in context.exception)
def test_neighborhood_size(self):
example_abstract = "Compatibility of systems of linear constraints over the set of natural numbers. " \
"Criteria of compatibility of a system of linear Diophantine equations, strict inequations, " \
"and nonstrict inequations are considered. Upper bounds for components of a minimal set of " \
"solutions and algorithms of construction of minimal generating sets of solutions for all " \
"types of systems are given. These criteria and the corresponding algorithms for " \
"constructing a minimal supporting set of solutions can be used in solving all the " \
"considered types systems and systems of mixed types."
mean_neighbors_results, mean_neighbors_vertices = keywords_extraction(example_abstract, top_p = 1, solver="neighborhood_size",
weighted=False, weight_comb="avg")
print("ranked term phrases computed with Mean neighborhood size: ", mean_neighbors_results)
mean_neighbors_results_dict = {k:v for k, v in mean_neighbors_results}
mean_neighbors_vertices_dict = {k:v for k, v in mean_neighbors_vertices}
print(len(mean_neighbors_results))
assert len(mean_neighbors_results) == 23
assert mean_neighbors_results_dict["set"] == 4.0
assert mean_neighbors_results_dict["minimal"] == 4.0
assert mean_neighbors_results_dict["minimal set"] == 4.0
assert mean_neighbors_results_dict["linear constraints"] == 3.0
assert mean_neighbors_results_dict["solutions"] == 3.0
assert mean_neighbors_results_dict["nonstrict inequations"] == 1.5
assert mean_neighbors_results_dict["linear diophantine equations"] == 3.33333
print(mean_neighbors_vertices_dict)
assert mean_neighbors_vertices_dict["linear"] == 5
assert mean_neighbors_vertices_dict["set"] == 4
assert mean_neighbors_vertices_dict["systems"] == 4
assert mean_neighbors_vertices_dict["minimal"] == 4
assert mean_neighbors_vertices_dict["algorithms"] == 3
assert mean_neighbors_vertices_dict["compatibility"] == 2
def test_keywords_extraction_with_mwt_scoring(self):
example_abstract = "Compatibility of systems of linear constraints over the set of natural numbers. " \
"Criteria of compatibility of a system of linear Diophantine equations, strict inequations, " \
"and nonstrict inequations are considered. Upper bounds for components of a minimal set of " \
"solutions and algorithms of construction of minimal generating sets of solutions for all " \
"types of systems are given. These criteria and the corresponding algorithms for " \
"constructing a minimal supporting set of solutions can be used in solving all the " \
"considered types systems and systems of mixed types."
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="avg")
print("extracted keywords with avg weighting:"+ str(results))
term_list = [term[0] for term in results]
assert "systems" == term_list[0]
assert "set" == term_list[1]
assert "minimal set" == term_list[2]
assert "minimal" == term_list[3]
assert "linear diophantine equations" == term_list[4]
assert "types systems" == term_list[5]
assert "minimal supporting set" == term_list[6]
assert "linear constraints" == term_list[7]
assert "algorithms" == term_list[8]
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="norm_avg")
print("extracted keywords with norm_avg weighting:"+ str(results))
term_list = [term[0] for term in results]
assert "systems" == term_list[0]
assert "set" == term_list[1]
assert "minimal" == term_list[2]
assert "algorithms" == term_list[3]
assert "solutions" == term_list[4]
assert "minimal set" == term_list[5]
assert "types systems" == term_list[6]
assert "linear constraints" == term_list[7]
assert "strict inequations" == term_list[8]
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="log_norm_avg")
print("extracted keywords with log_norm_avg weighting:"+ str(results))
term_list = [term[0] for term in results]
assert "minimal set" == term_list[0]
assert "types systems" == term_list[1]
assert "linear constraints" == term_list[2]
assert "strict inequations" == term_list[3]
assert "corresponding algorithms" == term_list[4]
assert "linear diophantine equations" == term_list[5]
assert "nonstrict inequations" == term_list[6]
assert "systems" == term_list[7]
assert "minimal supporting set" == term_list[8]
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="gaussian_norm_avg")
print("extracted keywords with gaussian_norm_avg weighting:"+ str(results))
term_list = [term[0] for term in results]
assert "systems" == term_list[0]
assert "set" == term_list[1]
assert "minimal set" == term_list[2]
assert "minimal" == term_list[3]
assert "linear diophantine equations" == term_list[4]
assert "types systems" == term_list[5]
assert "minimal supporting set" == term_list[6]
assert "linear constraints" == term_list[7]
assert "algorithms" == term_list[8]
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="sum")
print("extracted keywords with sum weighting:"+ str(results))
term_list = [term[0] for term in results]
assert "linear diophantine equations" == term_list[0]
assert "minimal supporting set" == term_list[1]
assert "minimal set" == term_list[2]
assert "types systems" == term_list[3]
assert "linear constraints" == term_list[4]
assert "strict inequations" == term_list[5]
assert "systems" == term_list[6]
assert "corresponding algorithms" == term_list[7]
assert "nonstrict inequations" == term_list[8]
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="norm_sum")
print("extracted keywords with norm_sum weighting:"+ str(results))
term_list = [term[0] for term in results]
assert "systems" == term_list[0]
assert "set" == term_list[1]
assert "minimal set" == term_list[2]
assert "minimal" == term_list[3]
assert "linear diophantine equations" == term_list[4]
assert "types systems" == term_list[5]
assert "minimal supporting set" == term_list[6]
assert "linear constraints" == term_list[7]
assert "algorithms" == term_list[8]
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="log_norm_sum")
print("extracted keywords with log_norm_sum weighting:"+ str(results))
term_list = [term[0] for term in results]
assert "minimal set" == term_list[0]
assert "types systems" == term_list[1]
assert "linear diophantine equations" == term_list[2]
assert "linear constraints" == term_list[3]
assert "minimal supporting set" == term_list[4]
assert "strict inequations" == term_list[5]
assert "corresponding algorithms" == term_list[6]
assert "nonstrict inequations" == term_list[7]
assert "systems" == term_list[8]
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="gaussian_norm_sum")
print("extracted keywords with gaussian_norm_sum weighting:"+ str(results))
term_list = [term[0] for term in results]
assert "linear diophantine equations" == term_list[0]
assert "minimal supporting set" == term_list[1]
assert "minimal set" == term_list[2]
assert "types systems" == term_list[3]
assert "linear constraints" == term_list[4]
assert "strict inequations" == term_list[5]
assert "systems" == term_list[6]
assert "corresponding algorithms" == term_list[7]
assert "nonstrict inequations" == term_list[8]
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="max")
print("extracted keywords with max weighting:"+ str(results))
term_list = [term[0] for term in results]
assert "linear constraints" == term_list[0] or "linear diophantine equations" == term_list[0]
assert "linear diophantine equations" == term_list[1] or "linear constraints" == term_list[1]
assert "systems" == term_list[2] or "types systems" == term_list[2]
assert "systems" == term_list[3] or "types systems" == term_list[3]
assert "set" == term_list[4] or "minimal set" == term_list[4] or "minimal supporting set" == term_list[4]
assert "minimal set" == term_list[5] or "set" == term_list[5] or "minimal supporting set" == term_list[5]
assert "minimal supporting set" == term_list[6] or "minimal set" == term_list[6] or "set" == term_list[6]
assert "minimal" == term_list[7]
assert "algorithms" == term_list[8] or "corresponding algorithms" == term_list[8]
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="norm_max")
print("extracted keywords with norm_max weighting:"+ str(results))
term_list = [term[0] for term in results]
assert "systems" == term_list[0]
assert "set" == term_list[1]
assert "minimal" == term_list[2]
assert "algorithms" == term_list[3]
assert "solutions" == term_list[4]
assert "linear constraints" == term_list[5]
assert "types systems" == term_list[6]
assert "minimal set" == term_list[7]
assert "linear diophantine equations" == term_list[8]
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="log_norm_max")
print("extracted keywords with log_norm_max weighting:"+ str(results))
term_list = [term[0] for term in results]
assert "linear constraints" == term_list[0]
assert "types systems" == term_list[1]
assert "minimal set" == term_list[2]
assert "linear diophantine equations" == term_list[3]
assert "corresponding algorithms" == term_list[4]
assert "nonstrict inequations" == term_list[5] or "strict inequations" == term_list[5]
assert "strict inequations" == term_list[6] or "nonstrict inequations" == term_list[6]
assert "minimal supporting set" == term_list[7]
assert "systems" == term_list[8]
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="gaussian_norm_max")
print("extracted keywords with gaussian_norm_max weighting:"+ str(results))
term_list = [term[0] for term in results]
assert "linear constraints" == term_list[0]
assert "linear diophantine equations" == term_list[1]
assert "systems" == term_list[2] or "types systems" == term_list[2]
assert "types systems" == term_list[3] or "systems" == term_list[3]
assert "set" == term_list[4] or "minimal set" == term_list[4]
assert "minimal set" == term_list[5] or "set" == term_list[5]
assert "minimal supporting set" == term_list[6]
assert "minimal" == term_list[7]
assert "algorithms" == term_list[8] or "corresponding algorithms" == term_list[8]
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="len_log_norm_max")
print("extracted keywords with len_log_norm_max weighting:"+ str(results))
term_list = [term[0] for term in results]
assert "linear diophantine equations" == term_list[0]
assert "minimal supporting set" == term_list[1]
assert "linear constraints" == term_list[2]
assert "types systems" == term_list[3]
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="len_log_norm_avg")
print("extracted keywords with len_log_norm_avg weighting:"+ str(results))
term_list = [term[0] for term in results]
assert "linear diophantine equations" == term_list[0]
assert "minimal supporting set" == term_list[1]
assert "minimal set" == term_list[2]
assert "types systems" == term_list[3]
results, top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="len_log_norm_sum")
print("extracted keywords with len_log_norm_sum weighting:"+ str(results))
term_list = [term[0] for term in results]
assert "linear diophantine equations" == term_list[0]
assert "minimal supporting set" == term_list[1]
assert "minimal set" == term_list[2]
assert "types systems" == term_list[3]
assert "linear constraints" == term_list[4]
with self.assertRaises(ValueError) as context:
keywords_extraction(example_abstract, top_p = 0.3, weight_comb="my_norm")
self.assertTrue("Unspported weight_comb 'my_norm'! "
"Options are 'avg', 'norm_avg', 'log_norm_avg', 'gaussian_norm_avg', 'sum', "
"'norm_sum', 'log_norm_sum', 'gaussian_norm_sum', 'max', 'norm_max',"
" 'log_norm_max', 'gaussian_norm_max', "
"'len_log_norm_max', 'len_log_norm_avg', 'len_log_norm_sum'. " in context.exception)
def test_keywords_extraction_with_gcvalue(self):
example_abstract = "Compatibility of systems of linear constraints over the set of natural numbers. " \
"Criteria of compatibility of a system of linear Diophantine equations, strict inequations, " \
"and nonstrict inequations are considered. Upper bounds for components of a minimal set of " \
"solutions and algorithms of construction of minimal generating sets of solutions for all " \
"types of systems are given. These criteria and the corresponding algorithms for " \
"constructing a minimal supporting set of solutions can be used in solving all the " \
"considered types systems and systems of mixed types."
gcvalue_results, gcvalue_top_vertices = keywords_extraction(example_abstract, top_p = 0.3, weight_comb="gcvalue", workers=2)
print("GCValue results: ", gcvalue_results)
def test_keywords_extraction_from_segmented_corpus(self):
example_user_defined_context_corpus = [["Compatibility", "of", "systems", "of", "linear", "constraints",
"over", "the", "set", "of", "natural", "numbers",".",
"Criteria", "of", "compatibility", "of", "a", "system", "of",
"linear", "Diophantine", "equations", ",", "strict", "inequations", ",",
"and", "nonstrict", "inequations", "are", "considered", "."],
["Upper", "bounds", "for", "components", "of", "a", "minimal", "set",
"of", "solutions", "and","algorithms","of", "construction", "of",
"minimal", "generating", "sets", "of", "solutions", "for", "all",
"types", "of", "systems", "are", "given", "."],
["These", "criteria", "and", "the", "corresponding", "algorithms",
"for", "constructing", "a", "minimal", "supporting", "set", "of",
"solutions", "can", "be", "used", "in", "solving", "all", "the",
"considered", "types", "systems", "and", "systems", "of", "mixed",
"types","."]]
from jgtextrank.core import keywords_extraction_from_segmented_corpus
results, top_vertices = keywords_extraction_from_segmented_corpus(example_user_defined_context_corpus, top_p=1, weight_comb="sum")
print("extracted keywords with user defined corpus context:"+ str(results))
print("top_vertices: ", top_vertices)
assert 23 == len(results)
term_list = [term[0] for term in results]
assert "linear diophantine equations" in term_list
assert "minimal supporting set" in term_list
assert "minimal set" in term_list
assert "types systems" in term_list
assert "linear constraints" in term_list
assert "strict inequations" in term_list
assert "systems" in term_list
assert "corresponding algorithms" in term_list
assert "natural numbers" in term_list, "'natural numbers' is given more " \
"weights than the weight with computed in default sentential context."
assert "nonstrict inequations" in term_list
assert "mixed types" in term_list
assert "minimal" in term_list
assert 'set' in term_list
# [('linear diophantine equations', 0.17848), ('minimal supporting set', 0.16067),
# ('minimal set', 0.12723), ('types systems', 0.1143), ('linear constraints', 0.10842),
# ('strict inequations', 0.08805), ('systems', 0.07958), ('corresponding algorithms', 0.07575),
# ('natural numbers', 0.07384), ('nonstrict inequations', 0.07262),
# ('mixed types', 0.06943), ('minimal', 0.06362), ('set', 0.06361),
# ('algorithms', 0.05406), ('solutions', 0.04964), ('criteria', 0.03779),
# ('compatibility', 0.03606), ('construction', 0.0352), ('types', 0.03472),
# ('sets', 0.03405), ('system', 0.02125), ('upper', 0.00644), ('components', 0.00644)]
@ignore_warnings
def test_keywords_extraction_from_tagged_corpus(self):
from jgtextrank.core import keywords_extraction_from_tagged_corpus
pos_tagged_corpus= [[('Compatibility', 'NN'), ('of', 'IN'), ('systems', 'NNS'), ('of', 'IN'),
('linear', 'JJ'), ('constraints', 'NNS'), ('over', 'IN'), ('the', 'DT'),
('set', 'NN'), ('of', 'IN'), ('natural', 'JJ'), ('numbers', 'NNS'), ('.', '.')],
[('Criteria', 'NNS'), ('of', 'IN'), ('compatibility', 'NN'), ('of', 'IN'),
('a', 'DT'), ('system', 'NN'), ('of', 'IN'), ('linear', 'JJ'),
('Diophantine', 'NNP'), ('equations', 'NNS'), (',', ','), ('strict', 'JJ'),
('inequations', 'NNS'), (',', ','), ('and', 'CC'), ('nonstrict', 'JJ'),
('inequations', 'NNS'), ('are', 'VBP'), ('considered', 'VBN'), ('.', '.')],
[('Upper', 'NNP'), ('bounds', 'VBZ'), ('for', 'IN'), ('components', 'NNS'),
('of', 'IN'), ('a', 'DT'), ('minimal', 'JJ'), ('set', 'NN'), ('of', 'IN'),
('solutions', 'NNS'), ('and', 'CC'), ('algorithms', 'NN'), ('of', 'IN'),
('construction', 'NN'), ('of', 'IN'), ('minimal', 'JJ'), ('generating', 'VBG'),
('sets', 'NNS'), ('of', 'IN'), ('solutions', 'NNS'), ('for', 'IN'), ('all', 'DT'),
('types', 'NNS'), ('of', 'IN'), ('systems', 'NNS'), ('are', 'VBP'),
('given', 'VBN'), ('.', '.')],
[('These', 'DT'), ('criteria', 'NNS'), ('and', 'CC'), ('the', 'DT'),
('corresponding', 'JJ'), ('algorithms', 'NN'), ('for', 'IN'),
('constructing', 'VBG'), ('a', 'DT'), ('minimal', 'JJ'), ('supporting', 'VBG'),
('set', 'NN'), ('of', 'IN'), ('solutions', 'NNS'), ('can', 'MD'), ('be', 'VB'),
('used', 'VBN'), ('in', 'IN'), ('solving', 'VBG'), ('all', 'PDT'), ('the', 'DT'),
('considered', 'VBN'), ('types', 'NNS'), ('systems', 'NNS'), ('and', 'CC'),
('systems', 'NNS'), ('of', 'IN'), ('mixed', 'JJ'), ('types', 'NNS'), ('.', '.')]]
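        # Pre-tagged input bypasses the built-in tokenisation and PoS tagging, so
        # the syntactic filter operates directly on the supplied (token, tag) pairs.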
results, top_vertices = keywords_extraction_from_tagged_corpus(pos_tagged_corpus, top_p = 0.3, weight_comb="sum")
print()
print("extracted keywords from pre-tagged content:"+ str(results))
print("top_vertices: ", top_vertices)
print("len(results): ", len(results))
        assert 10 == len(results), "check for possible changes/errors in the solver and its hyperparameters, e.g., num_iter, tol"
term_list = [term[0] for term in results]
assert "linear diophantine equations" in term_list
assert "types systems" in term_list
assert "linear constraints" in term_list
assert "minimal set" in term_list
assert "systems" in term_list
assert "corresponding algorithms" in term_list
assert "algorithms" in term_list
assert "set" in term_list
assert "solutions" in term_list
assert "minimal" in term_list
# after lemmatisation
results, top_vertices = keywords_extraction_from_tagged_corpus(pos_tagged_corpus, top_p = 0.3, lemma=True)
print("extracted keywords from pre-tagged content after lemmatisation: ", results)
print("top_vertices after lemmatisation: ", top_vertices)
assert len(results) == 11
term_list = [term[0] for term in results]
assert "linear diophantine equation" in term_list
assert "type system" in term_list
assert "minimal set" in term_list
assert "linear constraint" in term_list
assert "strict inequations" in term_list
assert "corresponding algorithm" in term_list
assert "system" in term_list
assert "nonstrict inequations" in term_list
assert "natural number" in term_list
assert "algorithm" in term_list
assert "set" in term_list
def test_kea_with_text_formulate(self):
"""
        This tests content containing formulae,
        where naively splitting term units on whitespace may conflict with the original tokeniser
:return:
"""
from jgtextrank.core import _keywords_extraction_from_preprocessed_context
S0021999113005652_textsnippet = [(['it', 'be', 'interesting', 'to', 'quantify', 'the', 'effect', 'of',
'the', 'schmidt', 'number', 'and', 'the', 'chemical', 'reaction',
'rate', 'on', 'the', 'bulk', '-', 'mean', 'concentration', 'of', 'b','in', 'water', '.'],
[('interesting', 'JJ'), ('effect', 'NNS'), ('schmidt', 'NNP'), ('number', 'NN'),
('chemical', 'JJ'), ('reaction', 'NN'), ('rate', 'NN'), ('bulk', 'JJ'),
('concentration', 'NN'), ('water', 'NN')]),
(['the', 'datum', 'could', 'present', 'important', 'information', 'on',
'evaluate', 'the', 'environmental', 'impact', 'of', 'the', 'degradation',
'product', 'of', 'b', ',', 'as', 'well', 'as', 'acidification', 'of',
'water', 'by', 'the', 'chemical', 'reaction', '.'],
[('datum', 'NNS'), ('important', 'JJ'), ('information', 'NN'),
('environmental', 'JJ'), ('impact', 'NNS'), ('degradation', 'NN'), ('product', 'NN'),
('acidification', 'NN'), ('water', 'NN'), ('chemical', 'JJ'), ('reaction', 'NN')]),
(['here', ',', 'the', 'bulk', '-', 'mean', 'concentration', 'of', 'b',
'be', 'define', 'by(24)cb⁎ =∫01〈cb⁎〉(z⁎)dz⁎', 'fig', '.'],
[('bulk', 'JJ'), ('concentration', 'NN'), ('by(24)cb⁎ =∫01〈cb⁎〉(z⁎)dz⁎', 'NNP'), ('fig', 'NNP')]),
(['15', 'depict', 'the', 'effect', 'of', 'the', 'schmidt', 'and', 'the',
'chemical', 'reaction', 'rate', 'on', 'the', 'bulk', '-', 'mean', 'concentration', 'cb⁎ .'],
[('depict', 'NNS'), ('effect', 'NN'), ('schmidt', 'NNP'), ('chemical', 'JJ'),
('reaction', 'NN'), ('rate', 'NN'), ('bulk', 'JJ'), ('concentration', 'NN')]),
(['it', 'be', 'worth', 'to', 'mention', 'here', 'that', 'the', 'bulk', '-', 'mean',
'concentration', 'of', 'b', 'reach', 'approximately', '0.6', 'as', 'the', 'chemical',
'reaction', 'rate', 'and', 'the', 'schmidt', 'number', 'increase', 'to',
'infinite', ',', 'and', 'the', 'concentration', 'be', 'small', 'than', 'the',
'equilibrium', 'concentration', 'of', 'a', 'at', 'the', 'interface', '.'],
[('worth', 'JJ'), ('bulk', 'JJ'), ('concentration', 'NN'), ('chemical', 'JJ'),
('reaction', 'NN'), ('rate', 'NN'), ('schmidt', 'NNP'), ('number', 'NN'),
('increase', 'NN'), ('concentration', 'NN'), ('equilibrium', 'NN'), ('concentration', 'NN'), ('interface', 'NN')]),
(['this', 'figure', 'indicate', 'that', 'progress', 'of', 'the',
'chemical', 'reaction', 'be', 'somewhat', 'interfere', 'by', 'turbulent',
'mix', 'in', 'water', ',', 'and', 'the', 'efficiency', 'of', 'the',
'chemical', 'reaction', 'be', 'up', 'to', 'approximately', '60', '%', '.'],
[('figure', 'NN'), ('progress', 'NN'), ('chemical', 'JJ'), ('reaction', 'NN'),
('turbulent', 'JJ'), ('water', 'NN'), ('efficiency', 'NN'), ('chemical', 'JJ'), ('reaction', 'NN')]),
(['the', 'efficiency', 'of', 'the', 'chemical', 'reaction', 'in', 'water',
'will', 'be', 'a', 'function', 'of', 'the', 'reynolds', 'number', 'of',
'the', 'water', 'flow', ',', 'and', 'the', 'efficiency', 'could', 'increase',
'as', 'the', 'reynolds', 'number', 'increase', '.'],
[('efficiency', 'NN'), ('chemical', 'JJ'), ('reaction', 'NN'), ('water', 'NN'),
('function', 'NN'), ('reynolds', 'NNP'), ('number', 'NN'), ('water', 'NN'),
('flow', 'NN'), ('efficiency', 'NN'), ('reynolds', 'NNP'), ('number', 'NN'), ('increase', 'NNS')]),
(['we', 'need', 'an', 'extensive', 'investigation', 'on', 'the', 'efficiency',
'of', 'the', 'aquarium', 'chemical', 'reaction', 'in', 'the', 'near',
'future', 'to', 'extend', 'the', 'result', 'of', 'this', 'study',
'further', 'to', 'establish', 'practical', 'modelling', 'for', 'the',
'gas', 'exchange', 'between', 'air', 'and', 'water', '.'],
[('extensive', 'JJ'), ('investigation', 'NN'), ('efficiency', 'NN'),
('aquarium', 'JJ'), ('chemical', 'NN'), ('reaction', 'NN'),
('future', 'NN'), ('result', 'NNS'), ('study', 'NN'), ('practical', 'JJ'),
('modelling', 'NN'), ('gas', 'NN'), ('exchange', 'NN'), ('air', 'NN'), ('water', 'NN')])]
results, top_vertices = _keywords_extraction_from_preprocessed_context(S0021999113005652_textsnippet, top_p = 1, weight_comb="sum")
print("extracted keywords from pre-tagged S0021999113005652 text snippet:"+ str(results))
print("top_vertices: ", top_vertices)
print("total key terms", len(results))
assert len(results) == 37
assert results["schmidt number"] == 0.06231
assert results["chemical reaction rate"] == 0.10836
assert results["water"] == 0.05561
assert results["by(24)cb⁎ =∫01〈cb⁎〉(z⁎)dz⁎ fig"] == 0.06098
assert results["water flow"] == 0.07201
assert results["aquarium chemical reaction"] == 0.10836
def test_visualise_cooccurrence_graph(self):
"""
        produce a co-occurrence graph close to the example figure in the original paper
:return: None
"""
example_tokenised_corpus_context = [["Compatibility", "of", "systems", "of", "linear", "constraints",
"over", "the", "set", "of", "natural", "numbers", "." ,
"Criteria", "of", "compatibility", "of", "a", "system", "of",
"linear", "Diophantine", "equations", "strict", "inequations", ",",
"and", "nonstrict", "inequations", "are", "considered",".", "Upper",
"bounds", "for", "components","of", "a", "minimal", "set", "of",
"solutions", "and", "algorithms", "of", "construction", "of",
"minimal", "generating", "sets", "of", "solutions", "for", "all",
"types", "of", "systems", "are", "given", ".", "These", "criteria",
"and", "the", "corresponding", "algorithms", "for",
"constructing", "a", "minimal", "supporting", "set", "of",
"solutions", "can", "be", "used", "in", "solving", "all", "the",
"considered", "types", "systems", "and", "systems", "of", "mixed",
"types", "."]]
# try to include verbs into the graph
custom_categories = {'NNS', 'NNP', 'NN', 'JJ', 'VBZ'}
        # manually filter a few nodes not appearing in the example given in the original paper
stop_words={'set', 'mixed', 'corresponding', 'supporting'}
preprocessed_context = preprocessing_tokenised_context(example_tokenised_corpus_context,
syntactic_categories=custom_categories,
stop_words=stop_words)
cooccurrence_graph, original_tokenised_context = build_cooccurrence_graph(preprocessed_context)
connected_components = list(nx.connected_components(cooccurrence_graph))
print("visualising connected components:", connected_components)
assert len(connected_components) == 3
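        # nx.spring_layout is stochastic (no seed is passed), so the rendered layout
        # varies between runs; only the graph structure asserted above is stable.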
pos = nx.spring_layout(cooccurrence_graph,k=0.20,iterations=20)
nx.draw_networkx(cooccurrence_graph, pos=pos, arrows=True, with_labels=True)
        plt.savefig("test_sample_cooccurrence_graph.png")  # save as png before show(), which clears the current figure
        plt.show()
```
|
{
"source": "jerrygb/mlflow",
"score": 2
}
|
#### File: tracking/_model_registry/test_model_registry_fluent.py
```python
import mock
import pytest
from mlflow import register_model
from mlflow.entities.model_registry import ModelVersion, RegisteredModel
from mlflow.exceptions import MlflowException
from mlflow.protos.databricks_pb2 import INTERNAL_ERROR, RESOURCE_ALREADY_EXISTS
from mlflow.tracking import MlflowClient
def test_register_model_with_runs_uri():
create_model_patch = mock.patch.object(MlflowClient, "create_registered_model",
return_value=RegisteredModel("Model 1"))
get_uri_patch = mock.patch(
"mlflow.store.artifact.runs_artifact_repo.RunsArtifactRepository.get_underlying_uri",
return_value="s3:/path/to/source")
create_version_patch = mock.patch.object(
MlflowClient, "create_model_version",
return_value=ModelVersion(RegisteredModel("Model 1"), 1))
with get_uri_patch, create_model_patch, create_version_patch:
register_model("runs:/run12345/path/to/model", "Model 1")
MlflowClient.create_registered_model.assert_called_once_with("Model 1")
MlflowClient.create_model_version.assert_called_once_with("Model 1", "s3:/path/to/source",
"run12345")
def test_register_model_with_non_runs_uri():
create_model_patch = mock.patch.object(MlflowClient, "create_registered_model",
return_value=RegisteredModel("Model 1"))
create_version_patch = mock.patch.object(
MlflowClient, "create_model_version",
return_value=ModelVersion(RegisteredModel("Model 1"), 1))
with create_model_patch, create_version_patch:
register_model("s3:/some/path/to/model", "Model 1")
MlflowClient.create_registered_model.assert_called_once_with("Model 1")
MlflowClient.create_model_version.assert_called_once_with("Model 1", run_id=None,
source="s3:/some/path/to/model")
def test_register_model_with_existing_registered_model():
create_model_patch = mock.patch.object(MlflowClient, "create_registered_model",
side_effect=MlflowException("Some Message",
RESOURCE_ALREADY_EXISTS))
create_version_patch = mock.patch.object(
MlflowClient, "create_model_version",
return_value=ModelVersion(RegisteredModel("Model 1"), 1))
with create_model_patch, create_version_patch:
register_model("s3:/some/path/to/model", "Model 1")
MlflowClient.create_registered_model.assert_called_once_with("Model 1")
MlflowClient.create_model_version.assert_called_once_with("Model 1", run_id=None,
source="s3:/some/path/to/model")
def test_register_model_with_unexpected_mlflow_exception_in_create_registered_model():
create_model_patch = mock.patch.object(MlflowClient, "create_registered_model",
side_effect=MlflowException("Dunno", INTERNAL_ERROR))
with create_model_patch, pytest.raises(MlflowException):
register_model("s3:/some/path/to/model", "Model 1")
MlflowClient.create_registered_model.assert_called_once_with("Model 1")
def test_register_model_with_unexpected_exception_in_create_registered_model():
create_model_patch = mock.patch.object(MlflowClient, "create_registered_model",
side_effect=Exception("Dunno"))
with create_model_patch, pytest.raises(Exception):
register_model("s3:/some/path/to/model", "Model 1")
MlflowClient.create_registered_model.assert_called_once_with("Model 1")
```
|
{
"source": "JerryGCDing/AI",
"score": 2
}
|
#### File: JerryGCDing/AI/Res-34_fruit.py
```python
import numpy as np
import opencv  # assumption: a local helper module providing file_name() to load images and labels
import random
import tensorflow as tf
import tensorflow.contrib.slim as slim
import os
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
file_path = r'D:\Artificial_Intellegence_Project\Practical\Fruit\fruit\Training\\'
# claim variables
xs = tf.placeholder(tf.float32, [None, 100, 100, 3])
ys = tf.placeholder(tf.float32, [None, 50])
# keep_prob = tf.placeholder(tf.float32)
global_step = tf.Variable(0)
x_image = tf.reshape(xs, [-1, 100, 100, 3])
# weight
def weights(shape):
init = tf.truncated_normal(shape, stddev=0.01)
return tf.Variable(init)
# biases
def biases(shape):
init = tf.constant(0.02, shape=shape)
return tf.Variable(init)
# identity layer
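# An identity block keeps spatial size and channel count unchanged, so the input
# can be added to the block output directly: out = relu(F(x) + x), where F is
# two 3x3 convolutions with batch normalization.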
def identity(inputs, out_size, k_size, stage, block):
x_short_cut = inputs
block_name = 'res'+str(stage)+str(block)
with tf.variable_scope(block_name):
# convolution layer 1
conv1 = slim.conv2d(inputs, out_size, k_size, stride=1, padding='SAME', activation_fn=None)
conv1_output = tf.nn.relu(tf.layers.batch_normalization(conv1, axis=3))
# convolution layer 2
conv2 = slim.conv2d(conv1_output, out_size, k_size, stride=1, padding='SAME', activation_fn=None)
conv2_BN = tf.layers.batch_normalization(conv2, axis=3)
conv2_output = tf.nn.relu(conv2_BN+x_short_cut)
return conv2_output
# convolution layer
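# A downsampling block: the main path uses stride 2, so the shortcut is
# projected through its own stride-2 convolution to match the new shape before
# the elementwise add.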
def conv(inputs, out_size, k_size, stage, block):
x_short_cut = inputs
block_name = 'res'+str(stage)+str(block)
with tf.variable_scope(block_name):
# convolution layer 1
conv1 = slim.conv2d(inputs, out_size, k_size, stride=2, padding='SAME', activation_fn=None)
conv1_output = tf.nn.relu(tf.layers.batch_normalization(conv1, axis=3))
# convolution layer 2
conv2 = slim.conv2d(conv1_output, out_size, k_size, stride=1, padding='SAME', activation_fn=None)
conv2_output = tf.layers.batch_normalization(conv2, axis=3)
# input reshape
input_conv = slim.conv2d(x_short_cut, out_size, k_size, stride=2, padding='SAME', activation_fn=None)
input_reshape = tf.layers.batch_normalization(input_conv, axis=3)
# output
output = tf.nn.relu(input_reshape+conv2_output)
return output
# stage 1
conv1 = slim.conv2d(x_image, 64, 7, stride=2, padding='VALID')
conv1_relu = tf.nn.relu(tf.layers.batch_normalization(conv1))
h1_pool = tf.nn.max_pool(conv1_relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# stage 2
id_2_1 = identity(h1_pool, 64, 3, 2, 1)
id_2_2 = identity(id_2_1, 64, 3, 2, 2)
id_2_3 = identity(id_2_2, 64, 3, 2, 3)
# stage 3
conv_3_1 = conv(id_2_3, 128, 3, 3, 1)
id_3_2 = identity(conv_3_1, 128, 3, 3, 2)
id_3_3 = identity(id_3_2, 128, 3, 3, 3)
id_3_4 = identity(id_3_3, 128, 3, 3, 4)
# stage 4
conv_4_1 = conv(id_3_4, 256, 3, 4, 1)
id_4_2 = identity(conv_4_1, 256, 3, 4, 2)
id_4_3 = identity(id_4_2, 256, 3, 4, 3)
id_4_4 = identity(id_4_3, 256, 3, 4, 4)
id_4_5 = identity(id_4_4, 256, 3, 4, 5)
id_4_6 = identity(id_4_5, 256, 3, 4, 6)
# stage 5
conv_5_1 = conv(id_4_6, 512, 3, 5, 1)
id_5_2 = identity(conv_5_1, 512, 3, 5, 2)
id_5_3 = identity(id_5_2, 512, 3, 5, 3)
# fc layer
h_pool = tf.nn.avg_pool(id_5_3, [1, 2, 2, 1], strides=[1, 1, 1, 1], padding='SAME')
h_pool_flat = tf.reshape(h_pool, [-1, 3*3*512])  # spatial size: 100 -> 47 -> 24 -> 12 -> 6 -> 3, so 3*3*512 features
# stage 6
w_fc1 = weights([3*3*512, 50])
b_fc1 = biases([50])
h_fc1 = tf.matmul(h_pool_flat, w_fc1) + b_fc1
prediction = tf.nn.softmax(h_fc1)
cross_entropy = -tf.reduce_mean(tf.reduce_sum(ys*tf.log(tf.clip_by_value(prediction, 1e-10, 1.0)), reduction_indices=[1]))  # clip to avoid log(0)
# learning rate decay
learning_rate = tf.train.exponential_decay(1e-3, global_step, staircase=True, decay_rate=0.96, decay_steps=20000)
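# With staircase=True this evaluates to 1e-3 * 0.96 ** (global_step // 20000),
# i.e. the rate drops by 4% every 20000 steps.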
# train step
train_step = tf.train.AdamOptimizer(learning_rate, epsilon=0.1).minimize(cross_entropy, global_step=global_step)
sess.run(tf.global_variables_initializer())
correct_prediction = tf.equal(tf.argmax(ys, 1), tf.argmax(prediction, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
img, label = opencv.file_name(file_path)
label_op = sess.run(tf.one_hot(label, depth=50))
index = [i for i in range(len(img))]
random.shuffle(index)
print('training-----------------------------------------------')
for i in range(1500):
    # minimize(..., global_step=global_step) already increments the step counter,
    # so a per-iteration tf.assign is redundant and would also grow the graph.
    if i >= 502:
        a = i % 502
    else:
        a = i
    img_batch = np.array(img)[index[a*50: a*50+50]]
    label_batch = np.array(label_op[index[a*50: a*50+50]])
    sess.run(train_step, feed_dict={xs: img_batch, ys: label_batch})
if (i+1) % 10 == 0:
print((i+1), sess.run(accuracy, feed_dict={xs: img_batch, ys: label_batch}))
# save_path = saver_1.save(sess, check_dir, global_step=0)
file_path = r'D:\Artificial_Intellegence_Project\Practical\Fruit\fruit\Test\\'
correct_prediction = tf.equal(tf.argmax(ys, 1), tf.argmax(prediction, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
img, label = opencv.file_name(file_path)
label_op = sess.run(tf.one_hot(label, depth=50))
print('testing=================================================')
index = [i for i in range(len(img))]  # rebuild the index for the test set; reusing the shuffled training index could go out of range
for a in range(169):
    label_batch = np.array(label_op[index[a*50: a*50+50]])
    img_batch = np.array(img)[index[a*50: a*50+50]]
if (a+1) % 10 == 0:
print(sess.run(accuracy, feed_dict={xs: img_batch, ys: label_batch}))
sess.close()
```
|
{
"source": "jerry-git/daily-dose-of-python",
"score": 3
}
|
#### File: code/10/async.py
```python
import asyncio
import os
from decimal import Decimal
from typing import Optional
from pydantic import condecimal
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlmodel import Field, SQLModel, select
class Restaurant(SQLModel, table=True):
    id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(index=True)
address: str
currency: str
class MenuItem(SQLModel, table=True):
    id: Optional[int] = Field(default=None, primary_key=True)
name: str
price: condecimal(decimal_places=2)
restaurant_id: Optional[int] = Field(default=None, foreign_key="restaurant.id")
async def main() -> None:
db_url = os.environ.get("RESTAURANT_DB_URL", "sqlite+aiosqlite:///my_db")
db_engine = create_async_engine(db_url)
async with db_engine.begin() as conn:
await conn.run_sync(SQLModel.metadata.create_all)
async with AsyncSession(db_engine, expire_on_commit=False) as session:
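        # expire_on_commit=False keeps loaded attributes (e.g. restaurant.id below)
        # readable after commit() without a lazy refresh, which in an async session
        # would otherwise require an extra await.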
# Writing
restaurant = Restaurant(
name="Second best Pizza in town", address="Foo street 1", currency="EUR"
)
session.add(restaurant)
await session.commit()
pizza1 = MenuItem(name="Margherita", price=10.50, restaurant_id=restaurant.id)
pizza2 = MenuItem(name="2xPineapple", price=16.80, restaurant_id=restaurant.id)
session.add_all((pizza1, pizza2))
await session.commit()
# Reading
query = (
select(MenuItem)
.join(Restaurant)
.where(Restaurant.name == "Second best Pizza in town")
)
result = await session.execute(query)
menu_items = result.scalars().all()
assert len(menu_items) == 2
assert menu_items[0] == MenuItem(
id=1, name="Margherita", price=Decimal("10.50"), restaurant_id=restaurant.id
)
if __name__ == "__main__":
asyncio.run(main())
```
#### File: code/11/pytest_test_cases_example.py
```python
from pytest_cases import fixture, parametrize
@fixture
def fixture1() -> str:
return "foo"
@fixture
@parametrize("value", ["bar", "baz"])
def fixture2(value: str) -> str:
return value
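# Note: pytest_cases accepts fixtures themselves as parametrize values, so the
# test below runs three times: once with fixture1 ("foo") and once per
# fixture2 parameter ("bar" and "baz").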
@parametrize("value", [fixture1, fixture2])
def test_just_a_dummy_example(value: str) -> None:
assert value in ("foo", "bar", "baz")
```
#### File: code/5/left.py
```python
from __future__ import annotations  # Circle is referenced in an annotation inside its own class body

from typing import TypeVar
TShape = TypeVar("TShape", bound="Shape")
class Shape:
def set_scale(self: TShape, scale: float) -> TShape:
self.scale = scale
return self
class Circle(Shape):
def set_radius(self, radius: float) -> Circle:
self.radius = radius
return self
```
```
#### File: code/5/right.py
```python
from typing import Self
class Shape:
def set_scale(self, scale: float) -> Self:
self.scale = scale
return self
class Circle(Shape):
def set_radius(self, radius: float) -> Self:
self.radius = radius
return self
```
```
#### File: code/9/ep9.py
```python
import datetime as dt
import random
from typing import Any
from dirty_equals import Contains, IsList, IsNow, IsPositiveFloat
from fastapi import FastAPI
from fastapi.testclient import TestClient
app = FastAPI()
@app.post("/order")
async def create_order() -> dict[str, Any]:
# Just a dummy payload for demonstration
return {
"price": random.random() * 100,
"products": ["milk", "coke", "pasta"],
"created_at": dt.datetime.now().isoformat(),
"created_by": "Jerry",
}
def test_order_api() -> None:
client = TestClient(app)
response = client.post("/order")
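    # dirty_equals matchers compare structurally inside a plain ==, and & composes
    # two checks, so "products" must be a 3-item list that also contains "pasta".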
assert response.json() == {
"price": IsPositiveFloat(),
"products": IsList(length=3) & Contains("pasta"),
"created_at": IsNow(iso_string=True),
"created_by": "Jerry",
}
```
|
{
"source": "jerry-git/logplot",
"score": 2
}
|
#### File: src/logplot/conf.py
```python
from collections import namedtuple
from itertools import combinations
import logging
import yaml
logger = logging.getLogger(__name__)
General = namedtuple(
"General",
[
"log_open_cmd",
"default_entry_style",
"click_hit_tolerance",
"shell",
"plot_title",
"x_axis_name",
"y_axis_name",
"legend_title",
],
)
General.__new__.__defaults__ = (None,) * len(General._fields)
ConfEntry = namedtuple("ConfEntry", ["identifier", "value", "label", "initial_state"])
# fields after 'value' are optional
ConfEntry.__new__.__defaults__ = (None, None)
SpecialConfEntry = namedtuple(
"SpecialConfEntry", ["identifier", "value", "label", "regex", "style"]
)
# fields after 'value' are optional
SpecialConfEntry.__new__.__defaults__ = (None, None, None)
Conf = namedtuple("Conf", ["general", "entries", "special_entries"])
def _load_yaml(path):
with open(path) as f:
return yaml.safe_load(f)
def read(default_path, user_path):
    default = _load_yaml(default_path)
    user = _load_yaml(user_path)
# General settings from default and then override by user defined
general = default.get("general", {})
general.update(user.get("general", {}))
general = General(**general)
# Log entry specific settings only from user
basics = [ConfEntry(**e) for e in user["entries"]]
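    # Warn when one identifier contains another: both entries would presumably
    # match the same log lines, making the mapping from line to entry ambiguous.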
for e1, e2 in combinations(basics, 2):
if e1.identifier in e2.identifier or e2.identifier in e1.identifier:
logger.warning(
"Colliding identifiers: {} {}".format(e1.identifier, e2.identifier)
)
specials = [SpecialConfEntry(**e) for e in user.get("special-entries", [])]
return Conf(general, basics, specials)
```
#### File: logplot/tests/test_conf.py
```python
import json
from os import path as osp
import pytest
from logplot import conf
DATA_DIR = osp.join(osp.dirname(__file__), "data", "conf")
DEFAULT_PATH = osp.join(DATA_DIR, "default{}.yaml")
USER_PATH = osp.join(DATA_DIR, "user{}.yaml")
EXPECTED_JSON_PATH = osp.join(DATA_DIR, "expected_json_{}_{}.txt")
class TestRead:
@pytest.mark.parametrize("default_case, user_case", [(1, 1), (1, 2)])
def test_it_reads(self, default_case, user_case):
res = conf.read(DEFAULT_PATH.format(default_case), USER_PATH.format(user_case))
with open(EXPECTED_JSON_PATH.format(default_case, user_case)) as expected_json:
assert json.dumps(res) == expected_json.read()
```
#### File: logplot/tests/test_log.py
```python
import json
from os import path as osp
import pytest
from logplot import log
from logplot.conf import General, ConfEntry, SpecialConfEntry, Conf
DATA_DIR = osp.join(osp.dirname(__file__), "data", "log")
LOG_PATH = osp.join(DATA_DIR, "log{}.txt")
CONF_PATH = osp.join(DATA_DIR, "conf{}.txt")
EXPECTED_JSON_PATH = osp.join(DATA_DIR, "expected_json_{}_{}.txt")
def conf_from_json_file(path):
with open(path) as f:
general, basics, specials = json.loads(f.read())
entries = [ConfEntry(*e) for e in basics]
special_entries = [SpecialConfEntry(*e) for e in specials]
conf = Conf(General(*general), entries, special_entries)
return conf
class TestParse:
@pytest.mark.parametrize("log_case, conf_case", [(1, 1)])
def test_it_parses(self, log_case, conf_case):
conf = conf_from_json_file(CONF_PATH.format(conf_case))
res = log.parse(LOG_PATH.format(log_case), conf)
with open(EXPECTED_JSON_PATH.format(log_case, conf_case)) as expected_json:
assert json.dumps(res) == expected_json.read()
```
|
{
"source": "jerry-git/pychoir",
"score": 4
}
|
#### File: pychoir/pychoir/strings.py
```python
import re
import sys
from typing import Any, Union
from pychoir import Matcher
if sys.version_info >= (3, 8):  # re.Pattern was added to the re module in Python 3.8
from re import Pattern
else:
Pattern = Any
class StartsWith(Matcher):
"""A Matcher checking that the compared string :code:`.startswith()` the passed string.
:param start: The string the compared value is expected to start with.
Usage:
>>> from pychoir import StartsWith
>>> 'foobar' == StartsWith('foo')
True
>>> 'barbar' == StartsWith('foo')
False
"""
def __init__(self, start: str):
super().__init__()
self.start = start
def _matches(self, other: str) -> bool:
return other.startswith(self.start)
def _description(self) -> str:
return repr(self.start)
class EndsWith(Matcher):
"""A Matcher checking that the compared string :code:`.endswith()` the passed string.
:param end: The string the compared value is expected to end with.
Usage:
>>> from pychoir import EndsWith
>>> 'foobar' == EndsWith('bar')
True
>>> 'foofoo' == EndsWith('bar')
False
"""
def __init__(self, end: str):
super().__init__()
self.end = end
def _matches(self, other: str) -> bool:
return other.endswith(self.end)
def _description(self) -> str:
return repr(self.end)
class MatchesRegex(Matcher):
"""A Matcher checking that the compared string matches the passed regular expression.
:param regex: The regular expression (as a string or a :class:`re.Pattern`).
Usage:
>>> import re
>>> from pychoir import MatchesRegex
>>> 'foobar' == MatchesRegex(r'^f.obar')
True
>>> 'foofoo' == MatchesRegex(re.compile(r'^b[ao]r$'))
False
"""
def __init__(self, regex: Union[str, Pattern]):
super().__init__()
if isinstance(regex, str):
regex = re.compile(regex)
self.regex = regex
def _matches(self, other: str) -> bool:
return re.search(self.regex, other) is not None
def _description(self) -> str:
return repr(self.regex)
```
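Because matchers implement `__eq__`, they can be nested inside plain data structures in test assertions. A small demo combining the three matchers above:
```python
from pychoir import EndsWith, MatchesRegex, StartsWith

response = {"id": "user-42", "email": "jerry@example.com", "file": "report.pdf"}
assert response == {
    "id": StartsWith("user-"),
    "email": MatchesRegex(r"^[^@]+@example\.com$"),
    "file": EndsWith(".pdf"),
}
```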
#### File: pychoir/tests/test_logical.py
```python
from contextlib import contextmanager
from typing import Iterator
from pychoir import (
NE,
AllOf,
And,
AnyOf,
EqualTo,
GreaterThan,
HasLength,
IsInstance,
IsNoneOf,
Not,
Or,
ResultsTrueFor,
)
def test_and():
assert AllOf is And
assert [1] == [And(IsInstance(int), GreaterThan(0))]
assert {'a': [1]} == {'a': And(IsInstance(list), HasLength(GreaterThan(0)))}
assert not {'a': []} == {'a': And(IsInstance(list), HasLength(GreaterThan(0)))}
assert str(And(IsInstance(list), HasLength(GreaterThan(0)))) == 'And(IsInstance(list), HasLength(GreaterThan(0)))'
def test_or():
assert AnyOf is Or
assert [1] == [Or(0, 1)]
assert {'a': [1]} == {'a': [Or(0, 1, 2)]}
assert not [2] == [Or(0, 1)]
assert str([Or(0, 1)]) == '[Or(0, 1)]'
def test_not():
assert IsNoneOf is Not
assert [1] == [IsNoneOf(0, 2, GreaterThan(3))]
assert {'a': [None]} == {'a': IsNoneOf(Ellipsis, 0, [])}
assert not [1] == [IsNoneOf(0, 1, 2)]
assert str([Not(0, 1, 2)]) == '[Not(0, 1, 2)]'
def test_not_or():
@contextmanager
def make_not_or_123() -> Iterator[Not]:
yield Not(Or(EqualTo(1), 2, 3))
with make_not_or_123() as not_or_123:
assert 4 == not_or_123
assert str(not_or_123) == 'Not(Or(EqualTo(1), 2, 3))'
with make_not_or_123() as not_or_123:
assert not 4 != not_or_123
assert str(not_or_123) == 'Not(Or(EqualTo(1)[FAILED for 4], 2, 3)[FAILED for 4])[FAILED for 4]'
with make_not_or_123() as not_or_123:
for i in (1, 2, 3):
assert i != not_or_123
assert str(not_or_123) == 'Not(Or(EqualTo(1), 2, 3))'
with make_not_or_123() as not_or_123:
for i in (1, 2, 3):
assert not i == not_or_123
assert str(not_or_123) == 'Not(Or(EqualTo(1)[FAILED for 1], 2, 3)[FAILED for (1, 2, 3)])[FAILED for (1, 2, 3)]'
def test_not_not():
@contextmanager
def make_not_one() -> Iterator[Not]:
yield Not(1)
with make_not_one() as not_one:
assert 2 == not_one
assert str(not_one) == 'Not(1)'
with make_not_one() as not_one:
assert 1 != not_one
assert str(not_one) == 'Not(1)'
with make_not_one() as not_one:
assert not 2 != not_one
assert str(not_one) == 'Not(1)[FAILED for 2]'
with make_not_one() as not_one:
assert not 1 == not_one
assert str(not_one) == 'Not(1)[FAILED for 1]'
@contextmanager
def make_not_eq_one() -> Iterator[Not]:
yield Not(EqualTo(1))
with make_not_eq_one() as not_eq_one:
assert 2 == not_eq_one
assert str(not_eq_one) == 'Not(EqualTo(1))'
with make_not_eq_one() as not_eq_one:
assert 1 != not_eq_one
assert str(not_eq_one) == 'Not(EqualTo(1))'
with make_not_eq_one() as not_eq_one:
assert not 2 != not_eq_one
assert str(not_eq_one) == 'Not(EqualTo(1)[FAILED for 2])[FAILED for 2]'
with make_not_eq_one() as not_eq_one:
assert not 1 == not_eq_one
assert str(not_eq_one) == 'Not(EqualTo(1)[FAILED for 1])[FAILED for 1]'
@contextmanager
def make_not_not_one() -> Iterator[Not]:
yield Not(Not(1))
with make_not_not_one() as not_not_one:
assert 1 == not_not_one
assert str(not_not_one) == 'Not(Not(1))'
with make_not_not_one() as not_not_one:
assert 2 != not_not_one
assert str(not_not_one) == 'Not(Not(1))'
with make_not_not_one() as not_not_one:
assert not 1 != not_not_one
assert str(not_not_one) == 'Not(Not(1)[FAILED for 1])[FAILED for 1]'
with make_not_not_one() as not_not_one:
assert not 2 == not_not_one
assert str(not_not_one) == 'Not(Not(1)[FAILED for 2])[FAILED for 2]'
@contextmanager
def make_not_ne_one() -> Iterator[Not]:
yield Not(NE(1))
with make_not_ne_one() as not_ne_one:
assert 1 == not_ne_one
assert str(not_ne_one) == 'Not(NotEqualTo(1))'
with make_not_ne_one() as not_ne_one:
assert 2 != not_ne_one
assert str(not_ne_one) == 'Not(NotEqualTo(1))'
with make_not_ne_one() as not_ne_one:
assert not 1 != not_ne_one
assert str(not_ne_one) == 'Not(NotEqualTo(1)[FAILED for 1])[FAILED for 1]'
with make_not_ne_one() as not_ne_one:
assert not 2 == not_ne_one
assert str(not_ne_one) == 'Not(NotEqualTo(1)[FAILED for 2])[FAILED for 2]'
@contextmanager
def make_not_not_eq_one() -> Iterator[Not]:
yield Not(Not(EqualTo(1)))
with make_not_not_eq_one() as not_not_eq_one:
assert 1 == not_not_eq_one
assert str(not_not_eq_one) == 'Not(Not(EqualTo(1)))'
with make_not_not_eq_one() as not_not_eq_one:
assert 2 != not_not_eq_one
assert str(not_not_eq_one) == 'Not(Not(EqualTo(1)))'
with make_not_not_eq_one() as not_not_eq_one:
assert not 1 != not_not_eq_one
assert str(not_not_eq_one) == 'Not(Not(EqualTo(1)[FAILED for 1])[FAILED for 1])[FAILED for 1]'
with make_not_not_eq_one() as not_not_eq_one:
assert not 2 == not_not_eq_one
assert str(not_not_eq_one) == 'Not(Not(EqualTo(1)[FAILED for 2])[FAILED for 2])[FAILED for 2]'
@contextmanager
def make_not_not_eq_one_eq_two() -> Iterator[Not]:
yield Not(Not(EqualTo(1), EqualTo(2)))
with make_not_not_eq_one_eq_two() as not_not_eq_one_eq_two:
assert 1 == not_not_eq_one_eq_two
assert str(not_not_eq_one_eq_two) == 'Not(Not(EqualTo(1), EqualTo(2)))'
with make_not_not_eq_one_eq_two() as not_not_eq_one_eq_two:
assert not 2 != not_not_eq_one_eq_two
assert (str(not_not_eq_one_eq_two)
== 'Not(Not(EqualTo(1), EqualTo(2)[FAILED for 2])[FAILED for 2])[FAILED for 2]')
with make_not_not_eq_one_eq_two() as not_not_eq_one_eq_two:
assert not 1 != not_not_eq_one_eq_two
assert (str(not_not_eq_one_eq_two)
== 'Not(Not(EqualTo(1)[FAILED for 1], EqualTo(2))[FAILED for 1])[FAILED for 1]')
with make_not_not_eq_one_eq_two() as not_not_eq_one_eq_two:
assert 2 == not_not_eq_one_eq_two
assert str(not_not_eq_one_eq_two) == 'Not(Not(EqualTo(1), EqualTo(2)))'
def test_results_true_for():
assert {'a': 1} == {'a': ResultsTrueFor(bool)}
assert ['foobar'] == [ResultsTrueFor(lambda x: x.startswith('foo'))]
assert not {'a': 0} == {'a': ResultsTrueFor(bool)}
assert str({'a': ResultsTrueFor(bool)}) == "{'a': ResultsTrueFor(<class 'bool'>)}"
```
|
{
"source": "jerry-git/slack-thug",
"score": 2
}
|
#### File: slack-thug/tests/conftest.py
```python
from unittest.mock import MagicMock
import tempfile
import os
import pytest
from slack_thug.db import init_db
from slack_thug.app import app
@pytest.fixture
def test_db(monkeypatch):
_, db_uri = tempfile.mkstemp()
monkeypatch.setenv("THUG_SQLITE_URI", db_uri)
init_db()
yield
os.remove(db_uri)
@pytest.fixture
def slack_client(monkeypatch):
monkeypatch.setenv("SLACK_TOKEN", "foo")
c = MagicMock()
monkeypatch.setattr("slack_thug.slack.SlackClient", c)
return c()
@pytest.fixture
def client():
c = app.test_client()
yield c
```
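A hypothetical test showing how these fixtures compose; the "/" route and its behavior are assumptions, not part of the snippet above.
```python
# Hypothetical test; "/" is an assumed route of the Flask app.
def test_app_responds(test_db, slack_client, client):
    # test_db provides an isolated SQLite file, slack_client a mocked
    # Slack API, and client a Flask test client
    response = client.get("/")
    assert response.status_code is not None
```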
|
{
"source": "jerry-git/test-skeleton",
"score": 3
}
|
#### File: test-skeleton/test_skeleton/cli.py
```python
import argparse
def cli():
parser = argparse.ArgumentParser(description='Test skeleton creator')
parser.add_argument('input', type=str, help='filepath of input .py file')
parser.add_argument(
'--save', action='store_true', help='save result as test_<input> file')
return parser.parse_args()
```
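A sketch of how the parsed namespace would be consumed; `main()` is an assumed entry point, not part of the file above.
```python
# Invoked e.g. as: python cli.py my_module.py --save
def main():
    args = cli()
    print(f"input file: {args.input}")
    if args.save:
        print("result will be saved as test_<input>")

if __name__ == "__main__":
    main()
```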
|
{
"source": "jerryhanhuan/leetcode",
"score": 4
}
|
#### File: leetcode/python/palindrome_str.py
```python
class Solution:
    # Check whether a string is a palindrome
def IsPalindromeStr(self, s):
s_len = len(s)
for i in range(0, s_len//2):
if s[i] != s[s_len-1-i]:
return False
return True
    # Find the longest palindromic substring (brute force over all substrings)
def longestPalindrome(self,s):
sub = [s[i:j] for i in range(len(s)) for j in range(i+1, len(s)+1)]
max_str = ""
for i in sub:
if self.IsPalindromeStr(i):
if len(i) > len(max_str):
max_str = i
return max_str
if __name__ == '__main__':
    s = input('Please enter a string: ')
    sopp = Solution()
    if sopp.IsPalindromeStr(s):
        print('%s is a palindrome' % s)
    else:
        print('%s is not a palindrome' % s)
    sub_palind = sopp.longestPalindrome(s)
    print('Longest palindromic substring: %s' % sub_palind)
```
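The brute force above enumerates all O(n^2) substrings and checks each in O(n), so `longestPalindrome` is O(n^3) overall. A common O(n^2) alternative (a sketch, not part of the original solution) expands around each of the 2n-1 possible centers:
```python
def longest_palindrome_expand(s):
    if not s:
        return ""
    best = s[0]
    for center in range(len(s)):
        # odd-length (center, center) and even-length (center, center + 1)
        for left, right in ((center, center), (center, center + 1)):
            while left >= 0 and right < len(s) and s[left] == s[right]:
                if right - left + 1 > len(best):
                    best = s[left:right + 1]
                left -= 1
                right += 1
    return best

assert longest_palindrome_expand("babad") in ("bab", "aba")
```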
#### File: leetcode/python/pow.py
```python
class Solution:
def myPow(self,x,n):
if n == 0:
return 1.0
elif n < 0:
x = 1/x
n = abs(n)
        if n % 2:
            # error: the line below exhausted the recursion depth, because
            # float division (n/2) never reaches the base case
            # return x * self.myPow(x*x, n/2)
            return x * self.myPow(x, n - 1)
        else:
            # integer division keeps n an int and halves the problem
            return self.myPow(x * x, n // 2)
if __name__ == '__main__':
    x = float(input('Please enter a number: '))
    n = int(input('Please enter an exponent: '))
    s = Solution()
r = s.myPow(x,n)
print('result:',r)
```
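With the even branch halving via `n // 2`, the recursion depth is O(log n), but an iterative binary-exponentiation sketch avoids recursion limits entirely:
```python
def my_pow_iter(x: float, n: int) -> float:
    if n < 0:
        x, n = 1 / x, -n
    result = 1.0
    while n:
        if n & 1:   # current bit set: fold x into the answer
            result *= x
        x *= x      # square x for the next bit
        n >>= 1
    return result

assert abs(my_pow_iter(2.0, 10) - 1024.0) < 1e-9
assert abs(my_pow_iter(2.0, -2) - 0.25) < 1e-9
```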
|
{
"source": "jerryhe26/vnpy",
"score": 2
}
|
#### File: gateway/okexo/okexo_gateway.py
```python
import hashlib
import hmac
import sys
import time
import json
import base64
import zlib
from copy import copy
from datetime import datetime, timedelta
from threading import Lock
from urllib.parse import urlencode
from typing import Dict, List
from requests import ConnectionError
from vnpy.event.engine import EventEngine
from vnpy.api.rest import Request, RestClient
from vnpy.api.websocket import WebsocketClient
from vnpy.trader.constant import (
Direction,
Exchange,
OrderType,
Product,
Status,
Interval,
OptionType
)
from vnpy.trader.gateway import BaseGateway
from vnpy.trader.object import (
TickData,
OrderData,
TradeData,
AccountData,
ContractData,
PositionData,
BarData,
OrderRequest,
CancelRequest,
SubscribeRequest,
HistoryRequest
)
REST_HOST: str = "https://www.okex.com"
WEBSOCKET_HOST: str = "wss://real.okex.com:8443/ws/v3"
STATE_OKEXO2VT: Dict[str, Status] = {
"0": Status.NOTTRADED,
"-2": Status.NOTTRADED,
"1": Status.PARTTRADED,
"2": Status.ALLTRADED,
"-1": Status.CANCELLED,
}
ORDERTYPE_OKEXO2VT: Dict[str, OrderType] = {
"0": OrderType.LIMIT,
"1": OrderType.MARKET,
}
SIDE_OKEXO2VT: Dict[str, Direction] = {
"buy": Direction.LONG,
"sell": Direction.SHORT,
}
SIDE_VT2OKEXO: Dict[Direction, str] = {
Direction.LONG: "buy",
Direction.SHORT: "sell",
}
INTERVAL_VT2OKEXO: Dict[Interval, str] = {
Interval.MINUTE: "60",
Interval.HOUR: "3600",
Interval.DAILY: "86400",
}
OPTIONTYPE_OKEXO2VT = {
"C": OptionType.CALL,
"P": OptionType.PUT
}
underlyings: set = set()
class OkexoGateway(BaseGateway):
"""
VN Trader Gateway for OKEX connection.
"""
    default_setting = {
        "API Key": "",
        "Secret Key": "",
        "Passphrase": "",
        "Session Number": 3,
        "Proxy Host": "",
        "Proxy Port": "",
    }
exchanges: List[Exchange] = [Exchange.OKEX]
def __init__(self, event_engine: EventEngine):
"""Constructor"""
super().__init__(event_engine, "OKEXO")
self.rest_api = OkexoRestApi(self)
self.ws_api = OkexoWebsocketApi(self)
self.orders: Dict[str, OrderData] = {}
def connect(self, setting: dict) -> None:
""""""
        key = setting["API Key"]
        secret = setting["Secret Key"]
        passphrase = setting["Passphrase"]
        session_number = setting["Session Number"]
        proxy_host = setting["Proxy Host"]
        proxy_port = setting["Proxy Port"]
if proxy_port.isdigit():
proxy_port = int(proxy_port)
else:
proxy_port = 0
self.rest_api.connect(key, secret, passphrase,
session_number, proxy_host, proxy_port)
self.ws_api.connect(key, secret, passphrase, proxy_host, proxy_port)
def subscribe(self, req: SubscribeRequest) -> None:
""""""
self.ws_api.subscribe(req)
def send_order(self, req: OrderRequest) -> str:
""""""
return self.rest_api.send_order(req)
    def cancel_order(self, req: CancelRequest) -> None:
""""""
self.rest_api.cancel_order(req)
def query_account(self) -> None:
""""""
pass
def query_position(self) -> None:
""""""
pass
def query_history(self, req: HistoryRequest) -> List[BarData]:
""""""
return self.rest_api.query_history(req)
def close(self) -> None:
""""""
self.rest_api.stop()
self.ws_api.stop()
def on_order(self, order: OrderData) -> None:
""""""
self.orders[order.orderid] = order
super().on_order(order)
def get_order(self, orderid: str):
""""""
return self.orders.get(orderid, None)
class OkexoRestApi(RestClient):
"""
OKEXO REST API
"""
def __init__(self, gateway: "OkexoGateway"):
""""""
super().__init__()
self.gateway: OkexoGateway = gateway
self.gateway_name: str = gateway.gateway_name
self.key: str = ""
self.secret: str = ""
self.passphrase: str = ""
self.order_count: int = 10000
self.order_count_lock: Lock = Lock()
self.connect_time: int = 0
def sign(self, request: Request) -> Request:
"""
Generate OKEXo signature.
"""
# Sign
timestamp = get_timestamp()
request.data = json.dumps(request.data)
if request.params:
path = request.path + "?" + urlencode(request.params)
else:
path = request.path
msg = timestamp + request.method + path + request.data
signature = generate_signature(msg, self.secret)
# Add headers
request.headers = {
"OK-ACCESS-KEY": self.key,
"OK-ACCESS-SIGN": signature,
"OK-ACCESS-TIMESTAMP": timestamp,
"OK-ACCESS-PASSPHRASE": self.passphrase,
"Content-Type": "application/json"
}
return request
def connect(
self,
key: str,
secret: str,
passphrase: str,
session_number: int,
proxy_host: str,
proxy_port: int,
) -> None:
"""
Initialize connection to REST server.
"""
self.key = key
self.secret = secret.encode()
self.passphrase = <PASSWORD>
self.connect_time = int(datetime.now().strftime("%y%m%d%H%M%S"))
self.init(REST_HOST, proxy_host, proxy_port)
self.start(session_number)
self.gateway.write_log("REST API启动成功")
self.query_time()
self.query_underlying()
def _new_order_id(self) -> int:
with self.order_count_lock:
self.order_count += 1
return self.order_count
def send_order(self, req: OrderRequest) -> str:
""""""
# Need both offset and direction for sending order.
orderid = f"a{self.connect_time}{self._new_order_id()}"
if req.direction == Direction.LONG:
side = "buy"
else:
side = "sell"
data = {
"client_oid": orderid,
"instrument_id": req.symbol,
"price": str(req.price),
"size": str(int(req.volume)),
"side": side,
}
if req.type == OrderType.MARKET:
data["match_price"] = "1"
else:
data["match_price"] = "0"
order = req.create_order_data(orderid, self.gateway_name)
self.add_request(
"POST",
"/api/option/v3/order",
callback=self.on_send_order,
data=data,
extra=order,
on_failed=self.on_send_order_failed,
on_error=self.on_send_order_error,
)
self.gateway.on_order(order)
return order.vt_orderid
    def cancel_order(self, req: CancelRequest) -> None:
""""""
item = req.symbol.split("-")
underlying = f"{item[0]}-{item[1]}"
path = f"/api/option/v3/cancel_order/{underlying}/{req.orderid}"
self.add_request(
"POST",
path,
callback=self.on_cancel_order,
on_error=self.on_cancel_order_error,
on_failed=self.on_cancel_order_failed,
extra=req
)
    def query_underlying(self) -> None:
""""""
self.add_request(
"GET",
"/api/option/v3/underlying",
callback=self.on_query_underlying
)
    def query_contract(self) -> None:
""""""
for underlying in underlyings:
self.add_request(
"GET",
f"/api/option/v3/instruments/{underlying}",
callback=self.on_query_contract
)
    def query_account(self) -> None:
""""""
for underlying in underlyings:
self.add_request(
"GET",
f"/api/option/v3/accounts/{underlying}",
callback=self.on_query_account
)
    def query_order(self) -> None:
""""""
for underlying in underlyings:
# get waiting orders
self.add_request(
"GET",
f"/api/option/v3/orders/{underlying}?state=0",
callback=self.on_query_order
)
# get part traded orders
self.add_request(
"GET",
f"/api/option/v3/orders/{underlying}?state=1",
callback=self.on_query_order
)
    def query_position(self) -> None:
""""""
for underlying in underlyings:
self.add_request(
"GET",
f"/api/option/v3/{underlying}/position",
callback=self.on_query_position
)
    def query_time(self) -> None:
""""""
self.add_request(
"GET",
"/api/general/v3/time",
callback=self.on_query_time
)
def on_query_underlying(self, data: List[str], request: Request) -> None:
""""""
for underlying in data:
underlyings.add(underlying)
self.gateway.write_log("期权标的信息查询成功")
self.query_contract()
def on_query_contract(self, data: List, request: Request) -> None:
""""""
if not data:
return
for instrument_data in data:
symbol = instrument_data["instrument_id"]
contract = ContractData(
symbol=symbol,
exchange=Exchange.OKEX,
name=symbol,
product=Product.OPTION,
size=float(instrument_data["contract_val"]),
pricetick=float(instrument_data["tick_size"]),
min_volume=float(instrument_data["lot_size"]),
option_strike=int(instrument_data["strike"]),
option_type=OPTIONTYPE_OKEXO2VT[instrument_data["option_type"]],
option_expiry=datetime.strptime(instrument_data["delivery"], "%Y-%m-%dT%H:%M:%S.%fZ"),
option_portfolio=instrument_data["underlying"],
option_index=instrument_data["strike"],
history_data=True,
net_position=True,
gateway_name=self.gateway_name,
)
contract.option_underlying = "_".join([
contract.option_portfolio,
contract.option_expiry.strftime("%Y%m%d")
])
self.gateway.on_contract(contract)
self.gateway.write_log("期权合约信息查询成功")
# Start websocket api after instruments data collected
self.gateway.ws_api.start()
# and query pending orders
self.query_account()
self.query_position()
self.query_order()
def on_query_account(self, data: dict, request: Request) -> None:
""""""
equity = float(data["equity"])
if equity:
account = AccountData(
accountid=data["underlying"],
balance=float(data["equity"]),
frozen=float(data.get("margin_for_unfilled", 0)),
gateway_name=self.gateway_name,
)
self.gateway.on_account(account)
self.gateway.write_log(f"{account.accountid}账户资金查询成功")
def on_query_position(self, data: dict, request: Request) -> None:
""""""
if not data["holding"]:
return
for pos_data in data["holding"]:
pos = PositionData(
symbol=pos_data["instrument_id"],
exchange=Exchange.OKEX,
direction=Direction.NET,
volume=int(pos_data["position"]),
                # frozen volume = total position minus available position
                # (the original subtracted avail_position from itself, always 0)
                frozen=float(pos_data["position"]) - float(pos_data["avail_position"]),
price=float(pos_data["avg_cost"]),
pnl=float(pos_data["realized_pnl"]),
gateway_name=self.gateway_name,
)
self.gateway.on_position(pos)
def on_query_order(self, data: dict, request: Request) -> None:
""""""
for order_data in data["order_info"]:
direction = SIDE_OKEXO2VT[order_data["side"]]
order = OrderData(
symbol=order_data["instrument_id"],
exchange=Exchange.OKEX,
type=ORDERTYPE_OKEXO2VT[order_data["order_type"]],
orderid=order_data["client_oid"],
direction=direction,
traded=int(order_data["filled_qty"]),
price=float(order_data["price"]),
volume=float(order_data["size"]),
time=utc_to_local(order_data["timestamp"]).strftime("%H:%M:%S"),
status=STATE_OKEXO2VT[order_data["state"]],
gateway_name=self.gateway_name,
)
self.gateway.on_order(order)
def on_query_time(self, data: dict, request: Request) -> None:
""""""
server_time = data["iso"]
local_time = datetime.utcnow().isoformat()
msg = f"服务器时间:{server_time},本机时间:{local_time}"
self.gateway.write_log(msg)
def on_send_order_failed(self, status_code: str, request: Request) -> None:
"""
Callback when sending order failed on server.
"""
order = request.extra
order.status = Status.REJECTED
order.time = datetime.now().strftime("%H:%M:%S.%f")
self.gateway.on_order(order)
msg = f"委托失败,状态码:{status_code},信息:{request.response.text}"
self.gateway.write_log(msg)
def on_send_order_error(
self, exception_type: type, exception_value: Exception, tb, request: Request
) -> None:
"""
Callback when sending order caused exception.
"""
order = request.extra
order.status = Status.REJECTED
self.gateway.on_order(order)
# Record exception if not ConnectionError
if not issubclass(exception_type, ConnectionError):
self.on_error(exception_type, exception_value, tb, request)
def on_send_order(self, data: dict, request: Request) -> None:
"""
Websocket will push a new order status
"""
order = request.extra
error_msg = data["error_message"]
if error_msg:
order.status = Status.REJECTED
self.gateway.on_order(order)
self.gateway.write_log(f"委托失败:{error_msg}")
def on_cancel_order_error(
self, exception_type: type, exception_value: Exception, tb, request: Request
) -> None:
"""
Callback when cancelling order failed on server.
"""
# Record exception if not ConnectionError
if not issubclass(exception_type, ConnectionError):
self.on_error(exception_type, exception_value, tb, request)
def on_cancel_order(self, data, request) -> None:
"""
Websocket will push a new order status
"""
pass
def on_cancel_order_failed(self, status_code: int, request: Request) -> None:
"""
If cancel failed, mark order status to be rejected.
"""
req = request.extra
order = self.gateway.get_order(req.orderid)
if order:
order.status = Status.REJECTED
self.gateway.on_order(order)
def on_failed(self, status_code: int, request: Request) -> None:
"""
Callback to handle request failed.
"""
msg = f"请求失败,状态码:{status_code},信息:{request.response.text}"
self.gateway.write_log(msg)
def on_error(
self, exception_type: type, exception_value: Exception, tb, request: Request
) -> None:
"""
        Callback to handle request exceptions.
"""
msg = f"触发异常,状态码:{exception_type},信息:{exception_value}"
self.gateway.write_log(msg)
sys.stderr.write(
self.exception_detail(exception_type, exception_value, tb, request)
)
def query_history(self, req: HistoryRequest) -> List[BarData]:
""""""
buf = {}
end_time = None
for i in range(10):
path = f"/api/option/v3/instruments/{req.symbol}/candles"
# Create query params
params = {
"granularity": INTERVAL_VT2OKEXO[req.interval]
}
if end_time:
params["end"] = end_time
# Get response from server
resp = self.request(
"GET",
path,
params=params
)
# Break if request failed with other status code
if resp.status_code // 100 != 2:
msg = f"获取历史数据失败,状态码:{resp.status_code},信息:{resp.text}"
self.gateway.write_log(msg)
break
else:
data = resp.json()
                if not data:
                    # the original assigned msg here without ever logging it
                    self.gateway.write_log("History data is empty")
                    break
                # rename the loop variable: the original reused "l" as both
                # the row and one of its unpacked fields
                for row in data:
                    ts, o, h, l, c, v, _ = row
dt = utc_to_local(ts)
bar = BarData(
symbol=req.symbol,
exchange=req.exchange,
datetime=dt,
interval=req.interval,
volume=float(v),
open_price=float(o),
high_price=float(h),
low_price=float(l),
close_price=float(c),
gateway_name=self.gateway_name
)
buf[bar.datetime] = bar
begin = data[-1][0]
end = data[0][0]
msg = f"获取历史数据成功,{req.symbol} - {req.interval.value},{begin} - {end}"
self.gateway.write_log(msg)
# Update start time
end_time = begin
index = list(buf.keys())
index.sort()
history = [buf[i] for i in index]
return history
class OkexoWebsocketApi(WebsocketClient):
""""""
def __init__(self, gateway):
""""""
super().__init__()
        self.ping_interval: int = 20  # OKEX drops idle connections after 30 seconds, so ping every 20
self.gateway: OkexoGateway = gateway
self.gateway_name: str = gateway.gateway_name
self.key: str = ""
self.secret: str = ""
self.passphrase: str = ""
self.trade_count: int = 10000
self.connect_time: int = 0
self.callbacks: Dict[str, callable] = {}
self.ticks: Dict[str, TickData] = {}
def connect(
self,
key: str,
secret: str,
passphrase: str,
proxy_host: str,
proxy_port: int
) -> None:
""""""
self.key = key
self.secret = secret.encode()
self.passphrase = <PASSWORD>
self.connect_time = int(datetime.now().strftime("%y%m%d%H%M%S"))
self.init(WEBSOCKET_HOST, proxy_host, proxy_port)
    def unpack_data(self, data) -> dict:
        """Inflate and decode a zlib-compressed websocket frame."""
        return json.loads(zlib.decompress(data, -zlib.MAX_WBITS))
def subscribe(self, req: SubscribeRequest) -> None:
"""
        Subscribe to tick data updates.
"""
tick = TickData(
symbol=req.symbol,
exchange=req.exchange,
name=req.symbol,
datetime=datetime.now(),
gateway_name=self.gateway_name,
)
self.ticks[req.symbol] = tick
channel_ticker = f"option/ticker:{req.symbol}"
channel_depth = f"option/depth5:{req.symbol}"
self.callbacks[channel_ticker] = self.on_ticker
self.callbacks[channel_depth] = self.on_depth
req = {
"op": "subscribe",
"args": [channel_ticker, channel_depth]
}
self.send_packet(req)
def on_connected(self) -> None:
""""""
self.gateway.write_log("Websocket API连接成功")
self.login()
def on_disconnected(self) -> None:
""""""
self.gateway.write_log("Websocket API连接断开")
def on_packet(self, packet: dict) -> None:
""""""
if "event" in packet:
event = packet["event"]
if event == "subscribe":
return
elif event == "error":
msg = packet["message"]
self.gateway.write_log(f"Websocket API请求异常:{msg}")
elif event == "login":
self.on_login(packet)
else:
channel = packet["table"]
data = packet["data"]
callback = self.callbacks.get(channel, None)
if callback:
for d in data:
callback(d)
def on_error(self, exception_type: type, exception_value: Exception, tb) -> None:
""""""
msg = f"触发异常,状态码:{exception_type},信息:{exception_value}"
self.gateway.write_log(msg)
sys.stderr.write(self.exception_detail(exception_type, exception_value, tb))
def login(self) -> None:
"""
        Need to log in before subscribing to websocket topics.
"""
timestamp = str(time.time())
msg = timestamp + "GET" + "/users/self/verify"
signature = generate_signature(msg, self.secret)
req = {
"op": "login",
"args": [
self.key,
self.passphrase,
timestamp,
signature.decode("utf-8")
]
}
self.send_packet(req)
self.callbacks["login"] = self.on_login
def subscribe_topic(self) -> None:
"""
Subscribe to all private topics.
"""
self.callbacks["option/ticker"] = self.on_ticker
self.callbacks["option/depth5"] = self.on_depth
self.callbacks["option/account"] = self.on_account
self.callbacks["option/order"] = self.on_order
self.callbacks["option/position"] = self.on_position
# Subscribe to order update
channels = []
for underlying in underlyings:
channel = f"option/order:{underlying}"
channels.append(channel)
req = {
"op": "subscribe",
"args": channels
}
self.send_packet(req)
# Subscribe to account update
channels = []
for underlying in underlyings:
channel = f"option/account:{underlying}"
channels.append(channel)
req = {
"op": "subscribe",
"args": channels
}
self.send_packet(req)
# Subscribe to position update
channels = []
for underlying in underlyings:
channel = f"option/position:{underlying}"
channels.append(channel)
req = {
"op": "subscribe",
"args": channels
}
self.send_packet(req)
def on_login(self, data: dict) -> None:
""""""
success = data.get("success", False)
if success:
self.gateway.write_log("Websocket API登录成功")
self.subscribe_topic()
else:
self.gateway.write_log("Websocket API登录失败")
def on_ticker(self, data: dict) -> None:
""""""
symbol = data["instrument_id"]
tick = self.ticks.get(symbol, None)
if not tick:
return
tick.last_price = float(data["last"])
tick.high_price = float(data["high_24h"])
tick.low_price = float(data["low_24h"])
tick.volume = float(data["volume_24h"])
tick.datetime = utc_to_local(data["timestamp"])
self.gateway.on_tick(copy(tick))
def on_depth(self, data: dict) -> None:
""""""
symbol = data["instrument_id"]
tick = self.ticks.get(symbol, None)
if not tick:
return
bids = data["bids"]
asks = data["asks"]
        for n, buf in enumerate(bids):
            price, volume, _, __ = buf
            setattr(tick, f"bid_price_{n + 1}", float(price))
            setattr(tick, f"bid_volume_{n + 1}", int(volume))
        for n, buf in enumerate(asks):
            price, volume, _, __ = buf
            setattr(tick, f"ask_price_{n + 1}", float(price))
            setattr(tick, f"ask_volume_{n + 1}", int(volume))
tick.datetime = utc_to_local(data["timestamp"])
self.gateway.on_tick(copy(tick))
def on_order(self, data: dict) -> None:
""""""
direction = SIDE_OKEXO2VT[data["side"]]
order = OrderData(
symbol=data["instrument_id"],
exchange=Exchange.OKEX,
type=ORDERTYPE_OKEXO2VT[data["order_type"]],
orderid=data["client_oid"],
direction=direction,
price=float(data["price"]),
volume=float(data["size"]),
traded=float(data["filled_qty"]),
time=utc_to_local(data["timestamp"]).strftime("%H:%M:%S"),
status=STATE_OKEXO2VT[data["state"]],
gateway_name=self.gateway_name,
)
self.gateway.on_order(copy(order))
trade_volume = data.get("last_fill_qty", 0)
if not trade_volume or float(trade_volume) == 0:
return
self.trade_count += 1
tradeid = f"{self.connect_time}{self.trade_count}"
trade = TradeData(
symbol=order.symbol,
exchange=order.exchange,
orderid=order.orderid,
tradeid=tradeid,
direction=order.direction,
offset=order.offset,
price=float(data["last_fill_px"]),
volume=float(trade_volume),
time=order.time,
gateway_name=self.gateway_name,
)
self.gateway.on_trade(trade)
def on_account(self, data: dict) -> None:
""""""
account = AccountData(
accountid=data["underlying"],
balance=float(data["equity"]),
frozen=float(data.get("margin_for_unfilled", 0)),
gateway_name=self.gateway_name
)
self.gateway.on_account(account)
def on_position(self, data: dict) -> None:
""""""
pos = PositionData(
symbol=data["instrument_id"],
exchange=Exchange.OKEX,
direction=Direction.NET,
volume=int(data["position"]),
            # frozen volume = total position minus available position
            # (the original subtracted avail_position from itself, always 0)
            frozen=float(data["position"]) - float(data["avail_position"]),
price=float(data["avg_cost"]),
pnl=float(data["realized_pnl"]),
gateway_name=self.gateway_name,
)
self.gateway.on_position(pos)
def generate_signature(msg: str, secret_key: str) -> bytes:
"""OKEX V3 signature"""
return base64.b64encode(hmac.new(secret_key, msg.encode(), hashlib.sha256).digest())
def get_timestamp() -> str:
""""""
now = datetime.utcnow()
timestamp = now.isoformat("T", "milliseconds")
return timestamp + "Z"
def utc_to_local(timestamp) -> datetime:
    """Convert an ISO UTC timestamp string to local (UTC+8) time."""
    # avoid shadowing the imported "time" module with a local variable
    utc_dt = datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S.%fZ")
    return utc_dt + timedelta(hours=8)
```
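A minimal sketch of the OKEX V3 signing scheme used by `sign()` and `login()` above, with dummy credentials; for POST requests the real client also appends the JSON-serialized body to the signed message.
```python
# Dummy credentials only; the secret must be bytes, as in connect() above.
secret = b"my-secret"
timestamp = get_timestamp()
msg = timestamp + "GET" + "/api/option/v3/underlying"
signature = generate_signature(msg, secret)
print(signature.decode())  # base64-encoded HMAC-SHA256 over the message
```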
#### File: gateway/tora/error_codes.py
```python
error_codes = {
    0: "No error",
    -1: "TCP connection not established",
    -2: "Invalid interaction channel",
    -3: "User not logged in",
    -4: "A session on another front-end cannot subscribe to the private flow",
    -5: "Duplicate private flow subscription request",
    -6: "Failed to open the private flow file",
    -7: "Internal communication error",
    -8: "Failed to create session channel",
    -9: "Flow control limit exceeded",
}
def get_error_msg(error_code: int):
try:
return error_codes[error_code]
except KeyError:
return "未知错误"
```
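Usage is a plain lookup with a fallback:
```python
print(get_error_msg(-3))   # "User not logged in"
print(get_error_msg(999))  # "Unknown error"
```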
|
{
"source": "jerryhgoss/atmospy",
"score": 3
}
|
#### File: atmospy/tests/test_stats.py
```python
import unittest
import atmospy.stats as stats
import pandas as pd
import numpy as np
import os
datadir = "datafiles/"
class TestClass(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_regression(self):
# load some data
test = pd.read_csv(
os.path.join(datadir,'regression_test.csv')
)
assert isinstance(test, pd.DataFrame)
        # list, np array, series, df[col] test cases
        # pd.DataFrame and column names case
        self._check_stats(x='internal', y='reference', data=test)
        # pd.Series case
        self._check_stats(x=test['internal'], y=test['reference'])
        # arrays case
        self._check_stats(x=test['internal'].to_numpy(), y=test['reference'].to_numpy())
        # list case
        self._check_stats(x=test['internal'].to_list(), y=test['reference'].to_list())
    def _check_stats(self, **kwargs):
        # renamed from test_generic: a test_-prefixed helper is also collected
        # by the test runner and would fail with a KeyError on the missing kwargs
        x = kwargs.pop("x")
        y = kwargs.pop("y")
        data = kwargs.pop("data", None)
        statstest = stats.stats(x=x, y=y, data=data)
#make sure all data is there in correct format
self.assertIsInstance(statstest, dict)
self.assertTrue("mae" in statstest.keys())
#still need to add
#self.assertTrue("cvmae" in statstest.keys())
self.assertTrue("mape" in statstest.keys())
self.assertTrue("mbe" in statstest.keys())
self.assertTrue("rmse" in statstest.keys())
self.assertTrue("mdae" in statstest.keys())
self.assertTrue("r2_score" in statstest.keys())
#make sure the values are correct
self.assertGreaterEqual(statstest["mae"], 3.0)
self.assertLessEqual(statstest["mae"], 3.1)
self.assertGreaterEqual(statstest["mape"], 0.31)
self.assertLessEqual(statstest["mape"], 0.33)
self.assertGreaterEqual(statstest["mbe"], 2.9)
self.assertLessEqual(statstest["mbe"], 3.1)
self.assertGreaterEqual(statstest["rmse"], 3.7)
self.assertLessEqual(statstest["rmse"], 3.8)
self.assertGreaterEqual(statstest["mdae"], 2.5)
self.assertLessEqual(statstest["mdae"], 2.6)
self.assertGreaterEqual(statstest["r2_score"], 0.35)
self.assertLessEqual(statstest["r2_score"], 0.37)
def test_epa(self):
#load some data
test = pd.read_csv(
os.path.join(datadir,'epa_test.csv')
)
assert isinstance(test, pd.DataFrame)
return
```
|
{
"source": "jerryhluo/OpenBioLink",
"score": 3
}
|
#### File: src/openbiolink/edge.py
```python
from openbiolink.edgeType import EdgeType
from openbiolink.node import Node
class Edge:
    def __init__(self, node1: Node, type: EdgeType, node2: Node, source: str = "", qscore=None, sourcedb=None):
self.node1 = node1
self.type = type
self.node2 = node2
self.source = source
self.qscore = qscore
self.sourcedb = sourcedb
def __eq__(self, other):
if isinstance(other, Edge):
return (
self.type == other.type and self.node1.id == other.node1.id and self.node2.id == other.node2.id
) # todo only if directional
return False
def __hash__(self):
return hash((self.node1.id, self.type, self.node2.id))
def __iter__(self):
return iter([self.node1.id, self.type, self.node2.id, self.qscore])
def to_list(self, include_qscore):
if include_qscore:
return iter([self.node1.resolved_id, self.type,
self.node2.resolved_id, self.qscore, self.sourcedb])
else:
return iter([self.node1.resolved_id, self.type,
self.node2.resolved_id, "", self.sourcedb])
def to_sub_rel_obj_list(self):
return iter([self.node1.id, self.type, self.node2.id, self.sourcedb])
```
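A sketch of the equality contract: only `(node1.id, type, node2.id)` participate in `__eq__` and `__hash__`, so edges that differ only in source or qscore collapse in a set. `SimpleNamespace` stands in for `Node` here, and the edge type is a placeholder string rather than a real `EdgeType` member.
```python
from types import SimpleNamespace

gene = SimpleNamespace(id="GENE:1")
dis = SimpleNamespace(id="DIS:2")
e1 = Edge(gene, "GENE_DIS", dis, source="db1", qscore=0.9)
e2 = Edge(gene, "GENE_DIS", dis, source="db2")
assert e1 == e2            # same triple, different provenance
assert len({e1, e2}) == 1  # hashing collapses duplicates
```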
#### File: file_processor/onto_mapping/ontoMapDoAltidProcessor.py
```python
from openbiolink.graph_creation.file_processor.fileProcessor import FileProcessor
from openbiolink.graph_creation.metadata_infile.mapping.inMetaMapOntoDoAltid import InMetaMapOntoDoAltid
from openbiolink.graph_creation.types.infileType import InfileType
from openbiolink.graph_creation.types.readerType import ReaderType
class OntoMapDoAltidProcessor(FileProcessor):
IN_META_CLASS = InMetaMapOntoDoAltid
def __init__(self):
self.use_cols = self.IN_META_CLASS.USE_COLS
super().__init__(
self.use_cols,
readerType=ReaderType.READER_ONTO_DO,
infileType=InfileType.IN_MAP_ONTO_DO_ALT_ID,
mapping_sep=self.IN_META_CLASS.MAPPING_SEP,
)
```
#### File: file_reader/edge/edgeHpoDisReader.py
```python
import os
from openbiolink.graph_creation import graphCreationConfig as g
from openbiolink.graph_creation.file_reader.csvReader import CsvReader
from openbiolink.graph_creation.metadata_db_file.edge.dbMetaEdgeHpoDis import DbMetaEdgeHpoDis
from openbiolink.graph_creation.types.dbType import DbType
from openbiolink.graph_creation.types.readerType import ReaderType
class EdgeHpoDisReader(CsvReader):
DB_META_CLASS = DbMetaEdgeHpoDis
def __init__(self):
super().__init__(
in_path=os.path.join(g.O_FILE_PATH, self.DB_META_CLASS.OFILE_NAME),
sep=None,
cols=self.DB_META_CLASS.COLS,
use_cols=self.DB_META_CLASS.FILTER_COLS,
nr_lines_header=self.DB_META_CLASS.HEADER,
dtypes={col_name: "str" for col_name in self.DB_META_CLASS.FILTER_COLS},
readerType=ReaderType.READER_EDGE_HPO_DIS,
dbType=DbType.DB_EDGE_HPO_DIS,
)
```
#### File: file_reader/onto/ontoDoReader.py
```python
import os
from openbiolink.graph_creation import graphCreationConfig as g
from openbiolink.graph_creation.file_reader.oboReader import OboReader
from openbiolink.graph_creation.metadata_db_file.onto.dbMetaOntoDo import DbMetaOntoDo
from openbiolink.graph_creation.types.dbType import DbType
from openbiolink.graph_creation.types.readerType import ReaderType
class OntoDoReader(OboReader):
DB_META_CLASS = DbMetaOntoDo
def __init__(self):
super().__init__(
in_path=os.path.join(g.O_FILE_PATH, self.DB_META_CLASS.OFILE_NAME),
quadruple_list=self.DB_META_CLASS.QUADRUPLES,
readerType=ReaderType.READER_ONTO_DO,
dbType=DbType.DB_ONTO_DO,
)
```
#### File: openbiolink/graph_creation/graphCreation.py
```python
import inspect
import os
from tqdm import tqdm
import openbiolink.graphProperties as graphProp
from openbiolink import globalConfig, globalConfig as globConst, utils
from openbiolink.cli import Cli
from openbiolink.graph_creation import graphCreationConfig as gcConst
from openbiolink.graph_creation.file_downloader.fileDownloader import *
from openbiolink.graph_creation.file_processor.fileProcessor import *
from openbiolink.graph_creation.file_reader.fileReader import *
from openbiolink.graph_creation.file_writer.fileWriter import *
from openbiolink.graph_creation.graphCreator import GraphCreator
from openbiolink.graph_creation.graph_writer import *
from openbiolink.graph_creation.metadata_db_file import *
from openbiolink.graph_creation.metadata_edge.edgeOntoMetadata import EdgeOntoMetadata
from openbiolink.graph_creation.metadata_edge.edgeRegularMetadata import EdgeRegularMetadata
from openbiolink.graph_creation.metadata_edge.tnEdgeRegularMetadata import TnEdgeRegularMetadata
from openbiolink.graph_creation.metadata_infile import *
from openbiolink.gui.tqdmbuf import TqdmBuffer
class Graph_Creation:
def __init__(self, folder_path, use_db_metadata_classes=None, use_edge_metadata_classes=None):
gcConst.O_FILE_PATH = os.path.join(folder_path, gcConst.O_FILE_FOLDER_NAME)
gcConst.IN_FILE_PATH = os.path.join(folder_path, gcConst.IN_FILE_FOLDER_NAME)
self.db_file_metadata = [x() for x in utils.get_leaf_subclasses(DbMetadata)]
self.file_readers = [x() for x in utils.get_leaf_subclasses(FileReader)]
self.file_processors = [x() for x in utils.get_leaf_subclasses(FileProcessor)]
self.infile_metadata = [x() for x in utils.get_leaf_subclasses(InfileMetadata)]
self.edge_metadata = [x(graphProp.QUALITY) for x in utils.get_leaf_subclasses(EdgeRegularMetadata)] + [
x(graphProp.QUALITY) for x in utils.get_leaf_subclasses(EdgeOntoMetadata)
]
self.tn_edge_metadata = [x(graphProp.QUALITY) for x in utils.get_leaf_subclasses(TnEdgeRegularMetadata)]
self.dbType_reader_map = utils.cls_list_to_dic(self.file_readers, "dbType")
self.readerType_processor_map = utils.cls_list_to_dic(self.file_processors, "readerType")
self.infileType_inMetadata_map = {x.infileType: x for x in self.infile_metadata}
# if not glob.DIRECTED:
## remove onto
# if use_edge_metadata_classes is None:
# use_edge_metadata_classes = [x(glob.QUALITY) for x in utils.get_leaf_subclasses(EdgeRegularMetadata)]
# else:
# temp_use_edge_metadata_classes =[]
# for edge_class in use_edge_metadata_classes:
# if inspect.isclass(edge_class):
# if not issubclass(edge_class, EdgeOntoMetadata):
# temp_use_edge_metadata_classes.append(edge_class())
# else:
# if not issubclass(type(edge_class), EdgeOntoMetadata):
# temp_use_edge_metadata_classes.append(edge_class)
# use_edge_metadata_classes = temp_use_edge_metadata_classes
# #use_edge_metadata_classes = [x for x in use_edge_metadata_classes if not issubclass(type(x), EdgeOntoMetadata)]
# use only the desired sources
if use_db_metadata_classes is not None:
self.init_custom_sources_bottom_up(use_db_metadata_classes)
if use_edge_metadata_classes is not None:
self.init_custom_sources_top_down(use_edge_metadata_classes)
graphProp.EDGE_TYPES = [str(x.__class__.__name__) for x in self.edge_metadata]
# testme
# ----------- download ----------
def download_db_files(
self, skip_existing: bool = True,
):
logging.info("## Start downloading files ##")
directory = gcConst.O_FILE_PATH
os.makedirs(directory, exist_ok=True)
#tqdmbuffer = TqdmBuffer() if globConst.GUI_MODE else None
#it = tqdm(self.db_file_metadata, file=tqdmbuffer, desc="Downloading files")
for index, db_file in enumerate(self.db_file_metadata):
path = os.path.join(directory, db_file.ofile_name)
if skip_existing and os.path.exists(path):
logging.info(f"Skipping: {db_file.NAME}")
continue
logging.info(f"Downloading {index + 1}/{len(self.db_file_metadata)}: {db_file.NAME}")
FileDownloader.download(db_file.url, path)
# ----------- create input files ----------
def create_input_files(self):
logging.info("## Start creating input files ##")
skip = None
for_all = False
if not globalConfig.INTERACTIVE_MODE:
skip = globalConfig.SKIP_EXISTING_FILES
for_all = True
if not os.path.exists(gcConst.IN_FILE_PATH):
os.makedirs(gcConst.IN_FILE_PATH)
tqdmbuffer = TqdmBuffer() if globConst.GUI_MODE else None
it = tqdm(self.file_readers, file=tqdmbuffer)
for reader in it:
if reader.readerType not in self.readerType_processor_map:
logging.info(f"There is no processor for the reader {reader.readerType}")
continue
logging.info(f"Reading: {reader.__class__.__name__}")
# check beforehand if read in content is processed as parsing can be time consuming
all_files_exist = all(
os.path.isfile(
os.path.join(gcConst.IN_FILE_PATH, self.infileType_inMetadata_map[processor.infileType].csv_name)
)
for processor in self.readerType_processor_map[reader.readerType]
)
if all_files_exist and not for_all and self.readerType_processor_map[reader.readerType]:
first_processor = self.readerType_processor_map[reader.readerType][0]
first_processor_out_path = os.path.join(
gcConst.IN_FILE_PATH, (self.infileType_inMetadata_map[first_processor.infileType]).csv_name
)
if globConst.GUI_MODE:
from openbiolink.gui.gui import skipExistingFiles
skip, for_all = skipExistingFiles(first_processor_out_path)
else:
skip, for_all = Cli.skip_existing_files(first_processor_out_path)
if not skip or not all_files_exist:
# execute processors
in_data = reader.read_file()
# fixme ResourceWarning: Enable tracemalloc to get the object allocation traceback
for processor in self.readerType_processor_map[reader.readerType]:
out_file_path = os.path.join(
gcConst.IN_FILE_PATH, (self.infileType_inMetadata_map[processor.infileType]).csv_name
)
if not for_all:
if globConst.GUI_MODE:
from openbiolink.gui.gui import skipExistingFiles
skip, for_all = skipExistingFiles(out_file_path)
else:
skip, for_all = Cli.skip_existing_files(out_file_path)
if not (skip and os.path.isfile(out_file_path)):
logging.info(f"Processing: {processor.__class__.__name__}")
out_data = processor.process(in_data)
FileWriter.write_to_file(out_data, out_file_path)
# ----------- create graph ----------
def create_graph(self, format=None, file_sep=None, multi_file=None, print_qscore=True):
logging.info("## Start creating graph ##")
graph_creator = GraphCreator()
# create/output positive graph
tp_nodes, tp_edges, tp_namespaces = graph_creator.meta_edges_to_graph(
edge_metadata_list=self.edge_metadata, tn=False,
)
tn_nodes, tn_edges, tn_namespaces = graph_creator.meta_edges_to_graph(
edge_metadata_list=self.tn_edge_metadata, tn=True,
)
logging.info("## Start writing graph ##")
if format is None:
format = "tsv"
format = format.upper()
if format == "TSV":
graph_writer = GraphTSVWriter(file_sep=file_sep, multi_file=multi_file, print_qscore=print_qscore)
elif format == "RDF-N3":
graph_writer = GraphRDFWriter(file_sep=file_sep, multi_file=multi_file, print_qscore=print_qscore)
elif format == "PICKLE":
graph_writer = GraphPickleWriter()
elif format == "BEL":
graph_writer = GraphBELWriter()
else:
raise ValueError(f"Invalid format: {format}")
graph_writer.write(
tp_nodes=tp_nodes,
tp_edges=tp_edges,
tp_namespaces=tp_namespaces,
tn_nodes=tn_nodes,
tn_edges=tn_edges,
tn_namespaces=tn_namespaces,
)
# ----------- helper init functions ----------
    def init_custom_sources_bottom_up(self, use_db_metadata_classes):
        """helper __init__ function for custom db_metadata_classes"""
        self.db_file_metadata = []
        # remove dbMetadata from the list
        # make sure to use instances of classes
        for x in use_db_metadata_classes:
if inspect.isclass(x):
self.db_file_metadata.append(x())
else:
self.db_file_metadata.append(x)
# remove readers
keep_dbType = [x.dbType for x in self.db_file_metadata]
logging.info(
"readers removed: " + str([x.__class__.__name__ for x in self.file_readers if x.dbType not in keep_dbType])
)
self.file_readers = [x for x in self.file_readers if x.dbType in keep_dbType]
self.dbType_reader_map = utils.cls_list_to_dic(self.file_readers, "dbType")
# remove processors
keep_readerType = [x.readerType for x in self.file_readers]
logging.info(
"processors removed: %s"
% (str([x.__class__.__name__ for x in self.file_processors if x.readerType not in keep_readerType]))
)
self.file_processors = [x for x in self.file_processors if x.readerType in keep_readerType]
self.readerType_processor_map = utils.cls_list_to_dic(self.file_processors, "readerType")
# remove infile metadata
keep_infileType = [x.infileType for x in self.file_processors]
logging.info(
"processors removed: "
+ str([x.__class__.__name__ for x in self.infile_metadata if x.infileType not in keep_infileType])
)
self.infile_metadata = [x for x in self.infile_metadata if x.infileType in keep_infileType]
self.infileType_inMetadata_map = {x.infileType: x for x in self.infile_metadata}
# remove edge metadata
logging.info(
"edges removed: "
+ str(
[
x.__class__.__name__
for x in self.edge_metadata + self.tn_edge_metadata
if x.EDGE_INMETA_CLASS.INFILE_TYPE not in keep_infileType
]
)
)
self.edge_metadata = [x for x in self.edge_metadata if x.EDGE_INMETA_CLASS.INFILE_TYPE in keep_infileType]
self.tn_edge_metadata = [x for x in self.tn_edge_metadata if x.EDGE_INMETA_CLASS.INFILE_TYPE in keep_infileType]
# check for deleted dependencies of mappings
additional_remove_metaEdges = []
additional_remove_mapping_infileType = []
for metaEdge in self.edge_metadata + self.tn_edge_metadata:
mappings = [
metaEdge.MAP1_META_CLASS,
metaEdge.MAP2_META_CLASS,
metaEdge.MAP1_ALT_ID_META_CLASS,
metaEdge.MAP2_ALT_ID_META_CLASS,
]
for mapping in mappings:
if mapping is not None and mapping.INFILE_TYPE not in keep_infileType:
additional_remove_metaEdges.append(metaEdge)
additional_remove_mapping_infileType.append(mapping.INFILE_TYPE)
if len(additional_remove_metaEdges) > 0:
message = (
"\nDue to manual exclusion of DB resources, also the edges: %s\n "
"will be removed due to deleted dependencies of used mappings (i.e. %s\n "
"Consider manually exclude edges instead of DB resources."
% (
str([x.__class__.__name__ for x in additional_remove_metaEdges]),
str([str(x) for x in additional_remove_mapping_infileType]),
)
)
logging.warning(message)
if globConst.GUI_MODE:
from openbiolink.gui import gui
gui.askForExit(message)
elif globConst.INTERACTIVE_MODE:
Cli.ask_for_exit(message)
else:
sys.exit()
self.edge_metadata = [x for x in self.edge_metadata if x not in additional_remove_metaEdges]
self.tn_edge_metadata = [x for x in self.tn_edge_metadata if x not in additional_remove_metaEdges]
    def init_custom_sources_top_down(self, use_edge_metadata_classes):
        """helper __init__ function for custom edge_metadata_classes"""
# remove edge_metadata
logging.info(
"Edge Metadata removed: "
+ str(
[
x.__class__.__name__
for x in self.edge_metadata
                    if x.EDGE_INMETA_CLASS not in [y.EDGE_INMETA_CLASS for y in use_edge_metadata_classes]
]
)
)
self.edge_metadata = []
        for x in use_edge_metadata_classes:
if inspect.isclass(x):
self.edge_metadata.append(x())
else:
self.edge_metadata.append(x)
# remove inMetadata
infileType_edgeMetadata_map = utils.cls_list_to_dic(self.edge_metadata, "EDGE_INMETA_CLASS.INFILE_TYPE")
infileType_edgeMetadata_map.update(
utils.cls_list_to_dic(
self.edge_metadata, "MAP1_META_CLASS.INFILE_TYPE", lambda a: a.MAP1_META_CLASS is not None
)
)
infileType_edgeMetadata_map.update(
utils.cls_list_to_dic(
self.edge_metadata, "MAP2_META_CLASS.INFILE_TYPE", lambda a: a.MAP2_META_CLASS is not None
)
)
infileType_edgeMetadata_map.update(
utils.cls_list_to_dic(
self.edge_metadata, "MAP1_ALT_ID_META_CLASS.INFILE_TYPE", lambda a: a.MAP1_ALT_ID_META_CLASS is not None
)
)
infileType_edgeMetadata_map.update(
utils.cls_list_to_dic(
self.edge_metadata, "MAP2_ALT_ID_META_CLASS.INFILE_TYPE", lambda a: a.MAP2_ALT_ID_META_CLASS is not None
)
)
infileType_edgeMetadata_map.update(
utils.cls_list_to_dic(self.tn_edge_metadata, "EDGE_INMETA_CLASS.INFILE_TYPE")
)
infileType_edgeMetadata_map.update(
utils.cls_list_to_dic(
self.tn_edge_metadata, "MAP1_META_CLASS.INFILE_TYPE", lambda a: a.MAP1_META_CLASS is not None
)
)
infileType_edgeMetadata_map.update(
utils.cls_list_to_dic(
self.tn_edge_metadata, "MAP2_META_CLASS.INFILE_TYPE", lambda a: a.MAP2_META_CLASS is not None
)
)
infileType_edgeMetadata_map.update(
utils.cls_list_to_dic(
self.tn_edge_metadata,
"MAP1_ALT_ID_META_CLASS.INFILE_TYPE",
lambda a: a.MAP1_ALT_ID_META_CLASS is not None,
)
)
infileType_edgeMetadata_map.update(
utils.cls_list_to_dic(
self.tn_edge_metadata,
"MAP2_ALT_ID_META_CLASS.INFILE_TYPE",
lambda a: a.MAP2_ALT_ID_META_CLASS is not None,
)
)
keep_infileTypes = list(infileType_edgeMetadata_map.keys())
logging.info(
"Infile Metadata removed: "
+ str([x.__class__.__name__ for x in self.infile_metadata if x.infileType not in keep_infileTypes])
)
self.infile_metadata = [x for x in self.infile_metadata if x.infileType in keep_infileTypes]
self.infileType_inMetadata_map = {x.infileType: x for x in self.infile_metadata}
# remove processors
logging.info(
"Processors removed: "
+ str([x.__class__.__name__ for x in self.file_processors if x.infileType not in keep_infileTypes])
)
self.file_processors = [x for x in self.file_processors if x.infileType in keep_infileTypes]
self.readerType_processor_map = utils.cls_list_to_dic(self.file_processors, "readerType")
# remove readers
keep_readerType = list(self.readerType_processor_map.keys())
logging.info(
"Readers removed: "
+ str([x.__class__.__name__ for x in self.file_readers if x.readerType not in keep_readerType])
)
self.file_readers = [x for x in self.file_readers if x.readerType in keep_readerType]
self.dbType_reader_map = utils.cls_list_to_dic(self.file_readers, "dbType")
# remove db_metadata
keep_dbType = list(self.dbType_reader_map.keys())
logging.info(
"DB_source removed: "
+ str([x.__class__.__name__ for x in self.db_file_metadata if x.dbType not in keep_dbType])
)
self.db_file_metadata = [x for x in self.db_file_metadata if x.dbType in keep_dbType]
```
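A hedged sketch of the end-to-end pipeline this class drives, assuming the global quality and directionality settings are configured elsewhere; the folder path is arbitrary and the source downloads are large.
```python
gc = Graph_Creation(folder_path="openbiolink_data")
gc.download_db_files(skip_existing=True)  # fetch the raw source files
gc.create_input_files()                   # parse them into in-files
gc.create_graph(format="tsv", print_qscore=True)
```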
#### File: metadata_db_file/edge/dbMetaEdgeDisGeNet.py
```python
from openbiolink.graph_creation.metadata_db_file.edge.dbMetadataEdge import DbMetadataEdge
from openbiolink.graph_creation.types.dbType import DbType
class DbMetaEdgeDisGeNet(DbMetadataEdge):
NAME = "Edge - DisGeNet - Gene Disease"
# URL = "http://www.disgenet.org/ds/DisGeNET/results/curated_gene_disease_associations.tsv.gz"
URL = "http://www.disgenet.org/static/disgenet_ap1/files/downloads/curated_gene_disease_associations.tsv.gz"
OFILE_NAME = "DisGeNet_gene_disease.tsv.gz"
COLS = [
"geneID",
"geneSym",
"DSI",
"DPI",
"umlsID",
"disName",
"diseaseType",
"diseaseClass",
"diseaseSemanticType",
"score",
"EI",
"YearInitial",
"YearFinal",
"NofPmids",
"NofSnps",
"source",
]
FILTER_COLS = ["geneID", "umlsID", "score"]
HEADER = 1
DB_TYPE = DbType.DB_EDGE_DISGENET
def __init__(self):
super().__init__(
url=DbMetaEdgeDisGeNet.URL, ofile_name=DbMetaEdgeDisGeNet.OFILE_NAME, dbType=DbMetaEdgeDisGeNet.DB_TYPE
)
```
#### File: metadata_db_file/edge/dbMetaEdgeHpa.py
```python
from openbiolink.graph_creation.metadata_db_file.edge.dbMetadataEdge import DbMetadataEdge
from openbiolink.graph_creation.types.dbType import DbType
class DbMetaEdgeHpa(DbMetadataEdge):
NAME = "Edge - HPA - Expression Data"
URL = "https://www.proteinatlas.org/download/rna_tissue.tsv.zip"
OFILE_NAME = "HPA_gene_anatomy.tsv.zip"
COLS = ["geneID", "geneName", "anatomy", "expressionValue", "Unit"]
FILTER_COLS = ["geneID", "anatomy", "expressionValue"]
HEADER = 1
DB_TYPE = DbType.DB_EDGE_HPA
def __init__(self):
super().__init__(url=DbMetaEdgeHpa.URL, ofile_name=DbMetaEdgeHpa.OFILE_NAME, dbType=DbMetaEdgeHpa.DB_TYPE)
```
#### File: metadata_db_file/edge/dbMetaEdgeSiderSe.py
```python
from openbiolink.graph_creation.metadata_db_file.edge.dbMetadataEdge import DbMetadataEdge
from openbiolink.graph_creation.types.dbType import DbType
class DbMetaEdgeSiderSe(DbMetadataEdge):
NAME = "Edge - Sider - Side Effects"
URL = "http://sideeffects.embl.de/media/download/meddra_all_se.tsv.gz"
OFILE_NAME = "SIDER_se.tsv.gz"
COLS = ["stitchID_flat", "stitchID_stereo", "umlsID", "medDRAumlsType", "medDRAumlsID", "SEname"]
FILTER_COLS = ["stitchID_stereo", "umlsID"]
HEADER = 0
DB_TYPE = DbType.DB_EDGE_SIDER_SE
def __init__(self):
super().__init__(
url=DbMetaEdgeSiderSe.URL, ofile_name=DbMetaEdgeSiderSe.OFILE_NAME, dbType=DbMetaEdgeSiderSe.DB_TYPE
)
```
#### File: metadata_db_file/edge/dbMetaEdgeStringAction.py
```python
from openbiolink.graph_creation.metadata_db_file.edge.dbMetadataEdge import DbMetadataEdge
from openbiolink.graph_creation.types.dbType import DbType
class DbMetaEdgeStringAction(DbMetadataEdge):
NAME = "Edge - STRING - Gene Gene (Action)"
# current link can be extracted from https://stringdb-static.org/cgi/download.pl?sessionId=FuXJ9a0fkSMB&species_text=Homo+sapiens
URL = "https://stringdb-static.org/download/protein.actions.v11.0/9606.protein.actions.v11.0.txt.gz"
OFILE_NAME = "STRING_gene_gene_actions.tsv.gz" # tab separated txt file
COLS = ["item_id_a", "item_id_b", "mode", "action", "is_directional", "a_is_acting", "score"]
FILTER_COLS = ["item_id_a", "item_id_b", "mode", "action", "is_directional", "a_is_acting", "score"]
HEADER = 1
DB_TYPE = DbType.DB_EDGE_STRING_ACTION
def __init__(self):
super().__init__(url=self.URL, ofile_name=self.OFILE_NAME, dbType=self.DB_TYPE)
```
#### File: graph_creation/metadata_edge/edgeMetadata.py
```python
class EdgeMetadata:
MAP1_META_CLASS = None
MAP2_META_CLASS = None
MAP1_ALT_ID_META_CLASS = None
MAP2_ALT_ID_META_CLASS = None
def __init__(
self,
is_directional,
edges_file_path,
source,
colindex1,
colindex2,
edgeType,
node1_type,
node1_namespace,
node2_type,
node2_namespace,
colindex_qscore=None,
cutoff_num=None,
cutoff_txt=None,
mapping1_file=None,
mapping1_targetnamespace=None,
map1_sourceindex=None,
map1_targetindex=None,
altid_mapping1_file=None,
altid_mapping1_targetnamespace=None,
altid_map1_sourceindex=None,
altid_map1_targetindex=None,
mapping2_file=None,
mapping2_targetnamespace=None,
map2_sourceindex=None,
map2_targetindex=None,
altid_mapping2_file=None,
altid_mapping2_targetnamespace=None,
altid_map2_sourceindex=None,
altid_map2_targetindex=None,
):
self.is_directional = is_directional
self.edges_file_path = edges_file_path
self.source = source
self.colindex1 = colindex1
self.colindex2 = colindex2
self.edgeType = edgeType
self.node1_type = node1_type
self.node1_namespace = node1_namespace
self.node2_type = node2_type
self.node2_namespace = node2_namespace
self.colindex_qscore = colindex_qscore
self.cutoff_num = cutoff_num
self.cutoff_txt = cutoff_txt
self.mapping1_file = mapping1_file
self.mapping1_targetnamespace = mapping1_targetnamespace
self.map1_sourceindex = map1_sourceindex
self.map1_targetindex = map1_targetindex
self.altid_mapping1_file = altid_mapping1_file
self.altid_mapping1_targetnamespace = altid_mapping1_targetnamespace
self.altid_map1_sourceindex = altid_map1_sourceindex
self.altid_map1_targetindex = altid_map1_targetindex
self.mapping2_file = mapping2_file
self.mapping2_targetnamespace = mapping2_targetnamespace
self.map2_sourceindex = map2_sourceindex
self.map2_targetindex = map2_targetindex
self.altid_mapping2_file = altid_mapping2_file
self.altid_mapping2_targetnamespace = altid_mapping2_targetnamespace
self.altid_map2_sourceindex = altid_map2_sourceindex
self.altid_map2_targetindex = altid_map2_targetindex
```
#### File: graph_creation/metadata_edge/edgeOntoMetadata.py
```python
from openbiolink.graph_creation.metadata_edge.edgeMetadata import EdgeMetadata
class EdgeOntoMetadata(EdgeMetadata):
def __init__(
self,
is_directional,
edges_file_path,
source,
colindex1,
colindex2,
edgeType,
node1_type,
node1_namespace,
node2_type,
node2_namespace,
):
super().__init__(
is_directional=is_directional,
edges_file_path=edges_file_path,
source=source,
colindex1=colindex1,
colindex2=colindex2,
edgeType=edgeType,
node1_type=node1_type,
node1_namespace=node1_namespace,
node2_type=node2_type,
node2_namespace=node2_namespace,
)
```
#### File: metadata_infile/edge/inMetaEdgeSiderSe.py
```python
from openbiolink.edgeType import EdgeType
from openbiolink.graph_creation.metadata_infile.infileMetadata import InfileMetadata
from openbiolink.graph_creation.types.infileType import InfileType
from openbiolink.namespace import *
from openbiolink.nodeType import NodeType
class InMetaEdgeSiderSe(InfileMetadata):
CSV_NAME = "DB_SIDER_se.csv"
USE_COLS = ["stitchID_stereo", "umlsID"]
NODE1_COL = 0
NODE2_COL = 1
QSCORE_COL = None
SOURCE = "SIDER"
NODE1_TYPE = NodeType.DRUG
NODE1_NAMESPACE = Namespace(Namespaces.PUBCHEM, False)
NODE2_TYPE = NodeType.PHENOTYPE
NODE2_NAMESPACE = Namespace(Namespaces.UMLS, False)
EDGE_TYPE = EdgeType.DRUG_PHENOTYPE
INFILE_TYPE = InfileType.IN_EDGE_SIDER_SE
MAPPING_SEP = None
def __init__(self):
super().__init__(
csv_name=InMetaEdgeSiderSe.CSV_NAME, cols=self.USE_COLS, infileType=InMetaEdgeSiderSe.INFILE_TYPE
)
```
#### File: metadata_infile/mapping/inMetaMapOntoDoAltid.py
```python
from openbiolink.graph_creation.metadata_infile.infileMetadata import InfileMetadata
from openbiolink.graph_creation.types.infileType import InfileType
from openbiolink.namespace import *
class InMetaMapOntoDoAltid(InfileMetadata):
CSV_NAME = "DB_ONTO_mapping_DO_alt_id.csv"
USE_COLS = ["ID", "ALT_ID"]
SOURCE_COL = 1
TARGET_COL = 0
TARGET_NAMESPACE = Namespace(Namespaces.DIS)
MAPPING_SEP = ";" # ';' sep is created while parsing
INFILE_TYPE = InfileType.IN_MAP_ONTO_DO_ALT_ID
def __init__(self):
super().__init__(
csv_name=InMetaMapOntoDoAltid.CSV_NAME, cols=self.USE_COLS, infileType=InMetaMapOntoDoAltid.INFILE_TYPE
)
```
#### File: openbiolink/train_test_set_creation/sampler.py
```python
import numpy
import pandas
from tqdm import tqdm
import logging
import openbiolink.train_test_set_creation.ttsConfig as ttsConst
from openbiolink import globalConfig as globConst, utils
from openbiolink.gui.tqdmbuf import TqdmBuffer
class Sampler:
def __init__(self, meta_edges_dic, nodes):
self.meta_edges_dic = meta_edges_dic
self.nodes = nodes
def generate_n_random_samples(self, n, node_type_1, edge_type, node_type_2, exclude_df):
exclude_df = exclude_df[globConst.COL_NAMES_EDGES]
samples = pandas.DataFrame(columns=globConst.COL_NAMES_EDGES)
nodes_nodeType1 = self.nodes.loc[self.nodes[globConst.NODE_TYPE_COL_NAME] == node_type_1]
num_nodes1, _ = nodes_nodeType1.shape
nodes_nodeType2 = self.nodes.loc[self.nodes[globConst.NODE_TYPE_COL_NAME] == node_type_2]
num_nodes2, _ = nodes_nodeType2.shape
i = 0
while len(samples) < n:
if i > 100:
break
if num_nodes1 == 0:
logging.warning("Number of nodes with type 1 was 0")
break
if num_nodes2 == 0:
logging.warning("Number of nodes with type 2 was 0")
break
num_examples = n - len(samples)
node1_list = nodes_nodeType1.sample(n=num_examples, random_state=(globConst.RANDOM_STATE + i), replace=True)
node1_list = node1_list.id.tolist()
node2_list = nodes_nodeType2.sample(
n=num_examples, random_state=(globConst.RANDOM_STATE + (i + 100)), replace=True
)
node2_list = node2_list.id.tolist()
sample_candidates = pandas.DataFrame(
data={
globConst.NODE1_ID_COL_NAME: node1_list,
globConst.EDGE_TYPE_COL_NAME: [edge_type] * num_examples,
globConst.NODE2_ID_COL_NAME: node2_list,
}
)
_, sub_samples = utils.get_diff(exclude_df[globConst.COL_NAMES_TRIPLES], sample_candidates)
sub_samples.drop_duplicates(inplace=True)
sub_samples[globConst.SOURCE_COL_NAME] = ["GENERATED"] * len(sub_samples)
sub_samples[globConst.QSCORE_COL_NAME] = [None] * len(sub_samples)
samples = samples.append(sub_samples, ignore_index=True)
exclude_df = exclude_df.append(pandas.DataFrame(sub_samples))
i += 1
        # todo: consequences if the number of negative examples differs from the number of positive examples
return samples
class NegativeSampler(Sampler):
def __init__(self, meta_edges_dic, tn_edgeTypes, all_tn, nodes, identifier2type):
super().__init__(meta_edges_dic, nodes)
self.meta_edges_dic = meta_edges_dic
self.tn_edgeTypes = tn_edgeTypes
self.all_tn = all_tn
self.identifier2type = identifier2type
self.all_tn = self.add_edge_type_key_column(all_tn)
def add_edge_type_key_column(self, df):
df[ttsConst.EDGE_TYPE_KEY_NAME] = (
df[globConst.NODE1_ID_COL_NAME].str.split(":").map(lambda x: self.identifier2type[x[0]])
+ "_"
+ df[globConst.EDGE_TYPE_COL_NAME]
+ "_"
+ df[globConst.NODE2_ID_COL_NAME].str.split(":").map(lambda x: self.identifier2type[x[0]])
)
return df
def generate_random_neg_samples(self, pos_samples, distrib="orig"):
col_names = globConst.COL_NAMES_EDGES
pos_samples = pos_samples[col_names]
neg_samples = pandas.DataFrame(columns=col_names)
pos_samples = self.add_edge_type_key_column(pos_samples)
# generate distribution of meta_edge types for negative samples
meta_edges = list(self.meta_edges_dic.keys())
meta_edges.sort()
neg_samples_count_meta_edges = {}
if distrib == "uni":
num_tp_examples, _ = pos_samples.shape
neg_samples_metaEdges = list(numpy.random.choice(meta_edges, num_tp_examples))
neg_samples_metaEdges.sort()
neg_samples_count_meta_edges = {
e: neg_samples_metaEdges.count(e)
for e in set(neg_samples_metaEdges)
if neg_samples_metaEdges.count(e) > 0
}
elif distrib == "orig":
for key in self.meta_edges_dic.keys():
num_entry = len(pos_samples.loc[(pos_samples[ttsConst.EDGE_TYPE_KEY_NAME] == key)])
if num_entry > 0:
neg_samples_count_meta_edges[key] = num_entry
# generate a negative sub-sample for each negative meta_edge type
tqdmbuffer = TqdmBuffer() if globConst.GUI_MODE else None
for meta_edge_triple_key, count in tqdm(sorted(neg_samples_count_meta_edges.items()), file=tqdmbuffer):
node_type_1, edge_type, node_type_2 = self.meta_edges_dic[meta_edge_triple_key]
pos_samples_of_meta_edge = pos_samples.loc[
(pos_samples[ttsConst.EDGE_TYPE_KEY_NAME] == meta_edge_triple_key)
]
            if (
                edge_type in self.tn_edgeTypes
            ):  # only onto edge types can appear multiple times; there should be no onto true negatives
neg_samples = neg_samples.append(
self.subsample_with_tn(
meta_edge_triple_key=meta_edge_triple_key,
subsample_size=count,
exclude_df=pos_samples_of_meta_edge[col_names],
),
ignore_index=True,
)
else:
neg_samples = neg_samples.append(
self.generate_n_random_samples(
n=count,
node_type_1=node_type_1,
edge_type=edge_type,
node_type_2=node_type_2,
exclude_df=pos_samples_of_meta_edge[col_names],
),
ignore_index=True,
)
neg_samples[globConst.VALUE_COL_NAME] = 0
return neg_samples[col_names + [globConst.VALUE_COL_NAME]]
def subsample_with_tn(self, meta_edge_triple_key, subsample_size, exclude_df, col_names=globConst.COL_NAMES_EDGES):
node_type_1, edge_type, node_type_2 = self.meta_edges_dic[meta_edge_triple_key]
tn_examples = self.all_tn.loc[self.all_tn[ttsConst.EDGE_TYPE_KEY_NAME] == meta_edge_triple_key] # testme
count_existing_tn, _ = tn_examples.shape
if subsample_size <= count_existing_tn:
neg_samples = tn_examples.sample(n=subsample_size, random_state=globConst.RANDOM_STATE)
else:
exclude_df = exclude_df.append(tn_examples)
neg_samples = tn_examples
neg_samples = neg_samples.append(
self.generate_n_random_samples(
n=(subsample_size - count_existing_tn),
node_type_1=node_type_1,
edge_type=edge_type,
node_type_2=node_type_2,
exclude_df=exclude_df,
)
)
neg_samples.reset_index(inplace=True)
return neg_samples[col_names]
```
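The `generate_n_random_samples` loop above boils down to corruption-based negative sampling: draw random (node1, edge, node2) candidates, drop anything already in the exclusion set, and repeat until `n` negatives are collected. A minimal, self-contained sketch of that idea (illustrative column names, not the actual `globConst` constants):
```python
# Hypothetical toy data; the real code uses openbiolink's column constants.
import pandas as pd

nodes = pd.DataFrame({"id": ["d1", "d2", "g1", "g2"],
                      "type": ["DRUG", "DRUG", "GENE", "GENE"]})
positives = pd.DataFrame({"n1": ["d1"], "edge": ["DRUG_GENE"], "n2": ["g1"]})

drugs = nodes.loc[nodes["type"] == "DRUG", "id"]
genes = nodes.loc[nodes["type"] == "GENE", "id"]

# draw random (head, tail) pairs, then drop collisions with known positives
candidates = pd.DataFrame({
    "n1": drugs.sample(4, replace=True, random_state=0).values,
    "edge": "DRUG_GENE",
    "n2": genes.sample(4, replace=True, random_state=1).values,
})
negatives = (candidates.merge(positives, how="left", indicator=True)
                       .query("_merge == 'left_only'")
                       .drop(columns="_merge")
                       .drop_duplicates())
print(negatives)
```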
#### File: test/evaluation_tests/test_dglke_evaluation.py
```python
import torch
import os
import numpy as np
from openbiolink.evaluation.dataLoader import DataLoader
from openbiolink.evaluation.evaluation import Evaluator
from dglke.models.infer import ScoreInfer
from dglke.utils import load_model_config
class FakeEdge(object):
def __init__(self, head_emb, rel_emb, tail_emb):
self._hobj = {}
self._robj = {}
self._tobj = {}
self._hobj['emb'] = head_emb
self._robj['emb'] = rel_emb
self._tobj['emb'] = tail_emb
@property
def src(self):
return self._hobj
@property
def dst(self):
return self._tobj
@property
def data(self):
return self._robj
class DglkeEvaluator(Evaluator):
def __init__(self, dataset_name, model_path, entity_to_id_path, relation_to_id_path):
dl = DataLoader(dataset_name, entity_to_id_path=entity_to_id_path, relation_to_id_path=relation_to_id_path)
super().__init__(dl)
config = load_model_config(os.path.join(model_path, 'config.json'))
model = ScoreInfer(-1, config, model_path)
model.load_model()
self.model = model.model
self.entity_emb = self.model.entity_emb(self.entities.long())
self.entity_emb.share_memory_()
self.relation_emb = self.model.relation_emb(self.relations.long())
self.relation_emb.share_memory_()
def score_batch(self, batch):
head_neg_score = self.model.score_func.create_neg(True)
tail_neg_score = self.model.score_func.create_neg(False)
head_neg_prepare = self.model.score_func.create_neg_prepare(True)
tail_neg_prepare = self.model.score_func.create_neg_prepare(False)
pos_head_emb = self.entity_emb[batch[:, 0], :]
pos_tail_emb = self.entity_emb[batch[:, 2], :]
pos_rel = batch[:, 1].long()
pos_rel_emb = self.model.relation_emb(pos_rel)
edata = FakeEdge(pos_head_emb, pos_rel_emb, pos_tail_emb)
pos_score = self.model.score_func.edge_func(edata)['score']
neg_head, tail = head_neg_prepare(pos_rel, 1, self.entity_emb, pos_tail_emb, -1, False)
neg_scores_head = head_neg_score(neg_head, pos_rel_emb, tail,
1, len(batch), self.num_neg)
head, neg_tail = tail_neg_prepare(pos_rel, 1, pos_head_emb, self.entity_emb, -1, False)
neg_scores_tail = tail_neg_score(head, pos_rel_emb, neg_tail,
1, len(batch), self.num_neg)
return pos_score, neg_scores_head.squeeze(0), pos_score, neg_scores_tail.squeeze(0)
if __name__ == "__main__":
torch.manual_seed(145)
np.random.seed(145)
model_path = r"G:\ckpts\TransE_l2_FB15k_0"
entity_to_id_path = r"G:\triples\entities.tsv"
relation_to_id_path = r"G:\triples\relations.tsv"
evaluator = DglkeEvaluator("HQ_DIR", model_path, entity_to_id_path, relation_to_id_path)
result = evaluator.evaluate(100, 1)
print(result)
```
#### File: test/evaluation_tests/test_safran_evaluation.py
```python
import torch
from openbiolink.evaluation.dataLoader import DataLoader
from openbiolink.evaluation.evaluation import Evaluator
class SafranEvaluator(Evaluator):
def __init__(self, dataset_name, evaluation_file_path):
dl = DataLoader(dataset_name)
super().__init__(dl)
with open(evaluation_file_path) as infile:
content = infile.readlines()
content = [x.strip() for x in content]
self.predictions = dict()
for i in range(0, len(content), 3):
head, rel, tail = content[i].split(" ")
head = self.dl.entity_to_id[head]
rel = self.dl.relation_to_id[rel]
tail = self.dl.entity_to_id[tail]
pos_head = 0.0
neg_head = []
head_predictions = content[i+1]
            if head_predictions == "Heads:":
continue
else:
head_predictions = head_predictions[len("Heads: "):].split("\t")
for j in range(0, len(head_predictions), 2):
head_prediction = self.dl.entity_to_id[head_predictions[j]]
confidence = float(head_predictions[j+1])
if head == head_prediction:
# Correct prediction
pos_head = confidence
else:
# False prediction
neg_head.append((head_prediction, confidence))
pos_tail = 0.0
neg_tail = []
tail_predictions = content[i+2]
if tail_predictions == "Tails:":
continue
else:
tail_predictions = tail_predictions[len("Tails: "):].split("\t")
for j in range(0, len(tail_predictions), 2):
tail_prediction = self.dl.entity_to_id[tail_predictions[j]]
confidence = float(tail_predictions[j+1])
if tail == tail_prediction:
# Correct prediction
pos_tail = confidence
else:
# False prediction
neg_tail.append((tail_prediction, confidence))
self.predictions[f"{str(head)};{str(rel)};{str(tail)}"] = (pos_head, neg_head, pos_tail, neg_tail)
def score_batch(self, batch):
        pos_score_head = torch.zeros((len(batch),), dtype=torch.float)
        neg_score_head = torch.zeros((len(batch), self.num_neg), dtype=torch.float)
        pos_score_tail = torch.zeros((len(batch),), dtype=torch.float)
        neg_score_tail = torch.zeros((len(batch), self.num_neg), dtype=torch.float)
for i in range(batch.shape[0]):
head, rel, tail = batch[i,:]
key = f"{str(head.item())};{str(rel.item())};{str(tail.item())}"
if key in self.predictions:
(pos_head, neg_heads, pos_tail, neg_tails) = self.predictions[key]
pos_score_head[i] = pos_head
for neg_head, confidence in neg_heads:
neg_score_head[i, neg_head] = confidence
pos_score_tail[i] = pos_tail
for neg_tail, confidence in neg_tails:
neg_score_tail[i, neg_tail] = confidence
else:
pass
return pos_score_head, neg_score_head, pos_score_tail, neg_score_tail
if __name__ == "__main__":
evaluation_file_path = r"G:\prediction.txt"
evaluator = SafranEvaluator("HQ_DIR", evaluation_file_path)
result = evaluator.evaluate(100, 1, filtering=False)
print(result)
```
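`SafranEvaluator.__init__` assumes predictions arrive as three-line records: a `head rel tail` line, then tab-separated (entity, confidence) pairs after a `Heads: ` and a `Tails: ` prefix. A small sketch of that parsing with made-up identifiers:
```python
# Made-up IDs; the real files use the dataset's entity and relation labels.
record = (
    "Q1 interacts Q2\n"
    "Heads: Q1\t0.9\tQ7\t0.4\n"
    "Tails: Q2\t0.8\tQ9\t0.3\n"
)
lines = record.splitlines()
head, rel, tail = lines[0].split(" ")
pairs = lines[1][len("Heads: "):].split("\t")
head_predictions = list(zip(pairs[0::2], map(float, pairs[1::2])))
print(head_predictions)  # [('Q1', 0.9), ('Q7', 0.4)]
```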
#### File: test/train_test_split_tests/test_negativeSampler.py
```python
from unittest import TestCase
class TestNegativeSampler(TestCase):
def test_generate_random_neg_samples(self):
pass
def test_subsample_with_tn(self):
pass
def test_generate_corrupted_neg_samples(self):
pass
```
|
{
"source": "Jerryhu1/MasterThesis",
"score": 2
}
|
#### File: ea/.ipynb_checkpoints/modelTrainer-checkpoint.py
```python
from collections import defaultdict
import collections
import numpy as np
import pandas as pd
import os.path
from ea import util, individual, constants
import nltk
from music21 import *
pitch_matrix_path = './pitch_matrix.csv'
duration_matrix_path = './duration_matrix.csv'
symbol_matrix_path = './symbol_matrix.csv'
def flatten(l):
return [item for sublist in l for item in sublist]
def train_pitch_matrix(scores):
if os.path.exists(pitch_matrix_path):
return pd.read_csv(pitch_matrix_path, index_col=0)
if scores is None:
scores = get_corpus()
notes = flatten(get_pitches_per_score(scores))
if constants.N_GRAM == 'trigram':
matrix = get_trigram_matrix(notes)
else:
matrix = get_bigram_matrix(notes)
matrix = get_probabilistic_matrix(matrix)
matrix.to_csv(pitch_matrix_path)
return matrix
def get_trigram_matrix(items):
trigrams = nltk.trigrams(items)
matrix = defaultdict(lambda: defaultdict(lambda: 0))
for n1, n2, n3 in trigrams:
matrix[(n1, n2)][n3] += 1
matrix = pd.DataFrame(matrix)
matrix = matrix.fillna(0)
return matrix
def get_bigram_matrix(items):
    if isinstance(items[0], individual.Note):
items = map(lambda x: x.pitch, items)
bigrams = nltk.bigrams(items)
matrix = defaultdict(lambda: defaultdict(lambda: 0))
for n1, n2 in bigrams:
matrix[n1][n2] += 1
matrix = pd.DataFrame(matrix)
matrix = matrix.fillna(0)
return matrix
def get_corpus():
curr_corpus = corpus.corpora.LocalCorpus('wiki')
curr_corpus = curr_corpus.metadataBundle
scores = []
for c in curr_corpus[0:100]:
score = c.parse()
        # Transpose to C
score = util.transpose_piece(score, 'C')
scores.append(score)
return scores
def get_pitches_per_score(scores):
all_notes_per_score = [] # Multidimensional array of all notes per piece
for p in scores:
curr_pitches = []
# Get a part of the piece
for part in p.parts:
measure_iterator = part.getElementsByClass(stream.Measure)
if len(measure_iterator) > 0:
note_iterator = measure_iterator.flat.notesAndRests
else:
note_iterator = part.notesAndRests
if len(note_iterator) == 0:
continue
for el in note_iterator:
if el.isRest:
pitch_name = 'REST'
else:
if el.isChord:
root = el.root()
if root is None:
continue
pitch_name = el.root().nameWithOctave
else:
pitch_name = el.nameWithOctave
if pitch_name not in constants.NOTE_RANGE:
continue
if '-' in pitch_name or '##' in pitch_name:
pitch_name = el.pitch.getEnharmonic().nameWithOctave
curr_pitches.append(pitch_name)
all_notes_per_score.append(curr_pitches)
return all_notes_per_score
def create_frequency_array(notes):
# Get frequency array
counter = collections.Counter(flatten(notes))
# Initial note counter
counter[' '] = len(notes)
return counter
def create_frequency_matrix(pieces, possible_notes):
"""
Creates the frequency matrix, pieces must be supplied as multi dimensional array of notes, each
entry is one piece
"""
possible_notes.add(' ')
zeros = np.full((len(possible_notes), len(possible_notes)), 0)
matrix = pd.DataFrame(zeros, index=possible_notes, columns=possible_notes)
matrix = matrix.astype(float)
for n in pieces:
# Fill transition matrix frequencies
for i in range(len(n) + 1):
# Last note
curr_note = n[i - 1]
if i == len(n):
matrix[curr_note][' '] = matrix[curr_note][' '] + 1
continue
# First note
next_note = n[i]
if i == 0:
matrix[' '][next_note] = matrix[' '][next_note] + 1
continue
matrix[curr_note][next_note] = matrix[curr_note][next_note] + 1
return matrix
def get_probabilistic_matrix(matrix):
matrix = matrix.astype(float)
for n1_n2 in matrix:
total_count = sum(matrix[n1_n2])
for n3 in matrix[n1_n2].keys():
if matrix[n1_n2][n3] != 0.0:
matrix[n1_n2][n3] /= total_count
return matrix
def train_duration_matrix(scores):
if os.path.exists(duration_matrix_path):
return pd.read_csv(duration_matrix_path, index_col=0)
if scores is None:
scores = get_corpus()
# set containing all possible notes for matrix creation
possible_durations = ['half', 'quarter', 'eighth', '16th']
all_durations = [] # Multidimensional array of all notes per piece
for i in scores:
# Get a part of the piece
noteIterator = i.parts[0].getElementsByClass(stream.Measure).flat.getElementsByClass('Note')
if len(noteIterator) == 0:
noteIterator = i.parts[0].notesAndRests
for j in range(len(noteIterator)):
dur = noteIterator[j]
if dur.duration.type not in possible_durations:
continue
all_durations.append(dur.duration.type)
if constants.N_GRAM == 'trigram':
matrix = get_trigram_matrix(all_durations)
else:
matrix = get_bigram_matrix(all_durations)
matrix = get_probabilistic_matrix(matrix)
matrix.to_csv(duration_matrix_path)
return matrix
def train_interval_matrix(scores):
first_notes_per_bar = []
all_intervals = []
for s in scores:
noteIterator = s.parts[0].getElementsByClass(stream.Measure)
if len(noteIterator) == 0:
noteIterator = s.parts[0].notesAndRests.stream()
for i in noteIterator:
firstNote = None
counter = 1
m = []
for j in i.notesAndRests:
if j.isChord:
curr_note = note.Note(j.root())
elif j.isRest:
m.append('REST')
continue
else:
curr_note = j
if counter == 1:
if j.isChord:
firstNote = note.Note(j.root())
else:
firstNote = j
pitch_name = firstNote.nameWithOctave
if '-' in pitch_name or '##' in pitch_name:
pitch_name = firstNote.pitch.getEnharmonic().nameWithOctave
if pitch_name in constants.NOTE_RANGE:
first_notes_per_bar.append(pitch_name)
counter += 1
continue
octave = curr_note.octave
root = note.Note('C')
root.octave = octave
interv = interval.Interval(root, curr_note).name
m.append(interv)
counter += 1
all_intervals.append(interv)
bigrams = list(nltk.bigrams(all_intervals))
matrix = defaultdict(lambda: defaultdict(lambda: 0))
for i1,i2 in bigrams:
matrix[i1][i2] += 1
for i1 in matrix:
total = float(sum(matrix[i1].values()))
for i2 in matrix[i1]:
matrix[i1][i2] /= total
int_matrix = pd.DataFrame(matrix)
int_matrix = int_matrix.fillna(0)
return int_matrix
def update_matrix(samples, matrix, convergence_rate):
if constants.N_GRAM == 'trigram':
n_matrix = get_trigram_matrix(flatten(samples))
else:
n_matrix = get_bigram_matrix(flatten(samples))
u_matrix = get_probabilistic_matrix(n_matrix)
new_matrix = matrix.copy()
for i in new_matrix.keys():
for j in new_matrix.keys():
if i in u_matrix and j in u_matrix[i]:
# u_matrix contains the values, subtract from each other
difference = u_matrix[i][j] - matrix[i][j]
elif i not in u_matrix:
            # u_matrix does not contain the column, so we cannot update transitions starting from this note
difference = 0.0
else: # u_matrix contains the column, but not the row. So no transitions to that note at all
difference = -matrix[i][j]
new_matrix[i][j] = matrix[i][j] + (difference * convergence_rate)
return new_matrix
```
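`train_pitch_matrix` reduces to counting bigram transitions and normalising each "from" column into probabilities. The same pipeline on a toy pitch sequence, without the music21 corpus:
```python
# Toy sequence standing in for the corpus pitches.
from collections import defaultdict
import nltk
import pandas as pd

pitches = ["C4", "D4", "E4", "C4", "D4", "C4"]
counts = defaultdict(lambda: defaultdict(lambda: 0))
for n1, n2 in nltk.bigrams(pitches):
    counts[n1][n2] += 1
matrix = pd.DataFrame(counts).fillna(0)

# each column is a "from" pitch; normalise so its transitions sum to 1
matrix = matrix / matrix.sum(axis=0)
print(matrix)
```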
#### File: EvoMusicCompanion/ea/musicPlayer.py
```python
from music21 import note, chord, stream, duration, interval, midi
from music21.stream import Score
from ea import individual, fitness, constants, metrics
from ea.individual import Measure, Note
import datetime
import ea
def play(population: [individual.Individual]):
s = getPopulationScore(population)
player = midi.realtime.StreamPlayer(s)
player.play()
def write_music_midi(population: [individual.Individual]):
folder, file = metrics.get_path('MULTIPLE', '.mid')
s = getPopulationScore(population)
print(f'Writing to: {folder + file}')
s.write("midi", folder + file)
def play_music_xml(population: [individual.Individual]):
s = getPopulationScore(population)
s.show('musicxml')
def getPopulationScore(population: [individual.Individual]):
s = stream.Score(id='mainScore')
part = stream.Part(id='part0')
part1 = stream.Part(id='part1')
for i in range(len(population)):
# For each measure
for m in population[i].measures:
measure = stream.Measure()
chord_measure = stream.Measure()
if m.chord is not None:
chord_measure.append(chord.Chord(m.chord, quarterLength=4.0))
duration_count = 0.0
# For each note
for j in m.notes:
if j.pitch == 'REST':
n = note.Rest()
n.duration = duration.Duration(quarterLength=j.duration.duration_value / 0.25)
else:
n = note.Note(j.pitch)
n.duration = duration.Duration(quarterLength=j.duration.duration_value / 0.25)
measure.append(n)
duration_count += j.duration.duration_value
            # Pad the last note if the measure is not completely filled
            if duration_count < 1.0:
                measure[-1].duration.quarterLength += (1.0 - duration_count) / 0.25
part.append(measure)
part1.append(chord_measure)
s.append(part)
s.append(part1)
return s
def play_intervals(population: [individual.Individual]):
population = map(lambda x: x.notes, population)
score = []
for i in population:
# For each measure
for m in i:
measure = []
# For each note
for j in m:
if j.pitch == 'REST':
n = note.Rest()
n.duration = duration.Duration(quarterLength=j.duration.duration_value / 0.25)
measure.append(n)
else:
intv = interval.Interval(j.interval)
n = intv.transposeNote(note.Note('C5'))
n.duration = duration.Duration(quarterLength=j.duration.duration_value / 0.25)
measure.append(n)
score.append(measure)
s = stream.Score(id='mainScore')
part = stream.Part(id='part0')
for i in range(len(score)):
m = stream.Measure(i + 1)
notesInMeasure = score[i]
for n in notesInMeasure:
m.append(n)
part.append(m)
chords = get_c_chord_part(len(part))
s.append(part)
s.append(chords)
print(f'key = {s.analyze("key")}')
player = midi.realtime.StreamPlayer(s)
player.play()
def get_c_chord_part(measures):
chords = [['C3', 'E3', 'G3'], ['G3', 'B3', 'D3'], ['E3', 'G3', 'B3'], ['D3', 'F#3', 'A3']]
counter = 0
chord_part = stream.Part(id='part1')
for m in range(measures):
c = chord.Chord(chords[counter], quarterLength=4.0)
chord_part.append(c)
counter += 1
if counter == 4:
counter = 0
return chord_part
def play_pitches(population):
score = []
for i in population:
for j in i:
score.append(note.Note(j))
s = stream.Score(id='mainScore')
part = stream.Part(id='part0')
part.append(score)
s.append(part)
s.show('musicxml')
def play_measure(measure: Measure):
s = stream.Score(id="mainScore")
notes_part = stream.Part(id="part0")
m = convert_measure_to_music21_measure(measure)
notes_part.append(m)
s.append(notes_part)
print(measure.chord)
chord_part = stream.Part(id="part1")
chord_measure = stream.Measure(1)
chord_measure.append(chord.Chord(measure.chord, quarterLength=4.0))
chord_part.append(chord_measure)
s.append(chord_part)
player = midi.realtime.StreamPlayer(s)
player.play()
def convert_measure_to_music21_measure(m: Measure):
m.notes: [Note]
measure = stream.Measure(1)
for j in m.notes:
if j.pitch == 'REST':
n_1 = note.Rest()
n_1.duration = duration.Duration(quarterLength=j.duration.duration_value / 0.25)
else:
n_1 = note.Note(j.pitch)
n_1.duration = duration.Duration(quarterLength=j.duration.duration_value / 0.25)
measure.append(n_1)
return measure
def music21_score_to_individual(s: Score):
p = s.parts[0]
measures = p.getElementsByClass(stream.Measure)
new_measures = []
for m in range(len(measures)):
new_measure = individual.Measure([], 0, [])
m_notes = measures[m].notesAndRests
for n in m_notes:
if n.isRest:
if n.duration.type == 'whole':
continue
pitch_name = 'REST'
elif n.isChord:
# Only 1 chord per measure :/
if len(new_measure.chord) > 0:
continue
chord = []
for chord_note in n:
chord.append(chord_note.pitch.nameWithOctave)
new_measure.chord = chord
continue
else:
pitch_name = n.nameWithOctave
if '-' in pitch_name or '##' in pitch_name:
pitch_name = n.pitch.getEnharmonic().nameWithOctave
dur = ea.duration.Duration(n.duration.type, None)
new_note = individual.Note(pitch_name, dur)
new_measure.notes.append(new_note)
if len(new_measure.notes) == 0:
continue
new_measures.append(new_measure)
indiv = individual.Individual(new_measures, 0.0)
if len(new_measures) > 0:
ea.initialisation.set_chords(indiv)
fitness.set_fitness(indiv)
else:
return None
return indiv
```
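Throughout `getPopulationScore`, lengths are converted with `duration_value / 0.25`: the EA encodes a note's duration as a fraction of a whole note, while music21 measures in quarter notes. A two-line check of that convention:
```python
from music21 import note, duration

ea_duration_value = 0.125  # an eighth note in the EA's whole-note encoding
n = note.Note("C5")
n.duration = duration.Duration(quarterLength=ea_duration_value / 0.25)
print(n.duration.type)  # 'eighth'
```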
#### File: EvoMusicCompanion/ea/mutation.py
```python
from random import Random
from music21 import pitch
from music21.interval import Interval
from ea import initialisation, simulation, constants, duration
from ea.individual import Individual, Measure
import copy
rng = Random()
def applyMutation(individual: Individual, elitist_population: [Individual]):
mutations = [swap_measure, change_rest_or_note, change_duration, reverse_measure,
transpose_interval_measure, elitist_mutation]
p1 = 0.2
p2 = 0.2
p3 = 0.1
p4 = 0.1
p5 = 0.2
p6 = 0.05
probs = [p1, p2, p3, p4, p5, p6]
for i in range(len(mutations)):
prob = probs[i]
m = mutations[i]
p = rng.random()
if p < prob:
if m is elitist_mutation:
m(individual, elitist_population)
else:
m(individual)
def elitist_mutation(individual: Individual, elitist_population: [Individual]):
e_individual: Individual = rng.choice(elitist_population)
measure = rng.choice(range(len(e_individual.measures)))
e_individual_copy = copy.deepcopy(e_individual.measures[measure].notes)
individual.measures[measure].notes = e_individual_copy
if individual.measures[measure].notes is e_individual.measures[measure].notes:
print('Mutated individual has reference to elitist individual')
def swap_measure(individual: Individual):
i1 = rng.randrange(len(individual.measures))
i2 = rng.randrange(len(individual.measures))
    while i1 == i2:
        i2 = rng.randrange(len(individual.measures))
m1 = copy.deepcopy(individual.measures[i1].notes)
m2 = copy.deepcopy(individual.measures[i2].notes)
individual.measures[i1].notes = m2
individual.measures[i2].notes = m1
def swap_notes_in_measure(individual: Individual):
m_index = rng.randrange(len(individual.measures))
notes = individual.measures[m_index].notes
n_index1 = rng.randrange(len(notes))
n_index2 = rng.randrange(len(notes))
while n_index1 == n_index2:
n_index2 = rng.randrange(len(notes))
n1 = notes[n_index1]
n2 = notes[n_index2]
individual.measures[m_index].notes[n_index1] = n2
individual.measures[m_index].notes[n_index2] = n1
def change_rest_or_note(individual: Individual):
m_index = rng.randrange(len(individual.measures))
notes = individual.measures[m_index].notes
note_index = rng.randrange(len(notes))
note = notes[note_index]
if note.pitch == 'REST':
new_pitch = initialisation.get_random_pitch_transition(None)
note.set_pitch(new_pitch)
else:
note.set_pitch('REST')
notes[note_index] = note
def change_duration(individual: Individual):
measure = rng.choice(individual.measures)
notes = measure.notes
note = rng.choice(notes)
durations = [0.0625, 0.125, 0.25, 0.5]
d = rng.choice(durations)
new_d = duration.Duration(None, d)
note.duration = new_d
while measure.get_total_duration() > 1.0:
n = rng.choice(notes)
if n is note:
continue
n_dur_idx = durations.index(n.duration.duration_value)
        # If this is a sixteenth note, we remove it entirely
        if n_dur_idx == 0:
            measure.notes.remove(n)
# Else we go one step back in duration
else:
new_d = duration.Duration(None, durations[n_dur_idx - 1])
n.duration = new_d
def change_pitch(size: int, individual: Individual):
    for i in range(size):
        m = rng.choice(individual.measures)
        note = rng.choice(m.notes)
        # assumed intent of this stub: re-sample the pitch from the transition model
        note.set_pitch(initialisation.get_random_pitch_transition(None))
def transpose_interval_measure(individual: Individual):
m: Measure = rng.choice(individual.measures)
intvl = 0
for i in range(len(m.notes)):
n = m.notes[i]
if n.pitch == 'REST':
continue
# If we find the first pitch, we transpose this first
if i == 0:
first_pitch = n.pitch
intvl = (rng.choice([1, 2, 3]))
init_scale_degree = constants.NOTE_RANGE.index(first_pitch)
if len(constants.NOTE_RANGE) - init_scale_degree < 13:
intvl = -intvl
# If the new scale degree is not in range, we set it to the minimum or maximum
if init_scale_degree + intvl < 0:
new_first_pitch = constants.NOTE_RANGE[0]
elif init_scale_degree + intvl > len(constants.NOTE_RANGE) - 1:
new_first_pitch = constants.NOTE_RANGE[-1]
else:
new_first_pitch = constants.NOTE_RANGE[init_scale_degree + intvl]
n.set_pitch(new_first_pitch)
continue
note_scale_degree = constants.NOTE_RANGE.index(n.pitch)
# The remaining notes will be transposed with the same intervals as previously
# If the note goes out of range, we lower or raise with an octave
if note_scale_degree + intvl > len(constants.NOTE_RANGE) - 1:
intvl = intvl - 7
elif note_scale_degree + intvl < 0:
intvl = intvl + 7
new_pitch = constants.NOTE_RANGE[note_scale_degree + intvl]
n.set_pitch(new_pitch)
def reverse_measure(individual: Individual):
m: Measure = rng.choice(individual.measures)
m_copy = copy.deepcopy(m)
j = len(m.notes) - 1
for i in range(len(m.notes)):
m.notes[i] = m_copy.notes[j]
j -= 1
```
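`applyMutation` rolls an independent probability for each operator, so several mutations can fire on the same individual in one generation. A stripped-down sketch of that dispatch, with plain functions standing in for the `Individual`-based operators:
```python
import random

rng = random.Random(42)

def op_a(x): x.append("a")
def op_b(x): x.append("b")

mutations = [op_a, op_b]
probs = [0.2, 0.05]  # each operator fires independently

genome = []
for op, p in zip(mutations, probs):
    if rng.random() < p:
        op(genome)
print(genome)
```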
#### File: EvoMusicCompanion/ea/simulation.py
```python
from math import ceil
from ea import individual, musicPlayer, modelTrainer, initialisation, crossover, mutation, fitness, selection, \
constants, metrics, modelUpdater
from ea.individual import Individual
import random
import time
import sys
class Simulation:
pitch_matrix = None
backoff_matrix = None
duration_matrix = None
simulation = None
def __init__(self, duration_matrix=None, pitch_matrix=None):
if self.simulation is not None:
print('Two instances of simulation, killing one')
self.duration_matrix = duration_matrix
self.pitch_matrix = pitch_matrix
self.population: [Individual] = []
self.elitist_population: [Individual] = []
self.simulation = self
def run(self, pitch_matrix, duration_matrix, backoff_matrix):
print('Starting generation')
if pitch_matrix is None:
self.pitch_matrix = modelTrainer.train_pitch_matrix(None)
if duration_matrix is None:
self.duration_matrix = modelTrainer.train_duration_matrix(None)
if backoff_matrix is None:
self.backoff_matrix = modelTrainer.get_backoff_matrix()
initialisation.pitch_matrix = self.pitch_matrix
initialisation.duration_matrix = self.duration_matrix
initialisation.backoff_matrix = self.backoff_matrix
print('Initializing population')
self.population = initialisation.initialize_population(constants.POPULATION_SIZE)
converged_counter = 0.0
converged_iteration = -1
for i in range(constants.ITERATIONS):
self.population.sort(key=lambda x: x.fitness)
self.elitist_population = self.population[0:constants.ELITISM_SIZE]
next_generation = []
if constants.SYSTEM == "GA" or constants.SYSTEM == "HYBRID":
random.shuffle(self.population)
if constants.CROSSOVER == "NONE":
self.mutation_only()
next_generation.extend(self.population)
else:
crossover_generation = self.crossover_mutation()
crossover_generation.sort(key=lambda x: x.fitness)
if constants.SYSTEM == "HYBRID":
next_generation.extend(crossover_generation[0:constants.CROSSOVER_POPULATION])
else:
next_generation.extend(crossover_generation)
if constants.SYSTEM == "MODEL" or constants.SYSTEM == "HYBRID":
# Elitism
next_generation.extend(self.elitist_population)
sel = self.population[0:constants.SELECTION_SIZE]
if constants.LEARNING_RATE != 0.0:
self.update_matrices(sel)
if constants.SYSTEM == "HYBRID":
next_generation.extend(initialisation.initialize_population(constants.MODEL_POPULATION))
else:
next_generation.extend(
initialisation.initialize_population(constants.POPULATION_SIZE))
next_generation.sort(key=lambda x: x.fitness)
next_generation = next_generation[0:constants.POPULATION_SIZE]
self.population = next_generation
# Metrics
            if constants.SYSTEM != "MULTIPLE" and constants.METRIC_MODE != "ALL":
metrics.write_population_metrics(i, self.population)
if constants.METRIC_MODE == "ALL":
metrics.write_individual_metrics(i, population=self.population)
if i % 25 == 0:
print(f"Iteration {i} done")
print(f'Highest fitness: {self.population[0].fitness}')
sys.stdout.flush()
self.population.sort(key=lambda x: x.fitness)
if constants.RUN_MODE == 'MULTIPLE':
metrics.write_average_runs(converged_iteration, self.population)
if constants.SYSTEM != 'GA':
metrics.write_matrices(self.pitch_matrix, self.backoff_matrix, self.duration_matrix)
play_pieces = [self.population[0], self.population[ceil(len(self.population) / 2)], self.population[-1]]
musicPlayer.write_music_midi(play_pieces)
if constants.RUN_MODE == "SINGLE":
print('-------------------------------------------------')
print('Done evolving, playing songs')
print(f'Population size: {constants.POPULATION_SIZE}')
print(f'Elitist population size: {len(self.elitist_population)}')
print(f'Tournament size: {constants.TOURNAMENT_SIZE}')
print(f'Iterations: {constants.ITERATIONS}')
            print('Model updating: None, ratio = N/A')
sys.stdout.flush()
def crossover_mutation(self):
next_generation = []
random.shuffle(self.population)
for j in range(1, len(self.population), 2):
family = []
p1 = self.population[j - 1]
p2 = self.population[j]
c1, c2 = crossover.measure_crossover(p1, p2)
mutation.applyMutation(c1, self.elitist_population)
mutation.applyMutation(c2, self.elitist_population)
fitness.set_fitness(c1)
fitness.set_fitness(c2)
family.extend([c1, c2, p1, p2])
family.sort(key=lambda x: x.fitness)
next_generation.extend(family[0:2])
return next_generation
```
#### File: EvoMusicCompanion/ea/util.py
```python
from music21 import interval, pitch
def transpose_piece(piece, key):
k = piece.analyze('key')
if k.tonicPitchNameWithCase.islower():
return None
i = interval.Interval(k.tonic, pitch.Pitch(key))
new_piece = piece.transpose(i)
return new_piece
def flatten(l):
return [item for sublist in l for item in sublist]
```
#### File: EvoMusicCompanion/gui/MusicPlayerThread.py
```python
from PyQt5.QtCore import QThread
from ea import musicPlayer
class MusicPlayerThread(QThread):
def __init__(self, piece, musicType):
QThread.__init__(self)
self.piece = piece
self.musicType = musicType
def run(self):
if self.musicType == 'measure':
musicPlayer.play_measure(self.piece)
elif self.musicType == 'piece':
musicPlayer.play(self.piece)
else:
print('No music type was given to the music thread')
```
|
{
"source": "jerryIsHere/datasets",
"score": 3
}
|
#### File: datasets/xor_tydi_qa/xor_tydi_qa.py
```python
import json
import textwrap
import datasets
_XOR_TYDI_QA_CITATION = """\
@misc{asai2020xor,
title={XOR QA: Cross-lingual Open-Retrieval Question Answering},
author={<NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME>},
year={2020},
eprint={2010.11856},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_XOR_TYDI_QA_DESCRIPTION = """\
XOR-TyDi QA brings together for the first time information-seeking questions,
open-retrieval QA, and multilingual QA to create a multilingual open-retrieval
QA dataset that enables cross-lingual answer retrieval. It consists of questions
written by information-seeking native speakers in 7 typologically diverse languages
and answer annotations that are retrieved from multilingual document collections.
There are three sub-tasks: XOR-Retrieve, XOR-EnglishSpan, and XOR-Full.
"""
_DESCRIPTIONS = {
"xor-retrieve": textwrap.dedent(
"""\
XOR-Retrieve is a cross-lingual retrieval task where a question is written in the target
language (e.g., Japanese) and a system is required to retrieve English document that answers the question.
"""
),
"xor-full": textwrap.dedent(
"""\
XOR-Full is a cross-lingual retrieval task where a question is written in the target
language (e.g., Japanese) and a system is required to output a short answer in the target language."""
),
}
_DATA_URLS = {
"xor-retrieve": {
"train": "https://nlp.cs.washington.edu/xorqa/XORQA_site/data/xor_train_retrieve_eng_span.jsonl",
"dev": "https://nlp.cs.washington.edu/xorqa/XORQA_site/data/xor_dev_retrieve_eng_span_v1_1.jsonl",
"test": "https://nlp.cs.washington.edu/xorqa/XORQA_site/data/xor_test_retrieve_eng_span_q_only_v1_1.jsonl",
},
"xor-full": {
"train": "https://nlp.cs.washington.edu/xorqa/XORQA_site/data/xor_train_full.jsonl",
"dev": "https://nlp.cs.washington.edu/xorqa/XORQA_site/data/xor_dev_full_v1_1.jsonl",
"test": "https://nlp.cs.washington.edu/xorqa/XORQA_site/data/xor_test_full_q_only_v1_1.jsonl",
},
}
_XOR_TYDI_QA_URL = "https://nlp.cs.washington.edu/xorqa/"
class XORTyDiConfig(datasets.BuilderConfig):
"BuilderConfig for XOR-TyDi Dataset"
def __init__(self, data_url, citation, url, **kwargs):
"""
Args:
data_url: `dictionary`, dict with url for each split of data.
citation: `string`, citation for the dataset.
url: `string`, url for information about the dataset.
**kwargs: keyword arguments forwarded to super.
"""
super(XORTyDiConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
self.data_url = data_url
self.citation = citation
self.url = url
class XORTyDi(datasets.GeneratorBasedBuilder):
BUILDER_CONFIGS = [
XORTyDiConfig(
name=name,
description=_DESCRIPTIONS[name],
data_url=_DATA_URLS[name],
citation=_XOR_TYDI_QA_CITATION,
url=_XOR_TYDI_QA_URL,
)
for name in ["xor-retrieve", "xor-full"]
]
def _info(self):
features = {}
features["question"] = datasets.Value("string")
features["lang"] = datasets.features.ClassLabel(names=["ar", "bn", "fi", "ja", "ko", "ru", "te"])
features["answers"] = datasets.Value("string")
return datasets.DatasetInfo(
description=_XOR_TYDI_QA_DESCRIPTION + "\n" + self.config.description,
features=datasets.Features(features),
homepage=self.config.url,
citation=_XOR_TYDI_QA_CITATION,
)
def _split_generators(self, dl_manager):
train = dl_manager.download_and_extract(self.config.data_url["train"])
dev = dl_manager.download_and_extract(self.config.data_url["dev"])
test = dl_manager.download_and_extract(self.config.data_url["test"])
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train, "split": "train"}),
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": dev, "split": "dev"}),
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test, "split": "test"}),
]
def _generate_examples(self, filepath, split):
with open(filepath, encoding="utf-8") as f:
jlines = f.read()
result = [json.loads(jline) for jline in jlines.splitlines()]
if split == "test":
for id_, row in enumerate(result):
yield id_, {"question": row["question"], "answers": "None", "lang": row["lang"].strip()}
else:
for id_, row in enumerate(result):
yield id_, {
"question": row["question"],
"answers": " ".join(row["answers"]),
"lang": row["lang"].strip(),
}
```
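A sketch of loading one sub-task with the `datasets` library; the name `xor_tydi_qa` assumes the script is registered under that name (depending on the `datasets` version, a local path to the script directory may be needed instead):
```python
from datasets import load_dataset

ds = load_dataset("xor_tydi_qa", "xor-full")
print(ds["train"][0])  # {'question': ..., 'lang': ..., 'answers': ...}
```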
|
{
"source": "JerryIshihara/broadcast-video-feature-detection",
"score": 3
}
|
#### File: broadcast-video-feature-detection/Detector/DetectorWraper.py
```python
import cv2
import numpy as np
import heapq
from skimage.feature import hog
from config import *
from utils import *
class DetectorWraper(object):
"""
A class that contains all the detectors need for a clip detection
"""
def __init__(self, detectors, clip, genderDetect=None):
self.clip = clip
self.genderDetect = genderDetect
self.detectors = []
for detector in detectors:
self.detectors.append(detector)
self.num_save = 0
        # bookkeeping for the greedy tracking algorithm below
        self.num_detect = 0
        self.detection_frames = {}
        self.detection_index = {}
self.track_frame = None
def apply_detection(self, image):
"""
Return a list of tuple: List[(text, position)]
"""
pos_list = []
for c in self.detectors:
pos_list += c.get_position(image)
return pos_list
def visualize_detection(self, image):
"""
Return a image with visualized locations detected
"""
H, W, _ = image.shape
pos_list = self.apply_detection(image)
detections = {}
hasDetection = False
for i, L in enumerate(pos_list):
text, coordinates = L[0], L[1]
COLOR = COLORS[text]
for x, y, w, h in coordinates:
# prune bad homography points
if x < 0 or y < 0 or x + w > W or \
y + h > H or w <= 1 or h <= 1:
continue
# add the detection to the dict for tracking
detections[self.num_detect] = (x, y, w, h)
                    self.detection_index[self.num_detect] = (x, y, w, h, self.num_save, text, -1)  # trailing track id keeps the layout consistent with _get_positions
self.num_detect += 1
hasDetection = True
# if the detection is human
if text == 'face':
gender = self.genderDetect.classify(image[y:y+h, x:x+w, :])
gender = 'female' if gender[0] < 0.5 else 'male'
cv2.putText(image, gender, (x + w // 2 -10, y + h + 15),
cv2.FONT_HERSHEY_SIMPLEX, 0.6, COLOR, 2, cv2.LINE_AA)
image = cv2.rectangle(image, (x, y), (x + w, y + h), COLOR, 2)
cv2.putText(image, text, (x, y - 5),
cv2.FONT_HERSHEY_SIMPLEX, 0.6, COLOR, 2, cv2.LINE_AA)
if hasDetection:
self.detection_frames[self.num_save] = detections
        self.num_save += 1
return image
def save_detection(self, image):
"""
Save the visualized image
"""
img = self.visualize_detection(image)
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
cv2.imwrite(f'{SAVE_PATH}{self.clip}{self.num_save}.jpg', img)
def tracking(self, frameList, thres, relabel=False):
        '''
        Perform detection tracking.
        Input: the list of video frames `frameList`
        Output example: {'track 1': [obj_1, obj_3], 'track 2': [obj_2], ...}
        '''
if relabel:
print('detecting objects ...')
for frame in frameList:
self._get_positions(frame)
track_obj = {}
track_frame = {}
num_track = 0
left_over = []
detected_frames = list(self.detection_frames.keys())
print('start tracking ...')
for i, frame in enumerate(detected_frames[:-1]):
# extact frame image
img1 = frameList[detected_frames[i]]
img2 = frameList[detected_frames[i + 1]]
# frame detections
frame_1 = self.detection_frames[detected_frames[i]]
frame_2 = self.detection_frames[detected_frames[i + 1]]
# detection id
detects_1 = list(frame_1.keys())
detects_2 = list(frame_2.keys())
# ============= frame wise compare stage (consectutive) =============
# init a heap for later ranking
HEAP = []
# get only human left over
only_human = []
detects_1 += left_over
for obj_1 in detects_1:
max_score = -9999
next_score = -9999
pair = None
ratio = None
x1, y1, w1, h1, f, text1, _ = self.detection_index[obj_1]
# pnly track human
if text1 != 'face' and text1 != 'super woman':
continue
only_human.append(obj_1)
for obj_2 in detects_2:
x2, y2, w2, h2, _, text2, _ = self.detection_index[obj_2]
# only track human
if text2 != 'face' and text2 != 'super woman':
continue
patch_1 = img1[y1 : y1 + h1, x1 : x1 + w1, :]
patch_2 = img2[y2 : y2 + h2, x2 : x2 + w2, :]
score = self.similarity_score(patch_1, patch_2)
if score > max_score:
next_score = max_score
max_score = score
pair = obj_1, f, obj_2
ratio = next_score / max_score
# calculate min over max ratio
if ratio is None or ratio > thres:
continue
else: heapq.heappush(HEAP, (-max_score, (pair[0], pair[1], pair[2])))
# ================= track adding stage =================
repeated_obj1 = []
repeated_obj2 = []
for _ in range(len(HEAP)):
track = heapq.heappop(HEAP)
obj_1, frame_idx, obj_2 = track[1]
# if already paired, skip
if obj_2 in repeated_obj2:
continue
repeated_obj1.append(obj_1)
repeated_obj2.append(obj_2)
isPreviousTracked = False
for track_id in track_obj.keys():
if obj_1 in track_obj[track_id]:
track_obj[track_id].append(obj_2)
x, y, w, h, f, t, _ = self.detection_index[obj_2]
self.detection_index[obj_2] = (x, y, w, h, f, t, track_id)
isPreviousTracked = True
break
if isPreviousTracked:
continue
# if no previous track, add new one
track_obj[num_track] = [obj_1, obj_2]
x1, y1, w1, h1, f1, t1, _ = self.detection_index[obj_1]
x2, y2, w2, h2, f2, t2, _ = self.detection_index[obj_2]
self.detection_index[obj_1] = (x1, y1, w1, h1, f1, t1, num_track)
self.detection_index[obj_2] = (x2, y2, w2, h2, f2, t2, num_track)
num_track += 1
left_over = [obj for obj in only_human if obj not in repeated_obj1]
print(len(left_over))
self.track_obj = track_obj
print('saving annotations ...')
self._annotate_images(frameList)
print('Done !')
def _get_positions(self, image):
"""
SHOULD NOT BE CALL EXPLICITY, HIDDEN FUNCTION
return detection postion without visualize positon
"""
H, W, _ = image.shape
pos_list = self.apply_detection(image)
detections = {}
hasDetection = False
for i, L in enumerate(pos_list):
text, coordinates = L[0], L[1]
for x, y, w, h in coordinates:
if x < 0 or y < 0 or x + w > W or \
y + h > H or w <= 1 or h <= 1:
continue
# add the detection to the dict for tracking
if text == 'face' or text == 'super woman':
self.detection_index[self.num_detect] = (x, y, w, h, self.num_save, text, -1)
else:
self.detection_index[self.num_detect] = (x, y, w, h, self.num_save, text, -2)
detections[self.num_detect] = (x, y, w, h)
self.num_detect += 1
hasDetection = True
if hasDetection:
self.detection_frames[self.num_save] = detections
        self.num_save += 1
def _annotate_images(self, frameList):
"""
SHOULD NOT BE CALL EXPLICITY, HIDDEN FUNCTION
annotate positions in each frames with given frames
"""
image_array = frameList
for i, image in enumerate(image_array):
if i in list(self.detection_frames.keys()):
for obj in list(self.detection_frames[i].keys()):
x, y, w, h, frame, text, track_id = self.detection_index[obj]
COLOR = COLORS[text]
# if the detection is human
if text == 'face':
text = text + " id:{}".format(track_id)
# predict
gender = self.genderDetect.classify(image[y:y+h, x:x+w, :])
gender = 'female' if gender[0] < 0.5 else 'male'
cv2.putText(image, gender, (x + w // 2 - 10, y + h + 15),
cv2.FONT_HERSHEY_SIMPLEX, 0.6, COLOR, 2, cv2.LINE_AA)
image_array[i] = cv2.rectangle(image_array[i], (x, y), (x + w, y + h), COLOR, 2)
cv2.putText(image_array[i], text, (x, y - 5),
cv2.FONT_HERSHEY_SIMPLEX, 0.6, COLOR, 2, cv2.LINE_AA)
cv2.imwrite(f'{SAVE_PATH}{self.clip}{i}.jpg', image_array[i])
def similarity_score(self, img1, img2):
"""
Calculate the similarity score used for detection tracking
"""
# resize into the same shape first
if img1.shape != img2.shape:
v, h = max(img1.shape[0], img2.shape[0]), max(img1.shape[1], img2.shape[1])
dim = (h, v)
h_scale = min(img1.shape[1], img2.shape[1]) / h
v_scale = min(img1.shape[0], img2.shape[0]) / v
img1 = cv2.resize(img1, dim, interpolation = cv2.INTER_AREA)
img2 = cv2.resize(img2, dim, interpolation = cv2.INTER_AREA)
# # histogram
# diff = 0
# for c in range(3):
# hist1 = cv2.calcHist([img1], [c], None, [256], [0, 256])
# hist2 = cv2.calcHist([img2], [c], None, [256], [0, 256])
# diff += np.linalg.norm(hist1 - hist2)
# HoG
fd1, _ = hog(img1, orientations=8, pixels_per_cell=(16, 16),
cells_per_block=(1, 1), visualize=True, multichannel=True)
fd2, _ = hog(img2, orientations=8, pixels_per_cell=(16, 16),
cells_per_block=(1, 1), visualize=True, multichannel=True)
# Combine both
dist = np.linalg.norm(fd1 - fd2)
aim = mean_pixel_intensity_diff(img1, img2)
score = 1 / (dist + aim + 1)
return score
```
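`similarity_score` ranks candidate matches by the inverse distance between HoG descriptors (plus a mean-intensity term from `utils`, omitted here). A self-contained sketch on random patches; note that newer scikit-image versions replace `multichannel=True` with `channel_axis=-1`:
```python
import numpy as np
from skimage.feature import hog

patch_1 = np.random.rand(64, 64, 3)
patch_2 = np.random.rand(64, 64, 3)

fd1, _ = hog(patch_1, orientations=8, pixels_per_cell=(16, 16),
             cells_per_block=(1, 1), visualize=True, multichannel=True)
fd2, _ = hog(patch_2, orientations=8, pixels_per_cell=(16, 16),
             cells_per_block=(1, 1), visualize=True, multichannel=True)
score = 1 / (np.linalg.norm(fd1 - fd2) + 1)  # mean-intensity term omitted
print(score)
```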
|
{
"source": "JerryIshihara/coordinated-transit-signal-priority",
"score": 3
}
|
#### File: coordinated-transit-signal-priority/Aimsun/BusInPOZ.py
```python
class BusInPOZ:
def __init__(self, intersection, check_in_bus_info, check_in_phase, check_in_phasetime, check_in_time, last_check_in):
self.intersection_of_interest = intersection
self.bus_id = check_in_bus_info.idVeh
self.check_in_time = check_in_time
self.check_in_phase = check_in_phase
self.check_in_phasetime = check_in_phasetime
self.last_check_in = last_check_in # previous bus check in time
self.check_in_headway = check_in_time - last_check_in
self.check_out_time = -1
self.check_out_headway = -1
self.last_update_time = check_in_time
self.original_action = None
self.original_state = None # state generated at check in
def check_out(self, check_out_time, last_check_out=0):
self.check_out_time = check_out_time
self.check_out_headway = check_out_time - last_check_out
self.last_update_time = check_out_time
def set_action(self, action):
if self.original_action is None:
self.original_action = action
else:
print("duplicate set original action, check to make sure implementation is correct")
def set_state(self, state):
if self.original_state is None:
self.original_state = state
else:
print("duplicate set original state, check to make sure implementation is correct")
```
#### File: coordinated-transit-signal-priority/Aimsun/prePOZ.py
```python
from AAPI import *
class PrePOZ:
def __init__(self, config):
self.CONFIG = config
self.last_in_info = None
self.last_out_info = None
self.time_list = []
def get_state(self):
if len(self.time_list) == 0:
return [0, 0]
return [self.time_list[0], len(self.time_list)]
def update(self, time, timeSta):
self._enter_prePOZ(time, timeSta)
self._exit_prePOZ(time, timeSta)
def _enter_prePOZ(self, time, timeSta):
# retrieve intersection info from CONFIG
busExitDetector = self.CONFIG['busExitDetector']
# get bus internal position
busVehiclePosition = AKIVehGetVehTypeInternalPosition(1171922)
# bus exit check
        exitNum = AKIDetGetCounterCyclebyId(busExitDetector, busVehiclePosition)  # number of vehicles that crossed the exit detector in the last step
if exitNum > 0:
# First vehicle info
busout_info = AKIDetGetInfVehInDetectionInfVehCyclebyId(
busExitDetector, 0, busVehiclePosition)
# Last vehicle info
temp_info = AKIDetGetInfVehInDetectionInfVehCyclebyId(
busExitDetector, AKIDetGetNbVehsEquippedInDetectionCyclebyId(
busExitDetector, busVehiclePosition) - 1, busVehiclePosition)
for i in range(exitNum):
# If first vehicle equals last vehicle of last step
if i == 0 and busout_info.idVeh == self.last_out_info:
# Skip first vehicle and loop
continue
else:
print("prePOZ-{} enter-{}".format(busExitDetector, time))
self.time_list.append(time)
self.last_out_info = temp_info.idVeh
def _exit_prePOZ(self, time, timeSta):
busCallDetector = self.CONFIG['busCallDetector']
# get bus internal position
busVehiclePosition = AKIVehGetVehTypeInternalPosition(1171922)
# bus enter check
enterNum = AKIDetGetCounterCyclebyId(busCallDetector, busVehiclePosition)
if enterNum > 0:
# First vehicle info
busin_info = AKIDetGetInfVehInDetectionInfVehCyclebyId(
busCallDetector, 0, busVehiclePosition)
# Last vehicle info
temp_info = AKIDetGetInfVehInDetectionInfVehCyclebyId(
busCallDetector,
AKIDetGetNbVehsEquippedInDetectionCyclebyId(busCallDetector, busVehiclePosition) - 1,
busVehiclePosition)
for i in range(enterNum):
# If first vehicle equals last vehicle of last step
if i == 0 and busin_info.idVeh == self.last_in_info:
# Skip first vehicle and loop
continue
else:
print("prePOZ-{} exit-{}".format(busCallDetector, time))
self.time_list.pop(0)
self.last_in_info = temp_info.idVeh
```
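`PrePOZ` is essentially a FIFO of bus arrival times between the upstream exit detector and the downstream call detector; `get_state` exposes the lead bus's arrival time and the queue length. A toy sketch of that bookkeeping, without the Aimsun API:
```python
time_list = []

def enter(t):
    time_list.append(t)

def leave():
    time_list.pop(0)

def state():
    return [time_list[0], len(time_list)] if time_list else [0, 0]

enter(120.0)
enter(180.0)
print(state())  # [120.0, 2]
leave()
print(state())  # [180.0, 1]
```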
#### File: coordinated-transit-signal-priority/Aimsun/RunSeveralReplications.py
```python
import sys
import os.path
import locale
from datetime import datetime
import subprocess # This library allows you to open a command prompt with aconsole.exe
def RunSimulation(replicationID,modelPath): # This calls a subprocess like C:>ProgramFiles>Aimsuns>Aimsun Next 8.2_R5233>aconsole.exe -v -log -project **PROJECT** -cmd execute -target 1060
#So each of the subprocesses generated by this function is an aconsole execution
print "modelPath: " + modelPath
print "replication id: " + str(replicationID)
args = [execmd, '-v', '-log', '-project', modelPath, '-cmd', 'execute', '-target', replicationID]
for x in range(0, 1):
print(x)
popen = subprocess.Popen(args)
        popen.wait() # This makes the script wait until the subprocess (aconsole) has finished. This way the memory consumption won't skyrocket. (There will be only one replication running at a time.)
argv=sys.argv # The arguments this script will take are the ones provided via command prompt
if argv[1] == '-aconsolePath':
execmd = argv[2]
print "\n Aconsole: " + execmd + "\n"
if argv[3] == '-modelPath':
modelPath = argv[4]
print "------------\n"
print "Model: " + modelPath + "\n"
else:
print "no -modelPath parameter"
raw_input("Press enter to exit ;)")
sys.exit()
else:
print "No -aconsolePath parameter"
raw_input("Press enter to exit ;)")
sys.exit()
if argv[5] == '-targets':
print "targets: \n "
for i in range(len(argv[6:])):
j = i +6
if argv[j].isdigit():
print argv[j] + "\n "
else:
if argv[j] =='-modelPath':
print "------------\n"
print "Model: " + argv[j+1] + "\n"
if argv[j] == '-targets':
print "targets: \n"
print '===== NOW ===== \n'
print datetime.now()
else:
print "no -targets parameter"
raw_input("Press enter to exit ;)")
sys.exit()
# answer = raw_input("Continue? [y/n] \n")
answer = 'y'
if answer == 'y':
for j in range(len(argv[6:])):
i = j+6
if argv[i].isdigit():
print "Running simulation: " + argv[i] + " in model: " + modelPath
RunSimulation(argv[i],modelPath)
elif argv[i] == '-modelPath':
modelPath = argv[i+1]
else:
print "execution canceled "
raw_input("Press enter to exit ;)")
sys.exit()
print "Done"
# raw_input("Press enter to exit ;)")
```
#### File: coordinated-transit-signal-priority/Aimsun/script.py
```python
from AAPI import *
import os, sys, inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
from config import *
from corridor import *
def AAPILoad():
"""
    Create Intersection objects. Called when the module is loaded by Aimsun Next
"""
global corridor
corridor = Corridor(CORRIDOR)
return 0
def AAPIInit():
"""Summary
    Initializes the module. Called when Aimsun Next starts the simulation
"""
ANGConnEnableVehiclesInBatch(True)
return 0
def AAPIManage(time, timeSta, timeTrans, acycle):
"""Summary
    Called at the beginning of every simulation step; can be used to update states,
    output states to the DQN, and implement TSP strategies
Parameters
----------
time : double
Absolute time of simulation in seconds
timeSta : double
Time of simulation in stationary period, in seconds
timeTrans : double
Duration of warm-up period, in seconds
acycle : double
Duration of each simulation step in seconds
"""
return 0
def AAPIPostManage(time, timeSta, timeTrans, acycle):
"""Summary
    Called at the end of every simulation step; can be used to update states,
    output states to the DQN, and implement TSP strategies
Parameters
----------
time : double
Absolute time of simulation in seconds
timeSta : double
Time of simulation in stationary period, in seconds
timeTrans : double
Duration of warm-up period, in seconds
acycle : double
Duration of each simulation step in seconds
"""
global corridor
corridor.aapi_post_manage(time, timeSta, timeTrans, acycle)
return 0
def AAPIFinish():
"""Summary
Called when Aimsun Next finishes the simulation and can be used to terminate the module operations,
write summary information, close files, etc.
"""
global corridor
# write last reward to indicate that the replication is done
corridor.write_last_reward()
return 0
def AAPIUnLoad():
"""Summary
Called when the module is unloaded by Aimsun Next.
"""
return 0
```
|
{
"source": "JerryIshihara/lyft-motion-prediction-for-autonomous-vehivle",
"score": 2
}
|
#### File: lyft-motion-prediction-for-autonomous-vehivle/model/resnet18_gru.py
```python
import sys
import time
from typing import Dict
import numpy as np
import torch
from torch import nn
from torch import Tensor, nn, optim
from torchvision.models import resnet18
class GRUDecoder(nn.Module):
def __init__(self, device, batch=32, in_dim=512, out_dim=100, hidden_size=2048):
super().__init__()
self.batch = batch
self.in_dim = in_dim
self.out_dim = out_dim
self.h0 = torch.zeros(2, batch, hidden_size).to(device)
self.decoder1 = nn.GRU(in_dim, hidden_size, batch_first=True, bidirectional=True).to(device)
self.linear1 = nn.Linear(2 * hidden_size, out_dim + 1).to(device)
self.decoder2 = nn.GRU(out_dim + 1, hidden_size, batch_first=True, bidirectional=True).to(device)
self.linear2 = nn.Linear(2 * hidden_size, out_dim + 1).to(device)
self.decoder3 = nn.GRU(out_dim + 1, hidden_size, batch_first=True, bidirectional=True).to(device)
        self.linear3 = nn.Linear(2 * hidden_size, out_dim + 1).to(device)
self.softmax = nn.Softmax(dim=1).to(device)
def forward(self, x):
x1, h = self.decoder1(x.view(self.batch, 1, self.in_dim), self.h0)
x1 = self.linear1(x1)
coord1, conf1 = torch.split(x1.view(self.batch, self.out_dim + 1), self.out_dim, dim=1)
x2, h = self.decoder2(x1, h)
x2 = self.linear2(x2)
coord2, conf2 = torch.split(x2.view(self.batch, self.out_dim + 1), self.out_dim, dim=1)
x3, h = self.decoder3(x2, h)
        x3 = self.linear3(x3)
coord3, conf3 = torch.split(x3.view(self.batch, self.out_dim + 1), self.out_dim, dim=1)
conf = self.softmax(torch.cat([conf1, conf2, conf3], dim=1))
output = torch.cat([coord1, coord2, coord3], dim=1).view(self.batch, 3, 50, 2)
return conf, output
class Resnet18GRU(nn.Module):
"""Multi Mode Baseline
"""
def __init__(self, cfg: Dict, device, num_modes=3):
"""Init Mode Instance
Args:
cfg (Dict): Configuration Dict
num_modes (int, optional): number of trajectories. Defaults to 3.
device: needed to move GRU to cuda
"""
super().__init__()
# TODO: support other than resnet18?
backbone = resnet18(pretrained=True, progress=True)
self.backbone = backbone
num_history_channels = (
cfg["model_params"]["history_num_frames"] + 1) * 2
num_in_channels = 3 + num_history_channels
self.backbone.conv1 = nn.Conv2d(
num_in_channels,
self.backbone.conv1.out_channels,
kernel_size=self.backbone.conv1.kernel_size,
stride=self.backbone.conv1.stride,
padding=self.backbone.conv1.padding,
bias=False,
)
# This is 512 for resnet18 and resnet34;
# And it is 2048 for the other resnets
backbone_out_features = 512
# X, Y coords for the future positions (output shape: Bx50x2)
self.future_len = cfg["model_params"]["future_num_frames"]
self.batch_size = cfg['train_data_loader']['batch_size']
num_targets = 2 * self.future_len
self.num_preds = num_targets * num_modes
self.num_modes = num_modes
self.gru_decoder = GRUDecoder(
device,
batch=self.batch_size,
in_dim=backbone_out_features
)
def forward(self, x):
x = self.backbone.conv1(x)
x = self.backbone.bn1(x)
x = self.backbone.relu(x)
x = self.backbone.maxpool(x)
x = self.backbone.layer1(x)
x = self.backbone.layer2(x)
x = self.backbone.layer3(x)
x = self.backbone.layer4(x)
x = self.backbone.avgpool(x)
x = torch.flatten(x, 1)
confidences, pred = self.gru_decoder(x)
assert confidences.shape == (self.batch_size, self.num_modes)
return pred, confidences
```
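A shape check for the model above with a toy config (the keys are the ones the code reads; the values here are arbitrary, and `Resnet18GRU` is assumed to be in scope). Note the first forward pass downloads the pretrained resnet18 weights:
```python
import torch

cfg = {"model_params": {"history_num_frames": 10, "future_num_frames": 50},
       "train_data_loader": {"batch_size": 4}}
device = torch.device("cpu")
model = Resnet18GRU(cfg, device)

num_in_channels = 3 + (cfg["model_params"]["history_num_frames"] + 1) * 2
x = torch.randn(4, num_in_channels, 224, 224)
pred, confidences = model(x)
print(pred.shape, confidences.shape)  # torch.Size([4, 3, 50, 2]) torch.Size([4, 3])
```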
|
{
"source": "JerryJack121/Automatic-optical-defect-detection",
"score": 3
}
|
#### File: JerryJack121/Automatic-optical-defect-detection/my_dataloader.py
```python
from torch.utils.data.dataset import Dataset
from PIL import Image
class TestDataset(Dataset):
def __init__(self, img_list, transform):
super(TestDataset, self).__init__()
self.img_list = img_list
self.transform = transform
def __len__(self):
return len(self.img_list)
def __getitem__(self, index):
img = Image.open(self.img_list[index]).convert('RGB')
trans_img = self.transform(img)
return trans_img
```
|
{
"source": "JerryJack121/csv-data-preprocessing",
"score": 3
}
|
#### File: csv-data-preprocessing/prediction_for_lily_price_and_volume/utils.py
```python
import pandas as pd
import numpy as np
from torch.utils.data import Dataset
# Use the previous n days of data as training features and the current day's value as the label.
def generate_df_affect_by_n_days(col_name, series, n, mode):
df = pd.DataFrame()
for i in range(n):
        df['%s_d%d'%(col_name, i)] = series.tolist()[i:-(n - i)]  # tolist() sidesteps index alignment issues
if not mode == 'test':
df['y_%s'%col_name] = series.tolist()[n:]
return df
# Load the dataset
def read_data(path_csv, column, n, path_lastyear_csv, train_end):
    df = pd.read_csv(path_csv, encoding='utf-8')
    df_col = df[column].astype(float)
    if path_lastyear_csv:  # used to build the test set
        lastyear_df = pd.read_csv(path_lastyear_csv, encoding='utf-8')[-n:]
        last_df_col = lastyear_df[column].astype(float)
        df_col = pd.concat([last_df_col, df_col], axis=0, ignore_index=True)
        test_df = generate_df_affect_by_n_days(column, df_col, n, mode='test')
        return test_df
    train_series, test_series = df_col[:train_end], df_col[train_end - n:]
    train_df = generate_df_affect_by_n_days(column, train_series, n, mode='train')
    test_df = generate_df_affect_by_n_days(column, test_series, n, mode='valid')
    return train_df, test_df
def read_col_data(path_csv, columns, n, path_lastyear_csv=None, train_end=None):
    if train_end:  # training and validation sets
        train_df = pd.DataFrame()
        val_df = pd.DataFrame()
        for col in columns:
            train_col_df, val_col_df = read_data(path_csv, col, n, path_lastyear_csv, train_end)  # shape = (train_end-n) x (n+1)
            train_df = pd.concat([train_df, train_col_df], axis=1)
            val_df = pd.concat([val_df, val_col_df], axis=1)
        return train_df, val_df
    else:  # test set
        test_df = pd.DataFrame()
        for col in columns:
            test_col_df = read_data(path_csv, col, n, path_lastyear_csv, train_end)  # shape = (train_end-n) x (n+1)
            test_df = pd.concat([test_df, test_col_df], axis=1)
        return test_df
# Split features and labels
def split_xy(df, num_col, n):
arr = np.array(df)
for i in range(num_col):
if i == 0:
x = arr[:, 0:n]
y = arr[:, n].reshape(-1,1)
else:
x = np.concatenate((x, arr[:, (n+1)*i:(n+1)*(i+1)-1]), axis=1)
y = np.concatenate((y, arr[:, (n+1)*(i+1)-1].reshape(-1,1)), axis=1)
return x, y
class Setloader(Dataset):
def __init__(self, data, label):
# self.data, self.label = data[:, :-1].float(), data[:, -1].float()
self.data = data
self.label = label
def __getitem__(self, index):
return self.data[index], self.label[index]
def __len__(self):
return len(self.data)
```
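The helpers above compose into a small pipeline: build sliding-window frames per column, split them into feature and label arrays, then wrap the arrays in the Dataset. A sketch with hypothetical file and column names:
```python
import torch
from torch.utils.data import DataLoader

cols = ['price', 'volume']  # hypothetical column names
n = 7                       # previous 7 days as features
train_df, val_df = read_col_data('lily_2020.csv', cols, n, train_end=300)

x_train, y_train = split_xy(train_df, num_col=len(cols), n=n)
train_set = Setloader(torch.tensor(x_train, dtype=torch.float),
                      torch.tensor(y_train, dtype=torch.float))
train_loader = DataLoader(train_set, batch_size=32, shuffle=True)
```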
|
{
"source": "JerryJack121/Digital_Twin_Solutions_for_Smart_Farming_Competition",
"score": 3
}
|
#### File: Digital_Twin_Solutions_for_Smart_Farming_Competition/utils/Setloader.py
```python
import pandas as pd
import numpy as np
from torch.utils.data import Dataset
class Setloader(Dataset):
def __init__(self, data, label):
super(Setloader, self).__init__()
self.data = data
self.label = label
def __getitem__(self, index):
return self.data[index], self.label[index]
def __len__(self):
return self.data.shape[0]
class TestSetloader(Dataset):
def __init__(self, data):
super(TestSetloader, self).__init__()
self.data = data
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return self.data.shape[0]
```
|
{
"source": "JerryJack121/Django",
"score": 3
}
|
#### File: JerryJack121/Django/classify.py
```python
import os
import shutil
def Price_classify(read_folder,write_folder):
i=0
j=0
allFileList = os.listdir(read_folder)
for filename in allFileList:
file = open(os.path.join(read_folder,filename),encoding="utf-8")
text = []
        # Read the record date from each line
for line in file:
text.append(line)
pos = line.find('/')
pos2 = line.find('\n')
pos4 = line.find(':')
            if pos != -1:  # the line contains '/'
month = line[pos+1 : pos+3]
date = line[pos+4 : pos2]
#print('month=',month)
#print('date=',date)
            elif pos4 == -1:  # no '/' means it is not the first line; no ':' marks the fish-species line
fish = line[: line.find('\n')]
#print('fish=',fish)
file.close()
write_Path = os.path.join(write_folder,month,date)
if os.path.isfile(os.path.join(write_Path,filename))==False:
shutil.move(os.path.join(read_folder,filename),write_Path)
i=i+1
else:
os.remove(os.path.join(read_folder, filename))
j=j+1
    print('Price: added', i, 'records')
    print('Price: skipped', j, 'duplicate records')
def Count_classify(read_folder,write_folder):
i=0
j=0
# =============================================================================
# allFileList = os.listdir(read_folder)
# for filename in allFileList:
# month = int(filename[filename.find('年')+1 : filename.find('月')])
# date = int(filename[filename.find('月')+1 : filename.find('號')])
# if(month < 10):
# month = '0' +str(month)
# if(date < 10):
# date = '0' +str(date)
# #print(month,'月',date,'號')
# write_Path = os.path.join(write_folder,str(month),str(date))
# if os.path.isfile(os.path.join(write_Path,filename))==False:
# shutil.move(os.path.join(read_folder,filename),write_Path)
# i=i+1
# else:
# os.remove(os.path.join(read_folder,filename))
# =============================================================================
allFileList = os.listdir(read_folder)
for filename in allFileList:
month = filename[filename.find('-')+1:filename.find('-')+3]
date = filename[filename.find('-')+4:filename.find('-')+6]
write_Path = os.path.join(write_folder,month,date,filename)
#print(write_Path)
if os.path.isfile(write_Path)==False:
shutil.move(os.path.join(read_folder,filename),write_Path)
i=i+1
else:
os.remove(os.path.join(read_folder,filename))
j=j+1
    print('Count: added', i, 'records')
    print('Count: skipped', j, 'duplicate records')
#Count_classify('./data/Count', 'data_classification/Count')
```
|
{
"source": "JerryJack121/House_price_regression",
"score": 3
}
|
#### File: JerryJack121/House_price_regression/HousePrices_Regression(Howard).py
```python
import pandas as pd
import numpy as np
# import keras's Sequential model
from keras.models import Sequential
#from keras.layers.advanced_activations import PReLU
from keras import optimizers
from keras.layers import Dense,Dropout
from sklearn import preprocessing
np.random.seed(10)
# read data
data_train = pd.read_csv(r'D:\DATASET\ntut-ml-regression-2020\train-v3.csv')
x_train = data_train.drop(['price','id'],axis=1).values
y_train = data_train['price'].values
#y_train = y_train.reshape((-1, 1))
data_valid = pd.read_csv(r'D:\DATASET\ntut-ml-regression-2020\valid-v3.csv')
x_valid = data_valid.drop(['price','id'],axis=1).values
y_valid = data_valid['price'].values
#y_valid = y_valid.reshape((-1, 1))
data_test = pd.read_csv(r'D:\DATASET\ntut-ml-regression-2020\test-v3.csv')
x_test = data_test.drop(['id'],axis=1).values
#test_id = data_valid['id'].values
#print(x_test[:1])
sqft_living = data_test['sqft_living'].values
# data normalize use sklearn Preprocess model
"""
min_max_scaler = preprocessing.MinMaxScaler()
x_train_minmax = min_max_scaler.fit_transform(x_train)
x_valid_minmax = min_max_scaler.fit_transform(x_valid)
x_test_minmax = min_max_scaler.fit_transform(x_test)
"""
X_train = preprocessing.scale(x_train)
X_valid = preprocessing.scale(x_valid)
X_test = preprocessing.scale(x_test)
print('x_train=',x_train.shape)
# create keras's Sequential model
model = Sequential()
model.add(Dense(units=80,input_dim=x_train.shape[1], kernel_initializer='random_normal',activation='relu'))
model.add(Dense(units=100, kernel_initializer='random_normal',activation='relu'))
model.add(Dense(units=400, kernel_initializer='random_normal',activation='relu'))
model.add(Dense(units=200, kernel_initializer='random_normal',activation='relu'))
model.add(Dense(units=100, kernel_initializer='random_normal',activation='relu'))
model.add(Dense(units=80, kernel_initializer='random_normal',activation='relu'))
# output layer
model.add(Dense(units=1, kernel_initializer='random_normal'))
adam = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, decay=0.0)
print(model.summary())
model.compile(loss='MAE',optimizer=adam)
#sgd = SGD(lr=0.1, decay=1e-6, momentum=0.5, nesterov=True)
#model.compile(loss='MAE',optimizer='sgd')
train_history = model.fit(x=X_train,y=y_train,validation_data=(X_valid,y_valid),epochs=200,batch_size=500)
Y_predict = model.predict(X_test)
np.savetxt('D:/Doucuments/GitHub/House_price_regression/test1127.csv',Y_predict,delimiter=',')
#output = np.column_stack((test_id, Y_predict))
#np.savetxt('test200.csv', output, delimiter=',', header='id,price', comments='')
# Using pyplot
import matplotlib.pyplot as plt
def show_train_history(train_history, train, validation):
plt.plot(train_history.history[train])
plt.plot(train_history.history[validation])
plt.title('Train History')
plt.ylabel('loss')
plt.xlabel('Epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
show_train_history(train_history, 'loss', 'val_loss')
```
#### File: House_price_regression/Net/model.py
```python
import torch.nn as nn
import torch
from tensorboardX import SummaryWriter
from torchsummary import summary
class Net(nn.Module):
def __init__(self, features):
super(Net, self).__init__()
self.layer0 = nn.Sequential(nn.Linear(features, 16), nn.ReLU(),nn.BatchNorm1d(16))
self.layer1 = nn.Sequential(nn.Linear(16, 32), nn.ReLU())
self.dropout1 = nn.Dropout(p=0.25)
self.layer2 = nn.Sequential(nn.Linear(32, 64), nn.ReLU())
self.dropout2 = nn.Dropout(p=0.25)
self.layer3 = nn.Sequential(nn.Linear(64, 128), nn.ReLU())
self.dropout3 = nn.Dropout(p=0.25)
self.layer4 = nn.Sequential(nn.Linear(128, 256), nn.ReLU())
self.dropout4 = nn.Dropout(p=0.25)
self.layer5 = nn.Sequential(nn.Linear(256, 512), nn.ReLU())
self.layer6 = nn.Sequential(nn.Linear(512, 1024), nn.ReLU())
self.layer7 = nn.Sequential(nn.Linear(1024, 1024), nn.ReLU())
self.layer8 = nn.Sequential(nn.Linear(1024, 1024), nn.ReLU())
self.layer9 = nn.Sequential(nn.Linear(1024, 1024), nn.ReLU())
self.layer10 = nn.Sequential(nn.Linear(1024, 256), nn.ReLU())
self.layer11 = nn.Sequential(nn.Linear(256, 64), nn.ReLU())
self.layer12 = nn.Sequential(nn.Linear(64, 16), nn.ReLU())
self.layer13 = nn.Sequential(nn.Linear(16, 1), nn.ReLU())
def forward(self, x):
y_pred = self.layer0(x)
y_pred = self.layer1(y_pred)
# y_pred = self.dropout1(y_pred)
y_pred = self.layer2(y_pred)
# y_pred = self.dropout2(y_pred)
y_pred = self.layer3(y_pred)
# y_pred = self.dropout3(y_pred)
y_pred = self.layer4(y_pred)
# y_pred = self.dropout4(y_pred)
y_pred = self.layer5(y_pred)
y_pred = self.layer6(y_pred)
y_pred = self.layer7(y_pred)
y_pred = self.layer8(y_pred)
y_pred = self.layer9(y_pred)
y_pred = self.layer10(y_pred)
y_pred = self.layer11(y_pred)
y_pred = self.layer12(y_pred)
y_pred = self.layer13(y_pred)
return y_pred
class Howard(nn.Module):
def __init__(self, features):
super(Howard, self).__init__()
self.linear_relu1 = nn.Linear(features, 64)
self.linear_relu2 = nn.Linear(64, 256)
self.linear_relu3 = nn.Linear(256, 256)
self.linear_relu4 = nn.Linear(256, 256)
self.linear_relu5 = nn.Linear(256, 256)
self.linear_relu6 = nn.Linear(256, 256)
self.linear_relu7 = nn.Linear(256, 256)
self.linear_relu8 = nn.Linear(256, 256)
self.linear_relu9 = nn.Linear(256, 256)
self.linear_relu10 = nn.Linear(256, 256)
self.linear_relu11 = nn.Linear(256, 256)
self.linear_relu12 = nn.Linear(256, 256)
self.linear_relu13 = nn.Linear(256, 256)
self.linear_relu14 = nn.Linear(256, 16)
self.linear_relu15 = nn.Linear(16, features)
self.linear_relu16 = nn.Linear(features, 1)
def forward(self, x):
y_pred = self.linear_relu1(x)
y_pred = nn.functional.relu(y_pred)
y_pred = self.linear_relu2(y_pred)
y_pred = nn.functional.relu(y_pred)
y_pred = self.linear_relu3(y_pred)
y_pred = nn.functional.relu(y_pred)
y_pred = self.linear_relu4(y_pred)
y_pred = nn.functional.relu(y_pred)
y_pred = self.linear_relu5(y_pred)
y_pred = nn.functional.relu(y_pred)
y_pred = self.linear_relu6(y_pred)
y_pred = nn.functional.relu(y_pred)
y_pred = self.linear_relu7(y_pred)
y_pred = nn.functional.relu(y_pred)
y_pred = self.linear_relu8(y_pred)
y_pred = nn.functional.relu(y_pred)
y_pred = self.linear_relu9(y_pred)
y_pred = nn.functional.relu(y_pred)
y_pred = self.linear_relu10(y_pred)
y_pred = nn.functional.relu(y_pred)
y_pred = self.linear_relu11(y_pred)
y_pred = nn.functional.relu(y_pred)
y_pred = self.linear_relu12(y_pred)
y_pred = nn.functional.relu(y_pred)
y_pred = self.linear_relu13(y_pred)
y_pred = nn.functional.relu(y_pred)
y_pred = self.linear_relu14(y_pred)
y_pred = nn.functional.relu(y_pred)
y_pred = self.linear_relu15(y_pred)
y_pred = nn.functional.relu(y_pred)
y_pred = self.linear_relu16(y_pred)
return y_pred
class JackNet(nn.Module):
def __init__(self, features):
super(JackNet, self).__init__()
self.layer0 = nn.Sequential(nn.Linear(features, 128), nn.ReLU())
self.layer1 = nn.Sequential(nn.Linear(128, 256), nn.ReLU())
self.dropout1 = nn.Dropout(p=0.25)
self.layer2 = nn.Sequential(nn.Linear(256, 256), nn.ReLU())
self.dropout2 = nn.Dropout(p=0.25)
self.layer3 = nn.Sequential(nn.Linear(256, 256), nn.ReLU())
self.dropout3 = nn.Dropout(p=0.25)
self.layer4 = nn.Sequential(nn.Linear(256, 256), nn.ReLU())
self.dropout4 = nn.Dropout(p=0.25)
self.layer5 = nn.Sequential(nn.Linear(256, 256), nn.ReLU())
self.layer6 = nn.Sequential(nn.Linear(256, 128), nn.ReLU())
self.layer7 = nn.Sequential(nn.Linear(128, 1))
def forward(self, x):
y_pred = self.layer0(x)
y_pred = self.layer1(y_pred)
# y_pred = self.dropout1(y_pred)
y_pred = self.layer2(y_pred)
# y_pred = self.dropout2(y_pred)
y_pred = self.layer3(y_pred)
# y_pred = self.dropout3(y_pred)
y_pred = self.layer4(y_pred)
# y_pred = self.dropout4(y_pred)
y_pred = self.layer5(y_pred)
y_pred = self.layer6(y_pred)
y_pred = self.layer7(y_pred)
# y_pred = self.layer8(y_pred)
# y_pred = self.layer9(y_pred)
# y_pred = self.layer10(y_pred)
# y_pred = self.layer11(y_pred)
# y_pred = self.layer12(y_pred)
return y_pred
class fusion_net(nn.Module):
def __init__(self, features):
super(fusion_net, self).__init__()
self.layer1 = nn.Sequential(nn.Linear(features, 8), nn.ReLU())
self.layer2 = nn.Sequential(nn.Linear(8, 8), nn.ReLU())
self.layer3 = nn.Sequential(nn.Linear(8, 4), nn.ReLU())
self.layer4 = nn.Sequential(nn.Linear(4, 2), nn.ReLU())
self.layer5 = nn.Sequential(nn.Linear(2, 1))
def forward(self, x):
y_pred = self.layer1(x)
y_pred = self.layer2(y_pred)
y_pred = self.layer3(y_pred)
y_pred = self.layer4(y_pred)
y_pred = self.layer5(y_pred)
return y_pred
if __name__ == "__main__":
    # Draw the model architecture
x = torch.rand(1, 5).cuda()
model = fusion_net(5).cuda()
summary(model, (1,5))
with SummaryWriter(comment='Net') as w:
w.add_graph(model, x)
```
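A minimal PyTorch training-loop sketch for one of the networks above; the tensors are random placeholders and the feature count, loss, and hyperparameters are assumptions (L1 loss mirrors the MAE objective used in the Keras script earlier in this repo).
```python
import torch
import torch.nn as nn

model = JackNet(features=20)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
criterion = nn.L1Loss()  # MAE, as in the Keras version

x = torch.rand(64, 20)  # placeholder feature batch
y = torch.rand(64, 1)   # placeholder prices
for epoch in range(10):
    optimizer.zero_grad()
    loss = criterion(model(x), y)
    loss.backward()
    optimizer.step()
```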
#### File: JerryJack121/House_price_regression/preprocessing.py
```python
import pandas as pd
import torch
from sklearn.decomposition import PCA
from sklearn.preprocessing import LabelEncoder, StandardScaler
def one_hot(df, colNames):
for col in colNames:
labelencoder = LabelEncoder()
df[col] = labelencoder.fit_transform(df[col])
col_dummies = pd.get_dummies(df[col])
col_dummies.columns = [
col + str(i + 1) for i in range(col_dummies.shape[1])
]
df = pd.concat([df, col_dummies], axis=1)
df.drop(colNames, axis=1, inplace=True)
return df
def PCA_function(all_features):
pca = PCA(n_components=0.99)
pca_features = pca.fit_transform(all_features)
return pca_features
train_data = pd.read_csv(r'D:\DATASET\ntut-ml-regression-2020\train-v3.csv')
val_data = pd.read_csv(r'D:\DATASET\ntut-ml-regression-2020\valid-v3.csv')
test_data = pd.read_csv(r'D:\DATASET\ntut-ml-regression-2020\test-v3.csv')
# Concatenate all of the data so it can be standardized together
all_features = pd.concat(
(train_data.iloc[:, 2:], val_data.iloc[:, 2:], test_data.iloc[:, 1:]))
all_features = one_hot(all_features, ['sale_yr', 'sale_month', 'sale_day'])
## normalized
scaler = StandardScaler()
all_features = scaler.fit_transform(all_features)
# all_features = all_features.apply(lambda x: (x - x.mean()) / (x.std()))
pca_features = all_features
# pca_features = PCA_function(all_features)
# Split back into training, validation and test features
n_train = train_data.shape[0]
n_val = val_data.shape[0]
train_features = torch.tensor(pca_features[:n_train], dtype=torch.float)
val_features = torch.tensor(pca_features[n_train:n_train + n_val],
dtype=torch.float)
test_features = torch.tensor(pca_features[n_train + n_val:], dtype=torch.float)
train_labels = torch.tensor(train_data.price.values,
dtype=torch.float).view(-1, 1)
val_labels = torch.tensor(val_data.price.values, dtype=torch.float).view(-1, 1)
```
|
{
"source": "JerryJack121/Sound_features",
"score": 3
}
|
#### File: JerryJack121/Sound_features/augment.py
```python
import os
import cv2
from PIL import Image
import numpy as np
import argparse
# Define the rotate helper
def rotate(image, angle, center=None, scale=1.0):
    # Get the image dimensions
    (h, w) = image.shape[:2]
    # Default the rotation centre to the image centre when none is given
    if center is None:
        center = (w / 2, h / 2)
    # Perform the rotation
    M = cv2.getRotationMatrix2D(center, angle, scale)
    rotated = cv2.warpAffine(image, M, (w, h))
    # Return the rotated image
    return rotated
# input path
img_path = r'D:\dataset\automatic-optical-defect-detection\generate_dataset\train'  # input folder
# output path
out_path = r'D:\dataset\automatic-optical-defect-detection\generate_dataset\rotate_train'
for fold in os.listdir(img_path):
    fold_path = os.path.join(img_path, fold)
    for img_name in os.listdir(fold_path):
        img_file = os.path.join(fold_path, img_name)  # do not reuse img_path here: that would break the outer loop
        img = cv2.imread(img_file)
        # Rotate the image
        for degree in (-90, 90, 15):
            img_rotate = rotate(img, degree)
            cv2.imshow(img_name, img_rotate)
            cv2.waitKey(0)
```
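The script above displays each rotation but never writes to `out_path`. A hedged sketch of the missing save step, reusing the `rotate()` helper and the `os`/`cv2` imports from the script; the file-naming scheme is an assumption.
```python
def save_rotations(img, img_name, class_fold, out_root, degrees=(-90, 15, 90)):
    """Write rotated copies of one image into out_root/class_fold."""
    dst_dir = os.path.join(out_root, class_fold)
    os.makedirs(dst_dir, exist_ok=True)
    stem = os.path.splitext(img_name)[0]
    for degree in degrees:
        img_rotate = rotate(img, degree)  # rotate() is defined above
        cv2.imwrite(os.path.join(dst_dir, '%s_rot%d.jpg' % (stem, degree)), img_rotate)
```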
|
{
"source": "JerryJack121/Speech_Recognition-PyTorch",
"score": 2
}
|
#### File: Speech_Recognition-PyTorch/utils/dataloader_bg.py
```python
from torch.utils.data import DataLoader
from prefetch_generator import BackgroundGenerator
class DataLoaderX(DataLoader):
def __iter__(self):
return BackgroundGenerator(super().__iter__())
```
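`DataLoaderX` is a drop-in replacement for `DataLoader` that prefetches batches on a background thread (requires the `prefetch_generator` package). A usage sketch with a throwaway dataset:
```python
import torch
from torch.utils.data import TensorDataset

dataset = TensorDataset(torch.rand(1000, 16), torch.randint(0, 10, (1000,)))
loader = DataLoaderX(dataset, batch_size=32, shuffle=True, num_workers=2)
for features, labels in loader:
    pass  # batches arrive pre-fetched by the background generator
```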
|
{
"source": "jerry-jho/PythonLoaderInterface",
"score": 3
}
|
#### File: PythonLoaderInterface/_run/mytest.py
```python
def hello_python(a,b):
print "a=%s,b=%s" % (a,b)
return (a+b,)
```
|
{
"source": "JerryJiaGit/picture2openscad.py",
"score": 3
}
|
#### File: JerryJiaGit/picture2openscad.py/picture2openscad.py
```python
import cv2
import numpy as np
from itertools import islice
class picture2openscad(object):
"""This is a class that will be used for OpenSCAD 3D object code generation from a 2D picture.
"""
def __init__(self, debug_mode=None):
"""
Class constructor.
picture_x_limit, x limitation, default 500
picture_y_limit, y limitation, default 500
picture_limit_scale , auto scale if size larger than limitation, default False
Inputs:
optional:
debug mode - 1 if we want huge debug spew
Return:
Remote Agent handle
"""
if debug_mode:
self.debug_mode = 1
else:
self.debug_mode = 0
self.picture_x_limit = 500
self.picture_y_limit = 500
self.picture_limit_scale = False
self.scad_script_content = []
self.scad_output_sign = '// This is OpenSCAD script generated by picture2openscad tool v1.3. \n// Please do not remove sign and check more information from https://github.com/JerryJiaGit/picture2openscad.py \n'
#self.binary_threshold = 0
def EnableDebug(self):
"""Externally callable function to enable debug mode.
"""
self.debug_mode = 1
def DisableDebug(self):
"""Externally callable function to disable debug mode.
"""
self.debug_mode = 0
def ExportScad(self, scad_filename = "output.scad"):
"""Export OpenScad output file name
Keyword Arguments:
scad_filename {str} -- (default: {"output.scad"})
"""
with open(scad_filename, "w") as scad_output:
scad_output.seek(0, 0)
scad_output.write(self.scad_output_sign)
for script_line in self.scad_script_content:
scad_output.write(script_line + '\n')
return
def ImportPicture(self,picture_filename, picture_color_space="GRAY",picture_color_invert=True, picture_flip= "NONE",picture_norm_type="NORM_MINMAX" ):
"""This function will import image from picture_filename, support common picture format.
Keyword Arguments:
picture filename
picture_color_space , color space of picture, default "GRAY", can be "BGR"
picture_color_invert , invert gray, default True
picture_flip = NONE , do picture flip default NONE disable, set to VERT -> vertical, HORI -> horizontal, VERT_HORI -> vertical and horizontal
picture_norm_type="NORM_MINMAX" , do picture normalization default NORM_MINMAX, NORM_INF, NORM_L1, NORM_L2
Output: image object in gray range[0:1]
"""
try:
im = cv2.imread(picture_filename)
except:
if self.debug_mode: print("Error: ImportPicture error with opening file " + inputfilename)
return
InputPicture_shape = im.shape
if InputPicture_shape[0] > self.picture_y_limit or InputPicture_shape[1] > self.picture_x_limit:
if self.debug_mode: print("Warning: ImportPicture size is larger than limit " + str(self.picture_x_limit) +"," + str(self.picture_y_limit) + ",auto scale is " + str(self.picture_limit_scable))
if self.picture_limit_scale:
if InputPicture_shape[0] < InputPicture_shape[1]:
cv2.resize(src=im, dst=im, dsize=[self.picture_x_limit, self.picture_x_limit/InputPicture_shape[1]*InputPicture_shape[0]], interpolation=cv2.INTER_AREA)
if InputPicture_shape[0] > InputPicture_shape[1]:
cv2.resize(src=im, dst=im, dsize=[self.picture_y_limit/InputPicture_shape[0]*InputPicture_shape[1], self.picture_u_limit], interpolation=cv2.INTER_AREA)
ScaleInputPicture_shape = im.shape
# picture color space convert
if len(ScaleInputPicture_shape)==3 and picture_color_space == "GRAY":
if ScaleInputPicture_shape[2]==3: im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
# flip
if picture_flip == "VERT": im = cv2.flip(im,0) # flip
if picture_flip == "HORI": im = cv2.flip(im,1) # flip
if picture_flip == "VERT_HORI": im = cv2.flip(im,-1) # flip
# invert
if picture_color_invert: im = abs(im-255) # gray invert
im = np.float32(im)
# scale and shift by NORM_MINMAX
if picture_norm_type == "NORM_MINMAX":
cv2.normalize(im, dst=im, alpha=0, beta=1.0, norm_type=cv2.NORM_MINMAX)
if self.debug_mode:cv2.imshow("NORM_MINMAX", np.uint8(im*255))
# scale and shift by NORM_INF
if picture_norm_type == "NORM_INF":
cv2.normalize(im, dst=im, alpha=1.0, beta=0, norm_type=cv2.NORM_INF)
if self.debug_mode:cv2.imshow("NORM_INF", np.uint8(im*255))
# scale and shift by NORM_L1
if picture_norm_type == "NORM_L1":
cv2.normalize(im, dst=im, alpha=1.0, beta=0, norm_type=cv2.NORM_L1)
if self.debug_mode:cv2.imshow("NORM_L1", np.uint8(im*10000000))
# scale and shift by NORM_L2
if picture_norm_type == "NORM_L2":
cv2.normalize(im, dst=im, alpha=1.0, beta=0, norm_type=cv2.NORM_L2)
if self.debug_mode:cv2.imshow("NORM_L2", np.uint8(im*10000))
if self.debug_mode:
cv2.waitKey(0)
cv2.destroyAllWindows()
return im
def ScatterCube(self, shape= [1,1,1], translate = [0,0,0], rotation = [0,0,0], colorcode=[0.3,0.3,0.3,0.8], indextext = None):
"""This function will create a scatter cube with different size, translate, rotation, colors - 13-D
Keyword Arguments:
dimensions {[type]} -- [description] (default: {None})
shape {list} -- [description] (default: {[1,1,1]})
translate {list} -- [description] (default: {[0,0,0]})
rotation {list} -- [description] (default: {[0,0,0]})
colorcode {list} -- [description] (default: {[0.3,0.3,0.3,0.8]})
indextext {type} -- [] (default: {None})
"""
scad_script_color = "color(c=["+ str(colorcode[0]) +","+ str(colorcode[1]) +","+ str(colorcode[2])+","+ str(colorcode[3])+"])"
self.scad_script_content.append(scad_script_color)
scad_script_rotate = "rotate(a=["+ str(rotation[0]) +","+ str(rotation[1]) +","+ str(rotation[2])+"])"
self.scad_script_content.append(scad_script_rotate)
scad_script_translate = "translate([" + str(translate[0]) + "," + str(translate[1]) + ","+str(translate[2])+"])"
self.scad_script_content.append(scad_script_translate)
scad_script_pix2cube = "cube(["+str(shape[0])+","+str(shape[1])+","+str(shape[2])+"],center = false);"
self.scad_script_content.append(scad_script_pix2cube)
if indextext is not None:
scad_script_text = 'text("' + str(indextext) + '", size = 0.1, font = "Liberation Sans:style=Bold Italic");'
scad_script_linear_extrude = "linear_extrude(height=0.01)"
scad_script_translate = "translate([" + str(translate[0]) + "," + str(translate[1]) + ","+str(translate[2]-0.1)+"])"
scad_script_rotate = "rotate(a=["+ str(90) +","+ str(0) +","+ str(0)+"])"
scad_script_linear_extrude = "linear_extrude(height=0.01)"
scad_script_color = "color(c=["+ str(.1) +","+ str(.1) +","+ str(.1)+","+ str(.6)+"])"
self.scad_script_content.append(scad_script_color+scad_script_translate+scad_script_rotate+scad_script_linear_extrude+scad_script_text)
return self.scad_script_content
def ScatterSphere(self, shape= [1,1,1], translate = [0,0,0], rotation = [0,0,0], colorcode=[0.3,0.3,0.3,0.8], indextext = None):
"""This function will create a scatter cube with different size, translate, rotation, colors - 13-D
Keyword Arguments:
size {list} -- [size[0] is Diameter and size[1] is a full circle rendered using this number of fragments] (default: {[1,12]})
translate {list} -- [description] (default: {[0,0,0]})
rotation {list} -- [description] (default: {[0,0,0]})
colorcode {list} -- [description] (default: {[0.3,0.3,0.3,0.8]})
indextext {type} -- [] (default: {None})
Returns:
[type] -- [description]
"""
scad_script_color = "color(c=["+ str(colorcode[0]) +","+ str(colorcode[1]) +","+ str(colorcode[2])+","+ str(colorcode[3])+"])"
self.scad_script_content.append(scad_script_color)
scad_script_rotate = "rotate(a=["+ str(rotation[0]) +","+ str(rotation[1]) +","+ str(rotation[2])+"])"
self.scad_script_content.append(scad_script_rotate)
scad_script_translate = "translate([" + str(translate[0]) + "," + str(translate[1]) + ","+str(translate[2])+"])"
self.scad_script_content.append(scad_script_translate)
scad_script_pix2sphere = "sphere(d="+str(shape[0])+",$fa="+str(shape[1])+",$fs="+str(shape[2])+",$fn=0);"
self.scad_script_content.append(scad_script_pix2sphere)
if indextext is not None:
scad_script_text = 'text("' + str(indextext) + '", size = 0.1, font = "Liberation Sans:style=Bold Italic");'
scad_script_linear_extrude = "linear_extrude(height=0.01)"
scad_script_translate = "translate([" + str(translate[0]) + "," + str(translate[1]) + ","+str(translate[2]-0.1)+"])"
scad_script_rotate = "rotate(a=["+ str(90) +","+ str(0) +","+ str(0)+"])"
scad_script_linear_extrude = "linear_extrude(height=0.01)"
scad_script_color = "color(c=["+ str(.1) +","+ str(.1) +","+ str(.1)+","+ str(.6)+"])"
self.scad_script_content.append(scad_script_color+scad_script_translate+scad_script_rotate+scad_script_linear_extrude+scad_script_text)
return self.scad_script_content
def ScatterChart(self, dimensions, index=False):
"""This function will create a scatter chart
Arguments:
dimensions {[type]} -- [description]
index {bool} -- [] (default: {False})
"""
if len(dimensions[0])==14:
length_split = [1, 3, 3, 3,4]
i = 0
for scatter in dimensions:
if index: i += 1
else: i = None
scatter = iter(scatter)
splitlist = [list(islice(scatter, elem)) for elem in length_split]
if splitlist[0][0] == 1:
self.ScatterCube(shape= splitlist[1], translate = splitlist[2], rotation = splitlist[3], colorcode=splitlist[4], indextext = i)
elif splitlist[0][0] == 2:
self.ScatterSphere(shape= splitlist[1], translate = splitlist[2], rotation = splitlist[3], colorcode=splitlist[4], indextext = i)
else:
return self.scad_script_content
return self.scad_script_content
def PixelCubeZDepth(self, im_src, offset=[0,0,0], translate = [1,1,0], pixelcube = [.2,.2,.2], zdepth = 5, exclude_threshold = [0], color_mode="GRAY", color_alpha=1):
"""This function will create cube based image from ImportPicture()
Keyword Arguments:
im_src
offset=[0,0,0] ,final pixel location offset
translate = [1,1,0] ,final pixel cube scale, z=0 means cube z will depends on ZDepth, otherwise z location will be controlled by picture color
pixelcube = [.2,.2,.2] ,pixel cube size
zdepth = 5 ,zdepth will control cube z length, zdepth=0 meas cube z depth will not controlled by picture color
exclude_threshold = [0,0,0] ,color excluded to build cube, default is 0
color_mode="GRAY" ,color mode, GRAY means will color cube based on picture color
Output: self.scad_script_content {list}
"""
pixel_index = 0
Y = 0
#print(np.array(im_src[100][150]))
for y in range(0,im_src.shape[0]):
for x in range(0,im_src.shape[1]):
if not np.array_equal(np.array(im_src[y][x]),np.array(exclude_threshold)): # exclude_threshold
if color_mode == "GRAY":
if len(im_src.shape)==3:
if im_src.shape[2]==3:
Y=0.299 * im_src[y][x][2] + 0.587 * im_src[y][x][1] + 0.114 * im_src[y][x][0]
r,g,b=Y,Y,Y
a=color_alpha
if im_src.shape[2]==1:
Y=im_src[y][x]
r,g,b=Y,Y,Y
a=color_alpha
if len(im_src.shape)==2:
Y=im_src[y][x]
r,g,b=Y,Y,Y
a=color_alpha
scad_script_color = "color( c = ["+str(abs(r))+","+str(abs(g))+","+str(abs(b))+","+str(abs(a))+"])"
self.scad_script_content.append(scad_script_color)
if color_mode == "BGR":
if len(im_src.shape)==3:
if im_src.shape[2]==3:
Y=0.299 * im_src[y][x][2] + 0.587 * im_src[y][x][1] + 0.114 * im_src[y][x][0]
b,g,r=im_src[y][x]
a=color_alpha
if im_src.shape[2]==1:
Y=im_src[y][x]
r,g,b=Y,Y,Y
a=color_alpha
if len(im_src.shape)==2:
Y=im_src[y][x]
r,g,b=Y,Y,Y
a=color_alpha
scad_script_color = "color( c = ["+str(abs(r))+","+str(abs(g))+","+str(abs(b))+","+str(abs(a))+"])"
self.scad_script_content.append(scad_script_color)
if color_mode == "BGRA":
if len(im_src.shape)==3:
if im_src.shape[2]==4:
Y=0.299 * im_src[y][x][2] + 0.587 * im_src[y][x][1] + 0.114 * im_src[y][x][0]
b,g,r,a=im_src[y][x]
scad_script_color = "color( c = ["+str(abs(r))+","+str(abs(g))+","+str(abs(b))+","+str(abs(a))+"])"
self.scad_script_content.append(scad_script_color)
translate_x = x * translate[0] + offset[0]
translate_y = y * translate[1] + offset[1]
translate_z = (1-Y)*translate[2] + offset[2]
cube_x = pixelcube[0]
cube_y = pixelcube[1]
if zdepth == 0: cube_z = pixelcube[2]
else: cube_z = pixelcube[2]*(1-Y) * zdepth
scad_script_translate = "translate([" + str(translate_x) + "," + str(translate_y) + ","+str(translate_z)+"])"
self.scad_script_content.append(scad_script_translate)
scad_script_pix2cube = "cube(["+str(cube_x)+","+str(cube_y)+","+str(cube_z)+"],center = false);"
self.scad_script_content.append(scad_script_pix2cube)
pixel_index = pixel_index + 1
return self.scad_script_content
def ModelUnion(self, script_content = ""):
"""This function will add union for OpenScad models at first line.
Keyword Arguments:
script_content {list}, default: self.scad_script_content
Output: self.scad_script_content {list}
"""
if script_content == "": script_content = self.scad_script_content
UnionCode = "union(){"
script_content.insert(0, UnionCode)
script_content.append("}")
self.scad_script_content = script_content
return self.scad_script_content
def ModelRotate(self, script_content = "", rotate_a=[0,0,0]):
"""This function will add rotate for OpenScad models at first line.
Keyword Arguments:
rotate_a {list}, default: [0,0,0]
script_content {list}, default: self.scad_script_content
Output: self.scad_script_content {list}
"""
if script_content == "": script_content = self.scad_script_content
RotateCode = "rotate(a=["+ str(rotate_a[0]) +","+ str(rotate_a[1]) +","+ str(rotate_a[2])+"]){"
script_content.insert(0, RotateCode)
script_content.append("}")
self.scad_script_content = script_content
return self.scad_script_content
def ModelScale(self, script_content = "", scale_v=[1,1,1]):
"""This function will add scale for OpenScad models at first line.
Keyword Arguments:
scale_v {list}, default: [1,1,1]
script_content {list}, default: self.scad_script_content
Output: self.scad_script_content {list}
"""
if script_content == "": script_content = self.scad_script_content
ScaleCode = "scale(v=["+ str(scale_v[0]) +","+ str(scale_v[1]) +","+ str(scale_v[2])+"]){"
script_content.insert(0, ScaleCode)
script_content.append("}")
self.scad_script_content = script_content
return self.scad_script_content
# def ModelResize(self, script_content = "", resize_newsize=[1,1,1]):
# """This function will add scale for OpenScad models at first line.
# Keyword Arguments:
# resize_newsize {list}, default: [1,1,1]
# script_content {list}, default: self.scad_script_content
# Output: self.scad_script_content {list}
# """
# if script_content == "": script_content = self.scad_script_content
# ResizeCode = "resize(newsize=["+ str(resize_newsize[0]) +","+ str(resize_newsize[1]) +","+ str(resize_newsize[2])+"]){"
# script_content.insert(0, ResizeCode)
# script_content.append("}")
# self.scad_script_content = script_content
# return self.scad_script_content
def ModelTranslate(self, script_content = "", translate_v=[1,1,1]):
"""This function will add scale for OpenScad models at first line.
Keyword Arguments:
translate_v {list}, default: [1,1,1]
script_content {list}, default: self.scad_script_content
Output: self.scad_script_content {list}
"""
if script_content == "": script_content = self.scad_script_content
TranslateCode = "translate(v=["+ str(translate_v[0]) +","+ str(translate_v[1]) +","+ str(translate_v[2])+"]){"
script_content.insert(0, TranslateCode)
script_content.append("}")
self.scad_script_content = script_content
return self.scad_script_content
def ModelMirror(self, script_content = "", mirror_v=[1,1,1]):
"""This function will add scale for OpenScad models at first line.
Keyword Arguments:
mirror_v {list}, default: [1,1,1]
script_content {list}, default: self.scad_script_content
Output: self.scad_script_content {list}
"""
if script_content == "": script_content = self.scad_script_content
MirrorCode = "mirror(v=["+ str(mirror_v[0]) +","+ str(mirror_v[1]) +","+ str(mirror_v[2])+"]){"
script_content.insert(0, MirrorCode)
script_content.append("}")
self.scad_script_content = script_content
return self.scad_script_content
def ModelColor(self, script_content = "", color_c=[.2,1,0.2,0.2]):
"""This function will add scale for OpenScad models at first line.
Keyword Arguments:
color_c {list}, if color mode is not GRAY, you can customize cube color [r,g,b,a], default: [.2,1,0.2,0.2]
script_content {list}, default: self.scad_script_content
Output: self.scad_script_content {list}
"""
if script_content == "": script_content = self.scad_script_content
ColorCode = "color(c=["+ str(color_c[0]) +","+ str(color_c[1]) +","+ str(color_c[2])+","+ str(color_c[3])+"]){"
script_content.insert(0, ColorCode)
script_content.append("}")
self.scad_script_content = script_content
return self.scad_script_content
```
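An end-to-end sketch of the intended workflow: import a picture, emit one cube per pixel with depth driven by brightness, wrap the model in a union, and export the script. The file names and parameter values are illustrative only.
```python
p2s = picture2openscad()
im = p2s.ImportPicture('input.png', picture_color_space='GRAY',
                       picture_color_invert=True, picture_flip='VERT')
p2s.PixelCubeZDepth(im, translate=[1, 1, 0], pixelcube=[0.9, 0.9, 0.2],
                    zdepth=5, color_mode='GRAY')
p2s.ModelUnion()
p2s.ModelScale(scale_v=[0.5, 0.5, 0.5])
p2s.ExportScad('relief.scad')
```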
|
{
"source": "jerryjiahaha/full-stack-fastapi-postgresql",
"score": 2
}
|
#### File: app/app/worker.py
```python
import sentry_sdk
from sentry_sdk.integrations.celery import CeleryIntegration
from app.core import config
from app.core.celery_app import celery_app
import logging
sentry_sdk.init(config.SENTRY_DSN, integrations=[CeleryIntegration()])
@celery_app.task(acks_late=True)
def test_celery(word: str):
logging.info(f"start test celery {word}")
return f"test task return {word}"
```
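A sketch of enqueueing the task from application code; it assumes a configured broker, a running Celery worker, and that the module is importable as `app.worker` given the project layout.
```python
from app.worker import test_celery  # import path is an assumption

result = test_celery.delay("world")  # enqueue asynchronously
print(result.get(timeout=10))        # "test task return world"
```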
|
{
"source": "jerryjiahaha/spe2fits",
"score": 2
}
|
#### File: jerryjiahaha/spe2fits/eventQueue.py
```python
from os import cpu_count
from queue import Queue, Empty
from threading import Thread, Event
from uuid import uuid4 as gen_uid
from warnings import warn
from enum import Enum, unique
from functools import partial
from debug import debug_method_info
@unique
class EventType(Enum):
"""
# parent event: distribute child event
# child event: process one
# feedback event: return child event result
# final event: complete parent work
"""
parent = 1
child = 2
feedback = 3
final = 4
class EventWrapper:
def __init__(self, master, etype, eid, handler, *args, **kwargs):
"""
master: EventQueue
etype: EventType
eid: Unique id for group of events
handler: event handler to be invoked
"""
self._type = etype
self._id = eid
self._master = master
try:
self._handler = partial(handler, *args, **kwargs)
except TypeError:
self._handler = None
def dispatch(self):
if self._handler is not None:
return self._handler()
@property
def getType(self):
return self._type
@property
def getId(self):
return self._id
@unique
class QueueStatus(Enum):
un_init = 0 # un-initialized
init = 1 # distributing event
waiting = 2 # distributed, waiting final
final = 3 # all children is done
class EventQueue:
""" Hold queue of events
call setChildren to set children iter
call startEvents to start
method can be overrided: createQueue, on_started, on_child_process,
on_child_done, on_finished
"""
@debug_method_info()
def __init__(self, children = ()):
self._child_event_count = 0
self._child_event_done_count = 0
self.queue_router.update(self.createQueue())
self.setChildren(children)
self._stop_event = Event()
self._worker_router = {
EventType.parent: self._worker_parent,
EventType.child : self._worker_child,
EventType.feedback: self._worker_feedback,
EventType.final : self._worker_final,
}
@debug_method_info()
def __del__(self):
self._stop_event.set()
for queue in self._queue_router.values():
self.remove_queue(queue)
def remove_queue(self, Q):
# ref: http://stackoverflow.com/questions/6517953/clear-all-items-from-the-queue
with Q.mutex:
for ele in filter(lambda x: x.getId == self.task_id, Q.queue):
Q.queue.remove(ele)
Q.task_done()
def createQueue(self):
""" create alternative Queue, to be overrided
"""
return {}
def _wrap_event(self, etype, handler, *args, **kwargs):
return EventWrapper(self, etype, self.task_id, handler, *args, **kwargs)
@property
def queue_router(self):
if not hasattr(self, "_queue_router"):
self._queue_router = { et: Queue() for et in EventType }
print("queue count:", len(self._queue_router))
return self._queue_router
@property
def state(self):
if not hasattr(self, "_state"):
self._state = QueueStatus.un_init
return self._state
@property
def task_id(self):
if not hasattr(self, "_task_id"):
self._task_id = gen_uid()
return self._task_id
@property
def queue_parent(self):
return self._queue_router[EventType.parent]
@property
def queue_child(self):
return self._queue_router[EventType.child]
@property
def queue_feedback(self):
return self._queue_router[EventType.feedback]
@property
def queue_final(self):
return self._queue_router[EventType.final]
@debug_method_info()
def _worker(self, queue):
while not self._stop_event.is_set():
try:
event = queue.get(timeout=1)
except Empty:
continue
if event.getId == self.task_id:
self._dispatch(event)
queue.task_done()
else:
queue.put(event)
print("quit worker")
@debug_method_info()
def _worker_parent(self, event):
event.dispatch()
@debug_method_info()
def _put_child(self, chid):
self._child_event_count += 1
chandler = self.on_child_process
cevent = self._wrap_event(EventType.child, chandler, chid)
self._queue_router[EventType.child].put(cevent)
@debug_method_info()
def _worker_child(self, event):
result = event.dispatch()
self._put_feedback(result)
@debug_method_info()
def _put_feedback(self, result):
fevent = self._wrap_event(EventType.feedback, self._feedback_process, result)
self.queue_feedback.put(fevent)
@debug_method_info()
def _feedback_process(self, result):
""" check child process
"""
self._child_event_done_count += 1
if self.state == QueueStatus.waiting and \
self._child_event_done_count == self._child_event_count:
self._state = QueueStatus.final
print("all children finished")
self.on_child_done(result)
if self.state == QueueStatus.final:
self._put_final()
@debug_method_info()
def _put_final(self):
fievent = self._wrap_event(EventType.final, self._final_process)
self.queue_final.put(fievent)
@debug_method_info()
def _worker_feedback(self, event):
print("_worker_feedback", event)
event.dispatch()
@debug_method_info()
def _worker_final(self, event):
print("_worker_final", event)
event.dispatch()
@debug_method_info()
def _dispatch(self, event):
self._worker_router[event.getType](event)
@debug_method_info()
def startEvents(self):
""" Process events
"""
if self.state != QueueStatus.un_init:
raise RuntimeError("At present the queue can only be started once!")
self._thread_parent = Thread(target = self._worker, args = ( self.queue_parent,))
self._thread_parent.start()
self._child_worker_count = 1 if cpu_count() <= 1 else cpu_count() - 1
self._thread_children = [
Thread(target = self._worker, args = ( self.queue_child,)) \
for i in range(self._child_worker_count) ]
[ x.start() for x in self._thread_children ]
self._thread_feedback = Thread(target = self._worker, args = ( self.queue_feedback,))
self._thread_feedback.start()
self._thread_final = Thread(target = self._worker, args = ( self.queue_final,))
self._thread_final.start()
self._put_parent()
self.on_started()
@debug_method_info()
def on_started(self):
""" Hook after queue started
"""
pass
@debug_method_info()
def _put_parent(self):
""" Starting queue, put parent event
"""
pevent = self._wrap_event(EventType.parent, self._parent_process)
self.queue_parent.put(pevent)
@debug_method_info()
def _parent_process(self):
""" Distributing child event
"""
self._state = QueueStatus.init
for child in self._children:
self._put_child(child)
self._state = QueueStatus.waiting
def setChildren(self, childIter):
if childIter is None:
self._children = ()
return
        try:
            self._children = iter(childIter)
        except TypeError:
            self._children = iter((childIter,))  # wrap a single non-iterable child in a one-element tuple
# def _get_child(self):
# """ Distribute child events ...
# """
# try:
# child = next(self._children)
# yield self._put_child(child)
# except StopIteration:
# return
@debug_method_info()
def on_child_process(self, child):
""" Child event that can be executed concurrently
"""
pass
@debug_method_info()
def on_child_done(self, result):
""" Give feedback when one child process finished
"""
pass
@debug_method_info()
def _final_process(self):
self.on_finished()
self._stop_event.set()
@debug_method_info()
def on_finished(self):
""" Hook when all events are done
"""
pass
```
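A minimal subclass sketch showing the intended extension points: children are distributed to worker threads, each result is fed back, and a final hook fires once all children are done. The per-child work here is a placeholder.
```python
class SquareQueue(EventQueue):
    def on_child_process(self, child):
        return child * child  # runs concurrently on the child worker threads

    def on_child_done(self, result):
        print("child finished with", result)

    def on_finished(self):
        print("all children processed")

q = SquareQueue(children=range(5))
q.startEvents()
```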
|
{
"source": "jerryjrchen/jerryjrchen.github.io",
"score": 4
}
|
#### File: assets/files/quiz2.py
```python
def test(f, x):
if f(x) % 2 == 0:
return lambda g, x: g(lambda x: x * x, x)
else:
return f(x)
def tester():
return test(lambda s: s // 2, 20)(test, 7)
# What would display in the interpreter:
# >>> tester()
# ???
# Hint: draw an environment diagram?
```
|
{
"source": "JERRYJUQU/cumulatus",
"score": 2
}
|
#### File: migrations/versions/6d18c7ff8a87_users_and_histories_table.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6d18c7ff8a87'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('email', sa.String(length=120), nullable=True),
sa.Column('password', sa.String(length=128), nullable=True),
sa.Column('age', sa.Integer(), nullable=True),
sa.Column('weight', sa.Integer(), nullable=True),
sa.Column('height', sa.Integer(), nullable=True),
sa.Column('volume', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
with op.batch_alter_table('user', schema=None) as batch_op:
batch_op.create_index(batch_op.f('ix_user_email'), ['email'], unique=True)
batch_op.create_index(batch_op.f('ix_user_username'), ['username'], unique=True)
op.create_table('history',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('time', sa.DateTime(), nullable=True),
sa.Column('weight', sa.Integer(), nullable=True),
sa.Column('set', sa.Integer(), nullable=True),
sa.Column('reps', sa.Integer(), nullable=True),
sa.Column('weights', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
with op.batch_alter_table('history', schema=None) as batch_op:
batch_op.create_index(batch_op.f('ix_history_time'), ['time'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('history', schema=None) as batch_op:
batch_op.drop_index(batch_op.f('ix_history_time'))
op.drop_table('history')
with op.batch_alter_table('user', schema=None) as batch_op:
batch_op.drop_index(batch_op.f('ix_user_username'))
batch_op.drop_index(batch_op.f('ix_user_email'))
op.drop_table('user')
# ### end Alembic commands ###
```
#### File: site-packages/wtforms_ext/widgets.py
```python
from wtforms.widgets import HTMLString, html_params, TextArea, Select
from wtforms.compat import text_type
try:
from html import escape
except ImportError:
from cgi import escape
# https://gist.github.com/playpauseandstop/1590178
class ExtendedSelectWidget(Select):
"""Add support of choices with ``optgroup`` to the ``Select`` widget."""
def __call__(self, field, **kwargs):
kwargs.setdefault('id', field.id)
if self.multiple:
kwargs['multiple'] = True
html = ['<select %s>' % html_params(name=field.name, **kwargs)]
for item1, item2 in field.choices:
if isinstance(item2, (list, tuple)):
group_label = item1
group_items = item2
html.append('<optgroup %s>' % html_params(label=group_label))
for inner_val, inner_label in group_items:
html.append(self.render_option(inner_val, inner_label, inner_val == field.data))
html.append('</optgroup>')
else:
val = item1
label = item2
html.append(self.render_option(val, label, val == field.data))
html.append('</select>')
return HTMLString(''.join(html))
class FilterSelectWidget(Select):
"""Add support of choices with ``display`` to the ``Select`` widget."""
def __call__(self, field, **kwargs):
kwargs.setdefault('id', field.id)
if self.multiple:
kwargs['multiple'] = True
html = ['<select %s>' % html_params(name=field.name, **kwargs)]
for val, label, display in field.choices:
html.append(self.render_option(val, label, val == field.data, display))
html.append('</select>')
return HTMLString(''.join(html))
@classmethod
def render_option(cls, value, label, selected, display):
if value is True:
# Handle the special case of a 'True' value.
value = text_type(value)
options = dict(value=value)
if selected:
options['selected'] = True
if not display:
options['style'] = 'display: none;'
return HTMLString('<option %s>%s</option>' % (html_params(**options), escape(text_type(label), quote=False)))
class CKTextAreaWidget(TextArea):
def __call__(self, field, **kwargs):
# Add WYSIWYG class to existing classes
existing_classes = kwargs.pop('class', '') or kwargs.pop('class_', '')
kwargs['class'] = u'%s %s' % (existing_classes, "ckeditor")
return super(CKTextAreaWidget, self).__call__(field, **kwargs)
```
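A rendering sketch wiring `ExtendedSelectWidget` into a WTForms field with grouped choices; the form and choice data are hypothetical. Note that plain `SelectField` choice validation may reject grouped choices, so a custom field overriding `pre_validate` may also be needed.
```python
from wtforms import Form, SelectField

class ExerciseForm(Form):
    exercise = SelectField(
        'Exercise',
        widget=ExtendedSelectWidget(),
        choices=[
            ('Upper body', [('bench', 'Bench press'), ('row', 'Barbell row')]),
            ('Lower body', [('squat', 'Squat'), ('dl', 'Deadlift')]),
        ],
    )
```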
|
{
"source": "jerryjvl/confluence-inspector",
"score": 4
}
|
#### File: confluence-inspector/utils/ratelimiter.py
```python
from datetime import datetime, timedelta
from threading import Lock
from time import sleep
ZERO_DURATION = timedelta(0)
def create_ratelimit_waiter(rate_per_second, window_in_seconds = 0):
"""Construct a rate-limiting waiter that will sleep the minimum amount of
time to continue satisfying the specified rate-limiting constraints.
Non-default constraints for "overshoot" and "window" allow extra flexibility
in the short-term request rate; otherwise strict adherence to the stated rate
will be followed.
Args:
rate_per_second (int): Number of requests to allow to pass per second
window_in_seconds (int): Length of time window to amortize request rate over
Returns:
Parameter-less and result-less function that honours the defined rate-limiting
constraints by sleeping the minimum amount of time for them to be satisfied.
"""
ratelimiter = create_ratelimiter(rate_per_second, window_in_seconds)
def waiter():
wait = ratelimiter(datetime.now())
sleep(wait.total_seconds())
return waiter
def create_ratelimiter(rate_per_second, window_in_seconds = 0):
"""Helper-method that builds the underlying rate-limiter function in a testable form.
Args:
rate_per_second (int): Number of requests to allow to pass per second
window_in_seconds (int): Length of time window to amortize request rate over
Returns:
Function taking the 'datetime' a rate-limited event takes place at, and returning
a 'timedelta' describing the amount of delay required to satisfy rate-limiting.
"""
count = 0
last = None
lock = Lock()
max = rate_per_second * window_in_seconds
def _seconds(delta):
return delta.days * 86400 + delta.seconds
def ratelimiter(timestamp):
nonlocal count
nonlocal last
with lock:
if last is None:
last = timestamp
else:
delta = timestamp - last
if delta > ZERO_DURATION:
last += timedelta(delta.days, delta.seconds)
                    count -= _seconds(delta) * rate_per_second  # reuse the _seconds helper above
if count < 0:
count = 0
result = ZERO_DURATION if count <= max else timedelta(microseconds = ((count - max) * 1000000) // rate_per_second)
count += 1
return result
return ratelimiter
```
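A usage sketch: build a waiter for 5 requests per second amortized over a 2-second window, and call it before each unit of work; the loop body is a stand-in for the real rate-limited call.
```python
wait_for_slot = create_ratelimit_waiter(rate_per_second=5, window_in_seconds=2)

for i in range(20):
    wait_for_slot()      # sleeps just long enough to stay within the limit
    print("request", i)  # placeholder for the actual request
```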
|
{
"source": "jerrykan-formulas/exim-formula",
"score": 2
}
|
#### File: exim-formula/tests/test_mailname.py
```python
def test_mailname_file(File):
conf = File('/etc/mailname')
assert conf.content == 'otherhost.example.com\n'
```
|
{
"source": "jerrykan/herder",
"score": 2
}
|
#### File: herder/roundup/actions.py
```python
from roundup.exceptions import *
from roundup import hyperdb
from roundup.i18n import _
class Action:
def __init__(self, db, translator):
self.db = db
self.translator = translator
def handle(self, *args):
"""Action handler procedure"""
raise NotImplementedError
def execute(self, *args):
"""Execute the action specified by this object."""
self.permission(*args)
return self.handle(*args)
def permission(self, *args):
"""Check whether the user has permission to execute this action.
If not, raise Unauthorised."""
pass
def gettext(self, msgid):
"""Return the localized translation of msgid"""
return self.translator.gettext(msgid)
_ = gettext
class Retire(Action):
def handle(self, designator):
classname, itemid = hyperdb.splitDesignator(designator)
# make sure we don't try to retire admin or anonymous
if (classname == 'user' and
self.db.user.get(itemid, 'username') in ('admin', 'anonymous')):
raise ValueError(self._(
'You may not retire the admin or anonymous user'))
# do the retire
self.db.getclass(classname).retire(itemid)
self.db.commit()
def permission(self, designator):
classname, itemid = hyperdb.splitDesignator(designator)
if not self.db.security.hasPermission('Edit', self.db.getuid(),
classname=classname, itemid=itemid):
            raise Unauthorised(self._('You do not have permission to '
                'retire the %(classname)s class.') % {'classname': classname})
```
#### File: cgi/PageTemplates/GlobalTranslationService.py
```python
import re
from roundup.cgi.TAL.TALDefs import NAME_RE
ustr = str
class DummyTranslationService:
"""Translation service that doesn't know anything about translation."""
def translate(self, domain, msgid, mapping=None,
context=None, target_language=None, default=None):
def repl(m, mapping=mapping):
return ustr(mapping[m.group(m.lastindex)])
cre = re.compile(r'\$(?:(%s)|\{(%s)\})' % (NAME_RE, NAME_RE))
return cre.sub(repl, default or msgid)
# XXX Not all of Zope.I18n.ITranslationService is implemented.
translationService = DummyTranslationService()
def setGlobalTranslationService(service):
"""Sets the global translation service, and returns the previous one."""
global translationService
old_service = translationService
translationService = service
return old_service
def getGlobalTranslationService():
"""Returns the global translation service."""
return translationService
```
#### File: cgi/ZTUtils/Iterator.py
```python
__doc__='''Iterator class
Unlike the builtin iterators of Python 2.2+, these classes are
designed to maintain information about the state of an iteration.
The Iterator() function accepts either a sequence or a Python
iterator. The next() method fetches the next item, and returns
true if it succeeds.
'''
__docformat__ = 'restructuredtext'
import string
class Iterator:
'''Simple Iterator class'''
__allow_access_to_unprotected_subobjects__ = 1
nextIndex = 0
def __init__(self, seq):
self.seq = iter(seq) # force seq to be an iterator
self._inner = iterInner
self._prep_next = iterInner.prep_next
def __getattr__(self, name):
try:
inner = getattr(self._inner, 'it_' + name)
except AttributeError:
raise AttributeError, name
return inner(self)
def next(self):
if not (hasattr(self, '_next') or self._prep_next(self)):
return 0
self.index = i = self.nextIndex
self.nextIndex = i+1
self._advance(self)
return 1
def _advance(self, it):
self.item = self._next
del self._next
del self.end
self._advance = self._inner.advance
self.start = 1
def number(self): return self.nextIndex
def even(self): return not self.index % 2
def odd(self): return self.index % 2
def letter(self, base=ord('a'), radix=26):
index = self.index
s = ''
while 1:
index, off = divmod(index, radix)
s = chr(base + off) + s
if not index: return s
def Letter(self):
return self.letter(base=ord('A'))
def Roman(self, rnvalues=(
(1000,'M'),(900,'CM'),(500,'D'),(400,'CD'),
(100,'C'),(90,'XC'),(50,'L'),(40,'XL'),
(10,'X'),(9,'IX'),(5,'V'),(4,'IV'),(1,'I')) ):
n = self.index + 1
s = ''
for v, r in rnvalues:
rct, n = divmod(n, v)
s = s + r * rct
return s
def roman(self, lower=string.lower):
return lower(self.Roman())
def first(self, name=None):
if self.start: return 1
return not self.same_part(name, self._last, self.item)
def last(self, name=None):
if self.end: return 1
return not self.same_part(name, self.item, self._next)
def same_part(self, name, ob1, ob2):
if name is None:
return ob1 == ob2
no = []
return getattr(ob1, name, no) == getattr(ob2, name, no) is not no
def __iter__(self):
return IterIter(self)
class InnerBase:
'''Base Inner class for Iterators'''
# Prep sets up ._next and .end
def prep_next(self, it):
it.next = self.no_next
it.end = 1
return 0
# Advance knocks them down
def advance(self, it):
it._last = it.item
it.item = it._next
del it._next
del it.end
it.start = 0
def no_next(self, it):
return 0
def it_end(self, it):
if hasattr(it, '_next'):
return 0
return not self.prep_next(it)
class SeqInner(InnerBase):
'''Inner class for sequence Iterators'''
def _supports(self, ob):
try: ob[0]
except (TypeError, AttributeError): return 0
except: pass
return 1
def prep_next(self, it):
i = it.nextIndex
try:
it._next = it.seq[i]
except IndexError:
it._prep_next = self.no_next
it.end = 1
return 0
it.end = 0
return 1
def it_length(self, it):
it.length = l = len(it.seq)
return l
try:
StopIteration=StopIteration
except NameError:
StopIteration="StopIteration"
class IterInner(InnerBase):
'''Iterator inner class for Python iterators'''
def _supports(self, ob):
try:
if hasattr(ob, 'next') and (ob is iter(ob)):
return 1
except:
return 0
def prep_next(self, it):
try:
it._next = it.seq.next()
except StopIteration:
it._prep_next = self.no_next
it.end = 1
return 0
it.end = 0
return 1
class IterIter:
def __init__(self, it):
self.it = it
self.skip = it.nextIndex > 0 and not it.end
def next(self):
it = self.it
if self.skip:
self.skip = 0
return it.item
if it.next():
return it.item
raise StopIteration
seqInner = SeqInner()
iterInner = IterInner()
```
#### File: templates/minimal/schema.py
```python
user = Class(db, "user", username=String(), password=Password(),
address=String(), alternate_addresses=String(), roles=String())
user.setkey("username")
db.security.addPermission(name='Register', klass='user',
description='User is allowed to register new user')
#
# TRACKER SECURITY SETTINGS
#
# See the configuration and customisation document for information
# about security setup.
#
# REGULAR USERS
#
# Give the regular users access to the web and email interface
db.security.addPermissionToRole('User', 'Web Access')
db.security.addPermissionToRole('User', 'Email Access')
# May users view other user information?
# Comment these lines out if you don't want them to
p = db.security.addPermission(name='View', klass='user',
properties=('id', 'username'))
db.security.addPermissionToRole('User', p)
# Users should be able to edit their own details -- this permission is
# limited to only the situation where the Viewed or Edited item is their own.
def own_record(db, userid, itemid):
'''Determine whether the userid matches the item being accessed.'''
return userid == itemid
p = db.security.addPermission(name='View', klass='user', check=own_record,
description="User is allowed to view their own user details")
db.security.addPermissionToRole('User', p)
p = db.security.addPermission(name='Edit', klass='user', check=own_record,
properties=('username', 'password', 'address', 'alternate_addresses'),
description="User is allowed to edit their own user details")
db.security.addPermissionToRole('User', p)
#
# ANONYMOUS USER PERMISSIONS
#
# Let anonymous users access the web interface. Note that almost all
# trackers will need this Permission. The only situation where it's not
# required is in a tracker that uses an HTTP Basic Authenticated front-end.
db.security.addPermissionToRole('Anonymous', 'Web Access')
# Let anonymous users access the email interface (note that this implies
# that they will be registered automatically, hence they will need the
# "Create" user Permission below)
db.security.addPermissionToRole('Anonymous', 'Email Access')
# Assign the appropriate permissions to the anonymous user's
# Anonymous Role. Choices here are:
# - Allow anonymous users to register
db.security.addPermissionToRole('Anonymous', 'Register', 'user')
# vim: set et sts=4 sw=4 :
```
#### File: herder/test/test_textfmt.py
```python
import unittest
from roundup.support import wrap
class WrapTestCase(unittest.TestCase):
def testWrap(self):
lorem = '''Lorem ipsum dolor sit amet, consectetuer adipiscing elit.'''
wrapped = '''Lorem ipsum dolor
sit amet,
consectetuer
adipiscing elit.'''
self.assertEquals(wrap(lorem, 20), wrapped)
```
#### File: herder/test/tx_Source_detector.py
```python
import time as time
def tx_SourceCheckAudit(db, cl, nodeid, newvalues):
''' An auditor to print the value of the source of the
        transaction that triggers this change. The sleep call
is used to delay the transaction so that multiple changes will
overlap. The expected output from this detector are 2 lines
with the same value for tx_Source. Tx source is:
None - Reported when using a script or it is an error if
the change arrives by another method.
"cli" - reported when using roundup-admin
"web" - reported when using any web based technique
"email" - reported when using an unautheticated email based technique
"email-sig-openpgp" - reported when email with a valid pgp
signature is used
'''
if __debug__ and False:
print "\n tx_SourceCheckAudit(%s) db.tx_Source: %s"%(nodeid, db.tx_Source)
newvalues['tx_Source'] = db.tx_Source
    # example of real use: prevent a change from happening if it's
    # submitted via email
#
# if db.tx_Source == "email":
    #     raise Reject('Change not allowed via email')
def tx_SourceCheckReact(db, cl, nodeid, oldvalues):
    ''' A reactor to print the value of the source of the
        transaction that triggered this change. The sleep call
is used to delay the transaction so that multiple changes will
        overlap. The expected output from this detector is 2 lines
with the same value for tx_Source. Tx source is:
None - Reported when using a script or it is an error if
the change arrives by another method.
"cli" - reported when using roundup-admin
"web" - reported when using any web based technique
"email" - reported when using an unautheticated email based technique
"email-sig-openpgp" - reported when email with a valid pgp
signature is used
'''
if __debug__ and False:
print " tx_SourceCheckReact(%s) db.tx_Source: %s"%(nodeid, db.tx_Source)
def init(db):
db.issue.audit('create', tx_SourceCheckAudit)
db.issue.audit('set', tx_SourceCheckAudit)
db.issue.react('set', tx_SourceCheckReact)
db.issue.react('create', tx_SourceCheckReact)
db.msg.audit('create', tx_SourceCheckAudit)
```
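The commented-out `Reject` snippet in the auditor above is the practical use of `db.tx_Source`: gating changes by submission channel. A minimal sketch of an auditor built on that idea (the `Reject` import path follows Roundup's conventions; treat it as an assumption):
```python
# Sketch: refuse any issue change that arrives via unauthenticated email.
from roundup.exceptions import Reject

def deny_email_changes(db, cl, nodeid, newvalues):
    '''Auditor: block changes whose transaction source is plain email.'''
    if db.tx_Source == "email":
        raise Reject('Change not allowed via email')

def init(db):
    db.issue.audit('set', deny_email_changes)
```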
#### File: herder/tools/fixroles.py
```python
import sys
from roundup import admin
class AdminTool(admin.AdminTool):
def __init__(self):
self.commands = admin.CommandDict()
for k in AdminTool.__dict__.keys():
if k[:3] == 'do_':
self.commands[k[3:]] = getattr(self, k)
self.help = {}
for k in AdminTool.__dict__.keys():
if k[:5] == 'help_':
self.help[k[5:]] = getattr(self, k)
self.instance_home = ''
self.db = None
def do_fixroles(self, args):
'''Usage: fixroles
Set the roles property for all users to reasonable defaults.
The admin user gets "Admin", the anonymous user gets "Anonymous"
and all other users get "User".
'''
# get the user class
cl = self.get_class('user')
for userid in cl.list():
username = cl.get(userid, 'username')
if username == 'admin':
roles = 'Admin'
elif username == 'anonymous':
roles = 'Anonymous'
else:
roles = 'User'
cl.set(userid, roles=roles)
return 0
if __name__ == '__main__':
tool = AdminTool()
sys.exit(tool.main())
```
|
{
"source": "jerrykan/opsdroid",
"score": 3
}
|
#### File: skill/skilltest/__init__.py
```python
from opsdroid.skill import Skill
from opsdroid.matchers import match_regex
class TestSkill(Skill):
"""A mocked skill."""
@match_regex("test")
def test_method(self, message):
"""A test method skill."""
pass
@property
def bad_property(self):
"""Bad property which raises exceptions."""
raise Exception
```
#### File: opsdroid/tests/test_connector_github.py
```python
import os.path
import asyncio
import unittest
import unittest.mock as mock
import asynctest
import asynctest.mock as amock
from opsdroid.__main__ import configure_lang
from opsdroid.core import OpsDroid
from opsdroid.connector.github import ConnectorGitHub
from opsdroid.events import Message
class TestConnectorGitHub(unittest.TestCase):
"""Test the opsdroid github connector class."""
def setUp(self):
self.loop = asyncio.new_event_loop()
def test_init(self):
"""Test that the connector is initialised properly."""
connector = ConnectorGitHub({
'name': 'github',
'token': 'test'
})
self.assertEqual(None, connector.default_target)
self.assertEqual("github", connector.name)
def test_missing_token(self):
"""Test that attempt to connect without info raises an error."""
ConnectorGitHub({})
self.assertLogs('_LOGGER', 'error')
class TestConnectorGitHubAsync(asynctest.TestCase):
"""Test the async methods of the opsdroid github connector class."""
def setUp(self):
opsdroid = amock.CoroutineMock()
configure_lang({})
self.connector = ConnectorGitHub({
'name': 'github',
'token': 'test'
}, opsdroid=opsdroid)
async def test_connect(self):
with amock.patch('aiohttp.ClientSession.get') as patched_request:
mockresponse = amock.CoroutineMock()
mockresponse.status = 200
mockresponse.json = amock.CoroutineMock(return_value={
"login": 'opsdroid'
})
patched_request.return_value = asyncio.Future()
patched_request.return_value.set_result(mockresponse)
await self.connector.connect()
self.assertEqual(self.connector.github_username, "opsdroid")
self.assertTrue(self.connector.opsdroid.web_server.web_app.router.add_post.called)
async def test_connect_failure(self):
result = amock.MagicMock()
result.status = 401
with OpsDroid() as opsdroid, \
amock.patch('aiohttp.ClientSession.get') as patched_request:
patched_request.return_value = asyncio.Future()
patched_request.return_value.set_result(result)
await self.connector.connect()
self.assertLogs('_LOGGER', 'error')
async def test_disconnect(self):
self.assertEqual(await self.connector.disconnect(), None)
async def test_get_comment(self):
"""Test a comment create event creates a message and parses it."""
with open(os.path.join(os.path.dirname(__file__),
'responses',
'github_comment_payload.json'), 'r') as f:
mock_request = amock.CoroutineMock()
mock_request.post = amock.CoroutineMock(return_value={
'payload': f.read()
})
self.connector.opsdroid = amock.CoroutineMock()
self.connector.opsdroid.parse = amock.CoroutineMock()
await self.connector.github_message_handler(mock_request)
message = self.connector.opsdroid.parse.call_args[0][0]
self.assertEqual(message.connector.name, 'github')
self.assertEqual(message.text, 'hello')
self.assertEqual(message.target, 'opsdroid/opsdroid#237')
self.assertTrue(self.connector.opsdroid.parse.called)
async def test_get_pr(self):
"""Test a PR create event creates a message and parses it."""
with open(os.path.join(os.path.dirname(__file__),
'responses',
'github_pr_payload.json'), 'r') as f:
mock_request = amock.CoroutineMock()
mock_request.post = amock.CoroutineMock(return_value={
'payload': f.read()
})
self.connector.opsdroid = amock.CoroutineMock()
self.connector.opsdroid.parse = amock.CoroutineMock()
await self.connector.github_message_handler(mock_request)
message = self.connector.opsdroid.parse.call_args[0][0]
self.assertEqual(message.connector.name, 'github')
self.assertEqual(message.text, 'hello world')
self.assertEqual(message.target, 'opsdroid/opsdroid-audio#175')
self.assertTrue(self.connector.opsdroid.parse.called)
async def test_get_issue(self):
"""Test an issue create event creates a message and parses it."""
with open(os.path.join(os.path.dirname(__file__),
'responses',
'github_issue_payload.json'), 'r') as f:
mock_request = amock.CoroutineMock()
mock_request.post = amock.CoroutineMock(return_value={
'payload': f.read()
})
self.connector.opsdroid = amock.CoroutineMock()
self.connector.opsdroid.parse = amock.CoroutineMock()
await self.connector.github_message_handler(mock_request)
message = self.connector.opsdroid.parse.call_args[0][0]
self.assertEqual(message.connector.name, 'github')
self.assertEqual(message.text, 'test')
self.assertEqual(message.target, 'opsdroid/opsdroid#740')
self.assertTrue(self.connector.opsdroid.parse.called)
async def test_get_label(self):
"""Test a label create event doesn't create a message and parse it."""
with open(os.path.join(os.path.dirname(__file__),
'responses',
'github_label_payload.json'), 'r') as f:
mock_request = amock.CoroutineMock()
mock_request.post = amock.CoroutineMock(return_value={
'payload': f.read()
})
self.connector.opsdroid = amock.CoroutineMock()
self.connector.opsdroid.parse = amock.CoroutineMock()
await self.connector.github_message_handler(mock_request)
self.assertFalse(self.connector.opsdroid.parse.called)
async def test_get_no_action(self):
"""Test a status event doesn't create a message and parse it."""
with open(os.path.join(os.path.dirname(__file__),
'responses',
'github_status_payload.json'), 'r') as f:
mock_request = amock.CoroutineMock()
mock_request.post = amock.CoroutineMock(return_value={
'payload': f.read()
})
self.connector.opsdroid = amock.CoroutineMock()
self.connector.opsdroid.parse = amock.CoroutineMock()
await self.connector.github_message_handler(mock_request)
self.assertFalse(self.connector.opsdroid.parse.called)
async def test_listen(self):
"""Test the listen method.
        The GitHub connector listens using an API endpoint and so the listen
method should just pass and do nothing. We just need to test that it
does not block.
"""
self.assertEqual(await self.connector.listen(), None)
async def test_respond(self):
with amock.patch('aiohttp.ClientSession.post') as patched_request:
mockresponse = amock.CoroutineMock()
mockresponse.status = 201
patched_request.return_value = asyncio.Future()
patched_request.return_value.set_result(mockresponse)
resp = await self.connector.send(
Message("test", 'jacobtomlinson', 'opsdroid/opsdroid#1',
self.connector))
self.assertTrue(patched_request.called)
self.assertTrue(resp)
async def test_respond_bot_short(self):
with amock.patch('aiohttp.ClientSession.post') as patched_request:
mockresponse = amock.CoroutineMock()
mockresponse.status = 201
patched_request.return_value = asyncio.Future()
patched_request.return_value.set_result(mockresponse)
self.connector.github_username = 'opsdroid-bot'
resp = await self.connector.send(
Message('test', 'opsdroid-bot', 'opsdroid/opsdroid#1',
self.connector))
self.assertFalse(patched_request.called)
self.assertTrue(resp)
async def test_respond_failure(self):
with amock.patch('aiohttp.ClientSession.post') as patched_request:
mockresponse = amock.CoroutineMock()
mockresponse.status = 400
mockresponse.json = amock.CoroutineMock(return_value={
"error": 'some error'
})
patched_request.return_value = asyncio.Future()
patched_request.return_value.set_result(mockresponse)
resp = await self.connector.send(
Message('test', 'opsdroid-bot', 'opsdroid/opsdroid#1',
self.connector))
self.assertTrue(patched_request.called)
self.assertFalse(resp)
```
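`test_respond_bot_short` above pins down a loop-avoidance rule: the connector must not reply to comments authored by its own GitHub account, since posting would re-trigger its own webhook. A standalone sketch of that guard (not the opsdroid source; the `post_comment` callable is illustrative):
```python
# Sketch of the echo guard exercised by test_respond_bot_short.
async def send(connector, message, post_comment):
    """Post message.text to message.target unless the bot authored it."""
    if message.user == connector.github_username:
        return True  # succeed silently; replying would echo our own webhook
    response = await post_comment(message.target, message.text)
    return response.status == 201  # GitHub answers 201 Created on success
```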
#### File: opsdroid/tests/test_database_redis.py
```python
import asyncio
import datetime
import unittest
import asynctest
import asynctest.mock as amock
from contextlib import suppress
from opsdroid.database.redis import RedisDatabase
from opsdroid.__main__ import configure_lang
class MockRedisClient:
execute = None
class TestRedisDatabase(unittest.TestCase):
"""Test the opsdroid Redis database class."""
def setUp(self):
self.loop = asyncio.new_event_loop()
configure_lang({})
def test_init(self):
"""Test initialisation of database class.
This method will test the initialisation of the database
class. It will assert if the database class properties are
declared and equated to None.
"""
database = RedisDatabase({})
self.assertEqual(None, database.client)
self.assertEqual(0, database.database)
self.assertEqual("localhost", database.host)
self.assertEqual(6379, database.port)
self.assertEqual(None, database.password)
    def test_convert_object_to_timestamp(self):
unserialized_data = {
"example_string": "test",
"example_datetime": datetime.datetime.utcfromtimestamp(1538389815),
"example_date": datetime.date.fromtimestamp(1538366400),
}
serialized_data = RedisDatabase.convert_object_to_timestamp(unserialized_data)
self.assertEqual(serialized_data["example_string"], "test")
        # Typically I would do assertDictEqual on the result, but as datetimes are parsed based on
        # the timezone of the computer, it makes the unit test fragile depending on the user's timezone.
self.assertEqual(serialized_data["example_datetime"][0:10], "datetime::")
self.assertEqual(serialized_data["example_date"][0:6], "date::")
def test_convert_timestamp_to_object(self):
serialized_data = {
"example_date": "date::1538366400",
"example_datetime": "datetime::1538389815",
"example_string": "test"
}
unserialized_data = RedisDatabase.convert_timestamp_to_object(serialized_data)
self.assertEqual(unserialized_data["example_string"], "test")
        # Typically I would do assertDictEqual on the result, but as datetimes are parsed based on
        # the timezone of the computer, it makes the unit test fragile depending on the user's timezone.
self.assertIsInstance(unserialized_data["example_datetime"], datetime.datetime)
self.assertIsInstance(unserialized_data["example_date"], datetime.date)
class TestRedisDatabaseAsync(asynctest.TestCase):
"""Test the opsdroid Redis Database class."""
async def test_connect(self):
opsdroid = amock.CoroutineMock()
database = RedisDatabase({}, opsdroid=opsdroid)
import asyncio_redis
with amock.patch.object(asyncio_redis.Connection, 'create') as mocked_connection:
mocked_connection.side_effect = NotImplementedError
with suppress(NotImplementedError):
await database.connect()
self.assertTrue(mocked_connection.called)
async def test_get(self):
db = RedisDatabase({})
db.client = MockRedisClient()
db.client.get = amock.CoroutineMock(return_value='{"key":"value"}')
result = await db.get("string")
self.assertDictEqual(result, dict(key="value"))
async def test_get_return_None(self):
db = RedisDatabase({})
db.client = MockRedisClient()
db.client.get = amock.CoroutineMock(return_value=None)
result = await db.get("string")
self.assertEqual(result, None)
async def test_put(self):
db = RedisDatabase({})
db.client = MockRedisClient()
db.client.set = amock.CoroutineMock(return_value='{"key":"value"}')
result = await db.put("string", dict(key="value"))
async def test_disconnect(self):
db = RedisDatabase({})
db.client = MockRedisClient()
db.client.close = amock.CoroutineMock()
result = await db.disconnect()
self.assertTrue(db.client.close.called)
```
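The two conversion tests above pin down a simple serialization contract: `datetime` values round-trip as `"datetime::<unix timestamp>"` strings and `date` values as `"date::<unix timestamp>"`. A sketch of helpers satisfying that contract (not the opsdroid implementation; timezone handling is deliberately naive, which is exactly the fragility the inline comments warn about):
```python
import datetime

def convert_object_to_timestamp(data):
    """Serialize datetime/date values to prefixed unix-timestamp strings."""
    out = {}
    for key, value in data.items():
        # check datetime first: datetime is a subclass of date
        if isinstance(value, datetime.datetime):
            out[key] = "datetime::%d" % value.timestamp()
        elif isinstance(value, datetime.date):
            midnight = datetime.datetime(value.year, value.month, value.day)
            out[key] = "date::%d" % midnight.timestamp()
        else:
            out[key] = value
    return out

def convert_timestamp_to_object(data):
    """Inverse: turn prefixed timestamp strings back into objects."""
    out = {}
    for key, value in data.items():
        if isinstance(value, str) and value.startswith("datetime::"):
            out[key] = datetime.datetime.fromtimestamp(int(value[len("datetime::"):]))
        elif isinstance(value, str) and value.startswith("date::"):
            out[key] = datetime.date.fromtimestamp(int(value[len("date::"):]))
        else:
            out[key] = value
    return out
```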
|
{
"source": "jerrykan/roundup",
"score": 2
}
|
#### File: roundup/cgi/wsgi_handler.py
```python
import os
import weakref
from contextlib import contextmanager
from roundup.anypy.html import html_escape
import roundup.instance
from roundup.cgi import TranslationService
from roundup.anypy import http_
from roundup.anypy.strings import s2b
from roundup.cgi.client import BinaryFieldStorage
BaseHTTPRequestHandler = http_.server.BaseHTTPRequestHandler
DEFAULT_ERROR_MESSAGE = http_.server.DEFAULT_ERROR_MESSAGE
class Headers(object):
""" Idea more or less stolen from the 'apache.py' in same directory.
Except that wsgi stores http headers in environment.
"""
def __init__(self, environ):
self.environ = environ
def mangle_name(self, name):
""" Content-Type is handled specially, it doesn't have a HTTP_
prefix in cgi.
"""
n = name.replace('-', '_').upper()
if n == 'CONTENT_TYPE':
return n
return 'HTTP_' + n
def get(self, name, default=None):
return self.environ.get(self.mangle_name(name), default)
getheader = get
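    # Illustration with hypothetical environ values:
    #   h = Headers({'HTTP_USER_AGENT': 'curl', 'CONTENT_TYPE': 'text/plain'})
    #   h.get('User-Agent') -> 'curl'
    #   h.get('Content-Type') -> 'text/plain'  (the one key without HTTP_ prefix)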
class Writer(object):
'''Perform a start_response if need be when we start writing.'''
def __init__(self, request):
self.request = request #weakref.ref(request)
def write(self, data):
f = self.request.get_wfile()
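        # The first call triggers start_response via get_wfile(); rebinding
        # self.write to the raw wfile lets later calls skip that bookkeeping.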
self.write = f
return self.write(data)
class RequestHandler(object):
def __init__(self, environ, start_response):
self.__start_response = start_response
self.__wfile = None
self.headers = Headers(environ)
self.rfile, self.wfile = None, Writer(self)
def start_response(self, headers, response_code):
"""Set HTTP response code"""
message, explain = BaseHTTPRequestHandler.responses[response_code]
self.__wfile = self.__start_response('%d %s' % (response_code,
message), headers)
def get_wfile(self):
if self.__wfile is None:
raise ValueError('start_response() not called')
return self.__wfile
class RequestDispatcher(object):
def __init__(self, home, debug=False, timing=False, lang=None):
assert os.path.isdir(home), '%r is not a directory' % (home,)
self.home = home
self.debug = debug
self.timing = timing
if lang:
self.translator = TranslationService.get_translation(lang,
tracker_home=home)
else:
self.translator = None
self.preload()
def __call__(self, environ, start_response):
"""Initialize with `apache.Request` object"""
request = RequestHandler(environ, start_response)
if environ['REQUEST_METHOD'] == 'OPTIONS':
if environ["PATH_INFO"][:5] == "/rest":
# rest does support options
# This I hope will result in self.form=None
environ['CONTENT_LENGTH'] = 0
else:
code = 501
message, explain = BaseHTTPRequestHandler.responses[code]
request.start_response([('Content-Type', 'text/html'),
('Connection', 'close')], code)
request.wfile.write(s2b(DEFAULT_ERROR_MESSAGE % locals()))
return []
# need to strip the leading '/'
environ["PATH_INFO"] = environ["PATH_INFO"][1:]
if self.timing:
environ["CGI_SHOW_TIMING"] = self.timing
if environ['REQUEST_METHOD'] in ("OPTIONS", "DELETE"):
# these methods have no data. When we init tracker.Client
# set form to None to get a properly initialized empty
# form.
form = None
else:
form = BinaryFieldStorage(fp=environ['wsgi.input'], environ=environ)
with self.get_tracker() as tracker:
client = tracker.Client(tracker, request, environ, form,
self.translator)
try:
client.main()
except roundup.cgi.client.NotFound:
request.start_response([('Content-Type', 'text/html')], 404)
request.wfile.write(s2b('Not found: %s' %
html_escape(client.path)))
# all body data has been written using wfile
return []
def preload(self):
""" Trigger pre-loading of imports and templates """
with self.get_tracker():
pass
@contextmanager
def get_tracker(self):
# get a new instance for each request
yield roundup.instance.open(self.home, not self.debug)
```
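Because `RequestDispatcher.__call__` has the standard `(environ, start_response)` signature, the dispatcher is an ordinary WSGI application and can be mounted under any WSGI server. A minimal sketch using the standard library's `wsgiref` (the tracker home path is a placeholder, not from the source):
```python
# Sketch: serve a Roundup tracker with the stdlib WSGI reference server.
from wsgiref.simple_server import make_server

from roundup.cgi.wsgi_handler import RequestDispatcher

app = RequestDispatcher('/path/to/tracker')  # preload() runs on init
with make_server('localhost', 8080, app) as httpd:
    httpd.serve_forever()  # each request opens the tracker via get_tracker()
```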
#### File: roundup/test/test_config.py
```python
import unittest
import logging
import os, shutil, errno
import pytest
from roundup import configuration
config = configuration.CoreConfig()
config.DATABASE = "db"
config.RDBMS_NAME = "rounduptest"
config.RDBMS_HOST = "localhost"
config.RDBMS_USER = "rounduptest"
config.RDBMS_PASSWORD = "<PASSWORD>"
config.RDBMS_TEMPLATE = "template0"
# these TRACKER_WEB and MAIL_DOMAIN values are used in mailgw tests
config.MAIL_DOMAIN = "your.tracker.email.domain.example"
config.TRACKER_WEB = "http://tracker.example/cgi-bin/roundup.cgi/bugs/"
# uncomment the following to have excessive debug output from test cases
# FIXME: tracker logging level should be increased by -v arguments
# to 'run_tests.py' script
#config.LOGGING_FILENAME = "/tmp/logfile"
#config.LOGGING_LEVEL = "DEBUG"
config.init_logging()
config.options['FOO'] = "value"
# for TrackerConfig test class
from roundup import instance
from . import db_test_base
class ConfigTest(unittest.TestCase):
def test_badConfigKeyword(self):
"""Run configure tests looking for invalid option name
"""
self.assertRaises(configuration.InvalidOptionError, config._get_option, "BadOptionName")
def test_validConfigKeyword(self):
"""Run configure tests looking for invalid option name
"""
self.assertEqual(config._get_option("FOO"), "value")
def testTrackerWeb(self):
config = configuration.CoreConfig()
self.assertEqual(None,
config._get_option('TRACKER_WEB').set("http://foo.example/bar/"))
self.assertEqual(None,
config._get_option('TRACKER_WEB').set("https://foo.example/bar/"))
self.assertRaises(configuration.OptionValueError,
config._get_option('TRACKER_WEB').set, "https://foo.example/bar")
self.assertRaises(configuration.OptionValueError,
config._get_option('TRACKER_WEB').set, "htt://foo.example/bar/")
self.assertRaises(configuration.OptionValueError,
config._get_option('TRACKER_WEB').set, "htt://foo.example/bar")
self.assertRaises(configuration.OptionValueError,
config._get_option('TRACKER_WEB').set, "")
def testLoginAttemptsMin(self):
config = configuration.CoreConfig()
self.assertEqual(None,
config._get_option('WEB_LOGIN_ATTEMPTS_MIN').set("0"))
self.assertEqual(None,
config._get_option('WEB_LOGIN_ATTEMPTS_MIN').set("200"))
self.assertRaises(configuration.OptionValueError,
config._get_option('WEB_LOGIN_ATTEMPTS_MIN').set, "fred")
self.assertRaises(configuration.OptionValueError,
config._get_option('WEB_LOGIN_ATTEMPTS_MIN').set, "-1")
self.assertRaises(configuration.OptionValueError,
config._get_option('WEB_LOGIN_ATTEMPTS_MIN').set, "")
def testTimeZone(self):
config = configuration.CoreConfig()
self.assertEqual(None,
config._get_option('TIMEZONE').set("0"))
# not a valid timezone
self.assertRaises(configuration.OptionValueError,
config._get_option('TIMEZONE').set, "Zot")
        # 25 is not a valid UTC offset: the valid range is -12 to +14,
# possibly +/- 1 for DST. But roundup.date doesn't
# constrain to this range.
#self.assertRaises(configuration.OptionValueError,
# config._get_option('TIMEZONE').set, "25")
try:
import pytz
self.assertEqual(None,
config._get_option('TIMEZONE').set("UTC"))
self.assertEqual(None,
config._get_option('TIMEZONE').set("America/New_York"))
self.assertEqual(None,
config._get_option('TIMEZONE').set("EST"))
self.assertRaises(configuration.OptionValueError,
config._get_option('TIMEZONE').set, "Zool/Zot")
except ImportError:
# UTC is a known offset of 0 coded into roundup.date
# so it works even without pytz.
self.assertEqual(None,
config._get_option('TIMEZONE').set("UTC"))
            # same with EST, a known timezone offset of 5
self.assertEqual(None,
config._get_option('TIMEZONE').set("EST"))
self.assertRaises(configuration.OptionValueError,
config._get_option('TIMEZONE').set, "America/New_York")
def testWebSecretKey(self):
config = configuration.CoreConfig()
self.assertEqual(None,
config._get_option('WEB_SECRET_KEY').set("skskskd"))
self.assertRaises(configuration.OptionValueError,
config._get_option('WEB_SECRET_KEY').set, "")
def testStaticFiles(self):
config = configuration.CoreConfig()
self.assertEqual(None,
config._get_option('STATIC_FILES').set("foo /tmp/bar"))
self.assertEqual(config.STATIC_FILES,
["./foo", "/tmp/bar"])
self.assertEqual(config['STATIC_FILES'],
["./foo", "/tmp/bar"])
def testIsolationLevel(self):
config = configuration.CoreConfig()
self.assertEqual(None,
config._get_option('RDBMS_ISOLATION_LEVEL').set("read uncommitted"))
self.assertEqual(None,
config._get_option('RDBMS_ISOLATION_LEVEL').set("read committed"))
self.assertEqual(None,
config._get_option('RDBMS_ISOLATION_LEVEL').set("repeatable read"))
self.assertRaises(configuration.OptionValueError,
config._get_option('RDBMS_ISOLATION_LEVEL').set, "not a level")
def testConfigSave(self):
config = configuration.CoreConfig()
# make scratch directory to create files in
self.startdir = os.getcwd()
self.dirname = os.getcwd() + '_test_config'
os.mkdir(self.dirname)
try:
os.chdir(self.dirname)
self.assertFalse(os.access("config.ini", os.F_OK))
self.assertFalse(os.access("config.bak", os.F_OK))
config.save()
config.save() # creates .bak file
self.assertTrue(os.access("config.ini", os.F_OK))
self.assertTrue(os.access("config.bak", os.F_OK))
self.assertFalse(os.access("foo.bar", os.F_OK))
self.assertFalse(os.access("foo.bak", os.F_OK))
config.save("foo.bar")
config.save("foo.bar") # creates .bak file
self.assertTrue(os.access("foo.bar", os.F_OK))
self.assertTrue(os.access("foo.bak", os.F_OK))
finally:
# cleanup scratch directory and files
try:
os.chdir(self.startdir)
shutil.rmtree(self.dirname)
except OSError as error:
if error.errno not in (errno.ENOENT, errno.ESRCH): raise
def testFloatAndInt_with_update_option(self):
config = configuration.CoreConfig()
# Update existing IntegerNumberGeqZeroOption to IntegerNumberOption
config.update_option('WEB_LOGIN_ATTEMPTS_MIN',
configuration.IntegerNumberOption,
"0", description="new desc")
# -1 is allowed now that it is an int.
self.assertEqual(None,
config._get_option('WEB_LOGIN_ATTEMPTS_MIN').set("-1"))
# but can't float this
self.assertRaises(configuration.OptionValueError,
config._get_option('WEB_LOGIN_ATTEMPTS_MIN').set, "2.4")
# but fred is still an issue
self.assertRaises(configuration.OptionValueError,
config._get_option('WEB_LOGIN_ATTEMPTS_MIN').set, "fred")
# Update existing IntegerNumberOption to FloatNumberOption
config.update_option('WEB_LOGIN_ATTEMPTS_MIN',
configuration.FloatNumberOption,
"0.0")
self.assertEqual(config['WEB_LOGIN_ATTEMPTS_MIN'], -1)
# can float this
self.assertEqual(None,
config._get_option('WEB_LOGIN_ATTEMPTS_MIN').set("3.1415926"))
# but fred is still an issue
self.assertRaises(configuration.OptionValueError,
config._get_option('WEB_LOGIN_ATTEMPTS_MIN').set, "fred")
self.assertAlmostEqual(config['WEB_LOGIN_ATTEMPTS_MIN'], 3.1415926,
places=6)
class TrackerConfig(unittest.TestCase):
""" Arguably this should be tested in test_instance since it is triggered
by instance.open. But it raises an error in the configuration module
with a missing required param in config.ini."""
backend = 'anydbm'
def setUp(self):
self.dirname = '_test_instance'
# set up and open a tracker
self.instance = db_test_base.setupTracker(self.dirname, self.backend)
# open the database
self.db = self.instance.open('admin')
self.db.commit()
self.db.close()
def tearDown(self):
if self.db:
self.db.close()
try:
shutil.rmtree(self.dirname)
except OSError as error:
if error.errno not in (errno.ENOENT, errno.ESRCH): raise
def testNoDBInConfig(self):
# comment out the backend key in config.ini
import fileinput
for line in fileinput.input(os.path.join(self.dirname, "config.ini"),
inplace=True):
if line.startswith("backend = "):
continue
            print(line, end='')
# this should fail as backend isn't defined.
self.assertRaises(configuration.OptionUnsetError, instance.open,
self.dirname)
```
|
{
"source": "jerrykcode/kkFileView",
"score": 2
}
|
#### File: wizards/agenda/AgendaDocument.py
```python
import uno
import traceback
from ..text.TextElement import TextElement
from ..text.TextDocument import TextDocument
from ..text.TextSectionHandler import TextSectionHandler
from ..common.FileAccess import FileAccess
from datetime import datetime
from com.sun.star.text.PlaceholderType import TEXT
from com.sun.star.i18n.NumberFormatIndex import TIME_HHMM, DATE_SYSTEM_LONG
'''
The classes here implement the whole document-functionality of the agenda wizard:
the live-preview and the final "creation" of the document,
when the user clicks "finish". <br/>
<br/>
<h2>Some terminology:</h2>
items are names or headings. we don't make any distinction.
<br/>
The Agenda Template is used as general "controller"
of the whole document, whereas the two child-classes ItemsTable
and TopicsTable control the item tables (note plural!) and the
topics table (note singular).<br/>
<br/>
Other small classes are used to abstract the handling of cells and text and we
try to use them as components.
<br/><br/>
We tried to keep the Agenda Template as flexible as possible, though there
must be many limitations, because it is generated dynamically.<br/><br/>
To keep the template flexible the following decisions were made:<br/>
1. Item tables.<br/>
1.a. there might be an arbitrary number of Item tables.<br/>
1.b. Item table design (border, background) is arbitrary.<br/>
1.c. Item text styles are individual,
and use style-list styles with predefined names.<br/>
As a result, the following limitations:<br/>
Pairs of Name->value for each item.<br/>
Tables contain *only* those pairs.<br/>
2. Topics table.<br/>
2.a. arbitrary structure.<br/>
2.b. design is arbitrary.<br/>
As a result, the following limitations:<br/>
No column merge is allowed.<br/>
One compulsory Heading row.<br/>
<br/><br/>
To let the template be flexible, we use a kind of "detection": we look where
the items are, read the design of each table, and re-apply it after writing
the table.
<br/><br/>
A note about threads:<br/>
Many methods here are synchronized, in order to avoid collision made by
events fired too often.
'''
class AgendaDocument(TextDocument):
'''
constructor. The document is *not* loaded here.
only some formal members are set.
'''
def __init__(self, xmsf, agenda, resources, templateConsts, listener):
super(AgendaDocument,self).__init__(xmsf,listener, None,
"WIZARD_LIVE_PREVIEW")
self.agenda = agenda
self.templateConsts = templateConsts
self.resources = resources
self.itemsMap = {}
self.allItems = []
def load(self, templateURL):
# Each template is duplicated. aw-XXX.ott is the template itself
# and XXX.ott is a section link.
self.template = self.calcTemplateName(templateURL)
self.loadAsPreview(templateURL, False)
self.xFrame.ComponentWindow.Enable = False
self.xTextDocument.lockControllers()
self.initialize()
self.initializeData()
self.xTextDocument.unlockControllers()
'''
    The agenda templates are in the format aw-XXX.ott;
    the template's name is then XXX.ott.
    This method calculates it.
'''
def calcTemplateName(self, url):
return FileAccess.connectURLs(
FileAccess.getParentDir(url), FileAccess.getFilename(url)[3:])
    '''synchronize the document to the model.<br/>
    this method rewrites all titles, item tables, and the topics table -
    thus synchronizing the document to the data model (CGAgenda).
    '''
def initializeData(self):
for i in self.itemsTables:
try:
i.write()
except Exception:
traceback.print_exc()
self.redrawTitle("txtTitle")
self.redrawTitle("txtDate")
self.redrawTitle("txtTime")
self.redrawTitle("cbLocation")
'''
redraws/rewrites the table which contains the given item
This method is called when the user checks/unchecks an item.
    The table in which the item is located is found and redrawn.
'''
def redraw(self, itemName):
self.xTextDocument.lockControllers()
try:
# get the table in which the item is...
itemsTable = self.itemsMap[itemName]
# rewrite the table.
itemsTable.write()
except Exception:
traceback.print_exc()
self.xTextDocument.unlockControllers()
'''
    checks the data model to see whether the
    item corresponding to the given string should be shown
'''
def isShowItem(self, itemName):
if itemName == self.templateConsts.FILLIN_MEETING_TYPE:
return self.agenda.cp_ShowMeetingType
elif itemName == self.templateConsts.FILLIN_READ:
return self.agenda.cp_ShowRead
elif itemName == self.templateConsts.FILLIN_BRING:
return self.agenda.cp_ShowBring
elif itemName == self.templateConsts.FILLIN_NOTES:
return self.agenda.cp_ShowNotes
elif itemName == self.templateConsts.FILLIN_FACILITATOR:
return self.agenda.cp_ShowFacilitator
elif itemName == self.templateConsts.FILLIN_TIMEKEEPER:
return self.agenda.cp_ShowTimekeeper
elif itemName == self.templateConsts.FILLIN_NOTETAKER:
return self.agenda.cp_ShowNotetaker
elif itemName == self.templateConsts.FILLIN_PARTICIPANTS:
return self.agenda.cp_ShowAttendees
elif itemName == self.templateConsts.FILLIN_CALLED_BY:
return self.agenda.cp_ShowCalledBy
elif itemName == self.templateConsts.FILLIN_OBSERVERS:
return self.agenda.cp_ShowObservers
elif itemName == self.templateConsts.FILLIN_RESOURCE_PERSONS:
return self.agenda.cp_ShowResourcePersons
else:
raise ValueError("No such item")
    '''itemsCache is a map containing all agenda items. These are objects
    which "write themselves" to the table, given a table cursor.
    A cache is used in order to reuse the objects, instead of recreating them.
    This method fills the cache with all item objects (names and headings).
'''
def initItemsCache(self):
self.itemsCache = {}
# Headings
self.itemsCache[
self.templateConsts.FILLIN_MEETING_TYPE] = \
AgendaItem(self.templateConsts.FILLIN_MEETING_TYPE,
self.resources.itemMeetingType,
PlaceholderElement(
self.resources.reschkMeetingTitle_value,
self.resources.resPlaceHolderHint, self.xTextDocument))
self.itemsCache[
self.templateConsts.FILLIN_BRING] = \
AgendaItem(self.templateConsts.FILLIN_BRING,
self.resources.itemBring,
PlaceholderElement (
self.resources.reschkBring_value,
self.resources.resPlaceHolderHint, self.xTextDocument))
self.itemsCache[
self.templateConsts.FILLIN_READ] = \
AgendaItem (self.templateConsts.FILLIN_READ,
self.resources.itemRead,
PlaceholderElement (
self.resources.reschkRead_value,
self.resources.resPlaceHolderHint, self.xTextDocument))
self.itemsCache[
self.templateConsts.FILLIN_NOTES] = \
AgendaItem (self.templateConsts.FILLIN_NOTES,
self.resources.itemNote,
PlaceholderElement (
self.resources.reschkNotes_value,
self.resources.resPlaceHolderHint, self.xTextDocument))
# Names
self.itemsCache[
self.templateConsts.FILLIN_CALLED_BY] = \
AgendaItem(self.templateConsts.FILLIN_CALLED_BY,
self.resources.itemCalledBy,
PlaceholderElement (
self.resources.reschkConvenedBy_value,
self.resources.resPlaceHolderHint, self.xTextDocument))
self.itemsCache[
self.templateConsts.FILLIN_FACILITATOR] = \
AgendaItem(self.templateConsts.FILLIN_FACILITATOR,
self.resources.itemFacilitator,
PlaceholderElement (
self.resources.reschkPresiding_value,
self.resources.resPlaceHolderHint, self.xTextDocument))
self.itemsCache[
self.templateConsts.FILLIN_PARTICIPANTS] = \
AgendaItem(self.templateConsts.FILLIN_PARTICIPANTS,
self.resources.itemAttendees,
PlaceholderElement(
self.resources.reschkAttendees_value,
self.resources.resPlaceHolderHint, self.xTextDocument))
self.itemsCache[
self.templateConsts.FILLIN_NOTETAKER] = \
AgendaItem(self.templateConsts.FILLIN_NOTETAKER,
self.resources.itemNotetaker,
PlaceholderElement(
self.resources.reschkNoteTaker_value,
self.resources.resPlaceHolderHint, self.xTextDocument))
self.itemsCache[
self.templateConsts.FILLIN_TIMEKEEPER] = \
AgendaItem(self.templateConsts.FILLIN_TIMEKEEPER,
self.resources.itemTimekeeper,
PlaceholderElement(
self.resources.reschkTimekeeper_value,
self.resources.resPlaceHolderHint, self.xTextDocument))
self.itemsCache[
self.templateConsts.FILLIN_OBSERVERS] = \
AgendaItem(self.templateConsts.FILLIN_OBSERVERS,
self.resources.itemObservers,
PlaceholderElement(
self.resources.reschkObservers_value,
self.resources.resPlaceHolderHint, self.xTextDocument))
self.itemsCache[
self.templateConsts.FILLIN_RESOURCE_PERSONS] = \
AgendaItem(self.templateConsts.FILLIN_RESOURCE_PERSONS,
self.resources.itemResource,
PlaceholderElement(
self.resources.reschkResourcePersons_value,
self.resources.resPlaceHolderHint, self.xTextDocument))
'''Initializes a template.<br/>
This method does the following tasks:<br/>
get a Time and Date format for the document, and retrieve the null
date of the document (which is document-specific).<br/>
Initializes the Items Cache map.
Analyses the document:<br/>
    -find all "fill-ins" (appear as >xxx< in the document).
-analyze all items sections (and the tables in them).
-locate the titles and actualize them
-analyze the topics table
'''
def initialize(self):
'''
Get the default locale of the document,
and create the date and time formatters.
'''
self.dateUtils = self.DateUtils(self.xMSF, self.xTextDocument)
self.formatter = self.dateUtils.formatter
self.dateFormat = self.dateUtils.getFormat(DATE_SYSTEM_LONG)
self.timeFormat = self.dateUtils.getFormat(TIME_HHMM)
self.initItemsCache()
self.allItems = self.searchFillInItems(0)
self.initializeTitles()
self.initializeItemsSections()
self.textSectionHandler = TextSectionHandler(
self.xTextDocument, self.xTextDocument)
self.topics = Topics(self)
'''
locates the titles (name, location, date, time)
and saves a reference to their Text ranges.
'''
def initializeTitles(self):
auxList = []
for i in self.allItems:
text = i.String.lstrip().lower()
if text == self.templateConsts.FILLIN_TITLE:
self.teTitle = PlaceholderTextElement(
i, self.resources.resPlaceHolderTitle,
self.resources.resPlaceHolderHint, self.xTextDocument)
self.trTitle = i
elif text == self.templateConsts.FILLIN_DATE:
self.teDate = PlaceholderTextElement(
i, self.resources.resPlaceHolderDate,
self.resources.resPlaceHolderHint, self.xTextDocument)
self.trDate = i
elif text == self.templateConsts.FILLIN_TIME:
self.teTime = PlaceholderTextElement(
i, self.resources.resPlaceHolderTime,
self.resources.resPlaceHolderHint, self.xTextDocument)
self.trTime = i
elif text == self.templateConsts.FILLIN_LOCATION:
self.teLocation = PlaceholderTextElement(
i, self.resources.resPlaceHolderLocation,
self.resources.resPlaceHolderHint, self.xTextDocument)
self.trLocation = i
else:
auxList.append(i)
self.allItems = auxList
'''
analyze the item sections in the template.
    delegates the analysis of each table to the ItemsTable class.
'''
def initializeItemsSections(self):
sections = self.getSections(
self.xTextDocument, self.templateConsts.SECTION_ITEMS)
# for each section - there is a table...
self.itemsTables = []
for i in sections:
try:
self.itemsTables.append(
ItemsTable(self.getSection(i), self.getTable(i), self))
except Exception:
traceback.print_exc()
raise AttributeError (
"Fatal Error while initializing \
Template: items table in section " + i)
def getSections(self, document, s):
allSections = document.TextSections.ElementNames
return self.getNamesWhichStartWith(allSections, s)
def getSection(self, name):
return self.xTextDocument.TextSections.getByName(name)
def getTable(self, name):
return self.xTextDocument.TextTables.getByName(name)
def redrawTitle(self, controlName):
try:
if controlName == "txtTitle":
self.teTitle.placeHolderText = self.agenda.cp_Title
self.teTitle.write(self.trTitle)
elif controlName == "txtDate":
self.teDate.placeHolderText = \
self.getDateString(self.agenda.cp_Date)
self.teDate.write(self.trDate)
elif controlName == "txtTime":
self.teTime.placeHolderText = self.agenda.cp_Time
self.teTime.write(self.trTime)
elif controlName == "cbLocation":
self.teLocation.placeHolderText = self.agenda.cp_Location
self.teLocation.write(self.trLocation)
else:
raise Exception("No such title control...")
except Exception:
traceback.print_exc()
def getDateString(self, date):
if not date:
return ""
dateObject = datetime.strptime(date, '%d/%m/%y').date()
return self.dateUtils.format(self.dateFormat, dateObject)
def finish(self, topics):
self.createMinutes(topics)
self.deleteHiddenSections()
self.textSectionHandler.removeAllTextSections()
'''
hidden sections exist when an item's section is hidden because the
user specified not to display any items which it contains.
    When finishing, the wizard removes these sections
    entirely from the document.
'''
def deleteHiddenSections(self):
allSections = self.xTextDocument.TextSections.ElementNames
try:
for i in allSections:
self.section = self.getSection(i)
visible = bool(self.section.IsVisible)
if not visible:
self.section.Anchor.String = ""
except Exception:
traceback.print_exc()
'''
create the minutes for the given topics or remove the minutes
section from the document.
If no topics are supplied, or the user specified not to create minutes,
    the minutes section will be removed.
@param topicsData supplies PropertyValue arrays containing
the values for the topics.
'''
def createMinutes(self, topicsData):
# if the minutes section should be removed (the
# user did not check "create minutes")
if not self.agenda.cp_IncludeMinutes \
or len(topicsData) <= 1:
try:
minutesAllSection = self.getSection(
self.templateConsts.SECTION_MINUTES_ALL)
minutesAllSection.Anchor.String = ""
except Exception:
traceback.print_exc()
# the user checked "create minutes"
else:
try:
topicStartTime = int(self.agenda.cp_Time)
# first I replace the minutes titles...
self.items = self.searchFillInItems()
itemIndex = 0
for item in self.items:
itemText = item.String.lstrip().lower()
if itemText == \
self.templateConsts.FILLIN_MINUTES_TITLE:
self.fillMinutesItem(
item, self.agenda.cp_Title,
self.resources.resPlaceHolderTitle)
elif itemText == \
self.templateConsts.FILLIN_MINUTES_LOCATION:
self.fillMinutesItem(
item, self.agenda.cp_Location,
self.resources.resPlaceHolderLocation)
elif itemText == \
self.templateConsts.FILLIN_MINUTES_DATE:
self.fillMinutesItem(
                            item, self.getDateString(self.agenda.cp_Date),
self.resources.resPlaceHolderDate)
elif itemText == \
self.templateConsts.FILLIN_MINUTES_TIME:
self.fillMinutesItem( item, self.agenda.cp_Time,
self.resources.resPlaceHolderTime)
self.items.clear()
'''
now add minutes for each topic.
The template contains *one* minutes section, so
                we first use the one available, and then add a new one...
topics data has *always* an empty topic at the end...
'''
                for i in range(len(topicsData) - 1):
topic = topicsData[i]
items = self.searchFillInItems()
itemIndex = 0
for item in items:
itemText = item.String.lstrip().lower()
if itemText == \
self.templateConsts.FILLIN_MINUTE_NUM:
self.fillMinutesItem(item, topic[0].Value, "")
elif itemText == \
self.templateConsts.FILLIN_MINUTE_TOPIC:
self.fillMinutesItem(item, topic[1].Value, "")
elif itemText == \
self.templateConsts.FILLIN_MINUTE_RESPONSIBLE:
self.fillMinutesItem(item, topic[2].Value, "")
elif itemText == \
self.templateConsts.FILLIN_MINUTE_TIME:
topicTime = 0
try:
topicTime = topic[3].Value
except Exception:
pass
'''
if the topic has no time, we do not
display any time here.
'''
if topicTime == 0 or topicStartTime == 0:
time = topic[3].Value
else:
time = str(topicStartTime) + " - "
topicStartTime += topicTime * 1000
time += str(topicStartTime)
self.fillMinutesItem(item, time, "")
self.textSectionHandler.removeTextSectionbyName(
self.templateConsts.SECTION_MINUTES)
                    # after the last section we do not insert a new one.
if i < len(topicsData) - 2:
self.textSectionHandler.insertTextSection(
self.templateConsts.SECTION_MINUTES,
self.template, False)
except Exception:
traceback.print_exc()
'''given a text range and a text, fills the given
text range with the given text.
If the given text is empty, uses a placeholder with the given
placeholder text.
@param range text range to fill
@param text the text to fill to the text range object.
@param placeholder the placeholder text to use, if the
text argument is empty (null or "")
'''
def fillMinutesItem(self, Range, text, placeholder):
paraStyle = Range.ParaStyleName
Range.setString(text)
Range.ParaStyleName = paraStyle
if text is None or text == "":
if placeholder is not None and not placeholder == "":
placeHolder = self.createPlaceHolder(
self.xTextDocument, placeholder,
self.resources.resPlaceHolderHint)
try:
Range.Start.Text.insertTextContent(
Range.Start, placeHolder, True)
except Exception:
traceback.print_exc()
'''
creates a placeholder field with the given text and given hint.
'''
@classmethod
def createPlaceHolder(self, xmsf, ph, hint):
try:
placeHolder = xmsf.createInstance(
"com.sun.star.text.TextField.JumpEdit")
except Exception:
traceback.print_exc()
return None
placeHolder.PlaceHolder = ph
placeHolder.Hint = hint
placeHolder.PlaceHolderType = uno.Any("short",TEXT)
return placeHolder
def getNamesWhichStartWith(self, allNames, prefix):
v = []
for i in allNames:
if i.startswith(prefix):
v.append(i)
return v
'''
Convenience method for inserting some cells into a table.
'''
@classmethod
def insertTableRows(self, table, start, count):
rows = table.Rows
rows.insertByIndex(start, count)
'''
returns the rows count of this table, assuming
there is no vertical merged cells.
'''
@classmethod
def getRowCount(self, table):
cells = table.getCellNames()
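        # e.g. if the last cell name is "D12", int("12") gives 12 rows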
return int(cells[len(cells) - 1][1:])
class ItemsTable(object):
'''
the items in the table.
'''
items = []
table = None
def __init__(self, section, table, agenda):
self.agenda = agenda
ItemsTable.table = table
self.section = section
self.items = []
'''
go through all <*> items in the document
        and check each one to see if it is in this table.
If they are, register them to belong here, notice their order
and remove them from the list of all <*> items, so the next
search will be faster.
'''
aux = []
for item in self.agenda.allItems:
t = item.TextTable
if t == ItemsTable.table:
iText = item.String.lower().lstrip()
ai = self.agenda.itemsCache[iText]
if ai is not None:
self.items.append(ai)
self.agenda.itemsMap[iText] = self
else:
aux.append(item)
self.agenda.allItems = aux
'''
link the section to the template. this will restore the original table
with all the items.<br/>
then break the link, to make the section editable.<br/>
then, starting at cell one, write all items that should be visible.
then clear the rest and remove obsolete rows.
If no items are visible, hide the section.
'''
def write(self):
name = self.section.Name
# link and unlink the section to the template.
self.agenda.textSectionHandler.linkSectiontoTemplate(
self.agenda.template, name, self.section)
self.agenda.textSectionHandler.breakLinkOfTextSection(
self.section)
# we need to get an instance after linking
ItemsTable.table = self.agenda.getTable(name)
self.section = self.agenda.getSection(name)
cursor = ItemsTable.table.createCursorByCellName("A1")
# should this section be visible?
visible = False
# write items
cellName = ""
'''
now go through all items that belong to this
table. Check each one against the model. If it should
be displayed, call its write method.
All items are of type AgendaItem which means they write
two cells to the table: a title (text) and a placeholder.
see AgendaItem class below.
'''
for i in self.items:
if self.agenda.isShowItem(i.name):
visible = True
i.table = ItemsTable.table
i.write(cursor)
# I store the cell name which was last written...
cellName = cursor.RangeName
cursor.goRight(1, False)
        self.section.IsVisible = visible
if not visible:
return
'''
if the cell that was last written is the current cell,
it means this is the end of the table, so we end here.
        (because after getting the cellName above,
        I call the goRight method;
        if it did not go right, it means it's the last cell.)
'''
if cellName == cursor.RangeName:
return
'''
if not, we continue and clear all cells until
we are at the end of the row.
'''
while not cellName == cursor.RangeName and \
not cursor.RangeName.startswith("A"):
cell = ItemsTable.table.getCellByName(cursor.RangeName)
cell.String = ""
cellName = cursor.RangeName
cursor.goRight(1, False)
'''
again: if we are at the end of the table, end here.
'''
if cellName == cursor.RangeName:
return
'''
        now, before deleting, I move the cursor up so it
        does not disappear, because otherwise Office would crash.
'''
cursor.gotoStart(False)
'''
This class handles the preview of the topics table.
You can call it the controller of the topics table.
It differs from ItemsTable in that it has no data model -
the update is done programmatically.<br/>
<br/>
The decision to make this a class of its own
was made for reasons of logic rather than design/functionality;
since there is only one instance of this class at runtime,
it could also have been implemented in the AgendaDocument class,
but for clarity and separation I decided to make a separate class for it.
'''
class Topics(object):
'''Analyze the structure of the Topics table.
    The structure must be as follows:<br>
-One Header Row. <br>
-arbitrary number of rows per topic <br>
-arbitrary content in the topics row <br>
-only soft formatting will be restored. <br>
-the topic rows must repeat three times. <br>
-in the topics rows, placeholders for number, topic, responsible,
and duration must be placed.<br><br>
A word about table format: to reconstruct the format of the table we hold
to the following formats: first row (header), topic, and last row.
We hold the format of the last row, because one might wish to give it
a special format, other than the one on the bottom of each topic.
    The left and right borders of the whole table are, on the other hand,
part of the topics rows format, and need not be preserved separately.
'''
table = None
lastRowFormat = []
rowsPerTopic = None
def __init__(self, agenda):
self.firstRowFormat = []
self.agenda = agenda
self.writtenTopics = -1
try:
Topics.table = self.agenda.getTable(
self.agenda.templateConsts.SECTION_TOPICS)
except Exception:
traceback.print_exc()
raise AttributeError (
"Fatal error while loading template: table " + \
self.agenda.templateConsts.SECTION_TOPICS + " could not load.")
'''
first I store all <*> ranges
which are in the topics table.
        I store each <*> range in this map - the key
is the cell it is in. Later when analyzing the topic,
cell by cell, I check in this map to know
if a cell contains a <*> or not.
'''
try:
items = {}
for i in self.agenda.allItems:
t = i.TextTable
if t == Topics.table:
cell = i.Cell
iText = cell.CellName
items[iText] = i
'''
            in the topics table, there is always one
            title row and three topics defined.
            So no matter how many rows a topic takes - we
can restore its structure and format.
'''
rows = self.agenda.getRowCount(Topics.table)
Topics.rowsPerTopic = int((rows - 1) / 3)
firstCell = "A" + str(1 + Topics.rowsPerTopic + 1)
afterLastCell = "A" + str(1 + (Topics.rowsPerTopic * 2) + 1)
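            # Worked example: with 7 rows, rowsPerTopic = (7-1)//3 = 2,
            # firstCell = "A4" (first row of topic 2) and afterLastCell = "A6".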
# go to the first row of the 2. topic
cursor = Topics.table.createCursorByCellName(firstCell)
# analyze the structure of the topic rows.
while not cursor.RangeName == afterLastCell:
cell = Topics.table.getCellByName(cursor.RangeName)
# first I store the content and para style of the cell
ae = TextElement(cell, cell.String)
ae.write()
# goto next cell.
cursor.goRight(1, False)
except Exception:
traceback.print_exc()
    '''rewrites a single cell.
This is used in order to refresh the topic/responsible/duration data
in the preview document, in response to a change in the gui (by the user)
Since the structure of the topics table is flexible,
The Topics object, which analyzed the structure of the topics table upon
initialization, refreshes the appropriate cell.
'''
def writeCell(self, row, column, data):
# if the whole row should be written...
if self.writtenTopics < row:
self.writtenTopics += 1
rows = self.agenda.getRowCount(Topics.table)
reqRows = 1 + (row + 1) * Topics.rowsPerTopic
firstRow = reqRows - Topics.rowsPerTopic + 1
diff = reqRows - rows
if diff > 0:
                # add the missing rows...
self.agenda.insertTableRows(Topics.table, rows, diff)
column = 0
cursor = Topics.table.createCursorByCellName("A" + str(firstRow))
else:
# calculate the table row.
firstRow = 1 + (row * Topics.rowsPerTopic) + 1
cursor = Topics.table.createCursorByCellName("A" + str(firstRow))
# move the cursor to the needed cell...
cursor.goRight(column, False)
xc = Topics.table.getCellByName(cursor.RangeName)
# and write it !
te = TextElement(xc, data[column].Value)
te.write()
'''removes obsolete rows, reducing the
topics table to the given number of topics.
    Note this method only reduces - if
the number of topics given is greater than the
number of actual topics it does *not* add
rows!
Note also that the first topic will never be removed.
If the table contains no topics, the whole section will
be removed upon finishing.
The reason for that is a "table-design" one: the first topic is
maintained in order to be able to add rows with a design of this topic,
and not of the header row.
@param topics the number of topics the table should contain.
@throws Exception
'''
def reduceDocumentTo(self, topics):
# we never remove the first topic...
if topics <= 0:
topics = 1
tableRows = Topics.table.Rows
targetNumOfRows = topics * Topics.rowsPerTopic + 1
if tableRows.Count > targetNumOfRows:
tableRows.removeByIndex(
targetNumOfRows, tableRows.Count - targetNumOfRows)
'''
A Text element which, if the text to write is empty (null or "")
inserts a placeholder instead.
'''
class PlaceholderTextElement(TextElement):
def __init__(self, textRange, placeHolderText_, hint_, xmsf_):
super(PlaceholderTextElement,self).__init__(textRange, "")
self.text = placeHolderText_
self.hint = hint_
self.xmsf = xmsf_
self.xTextContentList = []
def write(self, textRange):
textRange.String = self.placeHolderText
if self.placeHolderText is None or self.placeHolderText == "":
try:
xTextContent = AgendaDocument.createPlaceHolder(
self.xmsf, self.text, self.hint)
self.xTextContentList.append(xTextContent)
textRange.Text.insertTextContent(
textRange.Start, xTextContent, True)
except Exception:
traceback.print_exc()
else:
if self.xTextContentList:
for i in self.xTextContentList:
textRange.Text.removeTextContent(i)
self.xTextContentList = []
'''
An Agenda element which writes no text, but inserts a placeholder, and formats
it using a ParaStyleName.
'''
class PlaceholderElement(object):
def __init__(self, placeHolderText_, hint_, textDocument):
self.placeHolderText = placeHolderText_
self.hint = hint_
self.textDocument = textDocument
def write(self, textRange):
try:
xTextContent = AgendaDocument.createPlaceHolder(
self.textDocument, self.placeHolderText, self.hint)
textRange.Text.insertTextContent(
textRange.Start, xTextContent, True)
except Exception:
traceback.print_exc()
'''
An implementation of AgendaElement which
gets as a parameter a table cursor, and writes
a text to the cell marked by this table cursor, and
a place holder to the next cell.
'''
class AgendaItem(object):
def __init__(self, name_, te, f):
self.name = name_
self.field = f
self.textElement = te
def write(self, tableCursor):
cellname = tableCursor.RangeName
cell = ItemsTable.table.getCellByName(cellname)
cell.String = self.textElement
tableCursor.goRight(1, False)
# second field is actually always null...
# this is a preparation for adding placeholders.
if self.field is not None:
self.field.write(ItemsTable.table.getCellByName(
tableCursor.RangeName))
```
#### File: wizards/agenda/CGAgenda.py
```python
from ..common.ConfigGroup import ConfigGroup
from ..common.ConfigSet import ConfigSet
from .CGTopic import CGTopic
class CGAgenda(ConfigGroup):
def __init__(self):
self.cp_AgendaType = int()
self.cp_IncludeMinutes = bool()
self.cp_Title = ""
self.cp_Date = str()
self.cp_Time = str()
self.cp_Location = ""
self.cp_ShowMeetingType = bool()
self.cp_ShowRead = bool()
self.cp_ShowBring = bool()
self.cp_ShowNotes = bool()
self.cp_ShowCalledBy = bool()
self.cp_ShowFacilitator = bool()
self.cp_ShowNotetaker = bool()
self.cp_ShowTimekeeper = bool()
self.cp_ShowAttendees = bool()
self.cp_ShowObservers = bool()
self.cp_ShowResourcePersons = bool()
self.cp_TemplateName = str()
self.cp_TemplatePath = str()
self.cp_ProceedMethod = int()
self.cp_Topics = ConfigSet(CGTopic)
```
#### File: wizards/agenda/CGTopic.py
```python
from ..common.ConfigGroup import ConfigGroup
'''
CGTopic means: Configuration Group Topic.
This object encapsulates a configuration group with topic information.
Since the topic's gui control uses its own data model, there is
also code here to convert from the data model to CGTopic object (the constructor)
and vice versa (setDataToRow method - used when loading the last session...)
'''
class CGTopic(ConfigGroup):
'''
create a new CGTopic object with data from the given row.
the row object is a PropertyValue array, as used
by the TopicsControl's data model.
@param row PropertyValue array as used by the TopicsControl's data model.
'''
def __init__(self, row=None):
if row is None:
self.cp_Index = int()
self.cp_Topic = str()
self.cp_Responsible = str()
self.cp_Time = str()
else:
self.cp_Index = int(row[0].Value[:-1])
self.cp_Topic = row[1].Value
self.cp_Responsible = row[2].Value
self.cp_Time = row[3].Value
'''
copies the data in this CGTopic object
to the given row.
@param row the row object (PropertyValue array) to
copy the data to.
'''
def setDataToRow(self, row):
row[0].Value = "" + str(self.cp_Index) + "."
row[1].Value = self.cp_Topic
row[2].Value = self.cp_Responsible
row[3].Value = self.cp_Time
```
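`CGTopic.__init__` expects a row of objects exposing a `.Value` attribute (UNO `PropertyValue` instances inside the wizard). A quick round-trip illustration with a stand-in type (hypothetical, purely to show the index/trailing-dot convention):
```python
# Stand-in for com.sun.star.beans.PropertyValue, for illustration only;
# assumes CGTopic from the module above is importable.
class FakeProp:
    def __init__(self, value):
        self.Value = value

row = [FakeProp("3."), FakeProp("Budget"), FakeProp("Alice"), FakeProp("10")]
topic = CGTopic(row)
assert topic.cp_Index == 3            # "3." parsed with the trailing dot dropped
assert topic.cp_Topic == "Budget"
topic.cp_Time = "15"
topic.setDataToRow(row)               # writes back; row[0].Value becomes "3."
```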
#### File: wizards/common/ConfigSet.py
```python
import traceback
from .ConfigGroup import ConfigGroup
class ConfigSet(ConfigGroup):
    '''
    A configuration set: a collection of child configuration
    items of a single type. Each child is an instance of
    childType; children are gathered in childrenList when read
    from a configuration view and written back one by one by
    writeConfiguration.
    '''
def __init__(self, childType):
self.childType = childType
self.childrenList = []
self.childrenListLen = 0
def writeConfiguration(self, configurationView, param):
for i in range(self.childrenListLen):
#remove previous configuration
configurationView.removeByName(i)
for index,item in enumerate(self.childrenList):
try:
childView = configurationView.createInstance()
configurationView.insertByName(index, childView)
if callable( self.childType ):
topic = self.childType()
topic.cp_Index = item[0].Value
topic.cp_Topic = item[1].Value
topic.cp_Responsible = item[2].Value
topic.cp_Time = item[3].Value
topic.writeConfiguration(childView, param)
except Exception:
traceback.print_exc()
def readConfiguration(self, configurationView, param):
#each iteration represents a Topic row
names = configurationView.ElementNames
if names:
for i in names:
try:
if callable( self.childType ):
topic = self.childType()
topic.readConfiguration(
configurationView.getByName(i), param)
self.childrenList.append(topic)
except Exception:
traceback.print_exc()
self.childrenListLen = len(self.childrenList)
```
#### File: wizards/common/FileAccess.py
```python
import traceback
from os import sep as FileSeparator
'''
This class delivers static convenience methods
to use with ucb SimpleFileAccess service.
You can also instantiate the class, to encapsulate
some functionality of SimpleFileAccess. The instance
keeps a reference to an XSimpleFileAccess and an
XFileIdentifierConverter, saves the permanent
overhead of querying for those interfaces, and delivers
convenience methods for using them.
    These convenience methods mainly add exception handling.
'''
class FileAccess(object):
def __init__(self, xmsf):
#get the file identifier converter
self.filenameConverter = xmsf.createInstance(
"com.sun.star.ucb.FileContentProvider")
self.xInterface = xmsf.createInstance(
"com.sun.star.ucb.SimpleFileAccess")
@classmethod
def deleteLastSlashfromUrl(self, _sPath):
if _sPath.endswith("/"):
return _sPath[:-1]
else:
return _sPath
'''
    For further information on argument values, see the OO Developer Guide,
chapter 6.2.7
@param xMSF
@param sPath
@param xSimpleFileAccess
@return the respective path of the office application.
A probable following "/" at the end is trimmed.
'''
@classmethod
def getOfficePath(self, xMSF, sPath, xSimpleFileAccess):
try:
ResultPath = ""
xInterface = xMSF.createInstance("com.sun.star.util.PathSettings")
ResultPath = str(getattr(xInterface, sPath))
ResultPath = self.deleteLastSlashfromUrl(ResultPath)
return ResultPath
except Exception:
traceback.print_exc()
return ""
@classmethod
def getFolderTitles(self, xMSF, FilterName, FolderName, resDict=None):
        #Returns a pair of lists: the sorted template titles and their paths
locLayoutFiles = []
try:
xDocInterface = xMSF.createInstance(
"com.sun.star.document.DocumentProperties")
xInterface = xMSF.createInstance(
"com.sun.star.ucb.SimpleFileAccess")
nameList = xInterface.getFolderContents(FolderName, False)
if FilterName is None or FilterName == "":
FilterName = None
else:
FilterName += "-"
locLayoutDict = {}
for i in nameList:
fileName = self.getFilename(i)
if FilterName is None or fileName.startswith(FilterName):
xDocInterface.loadFromMedium(i, tuple())
if resDict is None:
title = xDocInterface.Title
else:
if xDocInterface.Title in resDict:
# localise string at runtime
title = resDict[xDocInterface.Title]
else:
title = xDocInterface.Title
locLayoutDict[title] = i
#sort the dictionary and create a list containing the
#keys list and the values list
keysList = sorted(locLayoutDict.keys())
valuesList= []
for i in keysList:
valuesList.append(locLayoutDict[i])
locLayoutFiles.append(keysList)
locLayoutFiles.append(valuesList)
except Exception:
traceback.print_exc()
return locLayoutFiles
@classmethod
def getTitle(self, xMSF, _sFile):
sTitle = ""
try:
xDocInterface = xMSF.createInstance(
"com.sun.star.document.DocumentProperties")
noArgs = []
xDocInterface.loadFromMedium(_sFile, noArgs)
sTitle = xDocInterface.getTitle()
except Exception:
traceback.print_exc()
return sTitle
def getPath(self, parentURL, childURL):
string = ""
if childURL is not None and childURL != "":
string = "/" + childURL
return self.filenameConverter.getSystemPathFromFileURL(
parentURL + string)
def copy(self, source, target):
try:
self.xInterface.copy(source, target)
return True
except Exception:
traceback.print_exc()
return False
def exists(self, filename, default):
try:
return self.xInterface.exists(filename)
except Exception:
traceback.print_exc()
return default
def delete(self, filename):
try:
self.xInterface.kill(filename)
return True
except Exception:
traceback.print_exc()
return False
# lists the files in a given directory
# @param dir
# @param includeFolders
# @return
def listFiles(self, folder, includeFolders):
try:
return self.xInterface.getFolderContents(folder, includeFolders)
except Exception:
traceback.print_exc()
return [""]
def getSize(self, url):
try:
return self.xInterface.getSize(url)
except Exception:
traceback.print_exc()
return -1
def getURL(self, parentURL, childPath):
if len(childPath) > 0 and childPath[0] == "/":
path = parentURL + childPath
else:
path = parentURL + "/" + childPath
return path
@classmethod
def getFilename(self, path, pathSeparator = "/"):
return path.split(pathSeparator)[-1]
'''
if the path points to file, gives the directory in which the file is.
'''
@classmethod
def getParentDir(self, url):
while url[-1] == "/":
url = url[:-1]
return url[:url.rfind("/")]
@classmethod
def connectURLs(self, urlFolder, urlFilename):
stringFolder = ""
stringFileName = urlFilename
if not urlFolder.endswith("/"):
stringFolder = "/"
if urlFilename.startswith("/"):
stringFileName = urlFilename[1:]
return urlFolder + stringFolder + stringFileName
# @param filename
# @return the extension of the given filename.
@classmethod
def getExtension(self, filename):
p = filename.find(".")
if (p == -1):
return ""
else:
while (True):
filename = filename[(p+1):]
p = filename.find(".")
if (p == -1):
break
return filename
@classmethod
def filename(self, name, ext, i):
return name + ("" if (i == 0) else str(i)) + ("" if (ext == "") else ("." + ext))
```
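A quick sketch of the pure-Python path helpers, which need no UNO runtime (assuming FileAccess is importable from this module):
```python
# These classmethods operate on plain strings, so they can be exercised directly.
print(FileAccess.getExtension('report.final.odt'))           # -> 'odt'
print(FileAccess.getFilename('file:///tmp/docs/a.odt'))      # -> 'a.odt'
print(FileAccess.getParentDir('file:///tmp/docs/'))          # -> 'file:///tmp'
print(FileAccess.connectURLs('file:///tmp/docs', '/a.odt'))  # -> 'file:///tmp/docs/a.odt'
print(FileAccess.filename('report', 'odt', 2))               # -> 'report2.odt'
```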
#### File: wizards/fax/CGFax.py
```python
from ..common.ConfigGroup import ConfigGroup
class CGFax(ConfigGroup):
def __init__(self):
self.cp_Style = int()
self.cp_PrintCompanyLogo = bool()
self.cp_PrintDate = bool()
self.cp_PrintSubjectLine = bool()
self.cp_PrintSalutation = bool()
self.cp_PrintCommunicationType = bool()
self.cp_PrintGreeting = bool()
self.cp_PrintFooter = bool()
self.cp_CommunicationType = str()
self.cp_Salutation = str()
self.cp_Greeting = str()
self.cp_SenderAddressType = int()
self.cp_SenderCompanyName = str()
self.cp_SenderStreet = str()
self.cp_SenderPostCode = str()
self.cp_SenderState = str()
self.cp_SenderCity = str()
self.cp_SenderFax = str()
self.cp_ReceiverAddressType = int()
self.cp_Footer = str()
self.cp_FooterOnlySecondPage = bool()
self.cp_FooterPageNumbers = bool()
self.cp_CreationType = int()
self.cp_TemplateName = str()
self.cp_TemplatePath = str()
```
#### File: ui/event/DataAware.py
```python
import traceback
import uno
from abc import ABCMeta, abstractmethod
from com.sun.star.util import Date
from com.sun.star.util import Time
from datetime import datetime
'''
DataAware objects are used to live-synchronize UI and DataModel/DataObject.
It is used as a listener on UI events, to keep the DataObject up to date.
This class, as a base abstract class, sets a frame of functionality,
delegating the data object get/set methods to a Value object,
and leaving the UI get/set methods abstract.
Note that event listening is *not* a part of this model.
The updateData() or updateUI() methods should be called programmatically;
in child classes, updateData() will be bound to UI event calls.
This class holds references to a Data Object and a Value object.
The Value object "knows" how to get and set a value from the
Data Object.
'''
class DataAware(object):
__metaclass__ = ABCMeta
'''
creates a DataAware object for the given data object and Value object.
    @param dataObject_ the data object to synchronize with the UI.
    @param field_ the name of the data object's field to read and write.
'''
def __init__(self, dataObject_, field_):
self._dataObject = dataObject_
self._field = field_
'''
sets the given value to the UI control
@param newValue the value to set to the ui control.
'''
@abstractmethod
def setToUI (self,newValue):
pass
'''
gets the current value from the UI control.
@return the current value from the UI control.
'''
@abstractmethod
def getFromUI (self):
pass
'''
updates the UI control according to the
current state of the data object.
'''
def updateUI(self):
try:
data = getattr(self._dataObject, self._field)
except Exception:
data = uno.invoke(self._dataObject, "get" + self._field, ())
ui = self.getFromUI()
        if data != ui:
try:
self.setToUI(data)
except Exception:
traceback.print_exc()
'''
updates the DataObject according to
the current state of the UI control.
'''
def updateData(self):
useUno = False
try:
try:
data = getattr(self._dataObject, self._field)
except Exception:
useUno = True
data = uno.invoke(self._dataObject, "get" + self._field, ())
ui = self.getFromUI()
            if data != ui:
if isinstance(ui,Date):
d = datetime(ui.Year, ui.Month, ui.Day)
ui = d.strftime('%d/%m/%y')
elif isinstance(ui,Time):
t = datetime(1, 1, 1, ui.Hours, ui.Minutes)
ui = t.strftime('%H:%M')
if useUno:
uno.invoke(self._dataObject, "set" + self._field, (ui,))
else:
if isinstance(ui,tuple):
#Listbox Element
ui = ui[0]
setattr(self._dataObject, self._field, ui)
except Exception:
traceback.print_exc()
```
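A minimal sketch of a concrete DataAware subclass, with plain Python objects standing in for the UNO control and the data model (hypothetical names; the `uno` module must be importable for this module to load):
```python
class PlainDataAware(DataAware):
    # Stores the "UI" state in a plain attribute instead of a UNO control.
    def __init__(self, dataObject_, field_):
        super(PlainDataAware, self).__init__(dataObject_, field_)
        self._ui_value = None

    def setToUI(self, newValue):
        self._ui_value = newValue

    def getFromUI(self):
        return self._ui_value

class Model:
    def __init__(self):
        self.cp_Topic = 'Budget'

model = Model()
da = PlainDataAware(model, 'cp_Topic')
da.updateUI()               # push model.cp_Topic into the "UI"
print(da.getFromUI())       # -> Budget
da.setToUI('Roadmap')
da.updateData()             # pull the "UI" value back into the model
print(model.cp_Topic)       # -> Roadmap
```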
#### File: ui/event/ListModelBinder.py
```python
from .ListDataListener import ListDataListener
class ListModelBinder(ListDataListener):
def __init__(self, unoListBox, listModel_):
self.unoList = unoListBox
self.unoListModel = unoListBox.Model
self.listModel = None
self.setListModel(listModel_)
self.renderer = self.Renderer()
def setListModel(self, newListModel):
if self.listModel is not None:
self.listModel.removeListDataListener(self)
self.listModel = newListModel
self.listModel.addListDataListener(self)
def update(self, i):
self.remove(i, i)
self.insert(i)
def remove(self, i1, i2):
self.unoList.removeItems(i1, i2 - i1 + 1)
def insert(self, i):
self.unoList.addItem(self.getItemString(i), i)
def getItemString(self, i):
return self.getItemString1(self.listModel.getElementAt(i))
def getItemString1(self, item):
return self.renderer.render(item)
def getSelectedItems(self):
return self.unoListModel.SelectedItems
class Renderer:
        def render(self, item):
            if (item is None):
                return ""
            else:
                return str(item)
```
#### File: ui/event/RadioDataAware.py
```python
from .CommonListener import ItemListenerProcAdapter
from .DataAware import DataAware
class RadioDataAware(DataAware):
def __init__(self, data, value, radioButtons):
super(RadioDataAware,self).__init__(data, value)
self.radioButtons = radioButtons
def setToUI(self, value):
selected = int(value)
if selected == -1:
for i in self.radioButtons:
i.State = False
else:
self.radioButtons[selected].State = True
def getFromUI(self):
for index, workwith in enumerate(self.radioButtons):
if workwith.State:
return index
return -1
@classmethod
def attachRadioButtons(self, data, prop, buttons, field):
da = RadioDataAware(data, prop, buttons)
        method = da.updateData
for i in da.radioButtons:
i.addItemListener(ItemListenerProcAdapter(method))
return da
```
|
{
"source": "jerrykuo7727/QA-FGC-embeddings",
"score": 3
}
|
#### File: scripts/.ipynb_checkpoints/custom_bert-checkpoint.py
```python
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers.modeling_bert import BertPreTrainedModel, BertLayerNorm, BertEncoder, BertPooler
class CustomBertEmbeddings(nn.Module):
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# ---- Custom embeddings ----
self.datedur_embeddings = nn.Embedding(2, config.hidden_size)
self.num_embeddings = nn.Embedding(2, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, \
datedur_mask=None, num_mask=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
device = input_ids.device if input_ids is not None else inputs_embeds.device
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).expand(input_shape)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
# ---- Custom embeddings ----
if datedur_mask is None:
datedur_mask = torch.zeros(input_shape, dtype=torch.long, device=device)
if num_mask is None:
num_mask = torch.zeros(input_shape, dtype=torch.long, device=device)
datedur_embeddings = self.datedur_embeddings(datedur_mask)
num_embeddings = self.num_embeddings(num_mask)
embeddings = inputs_embeds + position_embeddings + token_type_embeddings + \
datedur_embeddings + num_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class CustomBertModel(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.embeddings = CustomBertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
datedur_mask=None,
num_mask=None
):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder:
batch_size, seq_length = input_shape
seq_ids = torch.arange(seq_length, device=device)
causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
causal_mask = causal_mask.to(
torch.long
) # not converting to long will cause errors with pytorch version < 1.3
extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
else:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
"Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
input_shape, attention_mask.shape
)
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
if encoder_attention_mask.dim() == 3:
encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
elif encoder_attention_mask.dim() == 2:
encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
else:
raise ValueError(
"Wrong shape for encoder_hidden_shape (shape {}) or encoder_attention_mask (shape {})".format(
encoder_hidden_shape, encoder_attention_mask.shape
)
)
encoder_extended_attention_mask = encoder_extended_attention_mask.to(
dtype=next(self.parameters()).dtype
) # fp16 compatibility
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = (
head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
) # We can specify head_mask for each layer
head_mask = head_mask.to(
dtype=next(self.parameters()).dtype
            )  # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.config.num_hidden_layers
embedding_output = self.embeddings(
input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, \
datedur_mask=datedur_mask, num_mask=num_mask
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
outputs = (sequence_output, pooled_output,) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions)
class CustomBertForQuestionAnswering(BertPreTrainedModel):
def __init__(self, config):
super(CustomBertForQuestionAnswering, self).__init__(config)
self.num_labels = config.num_labels
self.bert = CustomBertModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
datedur_mask=None,
num_mask=None
):
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
datedur_mask=datedur_mask,
num_mask=num_mask
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
outputs = (start_logits, end_logits,) + outputs[2:]
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
outputs = (total_loss,) + outputs
return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions)
```
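A minimal usage sketch of the custom QA model. This assumes a transformers version that still exposes `transformers.modeling_bert` (pre-4.x); the tiny config values are illustrative, not those of a real checkpoint:
```python
import torch
from transformers import BertConfig

# Tiny illustrative config; a real run would load a pretrained checkpoint instead.
config = BertConfig(vocab_size=100, hidden_size=32, num_hidden_layers=2,
                    num_attention_heads=2, intermediate_size=64, num_labels=2)
model = CustomBertForQuestionAnswering(config)

batch, seq_len = 2, 16
input_ids = torch.randint(0, 100, (batch, seq_len))
# 0/1 masks marking which tokens belong to a date/duration or numeric span.
datedur_mask = torch.zeros(batch, seq_len, dtype=torch.long)
num_mask = torch.zeros(batch, seq_len, dtype=torch.long)
datedur_mask[:, 3:6] = 1

start_logits, end_logits = model(input_ids, datedur_mask=datedur_mask, num_mask=num_mask)[:2]
print(start_logits.shape, end_logits.shape)  # -> torch.Size([2, 16]) torch.Size([2, 16])
```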
|
{
"source": "jerrykuo7727/QA-FGC-finetune-datedur",
"score": 2
}
|
#### File: QA-FGC-finetune-datedur/scripts/prepare_bert_data.py
```python
import re
import sys
import json
from os.path import join, exists
from transformers import BertTokenizer
def tokenize_no_unk(tokenizer, text):
split_tokens = []
for token in tokenizer.basic_tokenizer.tokenize(text, never_split=tokenizer.all_special_tokens):
wp_tokens = tokenizer.wordpiece_tokenizer.tokenize(token)
if wp_tokens == [tokenizer.unk_token]:
split_tokens.append(token)
else:
split_tokens.extend(wp_tokens)
return split_tokens
def find_sublist(a, b, order=-1):
if not b:
return -1
counter = 0
for i in range(len(a)-len(b)+1):
if a[i:i+len(b)] == b:
counter += 1
if counter > order:
return i
return -1
if __name__ == '__main__':
if len(sys.argv) < 4:
print('Usage: python3 prepare_bert_data.py <pretrained_model> <split> <dataset_1> <dataset_2> ... <dataset_n>')
exit(1)
model_path = sys.argv[1]
split = sys.argv[2]
datasets = sys.argv[3:]
tokenizer = BertTokenizer.from_pretrained(model_path)
for dataset in datasets:
data = json.load(open('dataset/%s.json' % dataset))
passage_count = len(data)
impossible_questions = 0
for i, PQA in enumerate(data, start=1):
# Passage
raw_passage = PQA['DTEXT'].strip()
passage = tokenizer.tokenize(raw_passage)
passage_no_unk = tokenize_no_unk(tokenizer, raw_passage)
PID = PQA['DID']
# QA pairs
QAs = []
for QA in PQA['QUESTIONS']:
if split == 'train':
if QA['AMODE'] != 'Single-Span-Extraction' and \
'Single-Span-Extraction' not in QA['AMODE'] or \
'ANSWER' not in QA:
continue
else:
if QA['AMODE'] != 'Date-Duration' and \
'Date-Duration' not in QA['AMODE'] or \
'ANSWER' not in QA:
continue
processed_QA = {}
raw_question = QA['QTEXT'].strip()
question = tokenizer.tokenize(raw_question)
question_no_unk = tokenize_no_unk(tokenizer, raw_question)
raw_answers = [A['ATEXT'].strip() for A in QA['ANSWER']]
raw_answer_start = QA['ANSWER'][0]['ATOKEN'][0]['start']
                # Escape the answer text so regex metacharacters are matched literally
                found_answer_starts = [m.start() for m in re.finditer(re.escape(raw_answers[0]), raw_passage)]
answer_order, best_dist = -1, 10000
for order, found_start in enumerate(found_answer_starts):
dist = abs(found_start - raw_answer_start)
if dist < best_dist:
best_dist = dist
answer_order = order
answer_no_unk = tokenize_no_unk(tokenizer, raw_answers[0])
answer_start = find_sublist(passage_no_unk, answer_no_unk, order=answer_order)
answer_end = answer_start + len(answer_no_unk) - 1 if answer_start >= 0 else -1
if answer_start < 0:
impossible_questions += 1
if split != 'train':
processed_QA['question'] = raw_question
processed_QA['question_no_unk'] = raw_question
processed_QA['answer'] = raw_answers
processed_QA['answer_start'] = -1
processed_QA['answer_end'] = -1
processed_QA['id'] = QA['QID']
QAs.append(processed_QA)
elif answer_start >= 0:
processed_QA['question'] = question
processed_QA['question_no_unk'] = question_no_unk
processed_QA['answer'] = raw_answers
processed_QA['answer_start'] = answer_start
processed_QA['answer_end'] = answer_end
processed_QA['id'] = QA['QID']
QAs.append(processed_QA)
# Save processed data
with open('data/%s/passage/%s|%s' % (split, dataset, PID), 'w') as f:
if split == 'train':
assert passage == ' '.join(passage).split(' ')
f.write(' '.join(passage))
else:
f.write(raw_passage)
with open('data/%s/passage_no_unk/%s|%s' % (split, dataset, PID), 'w') as f:
if split == 'train':
assert passage_no_unk == ' '.join(passage_no_unk).split(' ')
f.write(' '.join(passage_no_unk))
else:
f.write(raw_passage)
for QA in QAs:
question = QA['question']
question_no_unk = QA['question_no_unk']
answers = QA['answer']
answer_start = QA['answer_start']
answer_end = QA['answer_end']
QID = QA['id']
with open('data/%s/question/%s|%s|%s' % (split, dataset, PID, QID), 'w') as f:
if split == 'train':
assert question == ' '.join(question).split(' ')
f.write(' '.join(question))
else:
f.write(question)
with open('data/%s/question_no_unk/%s|%s|%s' % (split, dataset, PID, QID), 'w') as f:
if split == 'train':
assert question_no_unk == ' '.join(question_no_unk).split(' ')
f.write(' '.join(question_no_unk))
else:
f.write(question_no_unk)
with open('data/%s/answer/%s|%s|%s' % (split, dataset, PID, QID), 'w') as f:
for answer in answers:
f.write('%s\n' % answer)
with open('data/%s/span/%s|%s|%s' % (split, dataset, PID, QID), 'w') as f:
f.write('%d %d' % (answer_start, answer_end))
print('%s: %d/%d (%.2f%%) \r' % (dataset, i, passage_count, 100*i/passage_count), end='')
print('\nimpossible_questions: %d' % impossible_questions)
exit(0)
```
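A quick sketch of `find_sublist`, which locates the `order`-th occurrence (0-based) of `b` inside `a` and is used above to pick the answer span closest to the annotated character offset:
```python
a = ['in', '1997', 'and', 'again', 'in', '1997']
b = ['in', '1997']

print(find_sublist(a, b, order=0))   # -> 0  (first occurrence)
print(find_sublist(a, b, order=1))   # -> 4  (second occurrence)
print(find_sublist(a, ['missing']))  # -> -1 (not found)
```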
#### File: QA-FGC-finetune-datedur/scripts/train_bert.py
```python
import sys
import numpy as np
from os.path import join
from copy import deepcopy
import torch
from torch.nn.functional import softmax
from torch.nn.utils import clip_grad_norm_
from transformers import BertTokenizer, BertForQuestionAnswering
from utils import AdamW
from data import get_dataloader
from evaluate import f1_score, exact_match_score, metric_max_over_ground_truths
np.random.seed(42)
torch.manual_seed(42)
norm_tokenizer = BertTokenizer.from_pretrained('/home/M10815022/Models/bert-wwm-ext')
def validate_dataset(model, split, tokenizer, topk=1, prefix=None):
assert split in ('dev', 'test')
fwd_dataloader = get_dataloader('bert', split, tokenizer, bwd=False, \
batch_size=16, num_workers=16, prefix=prefix)
bwd_dataloader = get_dataloader('bert', split, tokenizer, bwd=True, \
batch_size=16, num_workers=16, prefix=prefix)
em, f1, count = 0, 0, 0
model.eval()
for fwd_batch, bwd_batch in zip(fwd_dataloader, bwd_dataloader):
# FWD
input_ids, attention_mask, token_type_ids, margin_mask, fwd_input_tokens_no_unks, answers = fwd_batch
input_ids = input_ids.cuda(device=device)
attention_mask = attention_mask.cuda(device=device)
token_type_ids = token_type_ids.cuda(device=device)
margin_mask = margin_mask.cuda(device=device)
with torch.no_grad():
outputs = model(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
start_logits, end_logits = outputs[0], outputs[1]
start_logits += margin_mask
end_logits += margin_mask
start_logits = start_logits.cpu().clone()
fwd_end_logits = end_logits.cpu().clone()
start_probs = softmax(start_logits, dim=1)
fwd_start_probs, fwd_start_index = start_probs.topk(topk*5, dim=1)
# BWD
input_ids, attention_mask, token_type_ids, margin_mask, bwd_input_tokens_no_unks, answers = bwd_batch
input_ids = input_ids.cuda(device=device)
attention_mask = attention_mask.cuda(device=device)
token_type_ids = token_type_ids.cuda(device=device)
margin_mask = margin_mask.cuda(device=device)
with torch.no_grad():
outputs = model(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
start_logits, end_logits = outputs[0], outputs[1]
start_logits += margin_mask
end_logits += margin_mask
start_logits = start_logits.cpu().clone()
bwd_end_logits = end_logits.cpu().clone()
start_probs = softmax(start_logits, dim=1)
bwd_start_probs, bwd_start_index = start_probs.topk(topk*5, dim=1)
# FWD-BWD
for i, answer in enumerate(answers):
preds, probs = [], []
for n in range(topk):
# FWD
start_ind = fwd_start_index[i][n].item()
beam_end_logits = fwd_end_logits[i].clone().unsqueeze(0)
end_probs = softmax(beam_end_logits, dim=1)
end_probs[0, :start_ind] += -1e10
end_probs[0, start_ind+20:] += -1e10
end_probs, end_index = end_probs.topk(1, dim=1)
end_ind = end_index[0][0]
prob = (fwd_start_probs[i][n] * end_probs[0][0]).item()
span_tokens = fwd_input_tokens_no_unks[i][start_ind:end_ind+1]
pred = ''.join(tokenizer.convert_tokens_to_string(span_tokens).split())
if pred == tokenizer.sep_token or pred == '':
pass
elif pred and pred not in preds:
probs.append(prob)
preds.append(pred)
elif pred and pred in preds:
pred_idx = preds.index(pred)
if prob > probs[pred_idx]:
probs[pred_idx] = prob
#probs[preds.index(pred)] += prob
else:
pass
# BWD
start_ind = bwd_start_index[i][n].item()
beam_end_logits = bwd_end_logits[i].clone().unsqueeze(0)
end_probs = softmax(beam_end_logits, dim=1)
end_probs[0, :start_ind] += -1e10
end_probs[0, start_ind+20:] += -1e10
end_probs, end_index = end_probs.topk(1, dim=1)
end_ind = end_index[0][0]
prob = (bwd_start_probs[i][n] * end_probs[0][0]).item()
span_tokens = bwd_input_tokens_no_unks[i][start_ind:end_ind+1]
pred = ''.join(tokenizer.convert_tokens_to_string(span_tokens).split())
if pred == tokenizer.sep_token or pred == '':
pass
elif pred and pred not in preds:
probs.append(prob)
preds.append(pred)
elif pred and pred in preds:
                        pred_idx = preds.index(pred)
if prob > probs[pred_idx]:
probs[pred_idx] = prob
#probs[preds.index(pred)] += prob
else:
pass
count += 1
if len(preds) > 0:
sorted_probs_preds = list(reversed(sorted(zip(probs, preds))))
probs, preds = map(list, zip(*sorted_probs_preds))
probs, preds = probs[:topk], preds[:topk]
norm_preds_tokens = [norm_tokenizer.basic_tokenizer.tokenize(pred) for pred in preds]
norm_preds = [norm_tokenizer.convert_tokens_to_string(norm_pred_tokens) for norm_pred_tokens in norm_preds_tokens]
norm_answer_tokens = [norm_tokenizer.basic_tokenizer.tokenize(ans) for ans in answer]
norm_answer = [norm_tokenizer.convert_tokens_to_string(ans_tokens) for ans_tokens in norm_answer_tokens]
em += max(metric_max_over_ground_truths(exact_match_score, norm_pred, norm_answer) for norm_pred in norm_preds)
f1 += max(metric_max_over_ground_truths(f1_score, norm_pred, norm_answer) for norm_pred in norm_preds)
del fwd_dataloader, bwd_dataloader
return em, f1, count
def validate(model, tokenizer, topk=1, prefix=None):
if prefix:
print('---- Validation results on %s dataset ----' % prefix)
# Valid set
val_em, val_f1, val_count = validate_dataset(model, 'dev', tokenizer, topk, prefix)
val_avg_em = 100 * val_em / val_count
val_avg_f1 = 100 * val_f1 / val_count
# Test set
test_em, test_f1, test_count = validate_dataset(model, 'test', tokenizer, topk, prefix)
test_avg_em = 100 * test_em / test_count
test_avg_f1 = 100 * test_f1 / test_count
print('%d-best | val_em=%.5f, val_f1=%.5f | test_em=%.5f, test_f1=%.5f' \
% (topk, val_avg_em, val_avg_f1, test_avg_em, test_avg_f1))
return val_avg_f1
if __name__ == '__main__':
if len(sys.argv) != 4:
print('Usage: python3 train_bert.py cuda:<n> <model_path> <save_path>')
exit(1)
# Config
lr = 3e-5
batch_size = 4
accumulate_batch_size = 64
assert accumulate_batch_size % batch_size == 0
update_stepsize = accumulate_batch_size // batch_size
model_path = sys.argv[2]
tokenizer = BertTokenizer.from_pretrained(model_path)
model = BertForQuestionAnswering.from_pretrained(model_path)
device = torch.device(sys.argv[1])
model.to(device)
optimizer = AdamW(model.parameters(), lr=lr)
optimizer.zero_grad()
step = 0
patience, best_val = 0, 0
best_state_dict = model.state_dict()
dataloader = get_dataloader('bert', 'train', tokenizer, batch_size=batch_size, num_workers=16)
n_step_per_epoch = len(dataloader)
n_step_per_validation = n_step_per_epoch // 5
print('%d steps per epoch.' % n_step_per_epoch)
print('%d steps per validation.' % n_step_per_validation)
print('Start training...')
while True:
for batch in dataloader:
input_ids, attention_mask, token_type_ids, start_positions, end_positions = batch
input_ids = input_ids.cuda(device=device)
attention_mask = attention_mask.cuda(device=device)
token_type_ids = token_type_ids.cuda(device=device)
start_positions = start_positions.cuda(device=device)
end_positions = end_positions.cuda(device=device)
model.train()
loss = model(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, \
start_positions=start_positions, end_positions=end_positions)[0]
loss.backward()
step += 1
print('step %d | Training...\r' % step, end='')
if step % update_stepsize == 0:
optimizer.step()
optimizer.zero_grad()
if step % n_step_per_validation == 0:
print("step %d | Validating..." % step)
val_f1 = validate(model, tokenizer, topk=1)
if val_f1 > best_val:
patience = 0
best_val = val_f1
best_state_dict = deepcopy(model.state_dict())
else:
patience += 1
if patience >= 10 or step >= 200000:
print('Finish training. Scoring 1-5 best results...')
save_path = join(sys.argv[3], 'finetune.ckpt')
torch.save(best_state_dict, save_path)
model.load_state_dict(best_state_dict)
for k in range(1, 6):
validate(model, tokenizer, topk=k)
print('Scoring 1-best for all test splits...')
for prefix in ('DRCD', 'Kaggle', 'Lee', 'FGC'):
validate(model, tokenizer, topk=1, prefix=prefix)
del model, dataloader
exit(0)
```
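The training loop above reaches an effective batch of 64 by accumulating gradients over 16 micro-batches of 4. A distilled, runnable sketch of that pattern, with a toy model and synthetic data standing in for the real BERT fine-tuning:
```python
import torch
from torch import nn

# Stand-ins so the sketch runs: a toy model and synthetic micro-batches.
model = nn.Linear(8, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
dataloader = [(torch.randn(4, 8), torch.randn(4, 1)) for _ in range(32)]

accumulate_batch_size, batch_size = 64, 4
update_stepsize = accumulate_batch_size // batch_size  # 16 backward passes per update

optimizer.zero_grad()
for step, (x, y) in enumerate(dataloader, start=1):
    loss = nn.functional.mse_loss(model(x), y)
    loss.backward()                # gradients accumulate across micro-batches
    if step % update_stepsize == 0:
        optimizer.step()           # one optimizer update per 64 examples
        optimizer.zero_grad()
```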
|
{
"source": "jerrylai19990120/Snake-Game",
"score": 4
}
|
#### File: jerrylai19990120/Snake-Game/game.py
```python
from game_environment import GameEnvironment
import pygame
class Game:
"""
Class to represent a single game.
instance variables:
int level = The difficulty level of the game. This is obtained from the
food in the environment
    int score = The user's current score.
    environment = The game environment object in which this game is played
def __init__(self):
self.score = 0
self.environment = GameEnvironment()
self.level = self.environment.food.level
def set_level(self, level):
"""
Sets the initial difficulty level of the game. Changes the food levels
in the environment accordingly and changes the fps of the environment.
:param level: The initial level of the game.
:return: None
>>> game_object = Game()
>>> game_object.set_level(3)
>>> print(game_object.level)
3
>>> game_object.set_level(4)
>>> print(game_object.level)
4
"""
self.level = level
self.environment.food.level = self.level
self.environment.fps += 5
def run_game(self):
"""
Calls the events method on this game's environment and then calls
end_game_display when the game ends
:return: None
"""
self.environment.events()
if self.environment.status == 0:
pygame.quit()
self.end_game_display()
def end_game_display(self):
"""
This method creates and displays a pygame window with the appropriate
message displayed
"""
pygame.init()
end_screen = True
while end_screen:
red_font = (255, 0, 0)
white_font = (255, 255, 255)
screen = pygame.display
surface = screen.set_mode((800, 600))
pygame.display.set_caption('Sorry! You Lost.')
# Creates text for end screen, when snake dies
large_text = pygame.font.Font('freesansbold.ttf', 90)
text_surf = large_text.render('Sorry! You Died.', True, red_font)
text_rect = text_surf.get_rect()
text_rect.center = (400, 300)
surface.blit(text_surf, text_rect)
# Creates two rectangles, which will act as buttons
pygame.draw.rect(surface, red_font, (150, 450, 100, 50))
pygame.draw.rect(surface, red_font, (550, 450, 100, 50))
# Creates the text for the restart button
small_text = pygame.font.Font('freesansbold.ttf', 25)
button_restart_text = small_text.render('Restart', True, white_font)
button_restart_rect = text_surf.get_rect()
button_restart_rect.center = (504, 510)
surface.blit(button_restart_text, button_restart_rect)
            # Creates text for the quit button
button_quit_text = small_text.render('Quit', True, white_font)
button_quit_rect = text_surf.get_rect()
button_quit_rect.center = (920, 510)
surface.blit(button_quit_text, button_quit_rect)
screen.flip()
            # Pump the event queue so the window stays responsive; react only to actual clicks
            clicked = False
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    end_screen = False
                elif event.type == pygame.MOUSEBUTTONDOWN:
                    clicked = True
            if not end_screen:
                pygame.quit()
                break
            mouse_pos = pygame.mouse.get_pos()
            # If the click lands on the restart or quit button, carry out the appropriate action
            if clicked and 250 > mouse_pos[0] > 150 and 500 > mouse_pos[1] > 450:
                pygame.quit()
                end_screen = False
                new_game = Game()
                new_game.run_game()
            elif clicked and 550 + 100 > mouse_pos[0] > 550 and 450 + 50 > mouse_pos[1] > 450:
                end_screen = False
                pygame.quit()
def get_score(game_object):
"""
Returns the score scored in given game. The score is calculated using the
level and amount eaten.
:param game_object: Game object
:return: int
>>> game = Game()
>>> print(game.score)
0
>>> get_score(game)
0
"""
game_object.score = game_object.level * game_object.environment.food.eaten
return game_object.score
def get_final_score(game_object):
"""
    Implementation yet to be decided.
:param game_object:
:return:
"""
if __name__ == '__main__':
game = Game()
game.run_game()
```
|
{
"source": "JerryLead/SparkFaultBench",
"score": 2
}
|
#### File: lcr/run/run_test.py
```python
import os
classdicts = {'Scan':'sql.standard.Scan', 'Join':'sql.standard.Join', 'Aggregate':'sql.standard.Aggregate', 'Mix':'sql.standard.Mix'}
jardicts = {'Scan':'ScanSQL', 'Join':'JoinSQL', 'Aggregate':'AggregateSQL', 'Mix':'MixSQL'}
params = {}
dirpath = os.getcwd()
f = open("%s/config.txt" %dirpath,"r")
lines = f.readlines()
f.close()
for line in lines:
line = line.strip()
if len(line)>1:
if not line.strip()[0] == '#':
pas = line.split("=")
params[pas[0].strip()] = pas[1].strip()
dfs_path = params["HDFS_PATH"]
rankings_file = params["RANKINGS_FILE"]
rankings_skewed_file = params["RANKINGS_SKEWED_FILE"]
uservisit_file = params["USERVISITS_FILE"]
uservisit_skewed_file = params["USERVISITS_SKEWED_FILE"]
runtype = params["RUN_TYPE"]
print (runtype)
def runspark(appname, taskname, f1, f2):
classname = classdicts[taskname]
testname = jardicts[taskname]
print ("start %s\n" % appname)
os.system("${SPARK_HOME}/bin/spark-submit "
"--master yarn "
"--deploy-mode cluster "
"--queue default "
"--class %s %s/%s.jar %s %s %s"
%(classname,dirpath,"SparkFaultBench", dfs_path, f1, f2))
print ("finish %s\n" % appname)
f = open("%s/testlist.txt" % dirpath,"r")
lines = f.readlines()
f.close()
for line in lines:
if len(line) <= 1:
continue
line = line.strip()
if (runtype == 'ALL'):
runspark(line, line, rankings_file, uservisit_file)
runspark('skewed '+line, line, rankings_skewed_file, uservisit_skewed_file)
elif runtype == 'NORMAL':
runspark(line, line, rankings_file, uservisit_file)
elif runtype == 'SKEWED':
runspark('skewed '+line, line, rankings_skewed_file, uservisit_skewed_file)
else:
print ("wrong run type, stop test!")
break
```
#### File: lcr/scripts/mod_config.py
```python
import configparser
import os
def getConfigBySection(section,filename):
config = configparser.ConfigParser()
path = os.path.split(os.path.realpath(__file__))[0] + '/'+filename+'.txt'
config.read(path)
return config.items(section)
```
|
{
"source": "jerry-le/computer-vision",
"score": 3
}
|
#### File: src/geometrix/flip.py
```python
import cv2
import numpy as np
from __utils__.general import show_image
def flip(img, axis=1):
"""
    Flip an image vertically or horizontally.
    :param img: input image as a numpy array
    :param axis: 1 for vertical flip, 0 for horizontal flip
    :return: the flipped image
"""
if axis != 1 and axis != 0:
raise Exception('Axis must be 1 for vertical flip, or 0 for horizontal flip')
return cv2.flip(img, axis)
if __name__ == '__main__':
image = cv2.imread('../../asserts/images/stop_sign.jpg')
out_image = '../../asserts/images/stop_sign_flip.jpg'
flipped_img = flip(image)
res_img = np.hstack((image, flipped_img))
show_image(res_img)
```
#### File: src/grayscaling/power_law.py
```python
import cv2
import numpy as np
from __utils__.general import show_image
def power_law(image, c=1, gamma=1):
    out = image.copy()
    # Work in float and clip back to [0, 255] to avoid silent uint8
    # overflow when c * pixel**gamma exceeds the 8-bit range
    for pixel in np.nditer(out, op_flags=['readwrite']):
        pixel[...] = np.clip(c * np.power(float(pixel), gamma), 0, 255)
return out
if __name__ == '__main__':
image = cv2.imread('../../asserts/images/elena.jpg', 0)
res = np.hstack((image, power_law(image, c=1, gamma=1.1)))
show_image(res)
```
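The element-wise loop above is easy to follow but slow; below is a vectorized variant of the same transform, using the common normalized form of the power law (a sketch, not part of the original module):
```python
import cv2
import numpy as np

def power_law_vectorized(image, c=1.0, gamma=1.0):
    # Normalize to [0, 1], apply c * r**gamma, and rescale to 8-bit.
    normalized = image.astype(np.float64) / 255.0
    out = c * np.power(normalized, gamma)
    return np.clip(out * 255.0, 0, 255).astype(np.uint8)

image = cv2.imread('../../asserts/images/elena.jpg', 0)
brighter = power_law_vectorized(image, gamma=0.5)  # gamma < 1 brightens
darker = power_law_vectorized(image, gamma=2.0)    # gamma > 1 darkens
```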
#### File: src/segmentation/canny.py
```python
from segmentation.sobel import SobelDetectionEdge
from __utils__.general import pickle_load_object, show_image
from scipy import ndimage
import numpy as np
import math
class CannyEdgeDetection:
def __init__(self, img, gradient_x=None, gradient_y=None, gradient=None):
self.img = img
self.gradient_magnitude_of_x = gradient_x
self.gradient_magnitude_of_y = gradient_y
self.gradient_magnitude = gradient
self.theta = None
if self.gradient_magnitude is None:
self.compute_gradient()
self.compute_theta()
def compute_gradient(self):
sobel = SobelDetectionEdge(img=self.img)
self.gradient_magnitude_of_x = sobel.get_gradient_of_x()
self.gradient_magnitude_of_y = sobel.get_gradient_of_y()
self.gradient_magnitude = sobel.get_gradient_magnitude()
def compute_theta(self):
try:
# self.theta = self.gradient_magnitude_of_y / self.gradient_magnitude_of_x
# for pixel in np.nditer(self.theta, op_flags=['readwrite']):
# if math.isnan(pixel):
# pixel[...] = 0
# if math.isinf(pixel):
# pixel[...] = 10
self.theta = np.arctan2(self.gradient_magnitude_of_y, self.gradient_magnitude_of_x)
except Exception as e:
pass
def canny(self):
self.non_maximal_suppression(self.gradient_magnitude, self.theta)
pass
@staticmethod
def non_maximal_suppression(G, theta):
"""
Performs non-maximal-suppression of gradients.
Bins into 4 directions (up/down, left/right, both diagonals),
and sets non-maximal elements in a 3x3 neighborhood to zero.
Args:
G: A (height, width) float numpy array of gradient magnitudes.
theta: A (height, width) float numpy array of gradient directions.
Returns:
suppressed: A (height, width) float numpy array of suppressed
gradient magnitudes.
"""
theta *= 180.0 / np.pi
theta[theta > 180.0] -= 180.0
hits = np.zeros_like(G, dtype=bool)
correlate = ndimage.correlate
correlate1d = ndimage.correlate1d
convolve = ndimage.convolve
convolve1d = ndimage.convolve1d
kernel = np.array([0.0, 1.0, -1.0])
mask = np.logical_or(theta < 22.5, theta > 157.5)
hits[mask] = np.logical_and(correlate1d(G, kernel, axis=-1)[mask] >= 0.0,
convolve1d(G, kernel, axis=-1)[mask] >= 0.0)
mask = np.logical_and(theta >= 67.5, theta < 112.5)
hits[mask] = np.logical_and(correlate1d(G, kernel, axis=0)[mask] >= 0.0,
convolve1d(G, kernel, axis=0)[mask] >= 0.0)
kernel = np.array([[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0]])
mask = np.logical_and(theta >= 22.5, theta < 67.5)
hits[mask] = np.logical_and(correlate(G, kernel)[mask] >= 0.0,
convolve(G, kernel)[mask] >= 0.0)
kernel = np.array([[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[-1.0, 0.0, 0.0]])
mask = np.logical_and(theta >= 112.5, theta < 157.5)
hits[mask] = np.logical_and(correlate(G, kernel)[mask] >= 0.0,
convolve(G, kernel)[mask] >= 0.0)
suppressed = G.copy()
suppressed[np.logical_not(hits)] = 0.0
return suppressed
@staticmethod
def double_thresholding(img, high, low):
out = np.copy(img)
height, width = img.shape
# if a pixel value greater than high threshold, it is strong edge
strong_edges = (out > high)
# strong edges is 2, weak edges is 1, non-edge is zero
threshold_edges = np.array(strong_edges.astype(np.uint8)) + (out > low)
        # Walk every pixel (borders included); clamp the 3x3 patch at the
        # image edge so border slices never go empty
        for r in range(height):
            for c in range(width):
                if threshold_edges[r][c] != 1:
                    continue  # not a weak edge
                # patch 3x3 surrounding current pixel, clamped at the borders
                local_patch = threshold_edges[max(r - 1, 0):r + 2, max(c - 1, 0):c + 2]
                patch_max = np.max(local_patch)
                if patch_max == 2:
                    threshold_edges[r][c] = 2
                else:
                    threshold_edges[r][c] = 0
# fit image dtype
max_value = np.iinfo(threshold_edges.dtype).max
threshold_edges[threshold_edges > 0] = max_value
return threshold_edges
```
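A tiny sketch of the hysteresis behaviour of `double_thresholding` (with the clamped border handling above): weak responses, between `low` and `high`, survive only when a strong response sits in their 3x3 neighbourhood:
```python
import numpy as np

G = np.array([[  0,   0,   0,   0,  40],
              [  0, 120,  40,   0,   0],
              [  0,   0,   0,   0,   0],
              [ 40,   0,   0,   0,   0]], dtype=np.uint8)

edges = CannyEdgeDetection.double_thresholding(G, high=100, low=30)
print(edges)
# Only the strong 120 and the weak 40 adjacent to it survive (as 255);
# the two isolated 40s are suppressed.
```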
#### File: src/segmentation/sobel.py
```python
import numpy as np
class SobelDetectionEdge:
def __init__(self, img, Sx=None, Sy=None, threshold=None):
self.img = img
self.Sx = Sx
self.Sy = Sy
self.threshold = threshold
self.Gx = None
self.Gy = None
self.G = None
if threshold is None:
self.threshold = 100
if Sx is None:
self.Sx = [[-1, -2, -1],
[0, 0, 0],
[1, 2, 1]]
if Sy is None:
self.Sy = [[-1, 0, 1],
[-2, 0, 2],
[-1, 0, 1]]
def get_gradient_of_x(self):
"""
        Calculate the gradient in the x direction by convolving the image with Sx
:return: Matrix of float
"""
if self.Gx is None:
self.set_gradient_of_x()
return self.Gx
def get_gradient_of_y(self):
"""
        Calculate the gradient in the y direction by convolving the image with Sy
:return: Matrix of float
"""
if self.Gy is None:
self.set_gradient_of_y()
return self.Gy
def get_gradient_magnitude(self):
"""
Check if gradient of x and y exist then set the magnitude
:return:
"""
if self.Gx is not None and self.Gy is not None:
self.set_gradient_magnitude()
else:
self.set_gradient_of_x()
self.set_gradient_of_y()
self.set_gradient_magnitude()
return self.G
    def get_gradient_magnitude_after_thresholding(self):
        # Threshold the gradient magnitude, not the raw input image
        return self.thresholding(self.get_gradient_magnitude(), self.threshold)
def set_gradient_of_x(self):
self.Gx = self.calculate_convolution(self.img, self.Sx)
def set_gradient_of_y(self):
self.Gy = self.calculate_convolution(self.img, self.Sy)
def set_gradient_magnitude(self):
self.G = np.sqrt(np.square(self.Gx) + np.square(self.Gy))
def sobel(self):
self.set_gradient_of_x()
self.set_gradient_of_y()
self.set_gradient_magnitude()
return self.thresholding(image=self.G, threshold=self.threshold)
@staticmethod
def calculate_convolution(image, mask):
height, width = image.shape
k = int((len(mask) - 1) / 2)
# initialize the result
out = np.zeros((height, width))
# rendering
for i in range(k, height - k):
for j in range(k, width - k):
sum = 0
for m in range(-k, k + 1):
for n in range(-k, k + 1):
sum = sum + image[i + m][j + n] * mask[k + m][k + n]
out[i][j] = sum
return out
@staticmethod
def thresholding(image, threshold):
out = np.copy(image)
for pixel in np.nditer(out, op_flags=['readwrite']):
if pixel > threshold:
pixel[...] = 255
else:
pixel[...] = 0
out = out.astype(np.uint8)
return out
```
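A small sketch of the Sobel pipeline on a toy image (plain numpy, no OpenCV needed; assumes the class above is importable):
```python
import numpy as np

img = np.array([[0,   0,   0, 0],
                [0, 255, 255, 0],
                [0, 255, 255, 0],
                [0,   0,   0, 0]], dtype=np.float64)

detector = SobelDetectionEdge(img=img)
Gx = detector.get_gradient_of_x()   # response to horizontal edges
Gy = detector.get_gradient_of_y()   # response to vertical edges
G = detector.get_gradient_magnitude()
print(G)  # strong responses around the bright square, zeros elsewhere
```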
#### File: segmentation/thresholding/basic_global_thresholding.py
```python
import cv2
import numpy as np
class BasicGlobalThreshold:
def __init__(self, img=None, threshold=None):
self.img = img
self.threshold = threshold
def set_threshold(self, estimate_threshold):
self.threshold = estimate_threshold
def get_threshold(self):
if self.img is None or self.threshold is None:
raise Exception('BasicGlobalThreshold requires image and threshold as the input')
        self.threshold = self.coverage_threshold(self.img, self.threshold)
return self.threshold
@staticmethod
def coverage_threshold(img, estimate_threshold):
threshold = 0
while threshold != estimate_threshold:
threshold = estimate_threshold
lt_threshold = img[img < threshold]
gte_threshold = img[img >= threshold]
estimate_threshold = (sum(lt_threshold) / len(lt_threshold) + sum(gte_threshold) / len(gte_threshold)) / 2
return threshold
```
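A quick sketch of the iterative global-threshold computation on a synthetic bimodal image:
```python
import numpy as np

# Two clusters of intensities: a dark background and a bright object.
img = np.array([10, 12, 14, 16, 200, 205, 210, 215], dtype=np.float64)

bgt = BasicGlobalThreshold(img=img, threshold=100)
print(bgt.get_threshold())  # -> 110.25, the midpoint of the two cluster means
```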
#### File: computer-vision/test/test_basic_arithmetics.py
```python
import cv2
import numpy as np
from unittest import TestCase
from arithmetics import basic_arithmetics as ba
class TestBasicArithmetic(TestCase):
def setUp(self):
self.image_path = '../asserts/images/elena.jpg'
def test_add_gray_success(self):
gray = cv2.imread(self.image_path, 0)
gray_plus_10 = ba.add(gray, 10)
self.assertEqual(gray_plus_10.shape, gray.shape)
self.assertTrue(np.average(gray_plus_10) > np.average(gray))
def test_add_gray_with_color_input(self):
img = cv2.imread(self.image_path)
try:
gray_plus_10 = ba.add(img, 10)
except Exception as e:
self.assertEqual(str(e), 'Image input must be gray')
def test_subtract_gray_success(self):
gray = cv2.imread(self.image_path, 0)
gray_subtract_10 = ba.subtract(gray, 10)
self.assertEqual(gray_subtract_10.shape, gray.shape)
self.assertTrue(np.average(gray_subtract_10) < np.average(gray))
def test_multiple_gray_success(self):
gray = cv2.imread(self.image_path, 0)
gray_time_2 = ba.multiple(gray, 2)
self.assertEqual(gray_time_2.shape, gray.shape)
self.assertTrue(np.average(gray) < np.average(gray_time_2))
def test_subtract_2_images_success(self):
image_path1 = '../asserts/images/right.jpg'
image_path2 = '../asserts/images/right_2.jpg'
gray1 = cv2.imread(image_path1, 0)
gray2 = cv2.imread(image_path2, 0)
gray_diff = ba.subtract2images(gray1, gray2)
self.assertTrue(gray_diff.shape, gray1.shape)
def test_subtract_2_images_with_different_size(self):
image_path1 = '../asserts/images/elena.jpg'
image_path2 = '../asserts/images/right.jpg'
gray1 = cv2.imread(image_path1, 0)
gray2 = cv2.imread(image_path2, 0)
try:
gray_diff = ba.subtract2images(gray1, gray2)
except Exception as e:
self.assertEqual(str(e), 'Images must be the same size')
```
#### File: computer-vision/test/test_harris_corner_detection.py
```python
import cv2
from unittest import TestCase
from src.feature_detection.harris_corner_detection import find_harris_conrners
from src.geometrix.scaling import scaling
from src.__utils__.general import pickle_load_object, show_image
# Author: KhanhLQ
class TestHarrisCornerDetection(TestCase):
def test_harris(self):
img = cv2.imread("../asserts/images/zigzac.jpg")
dst = find_harris_conrners(image=img, block_size=5, ksize=5, k=0.03)
show_image(dst)
def test_harris_rotate_30_degrees(self):
image = cv2.imread("../asserts/images/zigzac.jpg", 0)
rows, cols = image.shape
M = cv2.getRotationMatrix2D((rows/2, cols/2), 30, 1)
dst = cv2.warpAffine(image, M, (cols, rows))
out = find_harris_conrners(image=dst, block_size=5, ksize=5, k=0.03)
show_image(out)
def test_harris_rotate_30_degrees_and_zoom_out_50_percent(self):
image = cv2.imread("../asserts/images/zigzac_zoomin.jpg", 0)
scaled_image = scaling(image, scale=2)
rows, cols = scaled_image.shape
M = cv2.getRotationMatrix2D((rows/2, cols/2), 30, 1)
dst = cv2.warpAffine(scaled_image, M, (cols, rows))
out = find_harris_conrners(image=dst, block_size=5, ksize=5, k=0.03)
show_image(out)
```
#### File: computer-vision/test/test_histogramEqualization.py
```python
from unittest import TestCase
from src.histogram.histogram_equalization import HistogramEqualization
from src.__utils__.general import show_image
import cv2
import numpy as np
# Author: KhanhLQ
class TestHistogramEqualization(TestCase):
def test_compute_histogram(self):
img = cv2.imread("../asserts/images/elena.jpg", 0)
histogram = HistogramEqualization(img=img)
histogram.compute_histogram()
histogram.compute_histogram_cumsum()
histogram.compute_possibility_of_occurrence()
histogram.generate_look_up_table()
histogram.mapping()
out = histogram.get_result()
show_image(np.hstack((img, out)))
self.assertTrue(False)
def test_get_possibility_of_occurrence(self):
img = cv2.imread("../asserts/images/elena.jpg")
histogram = HistogramEqualization(img=img)
histogram.get_possibility_of_occurrence()
```
|
{
"source": "jerrylee1230/SnatchCar",
"score": 3
}
|
#### File: SnatchCar/spiders/bookkBBDC.py
```python
from datetime import datetime
#import sys
import scrapy
import smtplib
from scrapy.mail import MailSender
def send_notification(body):
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login("email.com", "password")
#print(subject)
print(body)
server.sendmail("<EMAIL>", "<EMAIL>", body)
server.quit()
#send_notification("testing")
########## Configuration begins here ##########
# Self explanatory. The username you use to log into bbdc.sg
username = 'S9712989D'
# The password you use to log into bbdc.sg
pin = '210497'
# How many days ahead to book
daysCapWeekday = 12
daysCapWeekend = 24
# Which session numbers (1-8) to book
sessionsToBookWeekdays = ['2', '3', '4','5']
sessionsToBookWeekends = []
"""
1(07:30 – 09:10)
2(09:20 – 11:00)
3(11:30 – 13:10)
4(13:20 – 15:00)
5(15:20 – 17:00)
6(17:10 – 18:50)
7(19:20 – 21:00)
8(21:10 – 22:50)
"""
# Which days to book.
# Note that day #1 is Sunday, #2 is Monday, ..., and #7 is Saturday
weekdays = ['5', '6']
weekends = []
########### Configuration ends here ###########
# Note that this bookedSlots system is not ideal,
# the system won't rebook slots that it has tried
# to book but failed (e.g. someone else has got it
# but the same slot was cancelled again)
bookedSlots = []
checkWeekday = False
class Book(scrapy.Spider):
name = "bookBBDC"
start_urls = ['https://www.bbdc.sg/bbdc/bbdc_web/newheader.asp']
custom_settings = {
'DOWNLOADER_CLIENT_TLS_METHOD': 'TLSv1.0',
'DUPEFILTER_DEBUG': True
}
# download_delay = 5
def parse(self, response):
return scrapy.FormRequest.from_response(
response,
formdata={
'txtNRIC': username,
'txtPassword': <PASSWORD>,
'btnLogin': '+'},
dont_filter=True,
callback=self.afterLogin
)
def afterLogin(self, response):
if "All sessions timeout after 20 minutes of inactivity." in response.body.decode("utf-8"):
print("Timed out, logging in again.")
return scrapy.Request('https://www.bbdc.sg/bbdc/bbdc_web/newheader.asp', dont_filter=True,
callback=self.parse)
# check login succeed before going on
if "Please try again" in response.body.decode("utf-8"):
print("Login failed: please check your username and password.")
return
# send_notification("hi")
print("Login successful.")
return scrapy.Request("https://www.bbdc.sg/bbdc/b-3c-pLessonBooking.asp?limit=pl", dont_filter=True,
callback=self.bookingPage)
def bookingPage(self, response):
if "All sessions timeout after 20 minutes of inactivity." in response.body.decode("utf-8"):
print("Timed out, logging in again.")
return scrapy.Request('https://www.bbdc.sg/bbdc/bbdc_web/newheader.asp', dont_filter=True,
callback=self.parse)
global checkWeekday
checkWeekday = not checkWeekday
print("Checking Weekdays: <{}>".format(weekdays) if checkWeekday else "Checking Weekends: <{}>".format(weekends))
return scrapy.FormRequest.from_response(
response,
formname='frmSelectSchedule',
formdata={
'Month': ['Jun/2019', 'Jul/2019'], # TODO: autogen this
'Session': sessionsToBookWeekdays if checkWeekday else sessionsToBookWeekends,
'Day': weekdays if checkWeekday else weekends,
'defPLVenue': '1',
'optVenue': '1'},
dont_filter=True,
callback=self.availableSlots
)
def availableSlots(self, response):
def getDate(daySelector):
return daySelector.css("td.txtbold::text").extract_first()
def getSessions(daySelector):
return daySelector.css("input[type='checkbox']::attr(value)").extract()
#def getSessionNumber(daySelector):
#return int((daySelector.css("input[type='checkbox']::attr(id)").extract()).split("_")[1]) + 1
if "All sessions timeout after 20 minutes of inactivity." in response.body.decode("utf-8"):
print("Timed out, logging in again.")
return scrapy.Request('https://www.bbdc.sg/bbdc/bbdc_web/newheader.asp', dont_filter=True,
callback=self.parse)
# check if there are any available slots
filename = 'response.html'
_f = 'booked.log'
with open(filename, 'wb') as f:
f.write(response.body)
# lazy blacklist
blacklist = [u'09/06/2019']
if "There is no more slots available. Please select another schedule" in response.body.decode("utf-8"):
print("There are no slots at the moment that matches your criteria.")
return scrapy.Request("https://www.bbdc.sg/bbdc/b-3c-pLessonBooking.asp?limit=pl", dont_filter=True,
callback=self.bookingPage)
# there are available slots here - now let's book it
# iterate through each day
days = response.css("tr[bgcolor='#FFFFFF']") # this happens to be a unique identifier for each row aka day
dates = map(getDate, days)
sessions = map(getSessions, days)
#session_numbers = map(getSessionNumber, days)
bookingDates = []
submitSlots = []
bookingSessionNumbers = []
        for date, session in zip(dates, sessions):
date_format = "%d/%m/%Y"
dateObj = datetime.strptime(date, date_format)
delta = dateObj - datetime.today()
global checkWeekday
with open(_f, 'a') as f:
f.write("{} in blacklist: {}\n".format(date, date in blacklist))
if delta.days > daysCapWeekday and checkWeekday:
continue
elif delta.days > daysCapWeekend and not checkWeekday:
continue
elif session in bookedSlots:
continue
elif date in blacklist:
continue
bookingDates.extend(date)
submitSlots.extend(session) # i'm just going to ignore consecutive bookings
bookedSlots.extend(session)
#bookingSessionNumbers.extend(s_number)
if len(submitSlots) == 0:
print("There are no slots at the moment that matches your criteria.")
return scrapy.Request("https://www.bbdc.sg/bbdc/b-3c-pLessonBooking.asp?limit=pl", dont_filter=True,
callback=self.bookingPage)
# print("Booking the following slot(s): ")
mailer = MailSender(smtphost ="smtp.gmail.com", smtpport=587, smtpuser = "<EMAIL>", smtppass="<PASSWORD>" )
# send_notification(submitSlots[0])
if len(bookingDates) == 1:
message = str(bookingDates[0])
else:
message = str(bookingDates)
mailer.send(to=["<EMAIL>"], subject="booking", body = "greetings nicole jiejie OwO we made a bookie wookie on {}!! \n This is a computer generated message. No signature is required.".format(message))
#print(submitSlots)
        with open(_f, 'a') as gf:
gf.write('{} Trying to book: {} session {} \n \n'.format(datetime.now() , message, bookingSessionNumbers ))
#send_notification()
#return scrapy.Request("https://www.bbdc.sg/bbdc/b-3c-pLessonBooking.asp?limit=pl", dont_filter=True, callback=self.bookingPage)
# not actually book anything
return scrapy.FormRequest.from_response(
response,
formname='myform',
formdata={
'slot': submitSlots
},
dont_filter=True,
callback=self.bookingConfirm
)
def bookingConfirm(self, response):
if "All sessions timeout after 20 minutes of inactivity." in response.body.decode("utf-8"):
print("Timed out, logging in again.")
return scrapy.Request('https://www.bbdc.sg/bbdc/bbdc_web/newheader.asp', dont_filter=True,
callback=self.parse)
return scrapy.FormRequest.from_response(
response,
callback=self.bookingConfirmed,
dont_filter=True
)
def bookingConfirmed(self, response):
if "All sessions timeout after 20 minutes of inactivity." in response.body.decode("utf-8"):
print("Timed out, logging in again.")
return scrapy.Request('https://www.bbdc.sg/bbdc/bbdc_web/newheader.asp', dont_filter=True,
callback=self.parse)
if "You have insufficient fund in your account. Please top up your account." in response.body.decode("utf-8"):
print(
"Looks like you have no more money in your account. Please put in some more money or I can't book anything.")
return
return scrapy.Request("https://www.bbdc.sg/bbdc/b-3c-pLessonBooking.asp?limit=pl", dont_filter=True,
callback=self.bookingPage)
```
|
{
"source": "jerrylee1697/ZotClicker",
"score": 3
}
|
#### File: jerrylee1697/ZotClicker/anteater.py
```python
import pygame
import sys
from pygame.locals import *
class Anteater:
def __init__(self, image):
self.image = image
def createA(self):
self.ant_rect= Rect(15, 200, self.image.get_width(), self.image.get_height())
def move(self):
self.ant_rect= Rect(5, 200, self.image.get_width(), self.image.get_height())
def remove(self):
self.ant_rect= Rect(20, 200, self.image.get_width(), self.image.get_height())
```
|
{
"source": "jerrylei98/Dailydos",
"score": 3
}
|
#### File: jerrylei98/Dailydos/tasks_utils.py
```python
import sqlite3
def get_full_sql(user):
conn = sqlite3.connect("tasks.db")
c = conn.cursor()
temp = []
    for row in c.execute('SELECT * FROM tasks WHERE email = ?', (user,)):
temp.append(row)
conn.close()
return temp
def get_tasks(user):
temp = get_full_sql(user)
    return [row[1] for row in temp]
def remove_tasks(task_list):
if len(task_list) > 0:
conn = sqlite3.connect("tasks.db")
c = conn.cursor()
for item in task_list:
            c.execute('DELETE FROM tasks WHERE task = ?', (item,))
conn.commit()
conn.close()
def clear_tasks(user):
conn = sqlite3.connect("tasks.db")
c = conn.cursor()
    c.execute('DELETE FROM tasks WHERE email = ?', (user,))
conn.commit()
conn.close()
#temp = ['cheese', 'try 3', 'try again 217']
#remove_tasks(temp)
#print get_tasks("<EMAIL>")
```
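A quick usage sketch (hypothetical: assumes a `tasks.db` with a `tasks(email, task)` table, which is the schema the queries above imply):
```python
import sqlite3
from tasks_utils import get_tasks, remove_tasks

conn = sqlite3.connect("tasks.db")
conn.execute("CREATE TABLE IF NOT EXISTS tasks(email TEXT, task TEXT)")
conn.execute("INSERT INTO tasks VALUES(?, ?)", ("user@example.com", "buy milk"))
conn.commit()
conn.close()

print(get_tasks("user@example.com"))  # ['buy milk']
remove_tasks(["buy milk"])            # deletes the task row again
```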
#### File: site-packages/pymongo/monitoring.py
```python
import sys
import traceback
from collections import namedtuple, Sequence
_Listeners = namedtuple('Listeners', ('command_listeners',))
_LISTENERS = _Listeners([])
class CommandListener(object):
"""Abstract base class for command listeners."""
def started(self, event):
"""Abstract method to handle CommandStartedEvent.
:Parameters:
- `event`: An instance of :class:`CommandStartedEvent`
"""
raise NotImplementedError
def succeeded(self, event):
"""Abstract method to handle CommandSucceededEvent.
:Parameters:
- `event`: An instance of :class:`CommandSucceededEvent`
"""
raise NotImplementedError
def failed(self, event):
"""Abstract method to handle CommandFailedEvent.
:Parameters:
- `event`: An instance of :class:`CommandFailedEvent`
"""
raise NotImplementedError
def _to_micros(dur):
"""Convert duration 'dur' to microseconds."""
if hasattr(dur, 'total_seconds'):
return int(dur.total_seconds() * 10e5)
# Python 2.6
return dur.microseconds + (dur.seconds + dur.days * 24 * 3600) * 1000000
def _validate_event_listeners(option, listeners):
"""Validate event listeners"""
if not isinstance(listeners, Sequence):
raise TypeError("%s must be a list or tuple" % (option,))
for listener in listeners:
if not isinstance(listener, CommandListener):
raise TypeError("Only subclasses of "
"pymongo.monitoring.CommandListener are supported")
return listeners
def register(listener):
"""Register a global event listener.
:Parameters:
- `listener`: A subclass of :class:`CommandListener`.
"""
_validate_event_listeners('listener', [listener])
_LISTENERS.command_listeners.append(listener)
def _handle_exception():
"""Print exceptions raised by subscribers to stderr."""
# Heavily influenced by logging.Handler.handleError.
# See note here:
# https://docs.python.org/3.4/library/sys.html#sys.__stderr__
if sys.stderr:
einfo = sys.exc_info()
try:
traceback.print_exception(einfo[0], einfo[1], einfo[2],
None, sys.stderr)
except IOError:
pass
finally:
del einfo
# Note - to avoid bugs from forgetting which of these is all lowercase and
# which are camelCase, and at the same time avoid having to add a test for
# every command, use all lowercase here and test against command_name.lower().
_SENSITIVE_COMMANDS = set(
["authenticate", "saslstart", "saslcontinue", "getnonce", "createuser",
"updateuser", "copydbgetnonce", "copydbsaslstart", "copydb"])
class _CommandEvent(object):
"""Base class for command events."""
__slots__ = ("__cmd_name", "__rqst_id", "__conn_id", "__op_id")
def __init__(self, command_name, request_id, connection_id, operation_id):
self.__cmd_name = command_name
self.__rqst_id = request_id
self.__conn_id = connection_id
self.__op_id = operation_id
@property
def command_name(self):
"""The command name."""
return self.__cmd_name
@property
def request_id(self):
"""The request id for this operation."""
return self.__rqst_id
@property
def connection_id(self):
"""The address (host, port) of the server this command was sent to."""
return self.__conn_id
@property
def operation_id(self):
"""An id for this series of events or None."""
return self.__op_id
class CommandStartedEvent(_CommandEvent):
"""Event published when a command starts.
:Parameters:
- `command`: The command document.
- `database_name`: The name of the database this command was run against.
- `request_id`: The request id for this operation.
- `connection_id`: The address (host, port) of the server this command
was sent to.
- `operation_id`: An optional identifier for a series of related events.
"""
__slots__ = ("__cmd", "__db")
def __init__(self, command, database_name, *args):
if not command:
raise ValueError("%r is not a valid command" % (command,))
# Command name must be first key.
command_name = next(iter(command))
super(CommandStartedEvent, self).__init__(command_name, *args)
if command_name.lower() in _SENSITIVE_COMMANDS:
self.__cmd = {}
else:
self.__cmd = command
self.__db = database_name
@property
def command(self):
"""The command document."""
return self.__cmd
@property
def database_name(self):
"""The name of the database this command was run against."""
return self.__db
class CommandSucceededEvent(_CommandEvent):
"""Event published when a command succeeds.
:Parameters:
- `duration`: The command duration as a datetime.timedelta.
- `reply`: The server reply document.
- `command_name`: The command name.
- `request_id`: The request id for this operation.
- `connection_id`: The address (host, port) of the server this command
was sent to.
- `operation_id`: An optional identifier for a series of related events.
"""
__slots__ = ("__duration_micros", "__reply")
def __init__(self, duration, reply, command_name,
request_id, connection_id, operation_id):
super(CommandSucceededEvent, self).__init__(
command_name, request_id, connection_id, operation_id)
self.__duration_micros = _to_micros(duration)
if command_name.lower() in _SENSITIVE_COMMANDS:
self.__reply = {}
else:
self.__reply = reply
@property
def duration_micros(self):
"""The duration of this operation in microseconds."""
return self.__duration_micros
@property
def reply(self):
"""The server failure document for this operation."""
return self.__reply
class CommandFailedEvent(_CommandEvent):
"""Event published when a command fails.
:Parameters:
- `duration`: The command duration as a datetime.timedelta.
- `failure`: The server reply document.
- `command_name`: The command name.
- `request_id`: The request id for this operation.
- `connection_id`: The address (host, port) of the server this command
was sent to.
- `operation_id`: An optional identifier for a series of related events.
"""
__slots__ = ("__duration_micros", "__failure")
def __init__(self, duration, failure, *args):
super(CommandFailedEvent, self).__init__(*args)
self.__duration_micros = _to_micros(duration)
self.__failure = failure
@property
def duration_micros(self):
"""The duration of this operation in microseconds."""
return self.__duration_micros
@property
def failure(self):
"""The server failure document for this operation."""
return self.__failure
class _EventListeners(object):
"""Configure event listeners for a client instance.
Any event listeners registered globally are included by default.
:Parameters:
- `listeners`: A list of event listeners.
"""
def __init__(self, listeners):
self.__command_listeners = _LISTENERS.command_listeners[:]
if listeners is not None:
self.__command_listeners.extend(listeners)
self.__enabled_for_commands = bool(self.__command_listeners)
@property
def enabled_for_commands(self):
"""Are any CommandListener instances registered?"""
return self.__enabled_for_commands
@property
def event_listeners(self):
"""List of registered event listeners."""
return self.__command_listeners[:]
def publish_command_start(self, command, database_name,
request_id, connection_id, op_id=None):
"""Publish a CommandStartedEvent to all command listeners.
:Parameters:
- `command`: The command document.
- `database_name`: The name of the database this command was run
against.
- `request_id`: The request id for this operation.
- `connection_id`: The address (host, port) of the server this
command was sent to.
- `op_id`: The (optional) operation id for this operation.
"""
if op_id is None:
op_id = request_id
event = CommandStartedEvent(
command, database_name, request_id, connection_id, op_id)
for subscriber in self.__command_listeners:
try:
subscriber.started(event)
except Exception:
_handle_exception()
def publish_command_success(self, duration, reply, command_name,
request_id, connection_id, op_id=None):
"""Publish a CommandSucceededEvent to all command listeners.
:Parameters:
- `duration`: The command duration as a datetime.timedelta.
- `reply`: The server reply document.
- `command_name`: The command name.
- `request_id`: The request id for this operation.
- `connection_id`: The address (host, port) of the server this
command was sent to.
- `op_id`: The (optional) operation id for this operation.
"""
if op_id is None:
op_id = request_id
event = CommandSucceededEvent(
duration, reply, command_name, request_id, connection_id, op_id)
for subscriber in self.__command_listeners:
try:
subscriber.succeeded(event)
except Exception:
_handle_exception()
def publish_command_failure(self, duration, failure, command_name,
request_id, connection_id, op_id=None):
"""Publish a CommandFailedEvent to all command listeners.
:Parameters:
- `duration`: The command duration as a datetime.timedelta.
- `failure`: The server reply document or failure description
document.
- `command_name`: The command name.
- `request_id`: The request id for this operation.
- `connection_id`: The address (host, port) of the server this
command was sent to.
- `op_id`: The (optional) operation id for this operation.
"""
if op_id is None:
op_id = request_id
event = CommandFailedEvent(
duration, failure, command_name, request_id, connection_id, op_id)
for subscriber in self.__command_listeners:
try:
subscriber.failed(event)
except Exception:
_handle_exception()
```
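A short usage sketch for the listener machinery above (the logging listener is hypothetical; `register()` and the event classes are the ones defined in this module):
```python
import pymongo.monitoring as monitoring

class LogListener(monitoring.CommandListener):
    def started(self, event):
        print("started:", event.command_name, "request", event.request_id)
    def succeeded(self, event):
        print("ok:", event.command_name, event.duration_micros, "us")
    def failed(self, event):
        print("failed:", event.command_name, event.failure)

# Clients constructed after this call copy the global listener list,
# so every command they run is published to LogListener.
monitoring.register(LogListener())
```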
|
{
"source": "jerrylei98/home-projects",
"score": 3
}
|
#### File: python/api/imgur.py
```python
import json
import requests
import base64
#get client-id (Users/Jerry/Documents/api_keys/api_imgur.txt on macbook air)
fd = open("/Users/Jerry/Documents/api_keys/api_imgur.txt")
keys = fd.read().split('\n')
fd.close()
CLIENT_ID = keys[0]
CLIENT_SECRET = keys[1]
url_image = "https://api.imgur.com/3/image"
url_album = "https://api.imgur.com/3/album"
def upload_local(fpath):
r = requests.post(url_image, data = {'image': open(fpath,'rb').read(),
'type': 'file'},
headers = {'Authorization': 'Client-ID ' + CLIENT_ID})
data = r.json()
image_id = data.get('data').get('id')
return image_id
#print 'https://imgur.com/' + upload_local("/Users/Jerry/Documents/Wallpapers/SS.jpg")
```
|
{
"source": "jerrylei98/Reddit_Craigslist_Imgur_Bot",
"score": 3
}
|
#### File: jerrylei98/Reddit_Craigslist_Imgur_Bot/imgur.py
```python
import requests
import json
from api_keys import imgur_clientid, imgur_clientsecret
#(Client-ID)
CID = imgur_clientid
url_image = 'https://api.imgur.com/3/image'
url_album = 'https://api.imgur.com/3/album'
#returns image id
def upload_url(url):
r = requests.post(url_image, data = {'image': url,
'type': 'URL'},
headers = {'Authorization': 'Client-ID ' + CID})
data = r.json()
return data.get('data').get('id')
#returns image id
def upload_local(fpath):
    r = requests.post(url_image, data = {'image': open(fpath, 'rb').read(),
'type': 'file'},
headers = {'Authorization': 'Client-ID ' + CID})
data = r.json()
return data.get('data').get('id')
#returns album id
def make_album(image_ids, title):
r = requests.post(url_album, data = {'ids[]': image_ids.split(','),
'title': title},
headers = {'Authorization': 'Client-ID ' + CID})
data = r.json()
return data.get('data').get('id')
```
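A minimal usage sketch (assumes valid Imgur credentials in `api_keys.py`; the image URLs are placeholders):
```python
from imgur import upload_url, make_album

ids = [upload_url("https://example.com/cat.jpg"),
       upload_url("https://example.com/dog.jpg")]
# make_album expects a comma-separated id string, which it splits itself
album_id = make_album(",".join(ids), "My pets")
print("https://imgur.com/a/" + album_id)
```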
|
{
"source": "jerrylei98/Stock-Info-Bot",
"score": 3
}
|
#### File: jerrylei98/Stock-Info-Bot/av_interface.py
```python
import config
import requests
import json
# params = {
# "symbol": "F",
# "interval": 1,
# ...
# }
class av_interface(object):
def __init__(self, api_key):
self.api_key = api_key
def returnJsonContent(self, url):
response = requests.get(url)
if(response.ok):
return json.loads(response.content)
# -- TIME SERIES INTRADAY --
# This API returns intraday time series (timestamp, open, high, low, close, volume) of the equity specified, updated realtime.
#requires:
## symbol: "F" -- symbol is case insensitive
## interval (1,5,15,30,60 mins): 1,5,15,30,60
## output size(compact--100 pts/full): "compact", "full"
#default interval = 1min
def intraday(self, parameters):
symbol = parameters.get("symbol")
        if symbol is None:
return
interval = str(parameters.get("interval", 1)) + "min"
outputsize = parameters.get("outputsize", "compact")
query_url_base = "https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY"
query_url_params = "&symbol=%s&interval=%s&outputsize=%s&apikey=%s" % (symbol, interval, outputsize, self.api_key)
return self.returnJsonContent(query_url_base + query_url_params)
# -- TIME SERIES DAILY --
# This API returns daily time series (date, daily open, daily high, daily low, daily close, daily volume) of the global equity specified, covering up to 20 years of historical data.
#requires:
## symbol, outputsize
def daily(self, parameters):
symbol = parameters.get("symbol")
        if symbol is None:
return
outputsize = parameters.get("outputsize", "compact")
query_url_base = "https://www.alphavantage.co/query?function=TIME_SERIES_DAILY"
query_url_params = "&symbol=%s&outputsize=%s&apikey=%s" % (symbol, outputsize, self.api_key)
        return self.returnJsonContent(query_url_base + query_url_params)
# -- TIME SERIES WEEKLY --
# This API returns weekly time series (last trading day of each week, weekly open, weekly high, weekly low, weekly close, weekly volume) of the global equity specified, covering up to 20 years of historical data.
#requires:
## symbol
def weekly(self, parameters):
symbol = parameters.get("symbol")
        if symbol is None:
return
query_url_base = "https://www.alphavantage.co/query?function=TIME_SERIES_WEEKLY"
query_url_params = "&symbol=%s&apikey=%s" % (symbol, , self.api_key)
print self.returnJsonContent(query_url_base + query_url_params)
# -- TIME SERIES MONTHLY --
    # This API returns monthly time series (last trading day of each month, monthly open, monthly high, monthly low, monthly close, monthly volume) of the global equity specified, covering up to 20 years of historical data.
#requires:
## symbol
def monthly(self, parameters):
symbol = parameters.get("symbol")
        if symbol is None:
return
query_url_base = "https://www.alphavantage.co/query?function=TIME_SERIES_MONTHLY"
query_url_params = "&symbol=%s&apikey=%s" % (symbol, , self.api_key)
print self.returnJsonContent(query_url_base + query_url_params)
if __name__ == '__main__':
av = av_interface(config.ALPHA_VANTAGE_API_KEY)
parameters = {
"symbol": "ED",
"interval": 60,
"outputsize": "compact"
}
    print(av.daily(parameters))
```
|
{
"source": "jerrylei98/wouldyourather",
"score": 3
}
|
#### File: jerrylei98/wouldyourather/utils.py
```python
from os import path
import hashlib
import sqlite3
"""
=== Creates database.db if database is not in the main directory (runs when imported by __init__.py) ===
Table: game
#==|optA |optAnum |optB |optBnum |===#
#=====================================================#
#==|explore x |12344 |explore y |1222 |===#
#==|eat x |44442 |eat y |1233 |===#
"""
if not path.isfile("database.db"):
conn = sqlite3.connect("database.db") #creates database.db if doesn't exist
c = conn.cursor()
c.execute("CREATE TABLE IF NOT EXISTS login(user TEXT, password TEXT)") ##email, confirmation doable
c.execute("CREATE TABLE IF NOT EXISTS game(optA TEXT, optAnum INT, optB TEXT, optBnum INT)")
conn.commit()
conn.close()
"""
=== Hashes password with user ===
Input:
- user - string
- password - string
Returns: hashed password - string
"""
def saltnhash(user, password):
    return hashlib.md5((user + password).encode()).hexdigest()
"""
=== Checks if user is in table: login ===
=== Adds user to database.db with password hashed ===
Input:
- user - string
- password - string
Depends on fn: saltnhash(user,password)
Returns:
- True if user is added
- False if user is already taken
"""
def create_user(user, password):
conn = sqlite3.connect("database.db")
c = conn.cursor()
d = c.execute("SELECT * FROM login WHERE user = ?", (user,))
for row in d:
conn.commit()
conn.close()
return False
c.execute("INSERT INTO login VALUES(?,?)", (user, saltnhash(user,password),))
conn.commit()
conn.close()
return True
"""
===Used to authenticate user===
Input:
- user - string
- password - string
Depends on fn: saltnhash(user,password)
Returns:
- True if user+pass matches
- False if user+pass does not match
"""
def check_user(user, password):
conn = sqlite3.connect("database.db")
c = conn.cursor()
c = c.execute("SELECT * FROM login WHERE user = ? and password = ?", (user, saltnhash(user,password),))
for row in c:
conn.close()
return True
conn.close()
return False
#=== Adds a would you rather question into database ===#
def add_question(optA, optB):
conn = sqlite3.connect("database.db")
c = conn.cursor()
c = c.execute("INSERT INTO game VALUES(?,0,?,0)", (optA,optB,))
conn.commit()
conn.close()
return True
"""
Returns:
- how many rows are in table: game in database.db
"""
def num_rows():
conn = sqlite3.connect("database.db")
c = conn.cursor()
c = c.execute("SELECT COUNT(*) FROM game")
num = c.fetchone()[0]
conn.close()
return num
"""
=== Grabs row from table: game in database.db ===
Input:
- rowid - Integer
Returns:
- Dictionary containing: optionA,optionB,results of each
ex. {'optA': 'eat candy', 'optAres': 24, 'optB': 'eat chips', 'optBres': 17}
"""
def get_ques(rowid):
ret_dict = {}
conn = sqlite3.connect("database.db")
c = conn.cursor()
c = c.execute("SELECT * FROM game WHERE rowid = ?", (rowid,))
ret = c.fetchone()
ret_dict['optA'] = ret[0]
ret_dict['optAres'] = ret[1]
ret_dict['optB'] = ret[2]
ret_dict['optBres'] = ret[3]
conn.close()
return ret_dict
"""
=== Increments the option number from table: game in database.db ===
Input:
- rowid - Integer
- opt - Integer (0 for optA, 1 for optB)
"""
def update_row(rowid, opt):
if opt == 0:
conn = sqlite3.connect("database.db")
c = conn.cursor()
c.execute("UPDATE game SET optAnum = optAnum + 1 WHERE rowid = ?", (rowid,))
conn.commit()
conn.close()
elif opt == 1:
conn = sqlite3.connect("database.db")
c = conn.cursor()
c.execute("UPDATE game SET optBnum = optBnum + 1 WHERE rowid = ?", (rowid,))
conn.commit()
conn.close()
```
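A quick end-to-end sketch of the flow above (values are examples; importing the module bootstraps `database.db` if it is missing):
```python
import utils

utils.create_user("alice", "secret")        # True on first call, False if the name is taken
print(utils.check_user("alice", "secret"))  # True
utils.add_question("explore space", "explore the ocean")
rowid = utils.num_rows()                    # with no deletions, the newest question is the last row
utils.update_row(rowid, 0)                  # vote for option A
print(utils.get_ques(rowid))                # {'optA': 'explore space', 'optAres': 1, ...}
```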
|
{
"source": "JerryLeolfl/AdaAttN",
"score": 2
}
|
#### File: AdaAttN/models/adaattn_model.py
```python
import torch
import torch.nn as nn
import itertools
from .base_model import BaseModel
from . import networks
class AdaAttNModel(BaseModel):
@staticmethod
def modify_commandline_options(parser, is_train=True):
parser.add_argument('--image_encoder_path', required=True, help='path to pretrained image encoder')
parser.add_argument('--skip_connection_3', action='store_true',
help='if specified, add skip connection on ReLU-3')
parser.add_argument('--shallow_layer', action='store_true',
help='if specified, also use features of shallow layers')
if is_train:
parser.add_argument('--lambda_content', type=float, default=0., help='weight for L2 content loss')
parser.add_argument('--lambda_global', type=float, default=10., help='weight for L2 style loss')
parser.add_argument('--lambda_local', type=float, default=3.,
help='weight for attention weighted style loss')
return parser
def __init__(self, opt):
BaseModel.__init__(self, opt)
image_encoder = nn.Sequential(
nn.Conv2d(3, 3, (1, 1)),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(3, 64, (3, 3)),
nn.ReLU(), # relu1-1
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(64, 64, (3, 3)),
nn.ReLU(), # relu1-2
nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(64, 128, (3, 3)),
nn.ReLU(), # relu2-1
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(128, 128, (3, 3)),
nn.ReLU(), # relu2-2
nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(128, 256, (3, 3)),
nn.ReLU(), # relu3-1
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, (3, 3)),
nn.ReLU(), # relu3-2
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, (3, 3)),
nn.ReLU(), # relu3-3
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, (3, 3)),
nn.ReLU(), # relu3-4
nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 512, (3, 3)),
nn.ReLU(), # relu4-1, this is the last layer used
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, (3, 3)),
nn.ReLU(), # relu4-2
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, (3, 3)),
nn.ReLU(), # relu4-3
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, (3, 3)),
nn.ReLU(), # relu4-4
nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, (3, 3)),
nn.ReLU(), # relu5-1
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, (3, 3)),
nn.ReLU(), # relu5-2
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, (3, 3)),
nn.ReLU(), # relu5-3
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, (3, 3)),
nn.ReLU() # relu5-4
)
image_encoder.load_state_dict(torch.load(opt.image_encoder_path))
enc_layers = list(image_encoder.children())
enc_1 = nn.DataParallel(nn.Sequential(*enc_layers[:4]).to(opt.gpu_ids[0]), opt.gpu_ids)
enc_2 = nn.DataParallel(nn.Sequential(*enc_layers[4:11]).to(opt.gpu_ids[0]), opt.gpu_ids)
enc_3 = nn.DataParallel(nn.Sequential(*enc_layers[11:18]).to(opt.gpu_ids[0]), opt.gpu_ids)
enc_4 = nn.DataParallel(nn.Sequential(*enc_layers[18:31]).to(opt.gpu_ids[0]), opt.gpu_ids)
enc_5 = nn.DataParallel(nn.Sequential(*enc_layers[31:44]).to(opt.gpu_ids[0]), opt.gpu_ids)
self.image_encoder_layers = [enc_1, enc_2, enc_3, enc_4, enc_5]
for layer in self.image_encoder_layers:
for param in layer.parameters():
param.requires_grad = False
self.visual_names = ['c', 'cs', 's']
self.model_names = ['decoder', 'transformer']
parameters = []
self.max_sample = 64 * 64
if opt.skip_connection_3:
adaattn_3 = networks.AdaAttN(in_planes=256, key_planes=256 + 128 + 64 if opt.shallow_layer else 256,
max_sample=self.max_sample)
self.net_adaattn_3 = networks.init_net(adaattn_3, opt.init_type, opt.init_gain, opt.gpu_ids)
self.model_names.append('adaattn_3')
parameters.append(self.net_adaattn_3.parameters())
if opt.shallow_layer:
channels = 512 + 256 + 128 + 64
else:
channels = 512
transformer = networks.Transformer(
in_planes=512, key_planes=channels, shallow_layer=opt.shallow_layer)
decoder = networks.Decoder(opt.skip_connection_3)
self.net_decoder = networks.init_net(decoder, opt.init_type, opt.init_gain, opt.gpu_ids)
self.net_transformer = networks.init_net(transformer, opt.init_type, opt.init_gain, opt.gpu_ids)
parameters.append(self.net_decoder.parameters())
parameters.append(self.net_transformer.parameters())
self.c = None
self.cs = None
self.s = None
self.s_feats = None
self.c_feats = None
self.seed = 6666
if self.isTrain:
self.loss_names = ['content', 'global', 'local']
self.criterionMSE = torch.nn.MSELoss().to(self.device)
self.optimizer_g = torch.optim.Adam(itertools.chain(*parameters), lr=opt.lr)
self.optimizers.append(self.optimizer_g)
self.loss_global = torch.tensor(0., device=self.device)
self.loss_local = torch.tensor(0., device=self.device)
self.loss_content = torch.tensor(0., device=self.device)
def set_input(self, input_dict):
self.c = input_dict['c'].to(self.device)
self.s = input_dict['s'].to(self.device)
self.image_paths = input_dict['name']
def encode_with_intermediate(self, input_img):
results = [input_img]
for i in range(5):
func = self.image_encoder_layers[i]
results.append(func(results[-1]))
return results[1:]
@staticmethod
def get_key(feats, last_layer_idx, need_shallow=True):
if need_shallow and last_layer_idx > 0:
results = []
_, _, h, w = feats[last_layer_idx].shape
for i in range(last_layer_idx):
results.append(networks.mean_variance_norm(nn.functional.interpolate(feats[i], (h, w))))
results.append(networks.mean_variance_norm(feats[last_layer_idx]))
return torch.cat(results, dim=1)
else:
return networks.mean_variance_norm(feats[last_layer_idx])
def forward(self):
self.c_feats = self.encode_with_intermediate(self.c)
self.s_feats = self.encode_with_intermediate(self.s)
if self.opt.skip_connection_3:
c_adain_feat_3 = self.net_adaattn_3(self.c_feats[2], self.s_feats[2], self.get_key(self.c_feats, 2, self.opt.shallow_layer),
self.get_key(self.s_feats, 2, self.opt.shallow_layer), self.seed)
else:
c_adain_feat_3 = None
cs = self.net_transformer(self.c_feats[3], self.s_feats[3], self.c_feats[4], self.s_feats[4],
self.get_key(self.c_feats, 3, self.opt.shallow_layer),
self.get_key(self.s_feats, 3, self.opt.shallow_layer),
self.get_key(self.c_feats, 4, self.opt.shallow_layer),
self.get_key(self.s_feats, 4, self.opt.shallow_layer), self.seed)
self.cs = self.net_decoder(cs, c_adain_feat_3)
def compute_content_loss(self, stylized_feats):
self.loss_content = torch.tensor(0., device=self.device)
if self.opt.lambda_content > 0:
for i in range(1, 5):
self.loss_content += self.criterionMSE(networks.mean_variance_norm(stylized_feats[i]),
networks.mean_variance_norm(self.c_feats[i]))
def compute_style_loss(self, stylized_feats):
self.loss_global = torch.tensor(0., device=self.device)
if self.opt.lambda_global > 0:
for i in range(1, 5):
s_feats_mean, s_feats_std = networks.calc_mean_std(self.s_feats[i])
stylized_feats_mean, stylized_feats_std = networks.calc_mean_std(stylized_feats[i])
self.loss_global += self.criterionMSE(
stylized_feats_mean, s_feats_mean) + self.criterionMSE(stylized_feats_std, s_feats_std)
self.loss_local = torch.tensor(0., device=self.device)
if self.opt.lambda_local > 0:
for i in range(1, 5):
c_key = self.get_key(self.c_feats, i, self.opt.shallow_layer)
s_key = self.get_key(self.s_feats, i, self.opt.shallow_layer)
s_value = self.s_feats[i]
b, _, h_s, w_s = s_key.size()
s_key = s_key.view(b, -1, h_s * w_s).contiguous()
if h_s * w_s > self.max_sample:
torch.manual_seed(self.seed)
index = torch.randperm(h_s * w_s).to(self.device)[:self.max_sample]
s_key = s_key[:, :, index]
style_flat = s_value.view(b, -1, h_s * w_s)[:, :, index].transpose(1, 2).contiguous()
else:
style_flat = s_value.view(b, -1, h_s * w_s).transpose(1, 2).contiguous()
b, _, h_c, w_c = c_key.size()
c_key = c_key.view(b, -1, h_c * w_c).permute(0, 2, 1).contiguous()
attn = torch.bmm(c_key, s_key)
# S: b, n_c, n_s
attn = torch.softmax(attn, dim=-1)
# mean: b, n_c, c
mean = torch.bmm(attn, style_flat)
# std: b, n_c, c
std = torch.sqrt(torch.relu(torch.bmm(attn, style_flat ** 2) - mean ** 2))
# mean, std: b, c, h, w
mean = mean.view(b, h_c, w_c, -1).permute(0, 3, 1, 2).contiguous()
std = std.view(b, h_c, w_c, -1).permute(0, 3, 1, 2).contiguous()
self.loss_local += self.criterionMSE(stylized_feats[i], std * networks.mean_variance_norm(self.c_feats[i]) + mean)
def compute_losses(self):
stylized_feats = self.encode_with_intermediate(self.cs)
self.compute_content_loss(stylized_feats)
self.compute_style_loss(stylized_feats)
self.loss_content = self.loss_content * self.opt.lambda_content
self.loss_local = self.loss_local * self.opt.lambda_local
self.loss_global = self.loss_global * self.opt.lambda_global
def optimize_parameters(self):
self.seed = int(torch.randint(10000000, (1,))[0])
self.forward()
self.optimizer_g.zero_grad()
self.compute_losses()
loss = self.loss_content + self.loss_global + self.loss_local
loss.backward()
self.optimizer_g.step()
```
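The local style loss above hinges on attention-weighted statistics of the style features: for every content position, a softmax attention over style positions yields a per-position mean and std. A standalone sketch of just that computation (shapes are illustrative, not tied to the model):
```python
import torch

def attn_mean_std(c_key, s_key, s_value):
    """Per-content-position mean/std of style features under softmax attention."""
    attn = torch.softmax(torch.bmm(c_key.transpose(1, 2), s_key), dim=-1)  # (b, n_c, n_s)
    style_flat = s_value.transpose(1, 2)                                   # (b, n_s, c_v)
    mean = torch.bmm(attn, style_flat)                                     # (b, n_c, c_v)
    # E[x^2] - E[x]^2, clamped at zero for numerical safety, as in the loss above
    std = torch.sqrt(torch.relu(torch.bmm(attn, style_flat ** 2) - mean ** 2))
    return mean, std

b, c_k, c_v, n_c, n_s = 2, 16, 32, 64, 64
mean, std = attn_mean_std(torch.randn(b, c_k, n_c),
                          torch.randn(b, c_k, n_s),
                          torch.randn(b, c_v, n_s))
print(mean.shape, std.shape)  # torch.Size([2, 64, 32]) for both
```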
|
{
"source": "jerryli27/Hypertune",
"score": 3
}
|
#### File: jerryli27/Hypertune/hyper_tune.py
```python
import ast
import errno
import importlib
import os
import time
from os.path import dirname
import tensorflow as tf
from bayes_opt import BayesianOptimization  # pip install bayesian-optimization
FLAGS = tf.flags.FLAGS
def _define_flags():
tf.app.flags.DEFINE_string('file_name', '',
'The base file name of the program for which you would like to tune hyperparameters. ')
tf.app.flags.DEFINE_string('params_and_constraints', '',
'The parameters and their constraints represented as a python string representation of '
'a list. The list items have format (param_name, lower_lim, upper_lim)')
tf.app.flags.DEFINE_string('constant_flags', '',
'The flags with constant value represented as a python string representation of '
'a list. The list items have format (param_name, constant_value)')
tf.app.flags.DEFINE_integer('init_points', 5,
'Number of randomly chosen points to sample the target function initially.')
tf.app.flags.DEFINE_integer('n_iter', 15,
'Total number of times the process is to repeated. ')
tf.app.flags.DEFINE_string('output_file_name', '',
'The file name to store the tune result.')
def _to_param_bound_dict(params):
ret = dict()
for param in params:
if len(param) != 3:
raise TypeError('Incorrect format for `params_and_constraints`')
param, lower, upper = param
if not isinstance(param, str) \
or not (isinstance(lower, int) or isinstance(lower, float)) \
or not (isinstance(upper, int) or isinstance(upper, float)):
raise TypeError('Incorrect format for `params_and_constraints`')
if lower >= upper:
raise Exception('For %s, lower_lim %f is greater than upper limit %f!' % (param, float(lower), float(upper)))
ret[param] = (lower, upper)
return ret
def touch_folder(file_path):
    # type: (str) -> None
"""Create a folder along with its parent folders recursively if they do not exist."""
# Taken from https://stackoverflow.com/questions/273192/how-can-i-create-a-directory-if-it-does-not-exist .
if not file_path.endswith('/'):
file_path = file_path + "/"
dn = dirname(file_path)
if dn != '':
try:
os.makedirs(dn)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def run_python(controller_module, param_val_dict, constant_flags):
"""
Given the python file name to run and its arguments, return the result as a string.
:param controller_module: the python module to tune
:param param_val_dict: a dictionary containing argument names and values
:return: result as a string.
"""
    for param, val in param_val_dict.items():
setattr(FLAGS, param, val) # Set tensorflow flags.
for param, val in constant_flags:
setattr(FLAGS, param, val)
ret = controller_module.hyper_tune()
return ret
def main(file_name, params_and_constraints, init_points, n_iter, output_file_name, constant_flags_str=''):
start_time = time.time()
controller_module = importlib.import_module(file_name)
# Because we've accessed the FLAGS, the __parsed is set to true, which disallows more flags to be added. We would
# like to override that to define new flags.
FLAGS.__dict__['__parsed'] = False
controller_module.define_flags()
param_bound_dict = _to_param_bound_dict(ast.literal_eval(params_and_constraints))
if constant_flags_str:
constant_flags = ast.literal_eval(constant_flags_str)
else:
constant_flags = tuple()
bo = BayesianOptimization(lambda **kw: run_python(controller_module, kw, constant_flags),
param_bound_dict)
bo.maximize(init_points=init_points, n_iter=n_iter)
end_time = time.time()
print('Finished tuning! It took %s seconds. The result is as follows: %s'
% (str(end_time - start_time), str(bo.res['max'])))
if output_file_name:
touch_folder(output_file_name)
bo.points_to_csv(output_file_name)
return bo.res['max']
if __name__ == '__main__':
_define_flags()
if not FLAGS.file_name:
raise IOError('Please input a file name (no extension)! Example: python hyper_tune.py --file_name="test"')
if not os.path.exists(FLAGS.file_name + '.py'):
raise IOError('File %s does not exist!' % FLAGS.file_name)
if not FLAGS.params_and_constraints:
raise AssertionError('You must input the parameters you are trying to tune.')
main(FLAGS.file_name, FLAGS.params_and_constraints, FLAGS.init_points, FLAGS.n_iter, FLAGS.output_file_name,
constant_flags_str=FLAGS.constant_flags)
```
#### File: jerryli27/Hypertune/hyper_tune_test_helper.py
```python
import numpy as np
import tensorflow as tf
FLAGS = tf.flags.FLAGS
MAX_X_VAL = 2.0
def define_flags():
tf.app.flags.DEFINE_float('x', 1.0,
'Test arg x')
tf.app.flags.DEFINE_float('y', 1.0,
'Test argument y')
def expression(x):
"""This is the mock performance of the model with respect to hyper-parameter x."""
return np.exp(-(x - 2) ** 2) # + np.exp(-(x - 6) ** 2 / 10) + 1 / (x ** 2 + 1)
def hyper_tune():
x = FLAGS.x
    ret = expression(x)
    print('Performance for %.3f is %.3f' % (x, ret))
    return ret
if __name__ == '__main__':
define_flags()
    print(hyper_tune())
```
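A hypothetical end-to-end run wiring the two files together (flag values are examples; assumes both files are importable from the working directory and tunes `x` in [0, 4] against the mock objective above):
```python
from hyper_tune import main

best = main(file_name='hyper_tune_test_helper',
            params_and_constraints="[('x', 0.0, 4.0)]",
            init_points=3, n_iter=5, output_file_name='')
print(best)  # expected to approach the optimum of exp(-(x - 2)**2) near x = 2
```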
|
{
"source": "jerryliang122/enterprise-wechat-pushpull",
"score": 3
}
|
#### File: jerryliang122/enterprise-wechat-pushpull/wechenpush.py
```python
import json,requests
import datetime
import os
def push(text):
    # Get the current timestamp
time = datetime.datetime.now().timestamp()
time = int(time)
info =os.path.exists('access_token')
if not info:
change = 7300
else:
filetime = os.stat('access_token')
filemtime = int(filetime.st_ctime)
change = time - filemtime
wecom_cid = ''
wecom_secret = ''
wecom_aid = ''
wecom_touid = ''
    # Fill in your enterprise WeChat app info above. Note: on the very first run, the freshly
    # created access_token file may be less than 7200 seconds old, so a new token is not fetched.
    # Check whether the cached token file is more than 2 hours old
if change >= 7200:
get_token_url = f"https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid={wecom_cid}&corpsecret={wecom_secret}"
response = requests.get(get_token_url).content
access_token = json.loads(response).get('access_token')
if info is True:
os.remove('access_token')
file = open('access_token','w+')
file.write(access_token)
file.close()
else:
access_token = open('access_token').read()
send_msg_url = f'https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token={access_token}'
data = {
"touser":wecom_touid,
"agentid":wecom_aid,
"msgtype":"text",
"text":{
"content":text
},
"duplicate_check_interval":600
}
response = requests.post(send_msg_url,data=json.dumps(data)).content
```
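Usage is a single call once the four `wecom_*` fields are filled in (a sketch; the message text is arbitrary):
```python
from wechenpush import push

push("Backup job finished OK")  # delivered as a WeChat Work text message
```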
|
{
"source": "Jerry-licious/jerry-licious.github.io",
"score": 3
}
|
#### File: jerry-licious.github.io/automatank/template.py
```python
from godot import *
from math import pi
class TankController:
def receive_initialise(self, controls):
# Spend your skill points here.
'''
list of points
-------
controls.bullet_speed_points: points for bullet speed
controls.bullet_health_points: points for bullet health
controls.tank_speed_points: points for tank speed
controls.tank_health_points: points for tank health
controls.regen_points: points for health regen
'''
controls.tank_health_points = 2 # change the number of points or the stat as you wish!
controls.tank_name = "Your name here" # you can change the name here as well
controls.hex_colour = "#ffffff" # you can change the color here
def receive_update(self, controls, state):
'''
controls
---------
controls.shoot(angle): shoot at an angle (in radians)
controls.accelerate(direction): accelerate in a direction (Vector2)
==================
state
---------
state.position: returns a Vector2 of your position (down and right are the positive directions)
state.velocity: returns a Vector2 of your velocity
state.health: return your current health
state.max_health: your maximum health
state.tanks: list of all tanks
state.bullets: list of all bullets
'''
# Control the tank here.
controls.shoot(pi / 2) # shoot 90 degree down
# replace the code here by whatever you want
```
|
{
"source": "JerryLife/PhyTestOnline",
"score": 3
}
|
#### File: PhyTestOnline/PhyTestOnline/PhyTestOnline.py
```python
import urllib
import urllib2
import re
import xlwt
START_PAGE = 0 # start page
ALL_PAGE = 82 # number of all pages
class PhyTestOnline(object):
"""
    This class crawls the HUST Physics Test Online site, downloading the answer keys
    and saving them as an Excel file (.xls). For convenience, just call PhyTestOnline.main()
    to run the whole procedure.
    Attention: this is a simple crawler exercise, intended only for study and communication.
    It must never be used in illegal or improper ways such as cheating; anyone who does so
    is responsible for their own behavior, not the author.
"""
def __init__(self, baseURL="http://172.16.31.10/admin/menu/query/queryandchoose/xianshi?paperNum=0"):
self.baseURL = baseURL
def getFirstPage(self, url=None):
if not url:
url = self.baseURL
try:
user_agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64) ' \
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36'
headers = {'User-Agent': user_agent}
data = urllib.urlencode({})
request = urllib2.Request(url, data, headers)
response = urllib2.urlopen(request, timeout=10)
content = response.read()
return content.decode('GBK')
except urllib2.URLError, e:
if hasattr(e, "reason"):
print "Fail to connect to PhysicsTestOnline:", e.reason
return None
def getText(self, url=None):
textModel = re.compile('<td width="146">([0-9]+\.jpg)')
ansModel = re.compile('<td width="41">(.*)</td>')
html = self.getFirstPage(url)
if not html:
return None
text = re.findall(textModel, html)
ans = re.findall(ansModel, html)
if len(text) == len(ans):
print "%d Got" % len(ans)
return zip(text, ans)
else:
print "Answer or picture lost!"
return None
def getAll(self, allPage=ALL_PAGE):
startPage = self.baseURL[0:-1]
ansList = []
for i in range(START_PAGE, allPage+1):
url = startPage + str(i)
ans = self.getText(url)
if not ans:
pass
else:
print "Page%d finished.%d%%" % (i, i*100/allPage)
ansList += ans
print "Program complete."
return ansList
def saveAns(self, ansList, fileName='D:\TestAnswer.xls'):
ans = xlwt.Workbook()
sheet = ans.add_sheet('Sheet1')
numOfProblems = len(ansList)
for i in range(numOfProblems):
sheet.write(i, 0, ansList[i][0])
sheet.write(i, 1, ansList[i][1])
print "Line %d saved.%d%% Finished" % (i+1, (i+1)*100/numOfProblems)
# need protect?
ans.protect = True
ans.wnd_protect = True
ans.obj_protect = True
ans.save(fileName)
print "All saved."
return None
def main(self):
ansList = self.getAll()
iSave = raw_input('Save now? y/n: ')
if iSave == 'y':
self.saveAns(ansList)
else:
return None
return True
ans = PhyTestOnline()
ans.main()
```
|
{
"source": "jerrylindahl/pira",
"score": 3
}
|
#### File: pira/src/MongoStore.py
```python
import datetime
from pymongo import MongoClient
class MongoStore:
def __init__(self):
self.client = MongoClient()
self.db = self.client.pira
# Get status from last run
self.status = self.get_status()
if self.status is None:
print("Empty database, starting new datastore.")
self.init_store()
self.status = self.get_status()
print(self.status)
def mongo(self):
c = self.db.issues
#key = c.insert({"id": "MFOL-123"})
#print(key)
i = c.find_one()
print(i)
def init_store(self):
self.status = {"last_run": datetime.datetime.now()}
s = self.db.status
s.insert(self.status)
def get_status(self):
s = self.db.status
return s.find_one()
```
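A quick interactive sketch (assumes a local MongoDB instance on the default port, and the import path implied by the file name):
```python
from MongoStore import MongoStore

store = MongoStore()       # bootstraps the status document on first run
print(store.get_status())  # e.g. {'_id': ObjectId(...), 'last_run': datetime(...)}
```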
|
{
"source": "JerryLingjieMei/ADEPT-Dataset-Release",
"score": 2
}
|
#### File: dataset/human/collect_results.py
```python
import argparse
import json
import os
import csv
from collections import defaultdict
from dataset.human.result_storage import ResultStorage, CASE_PAIRS, SHAPE_CATS, get_shapes_from_cat
from utils.io import read_serialized
from utils.constants import CONTENT_FOLDER
_prefix = "| negative log likelihood: "
_human_pairs = read_serialized(os.path.join(CONTENT_FOLDER, "dataset", "human", "pairs.json"))["origin"]
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--summary_folder", type=str)
parser.add_argument("--summary_folders", type=str, nargs="+")
parser.add_argument("--summary_file", type=str)
parser.add_argument("--violations", type=str)
parser.add_argument("--shape_cats", type=str)
parser.add_argument("--use_surprise_metric", type=int, default=True)
parser.add_argument("--output_folder", type=str)
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
max_scores = {}
if args.summary_folder is not None:
for file_name in os.listdir(args.summary_folder):
if file_name.endswith(".txt"):
with open(os.path.join(args.summary_folder, file_name)) as f:
s = f.readline()
s = s[len(_prefix):]
max_score = json.loads(s.replace("\'", "\""))["max"]
max_scores[file_name[:-4]] = max_score
if args.summary_folder.endswith("/"):
experiment = os.path.split(args.summary_folder[:-1])[-1]
else:
experiment = os.path.split(args.summary_folder)[-1]
if args.output_folder is None:
args.output_folder = os.path.join(args.summary_folder, "results")
elif args.summary_folders is not None:
max_scores = defaultdict(list)
for args.summary_folder in args.summary_folders:
for file_name in os.listdir(args.summary_folder):
if file_name.endswith(".txt"):
with open(os.path.join(args.summary_folder, file_name)) as f:
s = f.readline()
s = s[len(_prefix):]
max_score = json.loads(s.replace("\'", "\""))["max"]
max_scores[file_name[:-4]].append(max_score)
if args.summary_folder.endswith("/"):
experiment = os.path.split(args.summary_folder[:-1])[-1]
else:
experiment = os.path.split(args.summary_folder)[-1]
if args.output_folder is None:
args.output_folder = os.path.join(args.summary_folders[0], "results")
elif args.summary_file is not None:
max_scores = read_serialized(args.summary_file)
experiment = os.path.split(args.summary_file)[-1][:-5]
else:
raise FileNotFoundError("Should specific summary folder / file")
with open("{}{}_absolute.csv".format(
args.output_folder, experiment), "w")as f_absolute, open(
"{}/{}_relative.csv".format(
args.output_folder, experiment), "w")as f_relative:
absolute_writer = csv.DictWriter(f_absolute, fieldnames=["name", "all", *CASE_PAIRS],
dialect="excel-tab")
relative_writer = csv.DictWriter(f_relative, fieldnames=["name", "all", *CASE_PAIRS],
dialect="excel-tab")
absolute_writer.writeheader()
relative_writer.writeheader()
for i in range(2):
if i == 1:
scores = {k: v for k, v in max_scores.items() if k in _human_pairs}
experiment = experiment + "_on-human"
else:
scores = max_scores
max_storage = ResultStorage(scores, use_surprise_metric=args.use_surprise_metric)
absolute_score = dict(all=max_storage.get_absolute_accuracy())
for case in CASE_PAIRS:
absolute_score[case] = max_storage.get_absolute_accuracy(violations=case)
absolute_writer.writerow(dict(name=experiment, **absolute_score))
for shape in SHAPE_CATS:
absolute_score = dict(all=max_storage.get_absolute_accuracy(shape_cats=shape))
for case in CASE_PAIRS:
absolute_score[case] = max_storage.get_absolute_accuracy(violations=case, shape_cats=shape)
absolute_writer.writerow(dict(name=experiment + "_" + shape, **absolute_score))
relative_score = dict(all=max_storage.get_relative_accuracy())
for case in CASE_PAIRS:
relative_score[case] = max_storage.get_relative_accuracy(violations=case)
relative_writer.writerow(dict(name=experiment, **relative_score))
for shape in SHAPE_CATS:
relative_score = dict(all=max_storage.get_relative_accuracy(shape_cats=shape))
for case in CASE_PAIRS:
relative_score[case] = max_storage.get_relative_accuracy(violations=case, shape_cats=shape)
relative_writer.writerow(dict(name=experiment + "_" + shape, **relative_score))
```
#### File: dataset/human/result_storage.py
```python
from sklearn.metrics import roc_auc_score
import math
from dataset.human.make_pairs import CASE_PAIRS, SHAPE_CATS, get_shapes_from_cat
class ResultStorage(object):
def __init__(self, scores, use_surprise_metric=True):
"""
:param scores: a dict from case name to score
"""
self.scores = scores
for k, v in self.scores.items():
if not isinstance(v, list):
self.scores[k] = [v]
self.use_surprise_metric = use_surprise_metric
def get_relative_accuracy(self, violations=None, shape_cats=None):
if violations is None:
violations = list(CASE_PAIRS.keys())
elif isinstance(violations, str):
violations = [violations]
if shape_cats is None:
shape_cats = SHAPE_CATS
elif isinstance(shape_cats, str):
shape_cats = [shape_cats]
n_correct = 0
n_all = 0
for violation in violations:
case_index_pairs = CASE_PAIRS[violation]
for shape_cat in shape_cats:
shapes = get_shapes_from_cat(shape_cat)
for shape in shapes:
case, index = case_index_pairs[0]
try:
n_case_correct = 0
n_case = 0
for surprise_score in self.scores["human_{}_{}_{}".format(case, shape, index)]:
for case, index in case_index_pairs[1:]:
try:
for control_score in self.scores["human_{}_{}_{}".format(case, shape, index)]:
n_case += 1
if self.use_surprise_metric and control_score < surprise_score:
n_case_correct += 1
if self.use_surprise_metric and control_score == surprise_score:
n_case_correct += .5
if not self.use_surprise_metric and control_score > surprise_score:
n_case_correct += 1
if not self.use_surprise_metric and control_score == surprise_score:
n_case_correct += .5
except KeyError:
pass
if n_case != 0:
n_all += 1
n_correct += n_case_correct / n_case
except KeyError:
pass
return n_correct / n_all if n_all > 0 else math.nan
def get_absolute_accuracy(self, violations=None, shape_cats=None):
if violations is None:
violations = list(CASE_PAIRS.keys())
elif isinstance(violations, str):
violations = [violations]
if shape_cats is None:
shape_cats = SHAPE_CATS
elif isinstance(shape_cats, str):
shape_cats = [shape_cats]
labels = []
scores = []
for violation in violations:
case_index_pairs = CASE_PAIRS[violation]
for shape_cat in shape_cats:
shapes = get_shapes_from_cat(shape_cat)
for shape in shapes:
case, index = case_index_pairs[0]
try:
for surprise_score in self.scores["human_{}_{}_{}".format(case, shape, index)]:
if math.isinf(surprise_score):
surprise_score = 1000
labels.append(0)
scores.append(-surprise_score if self.use_surprise_metric else surprise_score)
for case, index in case_index_pairs[1:]:
try:
for control_score in self.scores["human_{}_{}_{}".format(case, shape, index)]:
if math.isinf(control_score):
control_score = 1000
labels.append(1)
scores.append(-control_score if self.use_surprise_metric else control_score)
except KeyError:
pass
except KeyError:
pass
try:
result = roc_auc_score(labels, scores)
except ValueError:
result = math.nan
return result
```
#### File: ADEPT-Dataset-Release/phys_sim/camera.py
```python
import pybullet as p
class Camera(object):
def __init__(self,
target_pos=(0, 0, 0),
pitch=-30.0,
yaw=60,
roll=0,
cam_dist=20,
width=480,
height=320,
up_axis=2,
near_plane=0.01,
far_plane=100,
fov=60):
self.target_pos = target_pos
self.pitch = pitch
self.yaw = yaw
self.roll = roll
self.cam_dist = cam_dist
self.width = width
self.height = height
self.up_axis = up_axis
self.near_plane = near_plane
self.far_plane = far_plane
self.fov = fov
self.view_mat = p.computeViewMatrixFromYawPitchRoll(target_pos, cam_dist, yaw, pitch, roll, up_axis)
aspect = width / height
self.proj_mat = p.computeProjectionMatrixFOV(fov, aspect, near_plane, far_plane)
def get_params(self):
params = {
'target_pos': self.target_pos,
'pitch': self.pitch,
'yaw': self.yaw,
'roll': self.roll,
'cam_dist': self.cam_dist,
'width': self.width,
'height': self.height,
'up_axis': self.up_axis,
'near_plane': self.near_plane,
'far_plane': self.far_plane,
'fov': self.fov
}
return params
def take_pic(self):
img_arr = p.getCameraImage(self.width, self.height, self.view_mat, self.proj_mat)
return img_arr[2]
def take_seg(self):
img_arr = p.getCameraImage(self.width, self.height, self.view_mat, self.proj_mat)
return img_arr[4]
```
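A minimal sketch showing the class in a headless pybullet session (parameter values are illustrative; the import path follows the file layout shown above):
```python
import pybullet as p
from phys_sim.camera import Camera

p.connect(p.DIRECT)                   # headless physics server
cam = Camera(width=320, height=240, cam_dist=10)
rgba = cam.take_pic()                 # RGBA pixels, height x width
seg = cam.take_seg()                  # per-pixel segmentation mask
p.disconnect()
```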
#### File: ADEPT-Dataset-Release/phys_sim/objects.py
```python
import pybullet as p
import re
import os
from phys_sim.convert_pattern import *
from utils.constants import OCCLUDER_HALF_WIDTH
from utils.shape_net import SHAPE_DIMENSIONS
class ObjectManager(object):
def __init__(self, config, obj_dir, num_steps):
self.obj_dir = obj_dir
self.config = config
self.num_steps = num_steps
self.plane_id, self.plane_visual_id = self.add_plane()
self.object_ids = []
self.desk_ids = []
self.disappear_time = []
self.appear_time = []
self.init_positions = []
for obj_params in self.config["objects"]:
self.object_ids.append(self.add_object(**obj_params))
self.num_link = 0
self.joint_patterns = []
self.occluder_info = self.add_occluders_start()
if "occluders" in self.config:
for occluder_params in self.config["occluders"]:
self.add_occluder(**occluder_params)
if "desks" in self.config:
for desk_params in self.config["desks"]:
self.add_desk(**desk_params)
self.ground_id = self.add_occluders_end()
def add_plane(self):
"""Add a plane"""
plane_id = p.createCollisionShape(p.GEOM_MESH, fileName="plane.obj", meshScale=[100, 100, 100])
plane_visual_id = p.createVisualShape(p.GEOM_MESH, fileName="plane.obj", rgbaColor=(1, 1, 1, 1))
return plane_id, plane_visual_id
def add_object(self, shape, mass=1, init_pos=(0, 0, 1), init_orn=(0, 0, 0), scale=(1, 1, 1), init_v=(0, 0, 0),
lat_fric=0.,
restitution=.9, lin_damp=0, angular_damp=0, disappear_time=100000, appear_time=0, **kwargs):
"""
        Create a pybullet object from a Wavefront .obj file and
        set up its initial parameters and physical properties.
"""
scale = [x * y for x, y in zip(scale, SHAPE_DIMENSIONS[shape])]
shape = "cube"
obj_path = os.path.join(self.obj_dir, "shapes", '%s.obj' % shape)
init_orn_quat = p.getQuaternionFromEuler(deg2rad(init_orn))
col_id = p.createCollisionShape(p.GEOM_MESH, fileName=obj_path, meshScale=scale)
obj_id = p.createMultiBody(mass, col_id, basePosition=init_pos, baseOrientation=init_orn_quat)
p.resetBaseVelocity(obj_id, linearVelocity=init_v)
p.changeDynamics(obj_id, -1, lateralFriction=lat_fric, restitution=restitution, linearDamping=lin_damp,
angularDamping=angular_damp)
self.init_positions.append(init_pos)
self.disappear_time.append(disappear_time)
self.appear_time.append(appear_time)
return obj_id
def add_occluders_start(self):
"""Before adding occluders, to connect occluders with ground"""
occluders_info = dict(baseCollisionShapeIndex=self.plane_id,
baseVisualShapeIndex=self.plane_visual_id,
basePosition=(0, 0, 0),
linkMasses=[],
linkCollisionShapeIndices=[],
linkVisualShapeIndices=[],
linkPositions=[], linkOrientations=[],
linkInertialFramePositions=[],
linkInertialFrameOrientations=[],
linkParentIndices=[],
linkJointTypes=[], linkJointAxis=[])
return occluders_info
def add_occluder(self, shape="cube", joint="revolute", mass=1, init_pos=(0, 0, 0), init_orn=(0, 0, 0),
scale=(.2, 4., 2.), joint_pattern=None, **kwargs):
"""Add an occluder with physical properties"""
obj_path = os.path.join(self.obj_dir, "shapes", '%s.obj' % shape)
init_orn_quat = p.getQuaternionFromEuler(deg2rad(init_orn))
col_id = p.createCollisionShape(p.GEOM_MESH, fileName=obj_path, meshScale=scale,
collisionFramePosition=(-scale[0], 0, scale[2]))
self.occluder_info["linkMasses"].append(mass)
self.occluder_info["linkCollisionShapeIndices"].append(col_id)
self.occluder_info["linkVisualShapeIndices"].append(col_id)
self.occluder_info["linkPositions"].append(init_pos)
self.occluder_info["linkOrientations"].append(init_orn_quat)
self.occluder_info["linkInertialFramePositions"].append((-scale[0], 0, scale[2]))
self.occluder_info["linkInertialFrameOrientations"].append((0, 0, 0, 1))
self.occluder_info["linkParentIndices"].append(0)
self.occluder_info["linkJointAxis"].append((0, 1, 0))
if joint == "revolute":
self.occluder_info["linkJointTypes"].append(p.JOINT_REVOLUTE)
if joint_pattern is None:
self.joint_patterns.append(np.zeros(self.num_steps))
else:
self.joint_patterns.append(convert_rot_patterns(joint_pattern))
elif joint == "prismatic":
self.occluder_info["linkJointTypes"].append(p.JOINT_PRISMATIC)
if joint_pattern is None:
self.joint_patterns.append(np.zeros(self.num_steps))
else:
self.joint_patterns.append(convert_trans_patterns(joint_pattern))
else:
raise NotImplementedError("Joint type not supported")
self.num_link += 1
def add_occluders_end(self):
"""After adding occluders, to connect occluders with ground"""
ground_id = p.createMultiBody(0., **self.occluder_info)
return ground_id
def add_desk(self, mass=100, init_pos=(0, 0, 0), init_orn=(0, 0, 0), scale=(1, 1, 1), **kwargs):
"""Add a desk, with scale[0], scale[1] being the half width of table,
scale[2] being the half height of the cubic trunk"""
        if init_orn != [0, 0, 0]:
            raise NotImplementedError("Only horizontal desks are supported, got init_orn=%s" % (init_orn,))
desk_id = []
for i in (-1, 1):
for j in (-1, 1):
loc = (init_pos[0] + i * (scale[0] - scale[2]), init_pos[1] + j * (scale[1] - scale[2]),
init_pos[2] + scale[2])
desk_id.append(self.add_object("cube", mass=mass, init_pos=loc, init_orn=(0, 0, 0), init_v=(0, 0, 0),
scale=(scale[2], scale[2], scale[2])))
desk_id.append(self.add_object("cube", mass=mass, init_pos=(
init_pos[0], init_pos[1], init_pos[2] + scale[2] * 2 + OCCLUDER_HALF_WIDTH),
init_orn=(90, 90, 0), init_v=(0, 0, 0),
scale=(OCCLUDER_HALF_WIDTH, scale[0], scale[1])))
self.desk_ids.append(desk_id)
def set_object_motion(self, obj_id, time):
"""Object may appear or disappear"""
loc, quat = p.getBasePositionAndOrientation(obj_id)
v, omega = p.getBaseVelocity(obj_id)
if time == 0 and self.appear_time[obj_id] != 0:
new_loc = loc[0] + 20 * (1 + obj_id), loc[1], loc[2]
p.resetBasePositionAndOrientation(obj_id, new_loc, quat)
p.resetBaseVelocity(obj_id, v, omega)
if time != 0 and self.appear_time[obj_id] == time:
p.resetBasePositionAndOrientation(obj_id, self.init_positions[obj_id], quat)
p.resetBaseVelocity(obj_id, v, omega)
if self.disappear_time[obj_id] == time:
new_loc = loc[0] + 20 * (1 + obj_id), loc[1], loc[2]
p.resetBasePositionAndOrientation(obj_id, new_loc, quat)
p.resetBaseVelocity(obj_id, v, omega)
def get_object_motion(self, obj_id):
"""Return the location, orientation, velocity and angular velocity of an object"""
loc, quat = p.getBasePositionAndOrientation(obj_id)
orn = p.getEulerFromQuaternion(quat)
v, omega = p.getBaseVelocity(obj_id)
loc, orn, v, omega = list(loc), list(orn), list(v), list(omega)
motion_dict = {
'location': loc,
'orientation': orn,
'velocity': v,
'angular_velocity': omega,
}
return motion_dict
def set_occluder_motion(self, link_id, time):
"""Set the rotation of the occluder to a specific rotation"""
p.resetJointState(self.ground_id, link_id, self.joint_patterns[link_id][time])
def get_occluder_motion(self, link_id):
"""Return the location, orientation, velocity and angular velocity of an occluder"""
loc, quat, _, _, _, _, v, omega = p.getLinkState(self.ground_id, link_id, computeLinkVelocity=True)
orn = p.getEulerFromQuaternion(quat)
loc, orn, v, omega = list(loc), list(orn), list(v), list(omega)
if orn[1] < 0:
loc[2] += 2 * np.sin(-orn[1]) * OCCLUDER_HALF_WIDTH
motion_dict = {
'location': loc,
'orientation': orn,
'velocity': v,
'angular_velocity': omega
}
return motion_dict
def get_desk_motion(self, desk_id):
"""Return the location, orientation, velocity and angular velocity of an table"""
desk_motion = []
for object_id in desk_id:
desk_motion.append(self.get_object_motion(object_id))
return desk_motion
def has_collision(self):
"""Check if collision happens which involves objects"""
for object_id in self.object_ids:
if len(p.getContactPoints(object_id)) > 1:
return True
elif len(p.getContactPoints(object_id)) == 1:
contact_point = p.getContactPoints(object_id)[0]
contact_normal = contact_point[7]
if abs(contact_normal[0]) > .1 or abs(contact_normal[1]) > .1:
return True
loc, quat = p.getBasePositionAndOrientation(object_id)
if -4 < loc[0] < -2.8:
return True
return False
```
#### File: ADEPT-Dataset-Release/utils/geometry.py
```python
import bpy
import mathutils
import math
from collections.abc import Iterable
import numpy as np
def deg2rad(degs):
"""Convert degree iterables to radian iterables"""
if isinstance(degs, Iterable):
rads = []
for a in degs:
rads.append(a * math.pi / 180)
else:
rads = degs * math.pi / 180
return rads
def reverse_xyz(t):
"""Point an 3d vector to the opposite direction"""
return [-t[0], -t[1], -t[2]]
def reverse_euler(t):
"""Point a xyz euler to the opposite direction"""
return [-t[2], -t[1], -t[0]]
def convert_euler(bullet_euler):
"""
    Convert Bullet radian Euler angles to Blender radian coordinates.
"""
return list(mathutils.Euler(bullet_euler, "ZYX"))
def convert_inverse_euler(blender_euler):
"""
    Convert Blender radian Euler angles back to Bullet radian coordinates.
"""
return list(mathutils.Euler(blender_euler, "ZYX"))
def get_retrospective_location(location, velocity, time):
"""Calculate the original location and velocity based on the one at time t"""
return [l - v * time for l, v in zip(location, velocity)]
def get_prospective_location(location, velocity, time):
"""Calculate the final location and velocity based on the one at time 0"""
return [l + v * time for l, v in zip(location, velocity)]
def get_speed(start, end, time):
return (end - start) / time
def random_spherical_point():
vec = np.random.randn(3)
vec /= np.linalg.norm(vec)
return vec.tolist()
```
|