filename | text
---|---|
the-stack_0_5374 | # Copyright (c) 2021 Cisco Systems, Inc. and its affiliates
# All rights reserved.
# Use of this source code is governed by a BSD 3-Clause License
# that can be found in the LICENSE file.
import pytest
from swagger_server.utils import get_simple_subject, SimpleSubjectType
from swagger_server.models import Subject, SimpleSubject, ComplexSubject, Aliases, Email, DID, Account, IssSub, Opaque, PhoneNumber, JwtID
@pytest.mark.parametrize("expected_result, class_to_search, subject", [
# test getting subject types that are present
[Email(email="[email protected]"), Email,
Subject.parse_obj({"format": "email", "email": "[email protected]"})],
[PhoneNumber(phone_number="+12223334444"), PhoneNumber,
Subject.parse_obj({"format": "phone_number", "phone_number": "+12223334444"})],
[IssSub(iss="http://issuer.example.com/", sub="145234573"), IssSub,
Subject.parse_obj({
"tenant": {"format": "iss_sub", "iss": "http://issuer.example.com/", "sub": "145234573"},
"user": {"format": "email", "email": "[email protected]"},
"application": {"format": "opaque", "id": "123456789"}
})],
[Opaque(id="123456789"), Opaque,
Subject.parse_obj({
"tenant": {"format": "iss_sub", "iss": "http://issuer.example.com/", "sub": "145234573"},
"user": {"format": "email", "email": "[email protected]"},
"application": {"format": "opaque", "id": "123456789"}
})],
[Account(uri="acct:[email protected]"), Account,
Subject.parse_obj({
"identifiers": [
{"format": "account", "uri": "acct:[email protected]"},
{"format": "did", "url": "did:example:123456/did/url/path?versionId=1"},
{"format": "email", "email": "[email protected]"},
{"format": "iss_sub", "iss": "http://issuer.example.com/", "sub": "145234573"},
]
})],
[DID(url="did:example:123456/did/url/path?versionId=1"), DID,
Subject.parse_obj({
"identifiers": [
{"format": "account", "uri": "acct:[email protected]"},
{"format": "did", "url": "did:example:123456/did/url/path?versionId=1"},
{"format": "email", "email": "[email protected]"},
{"format": "iss_sub", "iss": "http://issuer.example.com/", "sub": "145234573"},
]
})],
# test returning None when subject type is not present
[None, Account,
Subject.parse_obj({"format": "phone_number", "phone_number": "+12223334444"})],
[None, DID,
Subject.parse_obj({
"tenant": {"format": "iss_sub", "iss": "http://issuer.example.com/", "sub": "145234573"},
"user": {"format": "email", "email": "[email protected]"},
"application": {"format": "opaque", "id": "123456789"}
})],
[None, JwtID,
Subject.parse_obj({
"identifiers": [
{"format": "account", "uri": "acct:[email protected]"},
{"format": "did", "url": "did:example:123456/did/url/path?versionId=1"},
{"format": "email", "email": "[email protected]"},
{"format": "iss_sub", "iss": "http://issuer.example.com/", "sub": "145234573"},
]
})],
])
def test_get_simple_subject(
expected_result: SimpleSubjectType,
class_to_search: type,
subject: Subject
) -> None:
assert get_simple_subject(subject, class_to_search) == expected_result
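# Rough illustrative sketch (not the real swagger_server implementation) of the behaviour the
# cases above exercise: get_simple_subject(subject, cls) returns the first simple subject of
# type `cls` found inside a simple, complex, or aliased Subject, else None. The attribute
# traversal below ("identifiers", vars()) is an assumption added for illustration only.
def _get_simple_subject_sketch(subject_value, class_to_search):
    if isinstance(subject_value, class_to_search):
        return subject_value
    # Aliases carry a list of identifiers; complex subjects carry named members (assumed layout).
    children = getattr(subject_value, "identifiers", None) or list(vars(subject_value).values())
    for child in children:
        if isinstance(child, class_to_search):
            return child
    return None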
|
the-stack_0_5375 | import sys, os
from dataset.image_base import *
set_names = {'all':['train','val','test'],'test':['test'],'val':['train','val','test']}
PW3D_PCsubset = {'courtyard_basketball_00':[200,280], 'courtyard_captureSelfies_00':[500,600],\
'courtyard_dancing_00':[60,370], 'courtyard_dancing_01':[60,270], 'courtyard_hug_00':[100,500], 'downtown_bus_00':[1620,1900]}
PW3D_OCsubset = ['courtyard_backpack','courtyard_basketball','courtyard_bodyScannerMotions','courtyard_box','courtyard_golf','courtyard_jacket',\
'courtyard_laceShoe','downtown_stairs','flat_guitar','flat_packBags','outdoors_climbing','outdoors_crosscountry','outdoors_fencing','outdoors_freestyle',\
'outdoors_golf','outdoors_parcours','outdoors_slalom']
PW3D_NOsubset = {}
class PW3D(Image_base):
def __init__(self,train_flag = False, split='train', mode='vibe', regress_smpl=True, **kwargs):
#if train_flag:
# mode, split, regress_smpl = ['normal', 'train', True]
super(PW3D,self).__init__(train_flag,regress_smpl=regress_smpl)
self.data_folder = os.path.join(self.data_folder,'3DPW/')
self.data3d_dir = os.path.join(self.data_folder,'sequenceFiles')
self.image_dir = os.path.join(self.data_folder,'imageFiles')
self.mode = mode
self.split = split
self.regress_smpl = regress_smpl
self.val_sample_ratio = 5
self.scale_range = [1.56,1.8]
self.dataset_name = {'PC':'pw3d_pc', 'NC':'pw3d_nc','OC':'pw3d_oc','vibe':'pw3d_vibe', 'normal':'pw3d_normal'}[mode]
logging.info('Start loading 3DPW data.')
if mode in ['normal','PC']:
logging.info('Loading 3DPW in {} mode, split {}'.format(self.mode,self.split))
self.joint_mapper = constants.joint_mapping(constants.COCO_18,constants.SMPL_ALL_54)
self.joint3d_mapper = constants.joint_mapping(constants.SMPL_24,constants.SMPL_ALL_54)
self.annots_path = os.path.join(self.data_folder,'annots.npz')
if not os.path.exists(self.annots_path):
self.pack_data()
self.load_annots()
elif mode in ['vibe','NC','OC']:
logging.info('Loading 3DPW in VIBE mode, split {}'.format(self.split))
self.annots_path = os.path.join(self.data_folder,'vibe_db')
self.joint_mapper = constants.joint_mapping(constants.LSP_14,constants.SMPL_ALL_54)
self.joint3d_mapper = constants.joint_mapping(constants.LSP_14,constants.SMPL_ALL_54)
self.regress_smpl = False
self.load_vibe_annots()
else:
logging.info('3DPW loading mode is not recognized, please use the normal / vibe mode')
raise NotImplementedError
if self.split=='val':
self.file_paths = self.file_paths[::self.val_sample_ratio]
if mode in ['vibe','NC','OC']:
self.root_inds = [constants.SMPL_ALL_54['R_Hip'], constants.SMPL_ALL_54['L_Hip']]
elif mode in ['PC', 'normal']:
self.root_inds = [constants.SMPL_ALL_54['Pelvis_SMPL']]
if self.regress_smpl:
self.smplr = SMPLR(use_gender=True)
logging.info('3DPW dataset {} split total {} samples, loading mode {}'.format(self.split ,self.__len__(), self.mode))
def __len__(self):
return len(self.file_paths)
def load_PC_annots(self):
annots = np.load(self.annots_path,allow_pickle=True)
params = annots['params'][()]
kp3ds = annots['kp3d'][()]
kp2ds = annots['kp2d'][()]
self.annots = {}
video_names = list(params.keys())
for video_name in video_names:
for person_id in range(len(kp3ds[video_name])):
frame_range = PW3D_PCsubset[video_name.strip('.pkl')]
for frame_id in range(frame_range[0],frame_range[1]):
name = '{}_{}'.format(video_name.strip('.pkl'),frame_id)
kp3d = kp3ds[video_name][person_id][frame_id]
kp2d = kp2ds[video_name][person_id][frame_id]
pose_param = params[video_name]['poses'][person_id][frame_id]
beta_param = params[video_name]['betas'][person_id]
if name not in self.annots:
self.annots[name] = []
self.annots[name].append([video_name.strip('.pkl'), person_id, frame_id, kp2d.T, kp3d, pose_param, beta_param])
self.file_paths = list(self.annots.keys())
def reset_dataset_length_to_target_person_number(self):
single_person_file_paths = []
for name in self.file_paths:
for person_id, annot in enumerate(self.annots[name]):
single_person_key = '{}-{}'.format(name, person_id)
single_person_file_paths.append(single_person_key)
self.annots[single_person_key]=[annot]
#del self.annots[name]
self.file_paths = single_person_file_paths
def get_image_info(self, index):
annots = self.annots[self.file_paths[index%len(self.file_paths)]]
subject_ids, genders, kp2ds, kp3ds, params, bbox, valid_mask_2d, valid_mask_3d = [[] for i in range(8)]
for inds, annot in enumerate(annots):
video_name, gender, person_id, frame_id, kp2d, kp3d, pose_param, beta_param = annot
subject_ids.append(person_id)
genders.append(gender)
if not self.regress_smpl:
kp3d = self.map_kps(kp3d, self.joint3d_mapper)
kp3ds.append(kp3d)
params.append(np.concatenate([pose_param[:66], beta_param[:10]]))
kp2d_gt = self.map_kps(kp2d, self.joint_mapper)
kp2ds.append(kp2d_gt)
valid_mask_2d.append([True,False,False])
valid_mask_3d.append([True,True,True,True])
kp2ds, kp3ds, params = np.array(kp2ds), np.array(kp3ds), np.array(params)
valid_mask_2d, valid_mask_3d = np.array(valid_mask_2d), np.array(valid_mask_3d)
if self.regress_smpl:
kp3ds = []
poses, betas = np.concatenate([params[:,:-10], np.zeros((len(params),6))], 1),params[:,-10:]
for pose, beta, gender in zip(poses, betas, genders):
smpl_outs = self.smplr(pose, beta, gender)
kp3ds.append(smpl_outs['j3d'].numpy())
kp3ds = np.concatenate(kp3ds, 0)
imgpath = os.path.join(self.image_dir,video_name,'image_{:05}.jpg'.format(frame_id))
image = cv2.imread(imgpath)[:,:,::-1].copy()
root_trans = kp3ds[:,self.root_inds].mean(1)
valid_masks = np.array([self._check_kp3d_visible_parts_(kp3d) for kp3d in kp3ds])
kp3ds -= root_trans[:,None]
kp3ds[~valid_masks] = -2.
img_info = {'imgpath': imgpath, 'image': image, 'kp2ds': kp2ds, 'track_ids': subject_ids,\
'vmask_2d': valid_mask_2d, 'vmask_3d': valid_mask_3d,\
'kp3ds': kp3ds, 'params': params, 'img_size': image.shape[:2],'ds': self.dataset_name}
return img_info
def load_vibe_annots(self):
set_names = {'all':['train','val','test'],'train':['train'],'test':['test'],'val':['val']}
self.split_used = set_names[self.split]
self.annots = {}
for split in self.split_used:
db_file = os.path.join(self.annots_path,'3dpw_{}_db.pt'.format(split))
db = joblib.load(db_file)
vid_names = db['vid_name']
frame_ids = db['frame_id']
kp2ds, kp3ds, pose_params, beta_params, valids = db['joints2D'], db['joints3D'], db['pose'], db['shape'], db['valid']
if split=='train':
kp3ds = kp3ds[:,25:39]
for vid_name, frame_id, kp2d, kp3d, pose_param, beta_param, valid in zip(vid_names, frame_ids, kp2ds, kp3ds, pose_params, beta_params, valids):
if valid!=1:
continue
video_name, person_id = vid_name[:-2], int(vid_name[-1])
name = '{}_{}'.format(video_name,frame_id)
if name not in self.annots:
self.annots[name] = []
self.annots[name].append([video_name, None, person_id, frame_id, kp2d, kp3d, pose_param, beta_param])
self.file_paths = list(self.annots.keys())
if self.mode == 'NC':
logging.info('Convert to NC subset...')
file_paths = []
annots = {}
for key, annot in self.annots.items():
frame_id = key.split('_')[-1]
video_name = key.replace('_'+frame_id,'')
if video_name[:-3] not in PW3D_OCsubset:
if video_name not in PW3D_PCsubset:
file_paths.append(key)
annots[key] = annot
self.file_paths = file_paths
self.annots = annots
if self.mode == 'OC':
logging.info('Convert to OC subset...')
video_used = []
file_paths = []
annots = {}
for key, annot in self.annots.items():
frame_id = key.split('_')[-1]
video_name = key.replace('_'+frame_id,'')
if video_name[:-3] in PW3D_OCsubset:
if video_name not in video_used:
video_used.append(video_name)
file_paths.append(key)
annots[key] = annot
self.file_paths = file_paths
self.annots = annots
def load_annots(self):
set_names = {'train':['train'],'all':['train','validation','test'],'val':['validation'],'test':['test']}
split_used = set_names[self.split]
annots = np.load(self.annots_path,allow_pickle=True)
params = annots['params'][()]
kp3ds = annots['kp3d'][()]
kp2ds = annots['kp2d'][()]
self.annots = {}
video_names = list(params.keys())
for video_name in video_names:
valid_indices = params[video_name]['valid_indices']
genders = params[video_name]['genders']
for person_id, valid_index in enumerate(valid_indices):
for annot_id,frame_id in enumerate(valid_index):
split = params[video_name]['split']
if split not in split_used:
continue
name = '{}_{}'.format(video_name.strip('.pkl'),frame_id)
kp3d = kp3ds[video_name][person_id][annot_id]
kp2d = kp2ds[video_name][person_id][annot_id]
pose_param = params[video_name]['poses'][person_id][annot_id]
beta_param = params[video_name]['betas'][person_id]
gender = genders[person_id]
if name not in self.annots:
self.annots[name] = []
self.annots[name].append([video_name.strip('.pkl'), gender, person_id, frame_id, kp2d.T, kp3d, pose_param, beta_param])
self.file_paths = list(self.annots.keys())
# because VIBE removed the occluded subjects, we have to use the original GT data.
if self.mode == 'PC':
file_paths = []
annots = {}
for key, annot in self.annots.items():
frame_id = key.split('_')[-1]
video_name = key.replace('_'+frame_id,'')
if video_name in PW3D_PCsubset:
frame_range = PW3D_PCsubset[video_name]
if frame_range[0]<=int(frame_id)<frame_range[1]:
file_paths.append(key)
annots[key] = annot
self.file_paths = file_paths
self.annots = annots
def pack_data(self):
"""
Reads all the ground truth and prediction files and concatenates them.
:param paths_gt: all the paths corresponding to the ground truth - list of pkl files
:param paths_prd: all the paths corresponding to the predictions - list of pkl files
:return:
jp_pred: jointPositions Prediction. Shape N x 24 x 3
jp_gt: jointPositions ground truth. Shape: N x 24 x 3
mats_pred: Global rotation matrices predictions. Shape N x 24 x 3 x 3
mats_gt: Global rotation matrices ground truths. Shape N x 24 x 3 x 3
"""
# all ground truth smpl parameters / joint positions / rotation matrices
from evaluation.pw3d_eval.SMPL import SMPL
all_params, all_jp_gts, all_jp2d_gts, all_glob_rot_gts = {}, {}, {}, {}
seq = 0
num_jps_pred = 0
num_ors_pred = 0
paths_gt = glob.glob(os.path.join(self.data3d_dir,'*/*.pkl'))
smpl_model_genders = {'f':SMPL(center_idx=0, gender='f', model_root=args().smpl_model_path),\
'm':SMPL(center_idx=0, gender='m', model_root=args().smpl_model_path) }
# construct the data structures -
for path_gt in paths_gt:
print('Processing: ', path_gt)
video_name = os.path.basename(path_gt)
seq = seq + 1
# Open pkl files
data_gt = pickle.load(open(path_gt, 'rb'), encoding='latin1')
split = path_gt.split('/')[-2]
genders = data_gt['genders']
all_params[video_name], all_jp_gts[video_name], all_jp2d_gts[video_name], all_glob_rot_gts[video_name] = {}, [], [], []
all_params[video_name]['split'] = split
all_params[video_name]['genders'] = genders
all_params[video_name]['poses'], all_params[video_name]['trans'], all_params[video_name]['valid_indices'] = [], [], []
all_params[video_name]['betas'] = np.array(data_gt['betas'])
for i in range(len(genders)):
# Get valid frames
# Frame with no zeros in the poses2d file and where campose_valid is True
poses2d_gt = data_gt['poses2d']
poses2d_gt_i = poses2d_gt[i]
camposes_valid = data_gt['campose_valid']
camposes_valid_i = camposes_valid[i]
valid_indices = check_valid_inds(poses2d_gt_i, camposes_valid_i)
all_jp2d_gts[video_name].append(poses2d_gt_i[valid_indices])
# Get the ground truth SMPL body parameters - poses, betas and translation parameters
pose_params = np.array(data_gt['poses'])
pose_params = pose_params[i, valid_indices, :]
shape_params = np.array(data_gt['betas'][i])
shape_params = np.expand_dims(shape_params, 0)
shape_params = shape_params[:, :10]
shape_params = np.tile(shape_params, (pose_params.shape[0], 1))
trans_params = np.array(data_gt['trans'])
trans_params = trans_params[i, valid_indices, :]
all_params[video_name]['trans'].append(trans_params)
all_params[video_name]['valid_indices'].append(valid_indices)
# Get the GT joint and vertex positions and the global rotation matrices
verts_gt, jp_gt, glb_rot_mats_gt = smpl_model_genders[genders[i]].update(pose_params, shape_params, trans_params)
# Apply Camera Matrix Transformation to ground truth values
cam_matrix = data_gt['cam_poses']
new_cam_poses = np.transpose(cam_matrix, (0, 2, 1))
new_cam_poses = new_cam_poses[valid_indices, :, :]
# we don't have the joint regressor for female/male model. So we can't regress all 54 joints from the mesh of female/male model.
jp_gt, glb_rot_mats_gt = apply_camera_transforms(jp_gt, glb_rot_mats_gt, new_cam_poses)
root_rotation_cam_transformed = transform_rot_representation(glb_rot_mats_gt[:,0], input_type='mat',out_type='vec')
pose_params[:,:3] = root_rotation_cam_transformed
all_params[video_name]['poses'].append(pose_params)
all_jp_gts[video_name].append(jp_gt)
all_glob_rot_gts[video_name].append(glb_rot_mats_gt)
np.savez(self.annots_path, params=all_params, kp3d=all_jp_gts, glob_rot=all_glob_rot_gts, kp2d=all_jp2d_gts)
def with_ones(data):
"""
Converts an array in 3d coordinates to 4d homogeneous coordinates
:param data: array of shape A x B x 3
:return ret_arr: array of shape A x B x 4, where the extra dimension is filled with ones
"""
ext_arr = np.ones((data.shape[0], data.shape[1], 1))
ret_arr = np.concatenate((data, ext_arr), axis=2)
return ret_arr
def apply_camera_transforms(joints, rotations, camera):
"""
Applies camera transformations to joint locations and rotations matrices
:param joints: B x 24 x 3
:param rotations: B x 24 x 3 x 3
:param camera: B x 4 x 4 - already transposed
:return: joints B x 24 x 3 joints after applying camera transformations
rotations B x 24 x 3 x 3 - rotations matrices after applying camera transformations
"""
joints = with_ones(joints) # B x 24 x 4
joints = np.matmul(joints, camera)[:, :, :3]
# multiply all rotation matrices with the camera rotation matrix
# transpose camera coordinates back
cam_new = np.transpose(camera[:, :3, :3], (0, 2, 1))
cam_new = np.expand_dims(cam_new, 1)
cam_new = np.tile(cam_new, (1, 24, 1, 1))
# B x 24 x 3 x 3
rotations = np.matmul(cam_new, rotations)
return joints, rotations
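# Illustrative shape check (not used by the dataset class): with identity camera poses,
# apply_camera_transforms should leave joints and rotations unchanged. The helper name and
# values below are assumptions added for illustration only.
def _example_apply_identity_camera():
    joints = np.random.rand(2, 24, 3)
    rotations = np.tile(np.eye(3), (2, 24, 1, 1))
    cameras = np.tile(np.eye(4), (2, 1, 1))  # already transposed, B x 4 x 4
    j, r = apply_camera_transforms(joints, rotations, cameras)
    assert j.shape == (2, 24, 3) and r.shape == (2, 24, 3, 3)
    return j, r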
def check_valid_inds(poses2d, camposes_valid):
"""
Computes the indices where further computations are required
:param poses2d: N x 18 x 3 array of 2d Poses
:param camposes_valid: N x 1 array of indices where camera poses are valid
:return: array of indices indicating frame ids in the sequence which are to be evaluated
"""
# find all indices in the N sequences where the sum of the 18x3 array is not zero
# N, numpy array
poses2d_mean = np.mean(np.mean(np.abs(poses2d), axis=2), axis=1)
poses2d_bool = poses2d_mean == 0
poses2d_bool_inv = np.logical_not(poses2d_bool)
# find all the indices where the camposes are valid
camposes_valid = np.array(camposes_valid).astype('bool')
final = np.logical_and(poses2d_bool_inv, camposes_valid)
indices = np.array(np.where(final == True)[0])
return indices
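# Worked example (illustrative only, not called anywhere): frame 1 is rejected because its
# 2D pose is all zeros, frame 2 because its camera pose is flagged invalid, so only frame 0 remains.
def _example_check_valid_inds():
    poses2d = np.zeros((3, 18, 3))
    poses2d[0] += 1.0
    poses2d[2] += 1.0
    return check_valid_inds(poses2d, np.array([1, 1, 0]))  # -> array([0])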
def read_keypoints(keypoint_fn, use_hands=True, use_face=True,
use_face_contour=False):
if not os.path.exists(keypoint_fn):
return None
with open(keypoint_fn) as keypoint_file:
data = json.load(keypoint_file)
keypoints = []
gender_pd = []
gender_gt = []
if len(data['people'])<1:
return None
for idx, person_data in enumerate(data['people']):
body_keypoints = np.array(person_data['pose_keypoints_2d'],
dtype=np.float32)
body_keypoints = body_keypoints.reshape([-1, 3])[:25]
keypoints.append(body_keypoints)
'''
left_hand_keyp = np.array(
person_data['hand_left_keypoints_2d'],
dtype=np.float32).reshape([-1, 3])
right_hand_keyp = np.array(
person_data['hand_right_keypoints_2d'],
dtype=np.float32).reshape([-1, 3])
hand_kp2d = np.concatenate([left_hand_keyp, right_hand_keyp],0)
# TODO: Make parameters, 17 is the offset for the eye brows,
# etc. 51 is the total number of FLAME compatible landmarks
face_keypoints = np.array(
person_data['face_keypoints_2d'],
dtype=np.float32).reshape([-1, 3])[17: 17 + 51, :]
contour_keyps = np.array(
[], dtype=body_keypoints.dtype).reshape(0, 3)
if use_face_contour:
contour_keyps = np.array(
person_data['face_keypoints_2d'],
dtype=np.float32).reshape([-1, 3])[:17, :]
keypoints.append([body_keypoints, hand_kp2d, face_keypoints])
'''
return keypoints
if __name__ == '__main__':
#dataset= PW3D(train_flag=False, split='test', mode='vibe')
dataset= PW3D(train_flag=True)
test_dataset(dataset,with_3d=True,with_smpl=True)
print('Done')
'''
if crop_eval:
self.reset_dataset_length_to_target_person_number()
self.multi_mode = False
self.openpose_dir = os.path.join(self.data_folder,'openpose_json')
input_cropped_img=False, bbox=None, use_openpose_center=False
self.input_cropped_img = input_cropped_img
self.use_bbox = True if bbox is not None else False
self.use_openpose_center = use_openpose_center
if self.input_cropped_img:
self.multi_mode = False
self.reset_dataset_length_to_target_person_number()
logging.info('loading 3DPW dataset using cropped image')
if self.use_bbox:
self.bboxes = np.load(bbox,allow_pickle=True)['bbox'][()]
logging.info('using bbox from ', bbox)
openpose_annot_path = self.openpose_dir.replace('_json', '_body_results.npz')
if not os.path.exists(openpose_annot_path):
self.pack_openpose_results(openpose_annot_path)
self.openpose_kp2ds = np.load(openpose_annot_path,allow_pickle=True)['annots'][()]
def get_image_info(self,index):
if not self.input_cropped_img:
multi_person_annots = self.annots[self.file_paths[index]]
return self.get_complete_image_info(multi_person_annots)
if self.input_cropped_img:
annot_id, person_id = self.file_paths[index].split('-')
multi_person_annots = self.annots[annot_id]
target_person_annots = multi_person_annots[int(person_id)]
video_name, frame_id = target_person_annots[0], target_person_annots[2]
if video_name in self.openpose_kp2ds:
if frame_id in self.openpose_kp2ds[video_name]:
self.multi_mode = False
return self.get_cropped_image_info(target_person_annots)
self.multi_mode = True
return self.get_complete_image_info(multi_person_annots)
def get_complete_image_info(self, multi_person_annots):
# if self.train_flag and self.train_with_openpose:
# video_name, frame_id = multi_person_annots[0][0], multi_person_annots[0][2]
# if frame_id in self.openpose_kp2ds[video_name]:
# full_kp2d = self.openpose_kp2ds[video_name][frame_id]
# else:
# return self.get_image_info(random.randint(0,len(self)))
# #full_kp2d = [self.map_kps(kp2d,maps=constants.body1352coco25) for kp2d in full_kp2d]
# subject_ids = np.arange(len(full_kp2d))
# kp3d_monos, params = None, None
subject_ids, full_kp2d, kp3d_monos, params, bbox = [[] for i in range(5)]
video_name, frame_id = multi_person_annots[0][0], multi_person_annots[0][2]
#if self.use_openpose_center:
# full_kp2d_op = np.array(self.openpose_kp2ds[video_name][frame_id])
# openpose_center = np.array([self._calc_center_(kp2d) for kp2d in full_kp2d_op])
for subject_id, annots in enumerate(multi_person_annots):
video_name, person_id, frame_id, kp2d, kp3d, pose_param, beta_param = annots
subject_ids.append(person_id)
kp3d_monos.append(kp3d)
params.append(np.concatenate([pose_param[:66], beta_param]))
kp2d_gt = self.map_kps(kp2d, self.joint_mapper)
#if self.use_openpose_center:
# kp2d_gt_center = self._calc_center_(kp2d_gt)
# min_dist_idx = np.argmin(np.linalg.norm(openpose_center-kp2d_gt_center[None],axis=-1))
# full_kp2d.append(full_kp2d_op[min_dist_idx])
full_kp2d.append(kp2d_gt)
imgpath = os.path.join(self.image_dir,video_name,'image_{:05}.jpg'.format(frame_id))
image = cv2.imread(imgpath)[:,:,::-1].copy()
info_2d = ('pw3d', imgpath, image, full_kp2d[np.random.randint(len(full_kp2d))], full_kp2d, None, subject_ids)
info_3d = ('pw3d', kp3d_monos, params, None)
return info_2d, info_3d
def get_cropped_image_info(self, target_person_annots):
video_name, person_id, frame_id, kp2d, kp3d, pose_param, beta_param = target_person_annots
kp2d_op = self.openpose_kp2ds[video_name][frame_id]
kp2d_op_matched = self.match_op_to_gt(kp2d_op,kp2d)
full_kp2d = [kp2d]
subject_ids = [person_id]
kp3d_monos, params = [kp3d], [np.concatenate([pose_param[:66], beta_param])]
imgpath = os.path.join(self.image_dir,video_name,'image_{:05}.jpg'.format(frame_id))
image = cv2.imread(imgpath)[:,:,::-1].copy()
info_2d = ('pw3d', imgpath, image, kp2d_op_matched, full_kp2d,None,subject_ids)
info_3d = ('pw3d', kp3d_monos, params, None)
return info_2d, info_3d
if self.use_bbox:
bbox_center = self.bboxes[video_name][person_id,frame_id]
min_dist_idx = np.argmin(np.linalg.norm(openpose_center[:,:2]-bbox_center[None],axis=-1))
center = self._calc_center_(full_kp2d_op[min_dist_idx])
centers.append(center)
if self.use_bbox:
centers = np.array(centers)
def pack_openpose_results(self, annot_file_path):
self.openpose_kp2ds = {}
for key, multi_person_annots in self.annots.items():
video_name, frame_id = multi_person_annots[0][0], multi_person_annots[0][2]
openpose_file_path = os.path.join(self.openpose_dir,video_name+'-'+'image_{:05}_keypoints.json'.format(frame_id))
full_kp2d = read_keypoints(openpose_file_path)
if full_kp2d is None:
continue
if video_name not in self.openpose_kp2ds:
self.openpose_kp2ds[video_name] = {}
self.openpose_kp2ds[video_name][frame_id] = full_kp2d
np.savez(annot_file_path, annots=self.openpose_kp2ds)
def match_op_to_gt(self, kp2ds_op, kp2d_gt):
kp2ds_op_dist = {}
vis_gt = kp2d_gt[self.torso_ids,-1]>0
center_gt = kp2d_gt[self.torso_ids][vis_gt].mean(0)
for idx, kp2d_op in enumerate(kp2ds_op):
vis = kp2d_op[self.torso_ids,-1]>0
if vis.sum()>1:
center_point = kp2d_op[self.torso_ids][vis].mean(0)
dist = np.linalg.norm(center_point-center_gt)
kp2ds_op_dist[dist] = idx
kp2d_op_matched_id = kp2ds_op_dist[np.min(list(kp2ds_op_dist.keys()))]
return kp2ds_op[kp2d_op_matched_id]
if 'joint_format' in kwargs:
joint_format=kwargs['joint_format']
else:
joint_format='coco25'
print('joint_format',joint_format)
#for set_name in set_names[self.phase]:
# label_dir = os.path.join(self.data3d_dir,set_name)
# self.get_labels(label_dir)
def get_image_info(self,index):
annot_3d = self.labels[index]
imgpath = os.path.join(self.image_dir,annot_3d['name'],'image_{:05}.jpg'.format(annot_3d['ids']))
subject_ids = annot_3d['subject_ids'].tolist()
person_num = len(subject_ids)
#name = os.path.join(self.image_dir,annot_3d['name'],'image_{:05}_{}.jpg'.format(annot_3d['ids'],subject_id))
image = cv2.imread(imgpath)[:,:,::-1].copy()
#openpose_file_path = os.path.join(self.openpose_dir,annot_3d['name']+'-'+'image_{:05}_keypoints.json'.format(annot_3d['ids']))
#openpose_result_list = read_keypoints(openpose_file_path)
#kp2d_body = self.process_openpose(openpose_result_list, kps)
full_kps = annot_3d['kp2d'].copy()
thetas,betas,ts,genders = annot_3d['poses'].copy(),annot_3d['betas'].copy(),annot_3d['t'].copy(),annot_3d['gender'].copy()
full_kp2d,kp3d_monos = [],[]
for idx in range(person_num):
joint = self.map_kps(full_kps[idx].T)
if (joint[:,-1]>-1).sum()<1:
subject_ids.remove(idx)
continue
full_kp2d.append(joint)
kp3d = self.smplr(thetas[idx], betas[idx], genders[idx])[0]
kp3d_monos.append(kp3d)
#kp3d_mono = annot_3d['kp3d'].copy().reshape(24,3)
#kp3d_mono[:,1:] *= -1
#kp3d_mono = self.map_kps(kp3d_mono,maps=config.smpl24_2_coco25)
params = np.concatenate([np.array(thetas)[:,:66], np.array(betas)[:,-10:]],-1)
info_2d = ('pw3d', imgpath, image, full_kp2d[np.random.randint(len(full_kp2d))], full_kp2d,None,None,subject_ids)
info_3d = ('pw3d', kp3d_monos, params, None)
return info_2d, info_3d
def get_labels(self,label_dir):
label_paths = glob.glob(label_dir+'/*.pkl')
for label_path in label_paths:
raw_labels = self.read_pkl(label_path)
frame_num = len(raw_labels['img_frame_ids'])
for j in range(frame_num):
label = {}
label['name'] = raw_labels['sequence']
label['ids'] = j#raw_labels['img_frame_ids'][j]\
#img_frame_ids: an index-array to down-sample 60 Hz 3D poses to corresponding image frame ids
label['frame_ids'] = raw_labels['img_frame_ids'][j]
label['subject_ids'] = np.arange(len(raw_labels['poses']))
label['kp2d'] = np.array([raw_labels['poses2d'][idx][j] for idx in range(len(raw_labels['poses2d']))])
if (label['kp2d'][:,:,-1]>-1).sum()<1:
continue
extrinsics = raw_labels['cam_poses'][j,:3,:3]
poses,shapes,trans = [[] for idx in range(3)]
for idx in range(len(raw_labels['poses'])):
trans.append(raw_labels['trans'][idx][j])
shapes.append(raw_labels['betas'][idx][:10])
pose=raw_labels['poses'][idx][j]
pose[:3] = cv2.Rodrigues(np.dot(extrinsics, cv2.Rodrigues(pose[:3])[0]))[0].T[0]
poses.append(pose)
label['poses'],label['betas'],label['t'] = poses,shapes,trans
label['kp3d'] = [raw_labels['jointPositions'][idx][j] for idx in range(len(raw_labels['jointPositions']))]
label['gender'] = [raw_labels['genders'][idx] for idx in range(len(raw_labels['genders']))]
#label['cam_poses'] = raw_labels['cam_poses'][i] # Rt matrix
label['cam_trans'] = raw_labels['cam_poses'][j,:3,3]
label['cam_rotation_matrix'] = raw_labels['cam_poses'][j,:3,:3] # Rt matrix
#label['campose_valid_mask'] = raw_labels['campose_valid'][i][j]
self.labels.append(label)
return True
def process_openpose(self,result_list, kps_gt):
if result_list is not None:
if len(result_list)>1:
for body_kp2d_op, hand_kp2d_op, face_kp2d_op in result_list:
body_kp2d_op = body_kp2d_op[config.body1352coco25]
if body_kp2d_op[9,2]>0.05 and body_kp2d_op[12,2]>0.05:
body_kp2d_op[8] = (body_kp2d_op[9]+body_kp2d_op[12])/2
else:
body_kp2d_op[8,2] = -2
vis_id = ((body_kp2d_op[:,2]>0.04).astype(np.float32) + (kps_gt[:,2]>0.04).astype(np.float32))>1
if vis_id.sum()>4:
error = np.linalg.norm((body_kp2d_op[vis_id,:2]-kps_gt[vis_id,:2]), axis=-1).mean()
else:
error = 1000
if error<70:
return body_kp2d_op
return kps_gt
def load_file_list(self):
self.file_paths = []
self.annots = np.load(self.annots_file, allow_pickle=True)['annots'][()]
with open(self.imgs_list_file) as f:
test_list = f.readlines()
for test_file in test_list:
self.file_paths.append(test_file.strip())
self.kps_op, self.facial_kps2d, self.hand_kps2d = {},{},{}
with open(self.kps_openpose_json_file,'r') as f:
openpose_labels = json.load(f)
empty_count=0
for idx,img_name in enumerate(self.file_paths):
img_name = os.path.basename(img_name)
annot = openpose_labels[img_name]
if annot is None:
empty_count += 1
continue
kp2d = np.array(annot['pose_keypoints_2d']).reshape(-1,3)
self.kps_op[img_name] = kp2d.astype(np.float32)
face_kp2d = np.array(annot['face_keypoints_2d']).reshape(-1,3)[17:68]
self.facial_kps2d[img_name] = face_kp2d.astype(np.float32)
hand_kp2d = np.concatenate([np.array(annot['hand_left_keypoints_2d']).reshape(-1,3),\
np.array(annot['hand_right_keypoints_2d']).reshape(-1,3)],0)
self.hand_kps2d[img_name] = hand_kp2d.astype(np.float32)
print('empty_count_op:',empty_count)
def load_alphapose_mpii(self):
with open(self.kps_alpha_json_file,'r') as f:
raw_labels = json.load(f)
error_num = 0
for idx,annot_3d in enumerate(self.labels):
content = raw_labels['{}-image_{:05}.jpg'.format(annot_3d['name'],annot_3d['ids'])]
poses = []
for pid in range(len(content)):
poses.append(np.array(content[pid]['keypoints']).reshape(-1,3)[:,:3])
poses = np.array(poses)[:,self.mpii_2_lsp14]
kps_gt = annot_3d['kp2d'].copy().T[self.coco18_2_lsp14][:-2]
vis = np.where(kps_gt[:,2]>0)[0]
poses_comp = poses[:,vis,:2]
kps_gt = kps_gt[vis,:2][None,:,:]
mis_errors = np.mean(np.linalg.norm(poses_comp-kps_gt,ord=2,axis=-1),-1)
pose = poses[np.argmin(mis_errors)]
pose[pose[:,2]<0.01,2] = 0
pose[pose[:,2]>0.01,2] = 1
annot_3d['kps_alpha'] = pose
def load_alphapose_coco(self):
with open(self.kps_alpha_json_file,'r') as f:
raw_labels = json.load(f)
frame_num = len(raw_labels)
print('frame_num',frame_num)
error_count=0
for idx,annot_3d in enumerate(self.labels):
try:
content = raw_labels['{}-image_{:05}.jpg'.format(annot_3d['name'],annot_3d['ids'])]['bodies']
poses = []
for pid in range(len(content)):
poses.append(np.array(content[pid]['joints']).reshape(-1,3))
poses = np.array(poses)[:,self.coco18_2_lsp14]
poses[:,-1,2] = 0
kps_gt = annot_3d['kp2d'].copy().T[self.coco18_2_lsp14][:-2]
vis = np.where(kps_gt[:,2]>0)[0]
mis_errors = []
for i in range(len(poses)):
poses_comp = poses[i,vis]
vis_pred = poses_comp[:,2]>0
poses_comp = poses_comp[vis_pred,:2]
kps_gti = kps_gt[vis,:2][vis_pred,:]
mis_errors.append(np.mean(np.linalg.norm(poses_comp-kps_gti,ord=2,axis=-1)))
mis_errors = np.array(mis_errors)
pose = poses[np.argmin(mis_errors)]
pose[pose[:,2]<0.1,2] = 0
pose[pose[:,2]>0.1,2] = 1
annot_3d['kps_alpha'] = pose
except :
print('{}/image_{:05}.jpg'.format(annot_3d['name'],annot_3d['ids']))
error_count+=1
pose_gt = annot_3d['kp2d'].copy().T[self.coco18_2_lsp14]
pose_gt[pose_gt[:,2]<0.1,2] = 0
pose_gt[pose_gt[:,2]>0.1,2] = 1
annot_3d['kps_alpha'] = pose_gt
print('error_count',error_count)
def get_item_video(self,index):
label = self.labels[index]
label_dict_name = '{}_{}'.format(label['name'],label['subject_ids'])
ids_sequence = list(self.label_dict[label_dict_name].keys())
current_frame = label['ids']
current_spawn = int((self.spawn-1)/2)
features_idx = []
for index, num in enumerate(list(range(current_frame, current_frame+current_spawn+1))):
if num not in ids_sequence:
num=features_idx[index-1]
features_idx.append(num)
for index, num in enumerate(list(range(current_frame-1, current_frame-current_spawn-1,-1))):
if num not in ids_sequence:
num=features_idx[0]
features_idx=[num]+features_idx
labels_idx = []
for idx in features_idx:
labels_idx.append(self.label_dict[label_dict_name][idx])
video = []
video_input = {}
for label_idx in labels_idx:
video.append(self.get_item_single_frame(label_idx))
for key in video[0].keys():
if key=='image':
video_input[key] = torch.cat([video[i][key].unsqueeze(0) for i in range(len(video))])
elif key=='kps_alpha':
video_input[key] = torch.cat([video[i][key].unsqueeze(0) for i in range(len(video))])
else:
video_input[key] = video[current_spawn][key]
return video_input
''' |
the-stack_0_5376 | import sys
import PySimpleGUI as sg
# import os.path
import json
import os
import random
import tkinter as tk
def check_experience(s):
if (s.isdigit() == False):
return False
exp = int(s)
if (exp > 30):
return False
return True
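# Illustrative sanity checks (not called by the survey itself): only digit strings
# that parse to at most 30 years of experience are accepted.
def _check_experience_examples():
    assert check_experience("5") is True
    assert check_experience("31") is False
    assert check_experience("abc") is False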
filename = './to-grade/hs.json'
try:
with open(filename[:-5] + '.tmp.json') as f:
dat = json.load(f)
except:
with open(filename) as f:
dat = json.load(f)
mylist = [(x, y) for x in range(len(dat) - 1) for y in range(3)]
random.shuffle(mylist)
names = ('gcnn', 'nl2code', 'snippet')
file_list_column = [
[
sg.Text('''Is the code snippet below a relevant or an irrelevant description of the card on the right?
Please rate it on a scale from 0 to 4. You can either click the radio button or press the corresponding key (\'4\' for 4, etc.)
You can also press \'Exit\' to finish grading or \'Skip\' to skip the snippet
4: Snippet is very relevant, it describes the card exactly
3: Snippet is relevant, but needs to be slightly changed to describe the card exactly
2: Snippet is somewhat relevant, it requires significant changes (compared to the size of the snippet), but is still useful to describe the card
1: Snippet is slightly relevant, it contains information relevant to the card, but it is easier to write the description from scratch
0: Snippet is not at all relevant to the card''', font=("Helvetica", 12)),
],
[sg.Radio('4', "RADIO1", enable_events=True, font=("Helvetica", 12), key='4', size=(10, 10)),
sg.Radio('3', "RADIO1", enable_events=True, font=("Helvetica", 12), key='3', size=(10, 10)),
sg.Radio('2', "RADIO1", enable_events=True, font=("Helvetica", 12), key='2', size=(10, 10)),
sg.Radio('1', "RADIO1", enable_events=True, font=("Helvetica", 12), key='1', size=(10, 10)),
sg.Radio('0', "RADIO1", enable_events=True, font=("Helvetica", 12), key='0', size=(10, 10))],
[sg.Cancel(button_text="Skip"), sg.Exit()],
[sg.Text(''), sg.Text(size=(150, 40), key='-OUTPUT-', font=("Helvetica", 12))]
]
# For now will only show the name of the file that was chosen
image_viewer_column = [
[sg.Image(key="-IMAGE-")],
]
# ----- Full layout -----
layout_form = [[sg.Text('''Dear participant,\n
this program is a survey on quality of the code snippets conducted by Independent non-profit organization of additional professional education
“Research and Education Center “JetBrains”, OGRN 1187800000134, located at St. Petersburg, Kantemirovskaya street 2, liter A, office 201.
You will be presented with code snippets (one at a time) and a problem they are supposed to solve. You are asked to evaluate whether
the suggested snippet is helpful or not helpful in solving the problem on a scale from 0 to 4, where 0 corresponds to a totally irrelevant snippet
and 4 corresponds to a snippet which solves the problem (more detailed instruction will be present at the snippet grading screen).\n
In the event of any publication or presentation resulting from the research, no personally identifiable information will be shared.
We plan to include the results of this survey in a scientific publication. If you have any concerns or questions about your rights as a participant
or about the way the study is being conducted, please contact Mikhail Evtikhiev ([email protected]).''',
font=("Helvetica", 12))],
[sg.Text('''In the text box below, please write how long you have been programming in Python (in years),
rounded to the nearest integer. This information will be reported in the publication in aggregated form.''',
font=("Helvetica", 12))],
[sg.Text('Python experience: ', key='_text1_',
font=("Helvetica", 12)), sg.InputText(key='_python_', size=(10, 1))],
[sg.Text('''In the text box below please write your Slack handle or e-mail address. This information will be kept private and we only ask for it
to be able to reach back to you to clarify any technical uncertainties with the graded snippets, if such uncertainties shall arise.''')],
[sg.Text('Contact information: ', key='_text2_',
font=("Helvetica", 12)), sg.InputText(key='_contact_', size=(30, 1))],
[sg.Text('''ELECTRONIC CONSENT\n
Please select your choice below. Selecting the “yes” option below indicates that:
i) you have read and understood the above information,
ii) you voluntarily agree to participate, and
iii) you are at least 18 years old.
If you do not wish to participate in the research study, please decline participation by selecting “No”.''',
font=("Helvetica", 12))],
[sg.Ok(button_text="Yes"), sg.Exit(button_text="No")],
]
layout_grade = [[
sg.Column(file_list_column),
sg.VSeperator(),
sg.Column(image_viewer_column),
]
]
#layout1 = [[sg.Text('')]]
#root = tk.Tk()
#screen_width = root.winfo_screenwidth()
#scaling_window = sg.Window('Window Title', layout1, no_titlebar=True, auto_close=False, alpha_channel=0).Finalize()
#scaling_window.TKroot.tk.call('tk', 'scaling', max(screen_width / 1920, 1))
#scaling_window.close()
pers_data = dat[-1]
no_consent = False
if ((pers_data["contact"] == "") or (pers_data["experience"] == "") or (pers_data["consent"] == "")):
window = sg.Window("Hearthstone dataset grader form", layout_form, finalize=True, location=(0, 0),
return_keyboard_events=True)
no_consent = True
while (no_consent):
event, values = window.read()
if event == "No" or event == sg.WIN_CLOSED:
window.close()
sys.exit()
elif event == "Yes":
error_text = ""
if (check_experience(values['_python_']) == False):
error_text += "Incorrect input. Please enter, for how long have you been programming in Python (in " \
"years, rounded to a nearest integer)\n"
if (len(values['_contact_']) < 1):
error_text += 'Incorrect input. Please enter your Slack handle or e-mail address.\n'
if len(error_text) > 0:
sg.popup(error_text)
else:
pers_data["contact"] = values['_contact_']
pers_data["experience"] = int(values['_python_'])
pers_data["consent"] = 'yes'
no_consent = False
for key in dat[-1]:
dat[-1][key] = pers_data[key]
window.close()
else:
pass
window = sg.Window("Hearthstone dataset grader", layout_grade, finalize=True, location=(0, 0),
return_keyboard_events=True)
if no_consent: window.close()
# Run the Event Loop
for (i, j) in mylist:
successful = False
finished = False
sname = 'grade-' + names[j]
if sname not in dat[i]:
window['-OUTPUT-'].update(dat[i][names[j]])
window["-IMAGE-"].update(filename='./hs_cards/' + str(i) + '.png')
while not successful:
event, values = window.read()
if event == "Exit" or event == sg.WIN_CLOSED:
with open(filename, 'w') as o:
json.dump(dat, o)
try:
os.remove(filename[:-5] + '.tmp.json')
except:
pass
finished = True
successful = True
elif event[0] in ['0', '1', '2', '3', '4']:
successful = True
dat[i][sname] = int(event)
with open(filename[:-5] + '.tmp.json', 'w') as o:
json.dump(dat, o)
elif event == "Skip":
successful = True
pass
else:
sg.popup(event)
if finished:
break
with open(filename, 'w') as o:
json.dump(dat, o)
try:
os.remove(filename[:-5] + '.tmp.json')
except:
pass
window.close()
|
the-stack_0_5377 | from flask import Flask, render_template, request
from db_magazina import Kategor, Tovar, Tovar_photo, Tovar_inphoto
my_flask_app = Flask(__name__)
@my_flask_app.route('/')
def index():
return render_template('index.html')
@my_flask_app.route('/smart/harakter/')
def harakt():
t1= Tovar_inphoto()
harackter = Tovar_inphoto.query.filter(Tovar_inphoto.id==1).all()
return render_template('harakteriskick.html' , harackter=harackter)
@my_flask_app.route('/inf/')
def info():
return render_template('info_gl_str.html')
@my_flask_app.route('/log1/')
def log1():
return render_template('login_menu.html')
@my_flask_app.route('/login/',methods = ['POST'])
def login():
return render_template('login.html',email=request.form.get("email"), password=request.form.get("passwd"))
@my_flask_app.route('/smart/')
@my_flask_app.route('/smart/<username>', methods = ['GET', 'POST'])
def category(username=None):
phone_name = request.args.get('phone_name', False)
t = Tovar()
smartfons = t.query.filter(Tovar.kategory_id==1)
harackter = Tovar_inphoto.query.filter(Tovar_inphoto.id==1).all()
if phone_name:
qry = '%{}%'.format(phone_name)
smartfons = smartfons.filter(Tovar.tovar_name.like(qry))
if username:
qry = '%{}%'.format(username)
smartfons = smartfons.filter(Tovar.tovar_name.like(qry))
check9 = request.args.get('check9', False)
check8 = request.args.get('check8', False)
check7 = request.args.get('check7', False)
check6 = request.args.get('check6', False)
check5 = request.args.get('check5', False)
check4 = request.args.get('check4', False)
check3 = request.args.get('check3', False)
check2 = request.args.get('check2', False)
check1 = request.args.get('check1', False)
check0 = request.args.get('check0', False)
checki = request.args.get('checki', False)
if checki:
z = [check0,check1,check2,check3,check4,check5,check6,check7,check8,check9]
smartfons=smartfons.filter(Tovar.tovar_name.in_([z[0],z[1],z[2],z[6]]))
try:
smartfons= Tovar.query.filter(Tovar.id.in_([Tovar_inphoto.query.filter(Tovar_inphoto.tovarinphoto_diagon.in_([z[3],z[4],z[5]])).all()[0].tovar_id]))
except IndexError:
pass
try:
a = Tovar_inphoto.query.filter(Tovar_inphoto.tovarinphoto_ram.in_([z[7],z[8],z[9]])).all()
for i in a:
smartfons= Tovar.query.filter(Tovar.id.in_([i.tovar_id]))
#smartfons= Tovar.query.filter(Tovar.id.in_([a[0].tovar_id , a[1].tovar_id]))
except IndexError:
pass
print(smartfons)
smartfons=smartfons.all()
return render_template('smartfons.html', smartfons=smartfons)
if __name__ == "__main__":
my_flask_app.run(debug=True)
|
the-stack_0_5378 | #!/usr/bin/env python3
# Copyright (c) 2015-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multisig RPCs"""
import decimal
import itertools
import json
import os
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.authproxy import JSONRPCException
from test_framework.descriptors import descsum_create, drop_origins
from test_framework.key import ECPubKey, ECKey
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_raises_rpc_error,
assert_equal,
)
from test_framework.wallet_util import bytes_to_wif
class RpcCreateMultiSigTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def get_keys(self):
self.pub = []
self.priv = []
node0, node1, node2 = self.nodes
for _ in range(self.nkeys):
k = ECKey()
k.generate()
self.pub.append(k.get_pubkey().get_bytes().hex())
self.priv.append(bytes_to_wif(k.get_bytes(), k.is_compressed))
self.final = node2.getnewaddress()
def run_test(self):
node0, node1, node2 = self.nodes
self.check_addmultisigaddress_errors()
self.log.info('Generating blocks ...')
self.generate(node0, 149)
self.moved = 0
for self.nkeys in [3, 5]:
for self.nsigs in [2, 3]:
for self.output_type in ["bech32", "p2sh-segwit", "legacy"]:
self.get_keys()
self.do_multisig()
self.checkbalances()
# Test mixed compressed and uncompressed pubkeys
self.log.info('Mixed compressed and uncompressed multisigs are not allowed')
pk0 = node0.getaddressinfo(node0.getnewaddress())['pubkey']
pk1 = node1.getaddressinfo(node1.getnewaddress())['pubkey']
pk2 = node2.getaddressinfo(node2.getnewaddress())['pubkey']
# decompress pk2
pk_obj = ECPubKey()
pk_obj.set(bytes.fromhex(pk2))
pk_obj.compressed = False
pk2 = pk_obj.get_bytes().hex()
node0.createwallet(wallet_name='wmulti0', disable_private_keys=True)
wmulti0 = node0.get_wallet_rpc('wmulti0')
# Check all permutations of keys because order matters apparently
for keys in itertools.permutations([pk0, pk1, pk2]):
# Results should be the same as this legacy one
legacy_addr = node0.createmultisig(2, keys, 'legacy')['address']
result = wmulti0.addmultisigaddress(2, keys, '', 'legacy')
assert_equal(legacy_addr, result['address'])
assert 'warnings' not in result
# Generate addresses with the segwit types. These should all make legacy addresses
for addr_type in ['bech32', 'p2sh-segwit']:
result = wmulti0.createmultisig(2, keys, addr_type)
assert_equal(legacy_addr, result['address'])
assert_equal(result['warnings'], ["Unable to make chosen address type, please ensure no uncompressed public keys are present."])
result = wmulti0.addmultisigaddress(2, keys, '', addr_type)
assert_equal(legacy_addr, result['address'])
assert_equal(result['warnings'], ["Unable to make chosen address type, please ensure no uncompressed public keys are present."])
self.log.info('Testing sortedmulti descriptors with BIP 67 test vectors')
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/rpc_bip67.json'), encoding='utf-8') as f:
vectors = json.load(f)
for t in vectors:
key_str = ','.join(t['keys'])
desc = descsum_create('sh(sortedmulti(2,{}))'.format(key_str))
assert_equal(self.nodes[0].deriveaddresses(desc)[0], t['address'])
sorted_key_str = ','.join(t['sorted_keys'])
sorted_key_desc = descsum_create('sh(multi(2,{}))'.format(sorted_key_str))
assert_equal(self.nodes[0].deriveaddresses(sorted_key_desc)[0], t['address'])
# Check that bech32m is currently not allowed
assert_raises_rpc_error(-5, "createmultisig cannot create bech32m multisig addresses", self.nodes[0].createmultisig, 2, self.pub, "bech32m")
def check_addmultisigaddress_errors(self):
if self.options.descriptors:
return
self.log.info('Check that addmultisigaddress fails when the private keys are missing')
addresses = [self.nodes[1].getnewaddress(address_type='legacy') for _ in range(2)]
assert_raises_rpc_error(-5, 'no full public key for address', lambda: self.nodes[0].addmultisigaddress(nrequired=1, keys=addresses))
for a in addresses:
# Importing all addresses should not change the result
self.nodes[0].importaddress(a)
assert_raises_rpc_error(-5, 'no full public key for address', lambda: self.nodes[0].addmultisigaddress(nrequired=1, keys=addresses))
# Bech32m address type is disallowed for legacy wallets
pubs = [self.nodes[1].getaddressinfo(addr)["pubkey"] for addr in addresses]
assert_raises_rpc_error(-5, "Bech32m multisig addresses cannot be created with legacy wallets", self.nodes[0].addmultisigaddress, 2, pubs, "", "bech32m")
def checkbalances(self):
node0, node1, node2 = self.nodes
self.generate(node0, COINBASE_MATURITY)
bal0 = node0.getbalance()
bal1 = node1.getbalance()
bal2 = node2.getbalance()
height = node0.getblockchaininfo()["blocks"]
assert 150 < height < 350
total = 149 * 50 + (height - 149 - 100) * 25
assert bal1 == 0
assert bal2 == self.moved
assert bal0 + bal1 + bal2 == total
def do_multisig(self):
node0, node1, node2 = self.nodes
if 'wmulti' not in node1.listwallets():
try:
node1.loadwallet('wmulti')
except JSONRPCException as e:
path = os.path.join(self.options.tmpdir, "node1", "regtest", "wallets", "wmulti")
if e.error['code'] == -18 and "Wallet file verification failed. Failed to load database path '{}'. Path does not exist.".format(path) in e.error['message']:
node1.createwallet(wallet_name='wmulti', disable_private_keys=True)
else:
raise
wmulti = node1.get_wallet_rpc('wmulti')
# Construct the expected descriptor
desc = 'multi({},{})'.format(self.nsigs, ','.join(self.pub))
if self.output_type == 'legacy':
desc = 'sh({})'.format(desc)
elif self.output_type == 'p2sh-segwit':
desc = 'sh(wsh({}))'.format(desc)
elif self.output_type == 'bech32':
desc = 'wsh({})'.format(desc)
desc = descsum_create(desc)
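# For example (hypothetical keys), a 2-of-3 bech32 case yields a descriptor of the form
# wsh(multi(2,<pubkey1>,<pubkey2>,<pubkey3>))#<checksum>, where descsum_create appends the checksum.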
msig = node2.createmultisig(self.nsigs, self.pub, self.output_type)
madd = msig["address"]
mredeem = msig["redeemScript"]
assert_equal(desc, msig['descriptor'])
if self.output_type == 'bech32':
assert madd[0:4] == "bcrt" # actually a bech32 address
# compare against addmultisigaddress
msigw = wmulti.addmultisigaddress(self.nsigs, self.pub, None, self.output_type)
maddw = msigw["address"]
mredeemw = msigw["redeemScript"]
assert_equal(desc, drop_origins(msigw['descriptor']))
# addmultisigaddress and createmultisig work the same
assert maddw == madd
assert mredeemw == mredeem
txid = node0.sendtoaddress(madd, 40)
tx = node0.getrawtransaction(txid, True)
vout = [v["n"] for v in tx["vout"] if madd == v["scriptPubKey"]["address"]]
assert len(vout) == 1
vout = vout[0]
scriptPubKey = tx["vout"][vout]["scriptPubKey"]["hex"]
value = tx["vout"][vout]["value"]
prevtxs = [{"txid": txid, "vout": vout, "scriptPubKey": scriptPubKey, "redeemScript": mredeem, "amount": value}]
self.generate(node0, 1)
outval = value - decimal.Decimal("0.00001000")
rawtx = node2.createrawtransaction([{"txid": txid, "vout": vout}], [{self.final: outval}])
prevtx_err = dict(prevtxs[0])
del prevtx_err["redeemScript"]
assert_raises_rpc_error(-8, "Missing redeemScript/witnessScript", node2.signrawtransactionwithkey, rawtx, self.priv[0:self.nsigs-1], [prevtx_err])
# if witnessScript specified, all ok
prevtx_err["witnessScript"] = prevtxs[0]["redeemScript"]
node2.signrawtransactionwithkey(rawtx, self.priv[0:self.nsigs-1], [prevtx_err])
# both specified, also ok
prevtx_err["redeemScript"] = prevtxs[0]["redeemScript"]
node2.signrawtransactionwithkey(rawtx, self.priv[0:self.nsigs-1], [prevtx_err])
# redeemScript mismatch to witnessScript
prevtx_err["redeemScript"] = "6a" # OP_RETURN
assert_raises_rpc_error(-8, "redeemScript does not correspond to witnessScript", node2.signrawtransactionwithkey, rawtx, self.priv[0:self.nsigs-1], [prevtx_err])
# redeemScript does not match scriptPubKey
del prevtx_err["witnessScript"]
assert_raises_rpc_error(-8, "redeemScript/witnessScript does not match scriptPubKey", node2.signrawtransactionwithkey, rawtx, self.priv[0:self.nsigs-1], [prevtx_err])
# witnessScript does not match scriptPubKey
prevtx_err["witnessScript"] = prevtx_err["redeemScript"]
del prevtx_err["redeemScript"]
assert_raises_rpc_error(-8, "redeemScript/witnessScript does not match scriptPubKey", node2.signrawtransactionwithkey, rawtx, self.priv[0:self.nsigs-1], [prevtx_err])
rawtx2 = node2.signrawtransactionwithkey(rawtx, self.priv[0:self.nsigs - 1], prevtxs)
rawtx3 = node2.signrawtransactionwithkey(rawtx2["hex"], [self.priv[-1]], prevtxs)
self.moved += outval
tx = node0.sendrawtransaction(rawtx3["hex"], 0)
blk = self.generate(node0, 1)[0]
assert tx in node0.getblock(blk)["tx"]
txinfo = node0.getrawtransaction(tx, True, blk)
self.log.info("n/m=%d/%d %s size=%d vsize=%d weight=%d" % (self.nsigs, self.nkeys, self.output_type, txinfo["size"], txinfo["vsize"], txinfo["weight"]))
wmulti.unloadwallet()
if __name__ == '__main__':
RpcCreateMultiSigTest().main()
|
the-stack_0_5379 | #!/usr/bin/env python3
import string
class BracketError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class Machine():
def __init__(self):
self.tape = [0]
self.p = 0
def run(self, code, step=False):
pc = 0
loop_stack = []
brackets = 0
printed = False
for instr in code:
if instr == '[':
brackets += 1
elif instr == ']':
brackets -= 1
if brackets != 0:
raise BracketError('Error: failed bracket count')
while pc < len(code):
instr = code[pc]
# increment/decrement
if instr == '+':
self.increment(1)
elif instr == '-':
self.increment(-1)
# I/O
elif instr == '.':
print(chr(self.cell()), end='')
printed = True
elif instr == ',':
self.input()
# move tape
elif instr == '<':
if self.p > 0:
self.p -= 1
else:
print("Error: Can't decrement pointer")
elif instr == '>':
if self.p > (len(self.tape)-2):
self.tape.append(0)
self.p += 1
# looping
elif instr == ']':
pc = loop_stack.pop() - 1
elif instr == '[':
if self.cell() == 0:
while code[pc] != ']':
pc += 1
else:
loop_stack.append(pc)
if step:
input()
pc += 1
if printed:
print('')
def set(self, val):
self.tape[self.p] = val % 128
def increment(self, amount):
self.set(self.cell() + amount)
def input(self):
character = input()
if character == '':
print("No value given, setting cell to 0 ...")
self.set(0)
else:
self.set(ord(character[0]))
def cell(self):
return self.tape[self.p]
def dump(self):
print("%d," % self.p, self.tape)
def write_to(program, command):
split = command.index(' ')
line = int(command[:split])
command = command[(split+1):]
if line < len(program):
program[line] = command
else:
while len(program) < line:
program.append('')
program.append(command)
if __name__ == "__main__":
helptext = "help: Display this help text\nquit: Quit\ndump: Print tape, pointer\nclear: Reset tape\nnew: Wipe program\nlist: List program\nrun: Run program\nsave <arg>: Save program as <arg>\nload <arg>: Load program from <arg>\nstep [arg]: step through program or optional arg"
tape = Machine()
program = []
while True:
try:
command = input("[%d]:%d$ " %(tape.p,tape.cell()))
except EOFError:
break
if command == "":
continue
elif command == "q" or command == "quit":
break
elif command == "d" or command == "dump":
tape.dump()
elif command == "h" or command == "help":
print(helptext)
elif command == "new":
program = []
elif command == "clear":
tape = Machine()
print("Tape Reset")
elif command == "l" or command == "list":
for number, line in enumerate(program):
if line != '':
print(number, line)
elif command == "r" or command == "run":
tape.run("".join(program))
elif command[:4] == "load":
f = open(command[5:],mode='r')
program = f.read().split('\n')
f.close()
elif command[:4] == "save":
f = open(command[5:],mode='w')
f.write('\n'.join(program))
f.close()
elif command == "step":
tape.run(program, step=True)
elif command[:4] == "step":
tape.run(command[5:], step=True)
elif command[0] in string.digits:
write_to(program, command)
else:
try:
tape.run(command)
except BracketError:
print("Error: Failed bracket count!")
print("Goodbye!")
|
the-stack_0_5380 | from __future__ import absolute_import
from builtins import object
import future.utils as futils
import os
if futils.PY2:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
else:
from io import BytesIO as StringIO
from .compat import as_bytes, as_str
# Python 2.4 support: os lacks SEEK_END and friends
try:
getattr(os, "SEEK_END")
except AttributeError:
os.SEEK_SET, os.SEEK_CUR, os.SEEK_END = list(range(3))
class SimpleBuffer(object):
"""
>>> b = SimpleBuffer()
>>> b.write('abcdef')
>>> b.read(3)
'abc'
>>> b.consume(3)
>>> b.write('z')
>>> b.read()
'defz'
>>> b.read()
'defz'
>>> b.read(0)
''
>>> repr(b)
"<SimpleBuffer of 4 bytes, 7 total size, 'defz'>"
>>> str(b)
"<SimpleBuffer of 4 bytes, 7 total size, 'defz'>"
>>> len(b)
4
>>> bool(b)
True
>>> b.flush()
>>> len(b)
0
>>> bool(b)
False
>>> b.read(1)
''
>>> b.write('a'*524288)
>>> b.flush() # run GC code
"""
def __init__(self):
self.buf = StringIO()
self.size = 0
self.offset = 0
def write(self, data):
data = as_bytes(data)
self.buf.write(data)
self.size += len(data)
def read(self, size=None):
self.buf.seek(self.offset)
if size is None:
data = self.buf.read()
else:
data = self.buf.read(size)
self.buf.seek(0, os.SEEK_END)
return data
def consume(self, size):
self.offset += size
self.size -= size
# GC old StringIO instance and free memory used by it.
if self.size == 0 and self.offset > 524288:
self.buf.close()
self.buf = StringIO()
self.offset = 0
def flush(self):
self.consume(self.size)
def __bool__(self):
return self.size > 0
def __len__(self):
return self.size
def __str__(self):
return self.__repr__()
def __repr__(self):
return '<SimpleBuffer of %i bytes, %i total size, %r%s>' % \
(self.size, self.size + self.offset, self.read(16),
(self.size > 16) and '...' or '')
|
the-stack_0_5381 | from conftest import get_metrics
from pyriemann.embedding import Embedding
import pytest
@pytest.mark.parametrize("metric", get_metrics())
@pytest.mark.parametrize("eps", [None, 0.1])
def test_embedding(metric, eps, get_covmats):
"""Test Embedding."""
n_trials, n_channels, n_comp = 6, 3, 2
covmats = get_covmats(n_trials, n_channels)
embd = Embedding(metric=metric, n_components=n_comp, eps=eps)
covembd = embd.fit_transform(covmats)
assert covembd.shape == (n_trials, n_comp)
def test_fit_independence(get_covmats):
n_trials, n_channels = 6, 3
covmats = get_covmats(n_trials, n_channels)
embd = Embedding()
embd.fit_transform(covmats)
# retraining with different size should erase previous fit
new_covmats = covmats[:, :-1, :-1]
embd.fit_transform(new_covmats)
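# Standalone usage sketch (illustrative, independent of the conftest fixtures above): random
# SPD matrices built directly with numpy; the names and values here are assumptions for illustration.
def _example_embedding_without_fixtures():
    import numpy as np
    rng = np.random.default_rng(42)
    a = rng.standard_normal((6, 3, 3))
    covs = a @ np.transpose(a, (0, 2, 1)) + 1e-3 * np.eye(3)  # symmetric positive definite
    return Embedding(metric="riemann", n_components=2).fit_transform(covs)  # shape (6, 2)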
|
the-stack_0_5382 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v4.errors",
marshal="google.ads.googleads.v4",
manifest={"HeaderErrorEnum",},
)
class HeaderErrorEnum(proto.Message):
r"""Container for enum describing possible header errors."""
class HeaderError(proto.Enum):
r"""Enum describing possible header errors."""
UNSPECIFIED = 0
UNKNOWN = 1
INVALID_LOGIN_CUSTOMER_ID = 3
INVALID_LINKED_CUSTOMER_ID = 7
__all__ = tuple(sorted(__protobuf__.manifest))
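# Illustrative usage sketch (an addition, not part of the generated file): once this
# module is imported, the nested enum values can be referenced directly, e.g.
#
#   code = HeaderErrorEnum.HeaderError.INVALID_LOGIN_CUSTOMER_ID
#   assert code == 3
#
# The import path for this module inside the google-ads package is not shown here and
# would depend on how the client library exposes its generated types.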
|
the-stack_0_5383 | import cdflib
import numpy as np
import pandas as pd
import sys
sys.path.insert(0, '/home/andres_munoz_j/pyCFOFiSAX')
print(sys.path)
# import importlib.util
# spec = importlib.util.spec_from_file_location('ForestISAX', '/home/andres_munoz_j/pyCFOFiSAX/pyCFOFiSAX/_forest_iSAX.py')
# ForestISAX = importlib.util.module_from_spec(spec)
# spec.loader.exec_module(ForestISAX)
# spec = importlib.util.spec_from_file_location('TreeISAX', '/home/andres_munoz_j/pyCFOFiSAX/pyCFOFiSAX/_tree_iSAX.py')
# TreeISAX = importlib.util.module_from_spec(spec)
# spec.loader.exec_module(TreeISAX)
from pyCFOFiSAX._forest_iSAX import ForestISAX
from pyCFOFiSAX._isax import IndexableSymbolicAggregateApproximation
# from pyCFOFiSAX._tree_iSAX import TreeISAX
from anytree import RenderTree
from anytree.exporter import DotExporter
psp_path = '/sw-data/psp/'
path = psp_path + 'mag_rtn/'
year = '2019'
month = '05'
day = '15'
hour = '00'
cdf_file_path = path + year + '/psp_fld_l2_mag_rtn_' + year + month + day + hour + '_v01.cdf'
cdf_file = cdflib.CDF(cdf_file_path)
x = cdf_file.varget('epoch_mag_RTN') # reading in the epoch time stamps
x = cdflib.epochs.CDFepoch.to_datetime(x) # convrting x axis labels to date time stamps
y = cdf_file.varget('psp_fld_l2_mag_RTN')
npoints = 200
# Start with Bx
ts = y[0:int(y.shape[0]/npoints)*npoints,0].reshape(-1,npoints)
# Append By
ts = np.append(ts, y[0:int(y.shape[0]/npoints)*npoints,1].reshape(-1,npoints), axis=0)
# Append Bz
ts = np.append(ts, y[0:int(y.shape[0]/npoints)*npoints,2].reshape(-1,npoints), axis=0)
# Create auxiliary dataframe
ts_loc = pd.DataFrame({'File':np.repeat(cdf_file_path,ts.shape[0]//3), 'Component':np.repeat('Bx',ts.shape[0]//3)})
ts_loc['t0'] = np.array(x[0:int(y.shape[0]/npoints)*npoints]).reshape(-1,npoints)[:,0]
ts_loc['t1'] = np.array(x[0:int(y.shape[0]/npoints)*npoints]).reshape(-1,npoints)[:,-1]
tmp_loc = ts_loc.copy()
tmp_loc['Component'] = 'By'
ts_loc = pd.concat((ts_loc,tmp_loc))
tmp_loc['Component'] = 'Bz'
ts_loc = pd.concat((ts_loc,tmp_loc)).reset_index(drop=True)
sw_forest = ForestISAX(size_word=10,
threshold=20,
data_ts=ts,
base_cardinality=2, number_tree=1)
sw_forest.index_data(ts, parallel=True)
# test = sw_forest.forest[0].root.escalate_node(sw_forest.forest[0].root.nodes[0])
# print(test)
sw_forest.forest[0].parallel_escalation()
# print(test)
size_word = 10
mu = np.mean(ts)
sig = np.std(ts)
isax = IndexableSymbolicAggregateApproximation(size_word, mean=mu, std=sig)
nodes_at_level = sw_forest.forest[0].get_nodes_of_level_or_terminal(8)
annotations_l = nodes_at_level[30].get_annotations()
sequences_l = nodes_at_level[30].get_sequences()
annotations_l = pd.concat([pd.Series(sequences_l, index=annotations_l.index, name='iSAX'), annotations_l], axis=1)
# print(sw_forest.forest[0].root.get_sequences())
# print(sw_forest.forest[0].root.get_annotations())
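# Optional inspection sketch (commented out, and an assumption rather than part of the
# original script: it relies on the tree nodes being anytree-compatible, which is why
# RenderTree and DotExporter are imported above but otherwise unused):
# for pre, _, node in RenderTree(sw_forest.forest[0].root):
#     print(pre, node)
# DotExporter(sw_forest.forest[0].root).to_dotfile('sw_forest.dot')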
print('done') |
the-stack_0_5384 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
from flask import Flask,request, jsonify, render_template
import pickle
# In[2]:
app=Flask(__name__)
model=pickle.load(open('spam_model.pkl','rb'))
cv = pickle.load(open('cv-transform.pkl','rb'))
# In[3]:
@app.route('/')
def home():
return render_template('index.html')
# In[4]:
@app.route('/predict', methods=['POST'])
def predict():
if request.method == 'POST':
message = request.form['message']
data = [message]
vect = cv.transform(data).toarray()
prediction = model.predict(vect)
return render_template('index.html', prediction_text="message is $ {}".format(prediction))
# In[5]:
if __name__=="__main__":
app.run(debug=True)
# In[6]:
# In[ ]:
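# Example request against the running app (illustrative addition; the form field name
# "message" matches the key read in predict() above, and 5000 is Flask's default
# development port):
#
#   curl -X POST -d "message=Win a free prize now" http://127.0.0.1:5000/predict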
|
the-stack_0_5385 | import copy
import decimal
import subprocess
import time
import os
import re
import datetime
import json
import signal
from core_symbol import CORE_SYMBOL
from testUtils import Utils
from testUtils import Account
from testUtils import EnumType
from testUtils import addEnum
from testUtils import unhandledEnumType
class ReturnType(EnumType):
pass
addEnum(ReturnType, "raw")
addEnum(ReturnType, "json")
class BlockType(EnumType):
pass
addEnum(BlockType, "head")
addEnum(BlockType, "lib")
# pylint: disable=too-many-public-methods
class Node(object):
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-arguments
def __init__(self, host, port, pid=None, cmd=None, walletMgr=None, enableMongo=False, mongoHost="localhost", mongoPort=27017, mongoDb="EOStest"):
self.host=host
self.port=port
self.pid=pid
self.cmd=cmd
if Utils.Debug: Utils.Print("new Node host=%s, port=%s, pid=%s, cmd=%s" % (self.host, self.port, self.pid, self.cmd))
self.killed=False # marks node as killed
self.enableMongo=enableMongo
self.mongoHost=mongoHost
self.mongoPort=mongoPort
self.mongoDb=mongoDb
self.endpointHttp="http://%s:%d" % (self.host, self.port)
self.endpointArgs="--url %s" % (self.endpointHttp)
self.mongoEndpointArgs=""
self.infoValid=None
self.lastRetrievedHeadBlockNum=None
self.lastRetrievedLIB=None
self.transCache={}
self.walletMgr=walletMgr
self.missingTransaction=False
self.popenProc=None # initial process is started by launcher, this will only be set on relaunch
if self.enableMongo:
self.mongoEndpointArgs += "--host %s --port %d %s" % (mongoHost, mongoPort, mongoDb)
def eosClientArgs(self):
walletArgs=" " + self.walletMgr.getWalletEndpointArgs() if self.walletMgr is not None else ""
return self.endpointArgs + walletArgs + " " + Utils.MiscEosClientArgs
def __str__(self):
#return "Host: %s, Port:%d, Pid:%s, Cmd:\"%s\"" % (self.host, self.port, self.pid, self.cmd)
return "Host: %s, Port:%d" % (self.host, self.port)
@staticmethod
def validateTransaction(trans):
assert trans
assert isinstance(trans, dict), print("Input type is %s" % type(trans))
executed="executed"
def printTrans(trans, status):
Utils.Print("ERROR: Valid transaction should be \"%s\" but it was \"%s\"." % (executed, status))
Utils.Print("Transaction: %s" % (json.dumps(trans, indent=1)))
transStatus=Node.getTransStatus(trans)
assert transStatus == executed, printTrans(trans, transStatus)
@staticmethod
def __printTransStructureError(trans, context):
Utils.Print("ERROR: Failure in expected transaction structure. Missing trans%s." % (context))
Utils.Print("Transaction: %s" % (json.dumps(trans, indent=1)))
class Context:
def __init__(self, obj, desc):
self.obj=obj
self.sections=[obj]
self.keyContext=[]
self.desc=desc
def __json(self):
return "%s=\n%s" % (self.desc, json.dumps(self.obj, indent=1))
def __keyContext(self):
msg=""
for key in self.keyContext:
if msg=="":
msg="["
else:
msg+="]["
msg+=key
if msg!="":
msg+="]"
return msg
def __contextDesc(self):
return "%s%s" % (self.desc, self.__keyContext())
def add(self, newKey):
assert isinstance(newKey, str), print("ERROR: Trying to use %s as a key" % (newKey))
subSection=self.sections[-1]
assert isinstance(subSection, dict), print("ERROR: Calling \"add\" method when context is not a dictionary. %s in %s" % (self.__contextDesc(), self.__json()))
            assert newKey in subSection, print("ERROR: %s does not contain key \"%s\". %s" % (self.__contextDesc(), newKey, self.__json()))
current=subSection[newKey]
self.sections.append(current)
self.keyContext.append(newKey)
return current
def index(self, i):
assert isinstance(i, int), print("ERROR: Trying to use \"%s\" as a list index" % (i))
cur=self.getCurrent()
assert isinstance(cur, list), print("ERROR: Calling \"index\" method when context is not a list. %s in %s" % (self.__contextDesc(), self.__json()))
listLen=len(cur)
assert i < listLen, print("ERROR: Index %s is beyond the size of the current list (%s). %s in %s" % (i, listLen, self.__contextDesc(), self.__json()))
return self.sections.append(cur[i])
def getCurrent(self):
return self.sections[-1]
@staticmethod
def getTransStatus(trans):
cntxt=Node.Context(trans, "trans")
cntxt.add("processed")
cntxt.add("receipt")
return cntxt.add("status")
@staticmethod
def getTransBlockNum(trans):
cntxt=Node.Context(trans, "trans")
cntxt.add("processed")
cntxt.add("action_traces")
cntxt.index(0)
return cntxt.add("block_num")
@staticmethod
def stdinAndCheckOutput(cmd, subcommand):
"""Passes input to stdin, executes cmd. Returns tuple with return code(int), stdout(byte stream) and stderr(byte stream)."""
assert(cmd)
assert(isinstance(cmd, list))
assert(subcommand)
assert(isinstance(subcommand, str))
outs=None
errs=None
ret=0
try:
popen=subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
outs,errs=popen.communicate(input=subcommand.encode("utf-8"))
ret=popen.wait()
except subprocess.CalledProcessError as ex:
msg=ex.output
return (ex.returncode, msg, None)
return (ret, outs, errs)
@staticmethod
def normalizeJsonObject(extJStr):
tmpStr=extJStr
tmpStr=re.sub(r'ObjectId\("(\w+)"\)', r'"ObjectId-\1"', tmpStr)
tmpStr=re.sub(r'ISODate\("([\w|\-|\:|\.]+)"\)', r'"ISODate-\1"', tmpStr)
tmpStr=re.sub(r'NumberLong\("(\w+)"\)', r'"NumberLong-\1"', tmpStr)
return tmpStr
@staticmethod
def runMongoCmdReturnJson(cmd, subcommand, trace=False, exitOnError=False):
"""Run mongodb subcommand and return response."""
assert(cmd)
assert(isinstance(cmd, list))
assert(subcommand)
assert(isinstance(subcommand, str))
retId,outs,errs=Node.stdinAndCheckOutput(cmd, subcommand)
        if retId != 0:
errorMsg="mongodb call failed. cmd=[ %s ] subcommand=\"%s\" - %s" % (", ".join(cmd), subcommand, errs)
if exitOnError:
Utils.cmdError(errorMsg)
Utils.errorExit(errorMsg)
Utils.Print("ERROR: %s" % (errMsg))
return None
outStr=Node.byteArrToStr(outs)
if not outStr:
return None
extJStr=Utils.filterJsonObjectOrArray(outStr)
if not extJStr:
return None
jStr=Node.normalizeJsonObject(extJStr)
if not jStr:
return None
if trace: Utils.Print ("RAW > %s"% (outStr))
if trace: Utils.Print ("JSON> %s"% jStr)
try:
jsonData=json.loads(jStr)
except json.decoder.JSONDecodeError as _:
Utils.Print ("ERROR: JSONDecodeError")
Utils.Print ("Raw MongoDB response: > %s"% (outStr))
Utils.Print ("Normalized MongoDB response: > %s"% (jStr))
raise
return jsonData
@staticmethod
def getTransId(trans):
"""Retrieve transaction id from dictionary object."""
assert trans
assert isinstance(trans, dict), print("Input type is %s" % type(trans))
assert "transaction_id" in trans, print("trans does not contain key %s. trans={%s}" % ("transaction_id", json.dumps(trans, indent=2, sort_keys=True)))
transId=trans["transaction_id"]
return transId
@staticmethod
def isTrans(obj):
"""Identify if this is a transaction dictionary."""
if obj is None or not isinstance(obj, dict):
return False
return True if "transaction_id" in obj else False
@staticmethod
def byteArrToStr(arr):
return arr.decode("utf-8")
def validateAccounts(self, accounts):
assert(accounts)
assert(isinstance(accounts, list))
for account in accounts:
assert(account)
assert(isinstance(account, Account))
if Utils.Debug: Utils.Print("Validating account %s" % (account.name))
accountInfo=self.getEosAccount(account.name, exitOnError=True)
try:
if not self.enableMongo:
assert(accountInfo["account_name"] == account.name)
else:
assert(accountInfo["name"] == account.name)
except (AssertionError, TypeError, KeyError) as _:
Utils.Print("account validation failed. account: %s" % (account.name))
raise
# pylint: disable=too-many-branches
def getBlock(self, blockNum, silentErrors=False, exitOnError=False):
"""Given a blockId will return block details."""
assert(isinstance(blockNum, int))
if not self.enableMongo:
cmdDesc="get block"
cmd="%s %d" % (cmdDesc, blockNum)
msg="(block number=%s)" % (blockNum);
return self.processCleosCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=msg)
else:
cmd="%s %s" % (Utils.MongoPath, self.mongoEndpointArgs)
subcommand='db.blocks.findOne( { "block_num": %d } )' % (blockNum)
if Utils.Debug: Utils.Print("cmd: echo '%s' | %s" % (subcommand, cmd))
start=time.perf_counter()
try:
block=Node.runMongoCmdReturnJson(cmd.split(), subcommand, exitOnError=exitOnError)
if Utils.Debug:
end=time.perf_counter()
Utils.Print("cmd Duration: %.3f sec" % (end-start))
if block is not None:
return block
except subprocess.CalledProcessError as ex:
if not silentErrors:
end=time.perf_counter()
msg=ex.output.decode("utf-8")
errorMsg="Exception during get db node get block. cmd Duration: %.3f sec. %s" % (end-start, msg)
if exitOnError:
Utils.cmdError(errorMsg)
Utils.errorExit(errorMsg)
else:
Utils.Print("ERROR: %s" % (errorMsg))
return None
return None
def getBlockByIdMdb(self, blockId, silentErrors=False):
cmd="%s %s" % (Utils.MongoPath, self.mongoEndpointArgs)
subcommand='db.blocks.findOne( { "block_id": "%s" } )' % (blockId)
if Utils.Debug: Utils.Print("cmd: echo '%s' | %s" % (subcommand, cmd))
start=time.perf_counter()
try:
trans=Node.runMongoCmdReturnJson(cmd.split(), subcommand)
if Utils.Debug:
end=time.perf_counter()
Utils.Print("cmd Duration: %.3f sec" % (end-start))
if trans is not None:
return trans
except subprocess.CalledProcessError as ex:
if not silentErrors:
end=time.perf_counter()
msg=ex.output.decode("utf-8")
Utils.Print("ERROR: Exception during db get block by id. cmd Duration: %.3f sec. %s" % (end-start, msg))
return None
return None
def isBlockPresent(self, blockNum, blockType=BlockType.head):
"""Does node have head_block_num/last_irreversible_block_num >= blockNum"""
assert isinstance(blockNum, int)
assert isinstance(blockType, BlockType)
assert (blockNum > 0)
info=self.getInfo(silentErrors=True, exitOnError=True)
node_block_num=0
try:
if blockType==BlockType.head:
node_block_num=int(info["head_block_num"])
elif blockType==BlockType.lib:
node_block_num=int(info["last_irreversible_block_num"])
else:
unhandledEnumType(blockType)
except (TypeError, KeyError) as _:
Utils.Print("Failure in get info parsing %s block. %s" % (blockType.type, info))
raise
present = True if blockNum <= node_block_num else False
if Utils.Debug and blockType==BlockType.lib:
decorator=""
if present:
decorator="is not "
Utils.Print("Block %d is %sfinalized." % (blockNum, decorator))
return present
def isBlockFinalized(self, blockNum):
"""Is blockNum finalized"""
return self.isBlockPresent(blockNum, blockType=BlockType.lib)
# pylint: disable=too-many-branches
def getTransaction(self, transId, silentErrors=False, exitOnError=False, delayedRetry=True):
assert(isinstance(transId, str))
exitOnErrorForDelayed=not delayedRetry and exitOnError
timeout=3
if not self.enableMongo:
cmdDesc="get transaction"
cmd="%s %s" % (cmdDesc, transId)
msg="(transaction id=%s)" % (transId);
for i in range(0,(int(60/timeout) - 1)):
trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnErrorForDelayed, exitMsg=msg)
if trans is not None or not delayedRetry:
return trans
if Utils.Debug: Utils.Print("Could not find transaction with id %s, delay and retry" % (transId))
time.sleep(timeout)
self.missingTransaction=True
# either it is there or the transaction has timed out
return self.processCleosCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=msg)
else:
for i in range(0,(int(60/timeout) - 1)):
trans=self.getTransactionMdb(transId, silentErrors=silentErrors, exitOnError=exitOnErrorForDelayed)
if trans is not None or not delayedRetry:
return trans
if Utils.Debug: Utils.Print("Could not find transaction with id %s in mongodb, delay and retry" % (transId))
time.sleep(timeout)
return self.getTransactionMdb(transId, silentErrors=silentErrors, exitOnError=exitOnError)
def getTransactionMdb(self, transId, silentErrors=False, exitOnError=False):
"""Get transaction from MongoDB. Since DB only contains finalized blocks, transactions can take a while to appear in DB."""
cmd="%s %s" % (Utils.MongoPath, self.mongoEndpointArgs)
#subcommand='db.Transactions.findOne( { $and : [ { "trx_id": "%s" }, {"irreversible":true} ] } )' % (transId)
subcommand='db.transactions.findOne( { "trx_id": "%s" } )' % (transId)
if Utils.Debug: Utils.Print("cmd: echo '%s' | %s" % (subcommand, cmd))
start=time.perf_counter()
try:
trans=Node.runMongoCmdReturnJson(cmd.split(), subcommand, exitOnError=exitOnError)
if Utils.Debug:
end=time.perf_counter()
Utils.Print("cmd Duration: %.3f sec" % (end-start))
if trans is not None:
return trans
except subprocess.CalledProcessError as ex:
end=time.perf_counter()
msg=ex.output.decode("utf-8")
errorMsg="Exception during get db node get trans in mongodb with transaction id=%s. cmd Duration: %.3f sec. %s" % (transId, end-start, msg)
if exitOnError:
Utils.cmdError("" % (errorMsg))
Utils.errorExit("Failed to retrieve transaction in mongodb for transaction id=%s" % (transId))
elif not silentErrors:
Utils.Print("ERROR: %s" % (errorMsg))
return None
def isTransInBlock(self, transId, blockId):
"""Check if transId is within block identified by blockId"""
assert(transId)
assert(isinstance(transId, str))
assert(blockId)
assert(isinstance(blockId, int))
block=self.getBlock(blockId, exitOnError=True)
transactions=None
key=""
try:
if not self.enableMongo:
key="[transactions]"
transactions=block["transactions"]
else:
key="[blocks][transactions]"
transactions=block["block"]["transactions"]
except (AssertionError, TypeError, KeyError) as _:
Utils.Print("block%s not found. Block: %s" % (key,block))
raise
if transactions is not None:
for trans in transactions:
assert(trans)
try:
myTransId=trans["trx"]["id"]
if transId == myTransId:
return True
except (TypeError, KeyError) as _:
Utils.Print("transaction%s not found. Transaction: %s" % (key, trans))
return False
def getBlockIdByTransId(self, transId, delayedRetry=True):
"""Given a transaction Id (string), will return the actual block id (int) containing the transaction"""
assert(transId)
assert(isinstance(transId, str))
trans=self.getTransaction(transId, exitOnError=True, delayedRetry=delayedRetry)
refBlockNum=None
key=""
try:
if not self.enableMongo:
key="[trx][trx][ref_block_num]"
refBlockNum=trans["trx"]["trx"]["ref_block_num"]
else:
key="[ref_block_num]"
refBlockNum=trans["ref_block_num"]
refBlockNum=int(refBlockNum)+1
except (TypeError, ValueError, KeyError) as _:
Utils.Print("transaction%s not found. Transaction: %s" % (key, trans))
return None
headBlockNum=self.getHeadBlockNum()
assert(headBlockNum)
try:
headBlockNum=int(headBlockNum)
except(ValueError) as _:
Utils.Print("ERROR: Block info parsing failed. %s" % (headBlockNum))
raise
if Utils.Debug: Utils.Print("Reference block num %d, Head block num: %d" % (refBlockNum, headBlockNum))
for blockNum in range(refBlockNum, headBlockNum+1):
if self.isTransInBlock(str(transId), blockNum):
if Utils.Debug: Utils.Print("Found transaction %s in block %d" % (transId, blockNum))
return blockNum
return None
def getBlockIdByTransIdMdb(self, transId):
"""Given a transaction Id (string), will return block id (int) containing the transaction. This is specific to MongoDB."""
assert(transId)
assert(isinstance(transId, str))
trans=self.getTransactionMdb(transId)
if not trans: return None
refBlockNum=None
try:
refBlockNum=trans["ref_block_num"]
refBlockNum=int(refBlockNum)+1
except (TypeError, ValueError, KeyError) as _:
Utils.Print("transaction[ref_block_num] not found. Transaction: %s" % (trans))
return None
headBlockNum=self.getHeadBlockNum()
assert(headBlockNum)
try:
headBlockNum=int(headBlockNum)
except(ValueError) as _:
Utils.Print("Info parsing failed. %s" % (headBlockNum))
for blockNum in range(refBlockNum, headBlockNum+1):
if self.isTransInBlock(str(transId), blockNum):
return blockNum
return None
def isTransInAnyBlock(self, transId):
"""Check if transaction (transId) is in a block."""
assert(transId)
assert(isinstance(transId, (str,int)))
# if not self.enableMongo:
blockId=self.getBlockIdByTransId(transId)
# else:
# blockId=self.getBlockIdByTransIdMdb(transId)
return True if blockId else False
def isTransFinalized(self, transId):
"""Check if transaction (transId) has been finalized."""
assert(transId)
assert(isinstance(transId, str))
blockId=self.getBlockIdByTransId(transId)
if not blockId:
return False
assert(isinstance(blockId, int))
return self.isBlockPresent(blockId, blockType=BlockType.lib)
# Create & initialize account and return creation transactions. Return transaction json object
def createInitializeAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTransBlock=False, stakeNet=100, stakeCPU=100, buyRAM=10000, exitOnError=False):
cmdDesc="system newaccount"
cmd='%s -j %s %s %s %s --stake-net "%s %s" --stake-cpu "%s %s" --buy-ram "%s %s"' % (
cmdDesc, creatorAccount.name, account.name, account.ownerPublicKey,
account.activePublicKey, stakeNet, CORE_SYMBOL, stakeCPU, CORE_SYMBOL, buyRAM, CORE_SYMBOL)
msg="(creator account=%s, account=%s)" % (creatorAccount.name, account.name);
trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg)
self.trackCmdTransaction(trans)
transId=Node.getTransId(trans)
if stakedDeposit > 0:
self.waitForTransInBlock(transId) # seems like account creation needs to be finalized before transfer can happen
trans = self.transferFunds(creatorAccount, account, Node.currencyIntToStr(stakedDeposit, CORE_SYMBOL), "init")
transId=Node.getTransId(trans)
return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError)
def createAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTransBlock=False, exitOnError=False):
"""Create account and return creation transactions. Return transaction json object.
waitForTransBlock: wait on creation transaction id to appear in a block."""
cmdDesc="create account"
cmd="%s -j %s %s %s %s" % (
cmdDesc, creatorAccount.name, account.name, account.ownerPublicKey, account.activePublicKey)
msg="(creator account=%s, account=%s)" % (creatorAccount.name, account.name);
trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg)
self.trackCmdTransaction(trans)
transId=Node.getTransId(trans)
if stakedDeposit > 0:
            self.waitForTransInBlock(transId) # seems like account creation needs to be finalized before transfer can happen
trans = self.transferFunds(creatorAccount, account, "%0.04f %s" % (stakedDeposit/10000, CORE_SYMBOL), "init")
self.trackCmdTransaction(trans)
transId=Node.getTransId(trans)
return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError)
def getEosAccount(self, name, exitOnError=False, returnType=ReturnType.json, avoidMongo=False):
assert(isinstance(name, str))
if not self.enableMongo or avoidMongo:
cmdDesc="get account"
jsonFlag="-j" if returnType==ReturnType.json else ""
cmd="%s %s %s" % (cmdDesc, jsonFlag, name)
msg="( getEosAccount(name=%s) )" % (name);
return self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg, returnType=returnType)
else:
assert returnType == ReturnType.json, "MongoDB only supports a returnType of ReturnType.json"
return self.getEosAccountFromDb(name, exitOnError=exitOnError)
def getEosAccountFromDb(self, name, exitOnError=False):
cmd="%s %s" % (Utils.MongoPath, self.mongoEndpointArgs)
subcommand='db.accounts.findOne({"name" : "%s"})' % (name)
if Utils.Debug: Utils.Print("cmd: echo '%s' | %s" % (subcommand, cmd))
try:
timeout = 3
for i in range(0,(int(60/timeout) - 1)):
start=time.perf_counter()
trans=Node.runMongoCmdReturnJson(cmd.split(), subcommand, exitOnError=exitOnError)
if trans is not None:
if Utils.Debug:
end=time.perf_counter()
Utils.Print("cmd Duration: %.3f sec" % (end-start))
return trans
time.sleep(timeout)
return trans
except subprocess.CalledProcessError as ex:
msg=ex.output.decode("utf-8")
if exitOnError:
end=time.perf_counter()
Utils.cmdError("Exception during get account from db for %s. cmd Duration: %.3f sec. %s" % (name, end-start, msg))
Utils.errorExit("Failed during get account from db for %s. %s" % (name, msg))
Utils.Print("ERROR: Exception during get account from db for %s. %s" % (name, msg))
return None
def getTable(self, contract, scope, table, exitOnError=False):
cmdDesc = "get table"
cmd="%s %s %s %s" % (cmdDesc, contract, scope, table)
msg="contract=%s, scope=%s, table=%s" % (contract, scope, table);
return self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg)
def getTableAccountBalance(self, contract, scope):
assert(isinstance(contract, str))
assert(isinstance(scope, str))
table="accounts"
trans = self.getTable(contract, scope, table, exitOnError=True)
try:
return trans["rows"][0]["balance"]
except (TypeError, KeyError) as _:
print("transaction[rows][0][balance] not found. Transaction: %s" % (trans))
raise
def getCurrencyBalance(self, contract, account, symbol=CORE_SYMBOL, exitOnError=False):
"""returns raw output from get currency balance e.g. '99999.9950 CUR'"""
assert(contract)
assert(isinstance(contract, str))
assert(account)
assert(isinstance(account, str))
assert(symbol)
assert(isinstance(symbol, str))
cmdDesc = "get currency balance"
cmd="%s %s %s %s" % (cmdDesc, contract, account, symbol)
msg="contract=%s, account=%s, symbol=%s" % (contract, account, symbol);
return self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg, returnType=ReturnType.raw)
def getCurrencyStats(self, contract, symbol=CORE_SYMBOL, exitOnError=False):
"""returns Json output from get currency stats."""
assert(contract)
assert(isinstance(contract, str))
assert(symbol)
assert(isinstance(symbol, str))
cmdDesc = "get currency stats"
cmd="%s %s %s" % (cmdDesc, contract, symbol)
msg="contract=%s, symbol=%s" % (contract, symbol);
return self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg)
# Verifies account. Returns "get account" json return object
def verifyAccount(self, account):
assert(account)
if not self.enableMongo:
ret=self.getEosAccount(account.name)
if ret is not None:
account_name=ret["account_name"]
if account_name is None:
Utils.Print("ERROR: Failed to verify account creation.", account.name)
return None
return ret
else:
return self.verifyAccountMdb(account)
def verifyAccountMdb(self, account):
assert(account)
ret=self.getEosAccountFromDb(account.name)
if ret is not None:
account_name=ret["name"]
if account_name is None:
Utils.Print("ERROR: Failed to verify account creation.", account.name)
return None
return ret
return None
def waitForTransInBlock(self, transId, timeout=None):
"""Wait for trans id to be finalized."""
assert(isinstance(transId, str))
lam = lambda: self.isTransInAnyBlock(transId)
ret=Utils.waitForBool(lam, timeout)
return ret
def waitForTransFinalization(self, transId, timeout=None):
"""Wait for trans id to be finalized."""
assert(isinstance(transId, str))
lam = lambda: self.isTransFinalized(transId)
ret=Utils.waitForBool(lam, timeout)
return ret
def waitForNextBlock(self, timeout=None, blockType=BlockType.head):
num=self.getBlockNum(blockType=blockType)
        lam = lambda: self.getBlockNum(blockType=blockType) > num
ret=Utils.waitForBool(lam, timeout)
return ret
def waitForBlock(self, blockNum, timeout=None, blockType=BlockType.head):
lam = lambda: self.getBlockNum(blockType=blockType) > blockNum
ret=Utils.waitForBool(lam, timeout)
return ret
def waitForIrreversibleBlock(self, blockNum, timeout=None, blockType=BlockType.head):
return self.waitForBlock(blockNum, timeout=timeout, blockType=blockType)
    # Transfer funds. Returns "transfer" json return object
def transferFunds(self, source, destination, amountStr, memo="memo", force=False, waitForTransBlock=False, exitOnError=True):
assert isinstance(amountStr, str)
assert(source)
assert(isinstance(source, Account))
assert(destination)
assert(isinstance(destination, Account))
cmd="%s %s -v transfer -j %s %s" % (
Utils.EosClientPath, self.eosClientArgs(), source.name, destination.name)
cmdArr=cmd.split()
cmdArr.append(amountStr)
cmdArr.append(memo)
if force:
cmdArr.append("-f")
s=" ".join(cmdArr)
if Utils.Debug: Utils.Print("cmd: %s" % (s))
trans=None
start=time.perf_counter()
try:
trans=Utils.runCmdArrReturnJson(cmdArr)
if Utils.Debug:
end=time.perf_counter()
Utils.Print("cmd Duration: %.3f sec" % (end-start))
self.trackCmdTransaction(trans)
except subprocess.CalledProcessError as ex:
end=time.perf_counter()
msg=ex.output.decode("utf-8")
Utils.Print("ERROR: Exception during funds transfer. cmd Duration: %.3f sec. %s" % (end-start, msg))
if exitOnError:
Utils.cmdError("could not transfer \"%s\" from %s to %s" % (amountStr, source, destination))
Utils.errorExit("Failed to transfer \"%s\" from %s to %s" % (amountStr, source, destination))
return None
if trans is None:
Utils.cmdError("could not transfer \"%s\" from %s to %s" % (amountStr, source, destination))
Utils.errorExit("Failed to transfer \"%s\" from %s to %s" % (amountStr, source, destination))
return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError)
@staticmethod
def currencyStrToInt(balanceStr):
"""Converts currency string of form "12.3456 EON" to int 123456"""
assert(isinstance(balanceStr, str))
balanceStr=balanceStr.split()[0]
#balance=int(decimal.Decimal(balanceStr[1:])*10000)
balance=int(decimal.Decimal(balanceStr)*10000)
return balance
@staticmethod
def currencyIntToStr(balance, symbol):
"""Converts currency int of form 123456 to string "12.3456 EON" where EON is symbol string"""
assert(isinstance(balance, int))
assert(isinstance(symbol, str))
balanceStr="%.04f %s" % (balance/10000.0, symbol)
return balanceStr
def validateFunds(self, initialBalances, transferAmount, source, accounts):
"""Validate each account has the expected EON balance. Validate cumulative balance matches expectedTotal."""
assert(source)
assert(isinstance(source, Account))
assert(accounts)
assert(isinstance(accounts, list))
assert(len(accounts) > 0)
assert(initialBalances)
assert(isinstance(initialBalances, dict))
assert(isinstance(transferAmount, int))
currentBalances=self.getEosBalances([source] + accounts)
assert(currentBalances)
assert(isinstance(currentBalances, dict))
assert(len(initialBalances) == len(currentBalances))
if len(currentBalances) != len(initialBalances):
Utils.Print("ERROR: validateFunds> accounts length mismatch. Initial: %d, current: %d" % (len(initialBalances), len(currentBalances)))
return False
for key, value in currentBalances.items():
initialBalance = initialBalances[key]
assert(initialBalances)
expectedInitialBalance = value - transferAmount
if key is source:
expectedInitialBalance = value + (transferAmount*len(accounts))
            if (initialBalance != expectedInitialBalance):
                Utils.Print("ERROR: validateFunds> Expected: %d, actual: %d for account %s" %
                            (expectedInitialBalance, initialBalance, key.name))
                return False
        return True
def getEosBalances(self, accounts):
"""Returns a dictionary with account balances keyed by accounts"""
assert(accounts)
assert(isinstance(accounts, list))
balances={}
for account in accounts:
balance = self.getAccountEosBalance(account.name)
balances[account]=balance
return balances
# Gets accounts mapped to key. Returns json object
def getAccountsByKey(self, key, exitOnError=False):
cmdDesc = "get accounts"
cmd="%s %s" % (cmdDesc, key)
msg="key=%s" % (key);
return self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg)
# Get actions mapped to an account (cleos get actions)
def getActions(self, account, pos=-1, offset=-1, exitOnError=False):
assert(isinstance(account, Account))
assert(isinstance(pos, int))
assert(isinstance(offset, int))
if not self.enableMongo:
cmdDesc = "get actions"
cmd="%s -j %s %d %d" % (cmdDesc, account.name, pos, offset)
msg="account=%s, pos=%d, offset=%d" % (account.name, pos, offset);
return self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg)
else:
return self.getActionsMdb(account, pos, offset, exitOnError=exitOnError)
def getActionsMdb(self, account, pos=-1, offset=-1, exitOnError=False):
assert(isinstance(account, Account))
assert(isinstance(pos, int))
assert(isinstance(offset, int))
cmd="%s %s" % (Utils.MongoPath, self.mongoEndpointArgs)
subcommand='db.action_traces.find({$or: [{"act.data.from":"%s"},{"act.data.to":"%s"}]}).sort({"_id":%d}).limit(%d)' % (account.name, account.name, pos, abs(offset))
if Utils.Debug: Utils.Print("cmd: echo '%s' | %s" % (subcommand, cmd))
start=time.perf_counter()
try:
actions=Node.runMongoCmdReturnJson(cmd.split(), subcommand, exitOnError=exitOnError)
if Utils.Debug:
end=time.perf_counter()
Utils.Print("cmd Duration: %.3f sec" % (end-start))
if actions is not None:
return actions
except subprocess.CalledProcessError as ex:
end=time.perf_counter()
msg=ex.output.decode("utf-8")
errorMsg="Exception during get db actions. cmd Duration: %.3f sec. %s" % (end-start, msg)
if exitOnError:
Utils.cmdError(errorMsg)
Utils.errorExit(errorMsg)
else:
Utils.Print("ERROR: %s" % (errorMsg))
return None
# Gets accounts mapped to key. Returns array
def getAccountsArrByKey(self, key):
trans=self.getAccountsByKey(key)
assert(trans)
assert("account_names" in trans)
accounts=trans["account_names"]
return accounts
def getServants(self, name, exitOnError=False):
cmdDesc = "get servants"
cmd="%s %s" % (cmdDesc, name)
msg="name=%s" % (name);
return self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg)
def getServantsArr(self, name):
trans=self.getServants(name, exitOnError=True)
servants=trans["controlled_accounts"]
return servants
def getAccountEosBalanceStr(self, scope):
"""Returns EON currency0000 account balance from cleos get table command. Returned balance is string following syntax "98.0311 EON". """
assert isinstance(scope, str)
amount=self.getTableAccountBalance("eonio.token", scope)
if Utils.Debug: Utils.Print("getNodeAccountEosBalance %s %s" % (scope, amount))
assert isinstance(amount, str)
return amount
def getAccountEosBalance(self, scope):
"""Returns EON currency0000 account balance from cleos get table command. Returned balance is an integer e.g. 980311. """
balanceStr=self.getAccountEosBalanceStr(scope)
balance=Node.currencyStrToInt(balanceStr)
return balance
def getAccountCodeHash(self, account):
cmd="%s %s get code %s" % (Utils.EosClientPath, self.eosClientArgs(), account)
if Utils.Debug: Utils.Print("cmd: %s" % (cmd))
start=time.perf_counter()
try:
retStr=Utils.checkOutput(cmd.split())
if Utils.Debug:
end=time.perf_counter()
Utils.Print("cmd Duration: %.3f sec" % (end-start))
#Utils.Print ("get code> %s"% retStr)
p=re.compile(r'code\shash: (\w+)\n', re.MULTILINE)
m=p.search(retStr)
if m is None:
msg="Failed to parse code hash."
Utils.Print("ERROR: "+ msg)
return None
return m.group(1)
except subprocess.CalledProcessError as ex:
end=time.perf_counter()
msg=ex.output.decode("utf-8")
Utils.Print("ERROR: Exception during code hash retrieval. cmd Duration: %.3f sec. %s" % (end-start, msg))
return None
# publish contract and return transaction as json object
def publishContract(self, account, contractDir, wasmFile, abiFile, waitForTransBlock=False, shouldFail=False):
cmd="%s %s -v set contract -j %s %s" % (Utils.EosClientPath, self.eosClientArgs(), account, contractDir)
cmd += "" if wasmFile is None else (" "+ wasmFile)
cmd += "" if abiFile is None else (" " + abiFile)
if Utils.Debug: Utils.Print("cmd: %s" % (cmd))
trans=None
start=time.perf_counter()
try:
trans=Utils.runCmdReturnJson(cmd, trace=False)
self.trackCmdTransaction(trans)
if Utils.Debug:
end=time.perf_counter()
Utils.Print("cmd Duration: %.3f sec" % (end-start))
except subprocess.CalledProcessError as ex:
if not shouldFail:
end=time.perf_counter()
msg=ex.output.decode("utf-8")
Utils.Print("ERROR: Exception during code hash retrieval. cmd Duration: %.3f sec. %s" % (end-start, msg))
return None
else:
retMap={}
retMap["returncode"]=ex.returncode
retMap["cmd"]=ex.cmd
retMap["output"]=ex.output
# commented below as they are available only in Python3.5 and above
# retMap["stdout"]=ex.stdout
# retMap["stderr"]=ex.stderr
return retMap
if shouldFail:
Utils.Print("ERROR: The publish contract did not fail as expected.")
return None
Node.validateTransaction(trans)
return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=False)
def getTableRows(self, contract, scope, table):
jsonData=self.getTable(contract, scope, table)
if jsonData is None:
return None
rows=jsonData["rows"]
return rows
def getTableRow(self, contract, scope, table, idx):
if idx < 0:
Utils.Print("ERROR: Table index cannot be negative. idx: %d" % (idx))
return None
rows=self.getTableRows(contract, scope, table)
if rows is None or idx >= len(rows):
Utils.Print("ERROR: Retrieved table does not contain row %d" % idx)
return None
row=rows[idx]
return row
def getTableColumns(self, contract, scope, table):
row=self.getTableRow(contract, scope, table, 0)
keys=list(row.keys())
return keys
    # returns a tuple of (success flag, transaction json on success or error message on failure)
def pushMessage(self, account, action, data, opts, silentErrors=False):
cmd="%s %s push action -j %s %s" % (Utils.EosClientPath, self.eosClientArgs(), account, action)
cmdArr=cmd.split()
if data is not None:
cmdArr.append(data)
if opts is not None:
cmdArr += opts.split()
s=" ".join(cmdArr)
if Utils.Debug: Utils.Print("cmd: %s" % (cmdArr))
start=time.perf_counter()
try:
trans=Utils.runCmdArrReturnJson(cmdArr)
self.trackCmdTransaction(trans, ignoreNonTrans=True)
if Utils.Debug:
end=time.perf_counter()
Utils.Print("cmd Duration: %.3f sec" % (end-start))
return (True, trans)
except subprocess.CalledProcessError as ex:
msg=ex.output.decode("utf-8")
if not silentErrors:
end=time.perf_counter()
Utils.Print("ERROR: Exception during push message. cmd Duration=%.3f sec. %s" % (end - start, msg))
return (False, msg)
def setPermission(self, account, code, pType, requirement, waitForTransBlock=False, exitOnError=False):
cmdDesc="set action permission"
cmd="%s -j %s %s %s %s" % (cmdDesc, account, code, pType, requirement)
trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError)
self.trackCmdTransaction(trans)
return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError)
def delegatebw(self, fromAccount, netQuantity, cpuQuantity, toAccount=None, transferTo=False, waitForTransBlock=False, exitOnError=False):
if toAccount is None:
toAccount=fromAccount
cmdDesc="system delegatebw"
transferStr="--transfer" if transferTo else ""
cmd="%s -j %s %s \"%s %s\" \"%s %s\" %s" % (
cmdDesc, fromAccount.name, toAccount.name, netQuantity, CORE_SYMBOL, cpuQuantity, CORE_SYMBOL, transferStr)
msg="fromAccount=%s, toAccount=%s" % (fromAccount.name, toAccount.name);
trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg)
self.trackCmdTransaction(trans)
return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError)
def undelegatebw(self, fromAccount, netQuantity, cpuQuantity, toAccount=None, waitForTransBlock=False, exitOnError=False):
if toAccount is None:
toAccount=fromAccount
cmdDesc="system undelegatebw"
cmd="%s -j %s %s \"%s %s\" \"%s %s\"" % (
cmdDesc, fromAccount.name, toAccount.name, netQuantity, CORE_SYMBOL, cpuQuantity, CORE_SYMBOL)
msg="fromAccount=%s, toAccount=%s" % (fromAccount.name, toAccount.name);
trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg)
self.trackCmdTransaction(trans)
return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError)
def regproducer(self, producer, url, location, waitForTransBlock=False, exitOnError=False):
cmdDesc="system regproducer"
cmd="%s -j %s %s %s %s" % (
cmdDesc, producer.name, producer.activePublicKey, url, location)
msg="producer=%s" % (producer.name);
trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg)
self.trackCmdTransaction(trans)
return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError)
def vote(self, account, producers, waitForTransBlock=False, exitOnError=False):
cmdDesc = "system voteproducer prods"
cmd="%s -j %s %s" % (
cmdDesc, account.name, " ".join(producers))
msg="account=%s, producers=[ %s ]" % (account.name, ", ".join(producers));
trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg)
self.trackCmdTransaction(trans)
return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError)
def processCleosCmd(self, cmd, cmdDesc, silentErrors=True, exitOnError=False, exitMsg=None, returnType=ReturnType.json):
assert(isinstance(returnType, ReturnType))
cmd="%s %s %s" % (Utils.EosClientPath, self.eosClientArgs(), cmd)
if Utils.Debug: Utils.Print("cmd: %s" % (cmd))
if exitMsg is not None:
exitMsg="Context: " + exitMsg
else:
exitMsg=""
trans=None
start=time.perf_counter()
try:
if returnType==ReturnType.json:
trans=Utils.runCmdReturnJson(cmd, silentErrors=silentErrors)
elif returnType==ReturnType.raw:
trans=Utils.runCmdReturnStr(cmd)
else:
unhandledEnumType(returnType)
if Utils.Debug:
end=time.perf_counter()
Utils.Print("cmd Duration: %.3f sec" % (end-start))
except subprocess.CalledProcessError as ex:
if not silentErrors:
end=time.perf_counter()
msg=ex.output.decode("utf-8")
errorMsg="Exception during \"%s\". Exception message: %s. cmd Duration=%.3f sec. %s" % (cmdDesc, msg, end-start, exitMsg)
if exitOnError:
Utils.cmdError(errorMsg)
Utils.errorExit(errorMsg)
else:
Utils.Print("ERROR: %s" % (errorMsg))
return None
if exitOnError and trans is None:
Utils.cmdError("could not \"%s\". %s" % (cmdDesc,exitMsg))
Utils.errorExit("Failed to \"%s\"" % (cmdDesc))
return trans
def killNodeOnProducer(self, producer, whereInSequence, blockType=BlockType.head, silentErrors=True, exitOnError=False, exitMsg=None, returnType=ReturnType.json):
assert(isinstance(producer, str))
assert(isinstance(whereInSequence, int))
assert(isinstance(blockType, BlockType))
assert(isinstance(returnType, ReturnType))
basedOnLib="true" if blockType==BlockType.lib else "false"
cmd="curl %s/v1/test_control/kill_node_on_producer -d '{ \"producer\":\"%s\", \"where_in_sequence\":%d, \"based_on_lib\":\"%s\" }' -X POST -H \"Content-Type: application/json\"" % \
(self.endpointHttp, producer, whereInSequence, basedOnLib)
if Utils.Debug: Utils.Print("cmd: %s" % (cmd))
rtn=None
start=time.perf_counter()
try:
if returnType==ReturnType.json:
rtn=Utils.runCmdReturnJson(cmd, silentErrors=silentErrors)
elif returnType==ReturnType.raw:
rtn=Utils.runCmdReturnStr(cmd)
else:
unhandledEnumType(returnType)
if Utils.Debug:
end=time.perf_counter()
Utils.Print("cmd Duration: %.3f sec" % (end-start))
except subprocess.CalledProcessError as ex:
if not silentErrors:
end=time.perf_counter()
msg=ex.output.decode("utf-8")
errorMsg="Exception during \"%s\". %s. cmd Duration=%.3f sec." % (cmd, msg, end-start)
if exitOnError:
Utils.cmdError(errorMsg)
Utils.errorExit(errorMsg)
else:
Utils.Print("ERROR: %s" % (errorMsg))
return None
if exitMsg is not None:
exitMsg=": " + exitMsg
else:
exitMsg=""
if exitOnError and rtn is None:
Utils.cmdError("could not \"%s\" - %s" % (cmd,exitMsg))
Utils.errorExit("Failed to \"%s\"" % (cmd))
return rtn
def waitForTransBlockIfNeeded(self, trans, waitForTransBlock, exitOnError=False):
if not waitForTransBlock:
return trans
transId=Node.getTransId(trans)
if not self.waitForTransInBlock(transId):
if exitOnError:
Utils.cmdError("transaction with id %s never made it to a block" % (transId))
Utils.errorExit("Failed to find transaction with id %s in a block before timeout" % (transId))
return None
return trans
def getInfo(self, silentErrors=False, exitOnError=False):
cmdDesc = "get info"
info=self.processCleosCmd(cmdDesc, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnError)
if info is None:
self.infoValid=False
else:
self.infoValid=True
self.lastRetrievedHeadBlockNum=int(info["head_block_num"])
self.lastRetrievedLIB=int(info["last_irreversible_block_num"])
return info
def getBlockFromDb(self, idx):
cmd="%s %s" % (Utils.MongoPath, self.mongoEndpointArgs)
subcommand="db.blocks.find().sort({\"_id\":%d}).limit(1).pretty()" % (idx)
if Utils.Debug: Utils.Print("cmd: echo \"%s\" | %s" % (subcommand, cmd))
start=time.perf_counter()
try:
trans=Node.runMongoCmdReturnJson(cmd.split(), subcommand)
if Utils.Debug:
end=time.perf_counter()
Utils.Print("cmd Duration: %.3f sec" % (end-start))
return trans
except subprocess.CalledProcessError as ex:
end=time.perf_counter()
msg=ex.output.decode("utf-8")
Utils.Print("ERROR: Exception during get db block. cmd Duration: %.3f sec. %s" % (end-start, msg))
return None
def checkPulse(self, exitOnError=False):
info=self.getInfo(True, exitOnError=exitOnError)
return False if info is None else True
def getHeadBlockNum(self):
"""returns head block number(string) as returned by cleos get info."""
if not self.enableMongo:
info=self.getInfo(exitOnError=True)
if info is not None:
headBlockNumTag="head_block_num"
return info[headBlockNumTag]
else:
# Either this implementation or the one in getIrreversibleBlockNum are likely wrong.
block=self.getBlockFromDb(-1)
if block is not None:
blockNum=block["block_num"]
return blockNum
return None
def getIrreversibleBlockNum(self):
if not self.enableMongo:
info=self.getInfo(exitOnError=True)
if info is not None:
return info["last_irreversible_block_num"]
else:
# Either this implementation or the one in getHeadBlockNum are likely wrong.
block=self.getBlockFromDb(-1)
if block is not None:
blockNum=block["block_num"]
return blockNum
return None
def getBlockNum(self, blockType=BlockType.head):
assert isinstance(blockType, BlockType)
if blockType==BlockType.head:
return self.getHeadBlockNum()
elif blockType==BlockType.lib:
return self.getIrreversibleBlockNum()
else:
unhandledEnumType(blockType)
def kill(self, killSignal):
if Utils.Debug: Utils.Print("Killing node: %s" % (self.cmd))
assert(self.pid is not None)
try:
os.kill(self.pid, killSignal)
except OSError as ex:
Utils.Print("ERROR: Failed to kill node (%d)." % (self.cmd), ex)
return False
# wait for kill validation
def myFunc():
try:
os.kill(self.pid, 0) #check if process with pid is running
except OSError as _:
return True
return False
if not Utils.waitForBool(myFunc):
Utils.Print("ERROR: Failed to validate node shutdown.")
return False
# mark node as killed
self.pid=None
self.killed=True
return True
def interruptAndVerifyExitStatus(self):
if Utils.Debug: Utils.Print("terminating node: %s" % (self.cmd))
assert self.popenProc is not None, "node: \"%s\" does not have a popenProc, this may be because it is only set after a relaunch." % (self.cmd)
self.popenProc.send_signal(signal.SIGINT)
try:
outs, _ = self.popenProc.communicate(timeout=15)
assert self.popenProc.returncode == 0, "Expected terminating \"%s\" to have an exit status of 0, but got %d" % (self.cmd, self.popenProc.returncode)
except subprocess.TimeoutExpired:
Utils.errorExit("Terminate call failed on node: %s" % (self.cmd))
def verifyAlive(self, silent=False):
if not silent and Utils.Debug: Utils.Print("Checking if node(pid=%s) is alive(killed=%s): %s" % (self.pid, self.killed, self.cmd))
if self.killed or self.pid is None:
self.killed=True
self.pid=None
return False
try:
os.kill(self.pid, 0)
except ProcessLookupError as ex:
# mark node as killed
self.pid=None
self.killed=True
return False
except PermissionError as ex:
return True
return True
def getBlockProducerByNum(self, blockNum, timeout=None, waitForBlock=True, exitOnError=True):
if waitForBlock:
self.waitForBlock(blockNum, timeout=timeout, blockType=BlockType.head)
block=self.getBlock(blockNum, exitOnError=exitOnError)
blockProducer=block["producer"]
if blockProducer is None and exitOnError:
Utils.cmdError("could not get producer for block number %s" % (blockNum))
Utils.errorExit("Failed to get block's producer")
return blockProducer
def getBlockProducer(self, timeout=None, waitForBlock=True, exitOnError=True, blockType=BlockType.head):
blockNum=self.getBlockNum(blockType=blockType)
block=self.getBlock(blockNum, exitOnError=exitOnError, blockType=blockType)
blockProducer=block["producer"]
if blockProducer is None and exitOnError:
Utils.cmdError("could not get producer for block number %s" % (blockNum))
Utils.errorExit("Failed to get block's producer")
return blockProducer
def getNextCleanProductionCycle(self, trans):
transId=Node.getTransId(trans)
rounds=21*12*2 # max time to ensure that at least 2/3+1 of producers x blocks per producer x at least 2 times
self.waitForTransFinalization(transId, timeout=rounds/2)
irreversibleBlockNum=self.getIrreversibleBlockNum()
# The voted schedule should be promoted now, then need to wait for that to become irreversible
votingTallyWindow=120 #could be up to 120 blocks before the votes were tallied
promotedBlockNum=self.getHeadBlockNum()+votingTallyWindow
self.waitForIrreversibleBlock(promotedBlockNum, timeout=rounds/2)
ibnSchedActive=self.getIrreversibleBlockNum()
blockNum=self.getHeadBlockNum()
Utils.Print("Searching for clean production cycle blockNum=%s ibn=%s transId=%s promoted bn=%s ibn for schedule active=%s" % (blockNum,irreversibleBlockNum,transId,promotedBlockNum,ibnSchedActive))
blockProducer=self.getBlockProducerByNum(blockNum)
blockNum+=1
Utils.Print("Advance until the next block producer is retrieved")
while blockProducer == self.getBlockProducerByNum(blockNum):
blockNum+=1
blockProducer=self.getBlockProducerByNum(blockNum)
return blockNum
# TBD: make nodeId an internal property
# pylint: disable=too-many-locals
def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTimeout, addOrSwapFlags=None, cachePopen=False):
assert(self.pid is None)
assert(self.killed)
if Utils.Debug: Utils.Print("Launching node process, Id: %d" % (nodeId))
cmdArr=[]
myCmd=self.cmd
toAddOrSwap=copy.deepcopy(addOrSwapFlags) if addOrSwapFlags is not None else {}
if not newChain:
skip=False
swapValue=None
for i in self.cmd.split():
Utils.Print("\"%s\"" % (i))
if skip:
skip=False
continue
if "--genesis-json" == i or "--genesis-timestamp" == i:
skip=True
continue
if swapValue is None:
cmdArr.append(i)
else:
cmdArr.append(swapValue)
swapValue=None
if i in toAddOrSwap:
swapValue=toAddOrSwap[i]
del toAddOrSwap[i]
for k,v in toAddOrSwap.items():
cmdArr.append(k)
cmdArr.append(v)
myCmd=" ".join(cmdArr)
dataDir="var/lib/node_%02d" % (nodeId)
dt = datetime.datetime.now()
dateStr="%d_%02d_%02d_%02d_%02d_%02d" % (
dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
stdoutFile="%s/stdout.%s.txt" % (dataDir, dateStr)
stderrFile="%s/stderr.%s.txt" % (dataDir, dateStr)
with open(stdoutFile, 'w') as sout, open(stderrFile, 'w') as serr:
cmd=myCmd + ("" if chainArg is None else (" " + chainArg))
Utils.Print("cmd: %s" % (cmd))
popen=subprocess.Popen(cmd.split(), stdout=sout, stderr=serr)
if cachePopen:
self.popenProc=popen
self.pid=popen.pid
if Utils.Debug: Utils.Print("restart Node host=%s, port=%s, pid=%s, cmd=%s" % (self.host, self.port, self.pid, self.cmd))
def isNodeAlive():
"""wait for node to be responsive."""
try:
return True if self.checkPulse() else False
except (TypeError) as _:
pass
return False
isAlive=Utils.waitForBool(isNodeAlive, timeout)
if isAlive:
Utils.Print("Node relaunch was successfull.")
else:
Utils.Print("ERROR: Node relaunch Failed.")
self.pid=None
return False
self.cmd=cmd
self.killed=False
return True
def trackCmdTransaction(self, trans, ignoreNonTrans=False):
if trans is None:
if Utils.Debug: Utils.Print(" cmd returned transaction: %s" % (trans))
return
if ignoreNonTrans and not Node.isTrans(trans):
if Utils.Debug: Utils.Print(" cmd returned a non-transaction")
return
transId=Node.getTransId(trans)
if Utils.Debug:
status=Node.getTransStatus(trans)
blockNum=Node.getTransBlockNum(trans)
if transId in self.transCache.keys():
replaceMsg="replacing previous trans=\n%s" % json.dumps(self.transCache[transId], indent=2, sort_keys=True)
else:
replaceMsg=""
Utils.Print(" cmd returned transaction id: %s, status: %s, (possible) block num: %s %s" % (transId, status, blockNum, replaceMsg))
self.transCache[transId]=trans
def reportStatus(self):
Utils.Print("Node State:")
Utils.Print(" cmd : %s" % (self.cmd))
self.verifyAlive(silent=True)
Utils.Print(" killed: %s" % (self.killed))
Utils.Print(" host : %s" % (self.host))
Utils.Print(" port : %s" % (self.port))
Utils.Print(" pid : %s" % (self.pid))
status="last getInfo returned None" if not self.infoValid else "at last call to getInfo"
Utils.Print(" hbn : %s (%s)" % (self.lastRetrievedHeadBlockNum, status))
Utils.Print(" lib : %s (%s)" % (self.lastRetrievedLIB, status))
|
the-stack_0_5386 | from flask import render_template, current_app, session, jsonify, request
from info import constants
from info.models import User, News, Category
from info.utils.response_code import RET
from . import index_blue
@index_blue.route('/news_list')
def news_list():
"""
    Fetch the news list data for the home page
:return:
"""
    # 1. Get request parameters
    # news category id
cid = request.args.get("cid", "1")
page = request.args.get("page", "1")
per_page = request.args.get("per_page", "10")
    # 2. Validate parameters
try:
page = int(page)
cid = int(cid)
per_page = int(per_page)
except Exception as e:
current_app.logger.error(e)
        return jsonify(errno=RET.PARAMERR, errmsg="invalid parameters")
filters = [News.status == 0]
    if cid != 1:  # not querying the "latest" category
        # add the category filter condition
        filters.append(News.category_id == cid)
    # 3. Query the data
try:
paginate = News.query.filter(*filters).order_by(News.create_time.desc()).paginate(page, per_page, False)
except Exception as e:
current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="database query error")
    # Items on the current page
    news_model_list = paginate.items  # list of model objects
total_page = paginate.pages
current_page = paginate.page
    # Convert the list of model objects into a list of dicts
news_dict_li = []
for news in news_model_list:
news_dict_li.append(news.to_basic_dict())
data = {
"total_page": total_page,
"current_page": current_page,
"news_dict_li": news_dict_li
}
return jsonify(errno=RET.OK, errmsg="OK", data=data)
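# Illustrative request (added note, not part of the original module):
#   GET /news_list?cid=2&page=1&per_page=10
# returns JSON shaped like {"errno": ..., "errmsg": "OK", "data": {"total_page": ...,
# "current_page": 1, "news_dict_li": [...]}}, with errno codes taken from response_code.RET.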
@index_blue.route('/')
def index():
user_id = session.get("user_id", None)
user = None
if user_id:
        # Try to load the user model from the database
try:
user = User.query.get(user_id)
except Exception as e:
current_app.logger.error(e)
    # Click-ranking news shown in the right-hand panel
news_list = []
try:
news_list = News.query.order_by(News.clicks.desc()).limit(constants.CLICK_RANK_MAX_NEWS)
except Exception as e:
current_app.logger.error(e)
    # Empty list that will hold the ranked news as dicts
    news_dict_li = []
    # Walk the query results and append each object's dict representation
for news in news_list:
news_dict_li.append(news.to_basic_dict())
    # Query the category data so it can be rendered through the template
categories = Category.query.all()
category_li = []
for category in categories:
category_li.append(category.to_dict())
data = {
"user": user.to_dict() if user else None,
"news_dict_li": news_dict_li,
"category_li": category_li
}
return render_template("news/index.html", data=data)
@index_blue.route('/favicon.ico')
def favicon():
return current_app.send_static_file('news/favicon.ico')
|
the-stack_0_5388 |
import numpy as np
import matplotlib.pyplot as plt
def dbtime(x):
return (x/2-2)*(x/2-2)+2
xdbtime = np.arange(0,np.pi*4,0.1)
ydbtime = dbtime(xdbtime)
plt.grid()
plt.xlim(0,10)
plt.ylim(0,10)
plt.title("Fonction dbtime 2")
plt.plot(xdbtime,ydbtime)
plt.savefig('dbtime2.png')
|
the-stack_0_5390 | _base_ = './fovea_r50_fpn_4x4_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
bbox_head=dict(
with_deform=True,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
data = dict(train=dict(pipeline=train_pipeline))
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24) |
the-stack_0_5391 | #------------------------------------------------------------------------------
# Copyright 2020 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
# Name: CleanMRFCache.py
# Description: Cleans MRF Cache files by oldest access-time until free space
# requested has been achieved.
# Version: 20201109
# Requirements: Python
# Required Arguments: -input
# Optional Arguments: -mode -ext -size
# e.g.: -mode = [del,scan], -ext=txt,mrfcache -input=d:/mrfcache_folder
# Usage: python.exe CleanMRFCache.py <arguments>
# Author: Esri Imagery Workflows team
#------------------------------------------------------------------------------
#!/usr/bin/env python
import sys
import operator
import argparse
import os
import ctypes
import platform
def Message(msg, status=0):
try:
if (log is not None):
log.Message(msg, status)
return
except:
pass
print(msg)
    # for any parent processes to receive the stdout in realtime.
sys.stdout.flush()
class Cleaner:
def __init__(self):
pass
def init(self, input_path, extensions=()):
self.m_extensions = extensions
self.m_input_path = input_path.replace('\\', '/')
if (self.m_input_path.endswith('/') is False):
self.m_input_path += '/'
self.m_info = []
return True
def getFreeDiskSpace(self, input_path): # static
try:
fbytes = ctypes.c_ulonglong(0)
ctypes.windll.kernel32.GetDiskFreeSpaceExW(
ctypes.c_wchar_p(input_path),
None,
None,
ctypes.pointer(fbytes))
except:
return -1
return fbytes
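    # Hedged sketch, not part of the original Esri script: the ctypes call above is
    # Windows-only, so a cross-platform variant could rely on shutil.disk_usage.
    # The method name below is an assumption for illustration only.
    def getFreeDiskSpacePortable(self, input_path):
        import shutil
        try:
            # free bytes available on the volume containing input_path
            return shutil.disk_usage(input_path).free
        except OSError:
            return -1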
def getFileInfo(self, root_only=False):
Message('[Generate file list]..')
for r, d, f in os.walk(self.m_input_path):
if (root_only):
if (r != self.m_input_path):
continue
            for file in f:
                (f_, e_) = os.path.splitext(file)
                if ((e_[1:].lower() in self.m_extensions)):
                    mk_path = os.path.join(r, file).replace('\\', '/')
                    try:
                        self.m_info.append({
                            'f': mk_path,
                            's': os.path.getsize(mk_path),
                            'at': os.path.getatime(mk_path)
                            })
                    except Exception as exp:
                        # skip files that disappear or cannot be stat'd
                        Message('Err: (%s)' % (str(exp)))
        return True
def main():
pass
if __name__ == '__main__':
main()
__program_ver__ = 'v1.0'
__program_name__ = 'CleanMRFCache.py %s' % __program_ver__
parser = argparse.ArgumentParser(description='Cleans MRF Cache files by '
'oldest access-time until free space '
'requested has been achieved.\n')
parser.add_argument('-input', help='Input directory', dest='input_path')
parser.add_argument('-mode', help='Processing mode. Valid modes [del]',
dest='mode', default='scan')
parser.add_argument('-ext',
help='Extensions to filter-in. e.g. -ext=mrfcache,txt',
dest='ext')
parser.add_argument('-size', type=int,
help='Free size requested in bytes. e.g. -size=1000000',
dest='size', default=2000000000)
log = None
Message(__program_name__)
Message(parser.description)
args = parser.parse_args()
extensions = ['mrfcache']
# check for extensions
if (args.ext is not None):
ext_ = args.ext.split(',')
for e in ext_:
e = e.strip().lower()
if ((e in extensions) is False):
extensions.append(e)
# ends
# check input path
if (args.input_path is None):
Message('Err: -input is required.')
exit(0)
# ends
# clean-up instance
cln = Cleaner()
cln.init(args.input_path, extensions)
# ends
# let's get the free space
space_available = cln.getFreeDiskSpace(os.path.dirname(args.input_path))
if (space_available == -1): # an error has occured
Message('Err: Unable to get the free-disk-space for the path (%s)' %
(args.input_path))
exit(1)
# ends
space_to_free = args.size  # requested free space, already in bytes (see -size help)
space_available = space_available.value
if (space_available >= space_to_free):
Message('The disk already has the requested free space')
exit(0)
# setup -mode
is_mode = not args.mode is None
arg_mode = args.mode.lower()
Message('Mode (%s)' % arg_mode) # display the user/default selected (-mode)
# ends
ret = cln.getFileInfo()
if (ret is False):
Message('Err: Unable to scan for files. Quitting..')
exit(1)
process = sorted(cln.m_info, key=operator.itemgetter('at'), reverse=False)
print('\nResults:')
tot_savings = 0
for f in process:
print('%s [%s] [%s]' % (f['f'], f['s'], f['at']))
tot_savings += f['s']
if (is_mode):
if (arg_mode == 'del'):
Message('[Del] %s' % (f['f']))
            # let's delete here.
            try:
                os.remove(f['f'])
            except Exception as exp:
                Message('Err: Unable to remove (%s). Skipping..' % (f['f']))
                continue
            space_available += f['s']
            if (space_available >= space_to_free):
                Message('\nRequired disk space has been freed.')
                break
# ends
msg = '\nTotal savings if files get deleted: [%d] bytes.' % (tot_savings)
if (arg_mode == 'del'):
msg = '\nTotal space freed [%d] bytes' % (space_available)
if (space_available < space_to_free):
Message('\nUnable to free space requested.')
Message(msg)
Message('\nDone..')
|
the-stack_0_5392 | """
.. module:: Katna.config
:platform: Platfrom Independent
:synopsis: This module defines some helpful configuration variables
"""
import os
# # Configuration parameters for Image class
class Image:
# default value by which image size to be reduces for processing
down_sample_factor = 8
# Debug flag
DEBUG = False
    # If no candidate crop is found, the crop height will be reduced by this
    # multiplier/factor (crop_height_reduction_factor_in_each_iteration) and the
    # search for candidate crops is resumed.
    # While shrinking, crop height/width are not allowed to drop below
    # 1/(min_image_to_crop_factor) of the image height/width.
min_image_to_crop_factor = 4
crop_height_reduction_factor_in_each_iteration = 0.05
# # Configurations for Scoring crops for crop extractor
class CropScorer:
detail_weight = 0.2 # default weight value for detail parameter
edge_radius = 0.4 # default edge radius
edge_weight = -20 # default edge weight
outside_importance = (
-0.5
) # default value to set if the pixel is outside crop rectangle
rule_of_thirds = True # boolean to set rule of third condition check
saliency_bias = 0.2 # bias color value for saliency(+- error value)
saliency_weight = 1.3 # default edge radius
face_bias = 0.01 # bias color value for face(+- error value)
face_weight = 3.4 # default weight value for face parameter
rects_weight = 1 # default weight value for crop rectangles
# # Configurations for Text detection class
class TextDetector:
# Min Confidence Threshold for Text detection model
min_confidence = 0.9
# Threshold for merging text detection boxes
merge_threshold = 1
# Name of Model files to be used for text detection
frozen_weights = "frozen_east_text_detection.pb"
# Location where model file will be downloaded
cache_subdir = "models"
# Layers Name for text detection
layerNames = ["feature_fusion/Conv_7/Sigmoid", "feature_fusion/concat_3"]
# Download Link for Text detection model
model_download_link = "https://github.com/oyyd/frozen_east_text_detection.pb/raw/master/frozen_east_text_detection.pb"
# # Configurations for Edge Feature class
class EdgeFeature:
# min edge threshold value
min_val_threshold = 100
# Max edge threshold value
max_val_threshold = 200
# aperture_size/size of Sobel kernel for canny edge detector
ksize = 3
# # Configurations for Face detection Feature class
class FaceFeature:
# Model file name to be used for face detection
model_file = "res10_300x300_ssd_iter_140000_fp16.caffemodel"
    # Model definition file name to be used for face detection
prototxt_file = "deploy.prototxt"
# Location where model file will be downloaded
cache_subdir = "models"
# Min Confidence Threshold for face detection model
confidence = 0.5
# Download Link for face detection model defintion file
prototxt_download_link = "https://raw.githubusercontent.com/opencv/opencv/master/samples/dnn/face_detector/deploy.prototxt"
# Download Link for face detection model
modelfile_download_link = "https://raw.githubusercontent.com/opencv/opencv_3rdparty/dnn_samples_face_detector_20180205_fp16/res10_300x300_ssd_iter_140000_fp16.caffemodel"
# # Configuration parameters for Video class
class Video:
# Debug flag
DEBUG = False
min_video_duration = 5.0
# consume % of memory during video keyframe extraction
# 80% of available memory will be consumed
memory_consumption_threshold = 0.80
    # assumed number of frames within which 1 candidate frame might be available;
    # used to estimate how long it takes to reach the memory threshold when all
    # frames are collected but not all of them are candidate frames.
    # Currently we assume 1 in every 5 frames is a candidate frame.
    assumed_no_of_frames_per_candidate_frame = 5
    # if the video duration is greater than this number, the video is treated as a large video
video_split_threshold_in_minutes = 20
# https://trac.ffmpeg.org/wiki/Encode/H.264
    # Keep this value between 20 and 30
video_compression_crf_parameter = 23
    video_compression_codec = "libx264"  # Currently only "libx264" is supported
compression_output_file_extension = "mp4"
# Supported/valid video extensions supported by ffmpeg
# You can generate updated list by using following shell script on MacOSX or Linux
# $ ffmpeg -demuxers -hide_banner | tail -n +5 | cut -d' ' -f4 | xargs -I{} ffmpeg -hide_banner -h demuxer={} | grep 'Common extensions' | cut -d' ' -f7 | tr ',' $'\n' | tr -d '.'
video_extensions = [
".str",
".aa",
".aac",
".ac3",
".acm",
".adf",
".adp",
".dtk",
".ads",
".ss2",
".adx",
".aea",
".afc",
".aix",
".al",
".ape",
".apl",
".mac",
".aptx",
".aptxhd",
".aqt",
".ast",
".avi",
".avr",
".bfstm",
".bcstm",
".bit",
".bmv",
".brstm",
".cdg",
".cdxl",
".xl",
".c2",
".302",
".daud",
".str",
".dss",
".dts",
".dtshd",
".dv",
".dif",
".cdata",
".eac3",
".paf",
".fap",
".flm",
".flac",
".flv",
".fsb",
".g722",
".722",
".tco",
".rco",
".g723_1",
".g729",
".genh",
".gsm",
".h261",
".h26l",
".h264",
".264",
".avc",
".hevc",
".h265",
".265",
".idf",
".cgi",
".sf",
".ircam",
".ivr",
".flv",
".lvf",
".m4v",
".mkv",
".mk3d",
".mka",
".mks",
".mjpg",
".mjpeg",
".mpo",
".j2k",
".mlp",
".mov",
".mp4",
".m4a",
".3gp",
".3g2",
".mj2",
".mp2",
".mp3",
".m2a",
".mpa",
".mpc",
".mjpg",
".txt",
".mpl2",
".sub",
".msf",
".mtaf",
".ul",
".musx",
".mvi",
".mxg",
".v",
".nist",
".sph",
".nsp",
".nut",
".ogg",
".oma",
".omg",
".aa3",
".pjs",
".pvf",
".yuv",
".cif",
".qcif",
".rgb",
".rt",
".rsd",
".rsd",
".rso",
".sw",
".sb",
".smi",
".sami",
".sbc",
".msbc",
".sbg",
".scc",
".sdr2",
".sds",
".sdx",
".shn",
".vb",
".son",
".sln",
".mjpg",
".stl",
".sub",
".sub",
".sup",
".svag",
".tak",
".thd",
".tta",
".ans",
".art",
".asc",
".diz",
".ice",
".nfo",
".txt",
".vt",
".ty",
".ty+",
".uw",
".ub",
".v210",
".yuv10",
".vag",
".vc1",
".viv",
".idx",
".vpk",
".txt",
".vqf",
".vql",
".vqe",
".vtt",
".wsd",
".xmv",
".xvag",
".yop",
".y4m",
]
# Configuration parameters for mediapipe
class MediaPipe:
class AutoFlip:
        # Rerun is required due to the autoflip issue mentioned here:
        # https://github.com/google/mediapipe/issues/497
RERUN_LIMIT = 2
# Models folder location
MODELS_FOLDER_LOCATION = os.path.join(os.getcwd(), "mediapipe", "models")
# pbtxt temp folder name
TMP_PBTXT_FOLDER_NAME = "temp_pbtxt"
TMP_PBTXT_FOLDER_PATH = os.path.join(os.getcwd(), TMP_PBTXT_FOLDER_NAME)
# Default pbtxt and build cmd
CONFIG_FILE_PBTXT = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "mediapipe_autoflip.pbtxt"
)
BUILD_CMD = "run_autoflip"
# user friendly conf keys
ENFORCE_FEATURES_KEYNAME = "ENFORCE_FEATURES"
STABALIZATION_THRESHOLD_KEYNAME = "STABALIZATION_THRESHOLD"
BLUR_AREA_OPACITY_KEYNAME = "BLUR_AREA_OPACITY"
# DEFAULT VALUES IN PBTXT
DEFAULT_BLUR_AREA_OPACITY = 0.6
DEFAULT_MOTION_STABALIZATION_THRESHOLD = 0.5
DEFAULT_FEATURE_SIGNAL_VALUE = "false"
# ENFORCE_FEATURES Keys
_FACE_CORE_LANDMARKS = "FACE_CORE_LANDMARKS"
_FACE_FULL = "FACE_FULL"
_FACE_ALL_LANDMARKS = "FACE_ALL_LANDMARKS"
_HUMAN = "HUMAN"
_PET = "PET"
_CAR = "CAR"
_OBJECT = "OBJECT"
        # the variable names below should match the key names for set_conf to work
        # smoothly
# ENFORCE_FEATURES list
ENFORCE_FEATURES = {
_FACE_CORE_LANDMARKS: False,
_FACE_ALL_LANDMARKS: False,
_FACE_FULL: False,
_HUMAN: False,
_PET: False,
_CAR: False,
_OBJECT: False,
}
# % AREA from center where most of the content is
# usually applied when content is focused near center
STABALIZATION_THRESHOLD = DEFAULT_MOTION_STABALIZATION_THRESHOLD
# opacity of blur area
BLUR_AREA_OPACITY = DEFAULT_BLUR_AREA_OPACITY
@classmethod
def get_pbtxt_mapping(cls):
return {
cls.ENFORCE_FEATURES_KEYNAME: "signal_settings",
cls.STABALIZATION_THRESHOLD_KEYNAME: "motion_stabilization_threshold_percent",
cls.BLUR_AREA_OPACITY_KEYNAME: "overlay_opacity",
}
@classmethod
def get_conf(cls):
"""Gets the current config
:return: dictionary containing the current config
:rtype: dict
"""
return {
cls.ENFORCE_FEATURES_KEYNAME: cls.ENFORCE_FEATURES,
cls.STABALIZATION_THRESHOLD_KEYNAME: cls.STABALIZATION_THRESHOLD,
cls.BLUR_AREA_OPACITY_KEYNAME: cls.BLUR_AREA_OPACITY,
}
@classmethod
def set_conf(cls, config):
"""Sets the config passed
:param config: The configuration to set.
:type config: dict
"""
for attr in config.keys():
current_conf = cls.get_conf()
if attr in current_conf.keys():
if attr == cls.ENFORCE_FEATURES_KEYNAME:
updated_attr_dict = {**current_conf[attr], **config[attr]}
setattr(cls, attr, updated_attr_dict)
else:
setattr(cls, attr, config[attr])
else:
raise Exception(
" Invalid configuration. Use get_conf method to see existing configuration or refer documentation."
)
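# Hedged usage sketch, not part of the original Katna source: reading and
# overriding the AutoFlip configuration above through get_conf/set_conf. The
# function name is illustrative; the keys reuse constants defined in the class.
def _example_autoflip_conf():
    current = MediaPipe.AutoFlip.get_conf()
    # enable face-based reframing and soften the blur overlay
    MediaPipe.AutoFlip.set_conf({
        MediaPipe.AutoFlip.ENFORCE_FEATURES_KEYNAME: {MediaPipe.AutoFlip._FACE_FULL: True},
        MediaPipe.AutoFlip.BLUR_AREA_OPACITY_KEYNAME: 0.4,
    })
    return current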
class ImageSelector:
# Setting for optimum Brightness values
min_brightness_value = 10.0
max_brightness_value = 90.0
brightness_step = 2.0
# Setting for optimum Contrast/Entropy values
min_entropy_value = 1.0
max_entropy_value = 10.0
entropy_step = 0.5
class FrameExtractor:
# Setting local maxima criteria
USE_LOCAL_MAXIMA = True
    # Length of the sliding window used for taking differences
len_window = 20
# Chunk size of Images to be processed at a time in memory
max_frames_in_chunk = 500
# Type of smoothening window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman' flat window will produce a moving average smoothing.
window_type = "hanning"
|
the-stack_0_5393 | from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
import sys
from sumatra import commands
from io import StringIO
modes = list(commands.modes)
modes.sort()
usage = {}
sys.argv[0] = 'smt'
for mode in modes:
main = getattr(commands, mode)
usage[mode] = StringIO()
sys.stdout = usage[mode]
try:
main(['--help'])
except:
pass
sys.stdout = sys.__stdout__
f = open("command_reference.txt", "w")
f.write("=====================\n")
f.write("smt command reference\n")
f.write("=====================\n\n")
for mode in modes:
sio = usage[mode]
f.write(mode + '\n')
f.write('-'*len(mode) + '\n::\n\n ')
sio.seek(0)
f.write(" ".join(sio.readlines()) + '\n')
sio.close()
f.close()
|
the-stack_0_5394 | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Optional list of dependencies required by the package
dependencies = ['torch', 'torchvision']
# from torch.hub import load_state_dict_from_url
import torch
from .resnet import ResNet, Bottleneck
model_urls = {
'resnext101_32x8d': '/mnt/soulfs2/kyyue/research/ig_models/ig_resnext101_32x8-c38310e5.pth',
'resnext101_32x16d': '/mnt/soulfs2/kyyue/research/ig_models/ig_resnext101_32x16-c6f796b0.pth',
'resnext101_32x32d': '/mnt/soulfs2/kyyue/research/ig_models/ig_resnext101_32x32-e4b90b00.pth',
'resnext101_32x48d': '/mnt/soulfs2/kyyue/research/ig_models/ig_resnext101_32x48-3e41cc8a.pth'
}
def _resnext(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
# state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
state_dict = torch.load(model_urls[arch])
model.load_state_dict(state_dict)
return model
def resnext101_32x8d_wsl(progress=True, **kwargs):
"""Constructs a ResNeXt-101 32x8 model pre-trained on weakly-supervised data
and finetuned on ImageNet from Figure 5 in
`"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>`_
Args:
progress (bool): If True, displays a progress bar of the download to stderr.
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnext('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], True, progress, **kwargs)
def resnext101_32x16d_wsl(progress=True, **kwargs):
"""Constructs a ResNeXt-101 32x16 model pre-trained on weakly-supervised data
and finetuned on ImageNet from Figure 5 in
`"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>`_
Args:
progress (bool): If True, displays a progress bar of the download to stderr.
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 16
return _resnext('resnext101_32x16d', Bottleneck, [3, 4, 23, 3], True, progress, **kwargs)
def resnext101_32x32d_wsl(progress=True, **kwargs):
"""Constructs a ResNeXt-101 32x32 model pre-trained on weakly-supervised data
and finetuned on ImageNet from Figure 5 in
`"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>`_
Args:
progress (bool): If True, displays a progress bar of the download to stderr.
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 32
return _resnext('resnext101_32x32d', Bottleneck, [3, 4, 23, 3], True, progress, **kwargs)
def resnext101_32x48d_wsl(progress=True, **kwargs):
"""Constructs a ResNeXt-101 32x48 model pre-trained on weakly-supervised data
and finetuned on ImageNet from Figure 5 in
`"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>`_
Args:
progress (bool): If True, displays a progress bar of the download to stderr.
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 48
return _resnext('resnext101_32x48d', Bottleneck, [3, 4, 23, 3], True, progress, **kwargs)
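# Hedged usage sketch, not part of the original hubconf: loading one of the WSL
# ResNeXt variants defined above and running a dummy forward pass. Note that
# model_urls point to local checkpoint paths, so those files must already exist.
def _example_wsl_resnext():
    model = resnext101_32x8d_wsl(progress=False)
    model.eval()
    with torch.no_grad():
        out = model(torch.randn(1, 3, 224, 224))  # logits for the 1000 ImageNet classes
    return out.shape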
|
the-stack_0_5395 | import pytest
from app.main.forms import get_placeholder_form_instance
def test_form_class_not_mutated(app_):
with app_.test_request_context(method="POST", data={"placeholder_value": ""}):
form1 = get_placeholder_form_instance("name", {}, "sms", optional_placeholder=False)
form2 = get_placeholder_form_instance("city", {}, "sms", optional_placeholder=True)
assert not form1.validate_on_submit()
assert form2.validate_on_submit()
assert str(form1.placeholder_value.label) == '<label for="placeholder_value">name</label>'
assert str(form2.placeholder_value.label) == '<label for="placeholder_value">city</label>'
@pytest.mark.parametrize(
"service_can_send_international_sms, placeholder_name, template_type, value, expected_error",
[
(False, "email address", "email", "", "Enter an email address"),
(False, "email address", "email", "12345", "Enter a valid email address"),
(
False,
"email address",
"email",
"“bad”@email-address.com",
"Enter a valid email address",
),
(False, "email address", "email", "test+'éüî@example.com", None),
(False, "email address", "email", "Tom!the#[email protected]", None),
(False, "email address", "email", "Jean-o'briå[email protected]", None),
(False, "email address", "email", "Tom!the#[email protected]", None),
(False, "email address", "email", "2+2={5*4/5}@mailinator.com", None),
(False, "email address", "email", "[email protected]", None),
(False, "email address", "email", "[email protected]", None),
(False, "phone number", "sms", "", "This cannot be empty"),
(False, "phone number", "sms", "+4966921809", "Not a valid phone number"),
(False, "phone number", "sms", "6502532222", None),
(False, "phone number", "sms", "+16502532222", None),
(True, "phone number", "sms", "+123", "Not a valid phone number"),
(True, "phone number", "sms", "+16502532222", None),
(True, "phone number", "sms", "+4966921809", None),
(False, "anything else", "sms", "", "This cannot be empty"),
(False, "anything else", "email", "", "This cannot be empty"),
(True, "phone number", "sms", "invalid", "Not a valid phone number"),
(True, "phone number", "email", "invalid", None),
(True, "phone number", "letter", "invalid", None),
(True, "email address", "sms", "invalid", None),
],
)
def test_validates_recipients(
app_,
placeholder_name,
template_type,
value,
service_can_send_international_sms,
expected_error,
):
with app_.test_request_context(method="POST", data={"placeholder_value": value}):
form = get_placeholder_form_instance(
placeholder_name,
{},
template_type,
allow_international_phone_numbers=service_can_send_international_sms,
)
if expected_error:
assert not form.validate_on_submit()
assert form.placeholder_value.errors[0] == expected_error
else:
assert form.validate_on_submit()
|
the-stack_0_5397 | """
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
from programytest.storage.asserts.store.assert_denormals import DenormalStoreAsserts
from programy.storage.stores.sql.store.lookups import SQLDenormalStore
from programy.storage.stores.sql.engine import SQLStorageEngine
from programy.storage.stores.sql.config import SQLStorageConfiguration
import programytest.storage.engines as Engines
class SQLDenormalStoreTests(DenormalStoreAsserts):
@unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
def test_initialise(self):
config = SQLStorageConfiguration()
engine = SQLStorageEngine(config)
engine.initialise()
store = SQLDenormalStore(engine)
self.assertEqual(store.storage_engine, engine)
@unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
def test_upload_from_file(self):
config = SQLStorageConfiguration()
engine = SQLStorageEngine(config)
engine.initialise()
store = SQLDenormalStore(engine)
self.assert_upload_from_file(store)
|
the-stack_0_5398 | from vk_bot.core.modules.basicplug import BasicPlug
from vk_bot.core.sql.vksql import *
from vk_bot.core.sql.sqlgame import *
class Admins(BasicPlug):
command = ("бан", "разбан:", "вип",)
doc = "Забанить или разбанить"
available_for = "admins"
def main(self):
requests = self.text[0]
        try:
            uid = self.event.object.reply_message['from_id']
        except Exception:
            # the command was not sent as a reply to another message
            uid = None
if requests == "бан":
tableadd("ban", "id", uid, one=True)
self.sendmsg("забанен нахой", "video367919273_456240239")
elif requests == "разбан":
tablerm("ban", "id", uid)
elif requests == "вип":
tableadd("vips", "id", event.object.reply_message['from_id'])
elif requests == "рассылка":
sendall(self.event, self.text, self.vk)
|
the-stack_0_5399 | from typing import Optional, Union
from snowflake.connector import SnowflakeConnection
from dbnd import log_duration
from dbnd._core.plugin.dbnd_plugins import is_plugin_enabled
from dbnd._core.tracking.metrics import log_data, log_target_operation
from dbnd_snowflake.extract_sql_query import TableTargetOperation
from dbnd_snowflake.snowflake_config import SnowflakeConfig
from dbnd_snowflake.snowflake_controller import SnowflakeController
def log_snowflake_table(
table_name: str,
connection_string: Union[str, SnowflakeConnection],
database: str,
schema: str,
key: Optional[str] = None,
with_preview: Optional[bool] = None,
with_schema: Optional[bool] = None,
raise_on_error: bool = False,
):
"""
    :param table_name: name of the Snowflake table to log
    :param connection_string: either a connection string or an actual SnowflakeConnection
    :param database: Snowflake database containing the table
    :param schema: Snowflake schema containing the table
    :param key: optional metric key to log the table under
    :param with_preview: whether to log a preview of the table data
    :param with_schema: whether to log the table schema (and size)
    :param raise_on_error: raise instead of swallowing errors while logging
    :return: None
"""
if not is_plugin_enabled("dbnd-snowflake", module_import="dbnd_snowflake"):
return
from dbnd_snowflake import snowflake_values
with log_duration(
"log_snowflake_table__time_seconds", source="system"
), SnowflakeController(connection_string) as snowflake_ctrl:
config = SnowflakeConfig()
snowflake_table = snowflake_values.SnowflakeTable(
snowflake_ctrl, database, schema, table_name, config.table_preview_rows,
)
log_data(
key or "snowflake_table.{}".format(snowflake_table),
snowflake_table,
with_preview=with_preview,
with_schema=with_schema,
with_size=with_schema,
with_histograms=False,
raise_on_error=raise_on_error,
)
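# Hedged usage sketch, not part of the original module: how log_snowflake_table
# might be called from tracked user code. The connection string, database, schema
# and table names below are placeholders, not real credentials or objects.
def _example_log_snowflake_table():
    log_snowflake_table(
        table_name="CUSTOMERS",
        connection_string="snowflake://user:password@account/DEMO_DB/PUBLIC?warehouse=DEMO_WH",
        database="DEMO_DB",
        schema="PUBLIC",
        key="snowflake_table.customers",
        with_preview=True,
        with_schema=True,
    )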
def log_snowflake_table_targets(
table_op: TableTargetOperation,
connection_string: Union[str, SnowflakeConnection],
with_preview: Optional[bool] = None,
with_schema: Optional[bool] = None,
):
if not is_plugin_enabled("dbnd-snowflake", module_import="dbnd_snowflake"):
return
from dbnd_snowflake.snowflake_values import SnowflakeTable
with SnowflakeController(connection_string) as snowflake_ctrl:
snowflake_table = SnowflakeTable.from_table(snowflake_ctrl, table_op.name)
log_target_operation(
name=table_op.name,
target=table_op.path,
operation_type=table_op.operation,
success=table_op.success,
data=snowflake_table,
with_preview=with_preview,
with_schema=with_schema,
)
|
the-stack_0_5402 | # Copyright (c) Facebook, Inc. and its affiliates.
import torch
from torch.nn import functional as F
from detectron2.structures import Instances, ROIMasks
# perhaps should rename to "resize_instance"
def detector_postprocess(
results: Instances, output_height: int, output_width: int, mask_threshold: float = 0.5
):
"""
Resize the output instances.
The input images are often resized when entering an object detector.
As a result, we often need the outputs of the detector in a different
resolution from its inputs.
This function will resize the raw outputs of an R-CNN detector
to produce outputs according to the desired output resolution.
Args:
results (Instances): the raw outputs from the detector.
`results.image_size` contains the input image resolution the detector sees.
This object might be modified in-place.
output_height, output_width: the desired output resolution.
Returns:
Instances: the resized output from the model, based on the output resolution
"""
# Change to 'if is_tracing' after PT1.7
if isinstance(output_height, torch.Tensor):
# Converts integer tensors to float temporaries to ensure true
# division is performed when computing scale_x and scale_y.
output_width_tmp = output_width.float()
output_height_tmp = output_height.float()
new_size = torch.stack([output_height, output_width])
else:
new_size = (output_height, output_width)
output_width_tmp = output_width
output_height_tmp = output_height
scale_x, scale_y = (
output_width_tmp / results.image_size[1],
output_height_tmp / results.image_size[0],
)
results = Instances(new_size, **results.get_fields())
if results.has("pred_boxes"):
output_boxes = results.pred_boxes
elif results.has("proposal_boxes"):
output_boxes = results.proposal_boxes
else:
output_boxes = None
assert output_boxes is not None, "Predictions must contain boxes!"
output_boxes.scale(scale_x, scale_y)
output_boxes.clip(results.image_size)
results = results[output_boxes.nonempty()]
if results.has("pred_masks"):
if isinstance(results.pred_masks, ROIMasks):
roi_masks = results.pred_masks
else:
# pred_masks is a tensor of shape (N, 1, M, M)
roi_masks = ROIMasks(results.pred_masks[:, 0, :, :])
results.pred_masks = roi_masks.to_bitmasks(
results.pred_boxes, output_height, output_width, mask_threshold
).tensor # TODO return ROIMasks/BitMask object in the future
if results.has("pred_keypoints"):
results.pred_keypoints[:, :, 0] *= scale_x
results.pred_keypoints[:, :, 1] *= scale_y
return results
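# Hedged usage sketch, not part of the original detectron2 source: resizing raw
# predictions produced at a 640x480 network input back to the original 1280x960
# image. The Instances/Boxes construction below is illustrative only.
def _example_detector_postprocess():
    from detectron2.structures import Boxes
    results = Instances((480, 640))
    results.pred_boxes = Boxes(torch.tensor([[10.0, 10.0, 100.0, 120.0]]))
    results.scores = torch.tensor([0.9])
    results.pred_classes = torch.tensor([0])
    return detector_postprocess(results, output_height=960, output_width=1280)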
def sem_seg_postprocess(result, img_size, output_height, output_width):
"""
Return semantic segmentation predictions in the original resolution.
    The input images are often resized when entering the semantic segmentor. Moreover, in some
    cases, they are also padded inside the segmentor to be divisible by the maximum network stride.
As a result, we often need the predictions of the segmentor in a different
resolution from its inputs.
Args:
result (Tensor): semantic segmentation prediction logits. A tensor of shape (C, H, W),
where C is the number of classes, and H, W are the height and width of the prediction.
img_size (tuple): image size that segmentor is taking as input.
output_height, output_width: the desired output resolution.
Returns:
semantic segmentation prediction (Tensor): A tensor of the shape
(C, output_height, output_width) that contains per-pixel soft predictions.
"""
result = result[:, : img_size[0], : img_size[1]].expand(1, -1, -1, -1)
result = F.interpolate(
result, size=(output_height, output_width), mode="bilinear", align_corners=False
)[0]
return result
|
the-stack_0_5403 | """Platform to present any Tuya DP as a binary sensor."""
import logging
from functools import partial
import voluptuous as vol
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES_SCHEMA,
DOMAIN,
BinarySensorEntity,
)
from homeassistant.const import CONF_DEVICE_CLASS
from .common import LocalTuyaEntity, async_setup_entry
_LOGGER = logging.getLogger(__name__)
CONF_STATE_ON = "state_on"
CONF_STATE_OFF = "state_off"
def flow_schema(dps):
"""Return schema used in config flow."""
return {
vol.Required(CONF_STATE_ON, default="True"): str,
vol.Required(CONF_STATE_OFF, default="False"): str,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
}
class LocaltuyaBinarySensor(LocalTuyaEntity, BinarySensorEntity):
"""Representation of a Tuya binary sensor."""
def __init__(
self,
device,
config_entry,
sensorid,
**kwargs,
):
"""Initialize the Tuya binary sensor."""
super().__init__(device, config_entry, sensorid, _LOGGER, **kwargs)
self._is_on = False
@property
def is_on(self):
"""Return sensor state."""
return self._is_on
@property
def device_class(self):
"""Return the class of this device."""
return self._config.get(CONF_DEVICE_CLASS)
def status_updated(self):
"""Device status was updated."""
state = str(self.dps(self._dp_id)).lower()
if state == self._config[CONF_STATE_ON].lower():
self._is_on = True
elif state == self._config[CONF_STATE_OFF].lower():
self._is_on = False
else:
self.warning(
"State for entity %s did not match state patterns", self.entity_id
)
async_setup_entry = partial(
async_setup_entry, DOMAIN, LocaltuyaBinarySensor, flow_schema
)
|
the-stack_0_5404 | # -*- coding: utf-8 -*-
import argparse
import os
from pprint import pprint
import subprocess
import sys
# input
parser = argparse.ArgumentParser()
parser.add_argument('-in', dest="INPUT_FILES", default="path/to/*.mp4", help="Input media file pattern")
parser.add_argument('-width', dest="TARGET_WIDTH", default=640, type=int, help="Target width")
parser.add_argument('-height', dest="TARGET_HEIGHT", default=360, type=int, help="Target height")
parser.add_argument('-out', dest="OUTPUT_FILE", default="output/%s.mp4", help="Media output file pattern")
a = parser.parse_args()
from lib.io_utils import *
makeDirectories([a.OUTPUT_FILE])
filenames = getFilenames(a.INPUT_FILES)
for infile in filenames:
basefn = getBasename(infile)
command = ['ffmpeg',
'-y',
'-i', infile,
'-vf', 'scale=%s:%s' % (a.TARGET_WIDTH, a.TARGET_HEIGHT),
a.OUTPUT_FILE % basefn]
print(" ".join(command))
finished = subprocess.check_call(command)
print("Done.")
|
the-stack_0_5407 | from typing import Dict, Callable
from optimade.models import (
DataType,
ErrorResponse,
StructureResource,
ReferenceResource,
)
from optimade.server.exceptions import POSSIBLE_ERRORS
__all__ = ("ENTRY_INFO_SCHEMAS", "ERROR_RESPONSES", "retrieve_queryable_properties")
ENTRY_INFO_SCHEMAS: Dict[str, Callable[[None], Dict]] = {
"structures": StructureResource.schema,
"references": ReferenceResource.schema,
}
"""This dictionary is used to define the `/info/<entry_type>` endpoints."""
ERROR_RESPONSES: Dict[int, Dict] = {
err.status_code: {"model": ErrorResponse, "description": err.title}
for err in POSSIBLE_ERRORS
}
def retrieve_queryable_properties(
schema: dict,
queryable_properties: list = None,
entry_type: str = None,
) -> dict:
"""Recursively loops through the schema of a pydantic model and
resolves all references, returning a dictionary of all the
OPTIMADE-queryable properties of that model.
Parameters:
schema: The schema of the pydantic model.
queryable_properties: The list of properties to find in the schema.
entry_type: An optional entry type for the model. Will be used to
lookup schemas for any config-defined fields.
Returns:
A flat dictionary with properties as keys, containing the field
description, unit, sortability, support level, queryability
and type, where provided.
"""
properties = {}
for name, value in schema["properties"].items():
if not queryable_properties or name in queryable_properties:
if "$ref" in value:
path = value["$ref"].split("/")[1:]
sub_schema = schema.copy()
while path:
next_key = path.pop(0)
sub_schema = sub_schema[next_key]
sub_queryable_properties = sub_schema["properties"].keys()
properties.update(
retrieve_queryable_properties(sub_schema, sub_queryable_properties)
)
else:
properties[name] = {"description": value.get("description", "")}
# Update schema with extension keys provided they are not None
for key in [_ for _ in ("unit", "queryable", "support") if _ in value]:
properties[name][key] = value[key]
# All properties are sortable with the MongoDB backend.
# While the result for sorting lists may not be as expected, they are still sorted.
properties[name]["sortable"] = value.get("sortable", True)
# Try to get OpenAPI-specific "format" if possible, else get "type"; a mandatory OpenAPI key.
properties[name]["type"] = DataType.from_json_type(
value.get("format", value.get("type"))
)
# If specified, check the config for any additional well-described provider fields
if entry_type:
from optimade.server.config import CONFIG
described_provider_fields = [
field
for field in CONFIG.provider_fields.get(entry_type, {})
if isinstance(field, dict)
]
for field in described_provider_fields:
name = f"_{CONFIG.provider.prefix}_{field['name']}"
properties[name] = {k: field[k] for k in field if k != "name"}
properties[name]["sortable"] = field.get("sortable", True)
return properties
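# Hedged usage sketch, not part of the original optimade source: building the flat
# property dictionary for the "structures" entry type, roughly what the
# /info/structures endpoint relies on. The function name is illustrative.
def _example_structures_properties():
    schema = ENTRY_INFO_SCHEMAS["structures"]()
    queryable = list(schema["properties"].keys())
    return retrieve_queryable_properties(schema, queryable)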
|
the-stack_0_5409 | import sys
sys.path.insert(0, '../')
import tornado_dynamodb
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
# General information about the project.
project = 'tornado-dynamodb'
copyright = '2015, Gavin M. Roy'
author = 'Gavin M. Roy'
release = tornado_dynamodb.__version__
version = '.'.join(release.split('.')[0:1])
language = None
exclude_patterns = ['_build']
pygments_style = 'sphinx'
todo_include_todos = True
html_static_path = ['_static']
htmlhelp_basename = 'tornado-dynamodb'
intersphinx_mapping = {'https://docs.python.org/': None,
'http://www.tornadoweb.org/en/stable/': None}
|
the-stack_0_5411 | """empty message
Revision ID: aa989b9b2862
Revises: 7223a3ac4f30
Create Date: 2021-03-29 19:41:56.312406
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'aa989b9b2862'
down_revision = '7223a3ac4f30'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('campaigns',
sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=50), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('events',
sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=50), nullable=True),
sa.Column('type', sa.String(length=50), nullable=True),
sa.Column('campaign_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['campaign_id'], ['campaigns.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('events_users',
sa.Column('event_id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['event_id'], ['events.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('event_id', 'user_id')
)
op.create_table('phone_bank_events',
sa.Column('id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['id'], ['events.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('phone_bank_tenants',
sa.Column('phone_bank_event_id', sa.Integer(), nullable=False),
sa.Column('defendant_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['defendant_id'], ['defendants.id'], ),
sa.ForeignKeyConstraint(['phone_bank_event_id'], ['phone_bank_events.id'], ),
sa.PrimaryKeyConstraint('phone_bank_event_id', 'defendant_id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('phone_bank_tenants')
op.drop_table('phone_bank_events')
op.drop_table('events_users')
op.drop_table('events')
op.drop_table('campaigns')
# ### end Alembic commands ###
|
the-stack_0_5412 | from setuptools import setup
install_requires = [
r.strip() for r in open('requirements.txt')
if r.strip() and not r.strip().startswith('#')
]
setup(
name="aiokafka_rpc",
version="1.3.1.3",
author='Kostiantyn Andrusenko',
author_email='[email protected]',
description=("RPC over Apache Kafka for Python using asyncio"),
license="Apache Software License",
keywords="aiokafka_rpc",
url="https://github.com/fabregas/aiokafka_rpc",
packages=["aiokafka_rpc"],
install_requires=install_requires,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Topic :: Software Development :: Libraries",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3"
],
)
|
the-stack_0_5415 | import base64
import datetime
import json
import logging
import os
import time
from functools import reduce
import cv2
import gevent
import numpy as np
from flask import (Blueprint, Flask, Response, current_app, jsonify,
make_response, request)
from flask_sockets import Sockets
from peewee import SqliteDatabase, operator, fn, DoesNotExist
from playhouse.shortcuts import model_to_dict
from frigate.const import CLIPS_DIR, RECORD_DIR
from frigate.models import Event
from frigate.stats import stats_snapshot
from frigate.util import calculate_region
from frigate.version import VERSION
logger = logging.getLogger(__name__)
bp = Blueprint('frigate', __name__)
ws = Blueprint('ws', __name__)
class MqttBackend():
"""Interface for registering and updating WebSocket clients."""
def __init__(self, mqtt_client, topic_prefix):
self.clients = list()
self.mqtt_client = mqtt_client
self.topic_prefix = topic_prefix
def register(self, client):
"""Register a WebSocket connection for Mqtt updates."""
self.clients.append(client)
def publish(self, message):
try:
json_message = json.loads(message)
json_message = {
'topic': f"{self.topic_prefix}/{json_message['topic']}",
'payload': json_message['payload'],
'retain': json_message.get('retain', False)
}
except:
logger.warning("Unable to parse websocket message as valid json.")
return
logger.debug(f"Publishing mqtt message from websockets at {json_message['topic']}.")
self.mqtt_client.publish(json_message['topic'], json_message['payload'], retain=json_message['retain'])
def run(self):
def send(client, userdata, message):
"""Sends mqtt messages to clients."""
try:
logger.debug(f"Received mqtt message on {message.topic}.")
ws_message = json.dumps({
'topic': message.topic.replace(f"{self.topic_prefix}/",""),
'payload': message.payload.decode()
})
except:
# if the payload can't be decoded don't relay to clients
logger.debug(f"MQTT payload for {message.topic} wasn't text. Skipping...")
return
for client in self.clients:
try:
client.send(ws_message)
except:
logger.debug("Removing websocket client due to a closed connection.")
self.clients.remove(client)
self.mqtt_client.message_callback_add(f"{self.topic_prefix}/#", send)
def start(self):
"""Maintains mqtt subscription in the background."""
gevent.spawn(self.run)
def create_app(frigate_config, database: SqliteDatabase, stats_tracking, detected_frames_processor, mqtt_client):
app = Flask(__name__)
sockets = Sockets(app)
@app.before_request
def _db_connect():
database.connect()
@app.teardown_request
def _db_close(exc):
if not database.is_closed():
database.close()
app.frigate_config = frigate_config
app.stats_tracking = stats_tracking
app.detected_frames_processor = detected_frames_processor
app.register_blueprint(bp)
sockets.register_blueprint(ws)
app.mqtt_backend = MqttBackend(mqtt_client, frigate_config.mqtt.topic_prefix)
app.mqtt_backend.start()
return app
@bp.route('/')
def is_healthy():
return "Frigate is running. Alive and healthy!"
@bp.route('/events/summary')
def events_summary():
has_clip = request.args.get('has_clip', type=int)
has_snapshot = request.args.get('has_snapshot', type=int)
clauses = []
if not has_clip is None:
clauses.append((Event.has_clip == has_clip))
if not has_snapshot is None:
clauses.append((Event.has_snapshot == has_snapshot))
if len(clauses) == 0:
clauses.append((1 == 1))
groups = (
Event
.select(
Event.camera,
Event.label,
fn.strftime('%Y-%m-%d', fn.datetime(Event.start_time, 'unixepoch', 'localtime')).alias('day'),
Event.zones,
fn.COUNT(Event.id).alias('count')
)
.where(reduce(operator.and_, clauses))
.group_by(
Event.camera,
Event.label,
fn.strftime('%Y-%m-%d', fn.datetime(Event.start_time, 'unixepoch', 'localtime')),
Event.zones
)
)
return jsonify([e for e in groups.dicts()])
@bp.route('/events/<id>')
def event(id):
try:
return model_to_dict(Event.get(Event.id == id))
except DoesNotExist:
return "Event not found", 404
@bp.route('/events/<id>/thumbnail.jpg')
def event_thumbnail(id):
format = request.args.get('format', 'ios')
thumbnail_bytes = None
try:
event = Event.get(Event.id == id)
thumbnail_bytes = base64.b64decode(event.thumbnail)
except DoesNotExist:
# see if the object is currently being tracked
try:
for camera_state in current_app.detected_frames_processor.camera_states.values():
if id in camera_state.tracked_objects:
tracked_obj = camera_state.tracked_objects.get(id)
if not tracked_obj is None:
thumbnail_bytes = tracked_obj.get_thumbnail()
except:
return "Event not found", 404
if thumbnail_bytes is None:
return "Event not found", 404
# android notifications prefer a 2:1 ratio
if format == 'android':
jpg_as_np = np.frombuffer(thumbnail_bytes, dtype=np.uint8)
img = cv2.imdecode(jpg_as_np, flags=1)
thumbnail = cv2.copyMakeBorder(img, 0, 0, int(img.shape[1]*0.5), int(img.shape[1]*0.5), cv2.BORDER_CONSTANT, (0,0,0))
ret, jpg = cv2.imencode('.jpg', thumbnail, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
thumbnail_bytes = jpg.tobytes()
response = make_response(thumbnail_bytes)
response.headers['Content-Type'] = 'image/jpg'
return response
@bp.route('/events/<id>/snapshot.jpg')
def event_snapshot(id):
jpg_bytes = None
try:
event = Event.get(Event.id == id)
if not event.has_snapshot:
return "Snapshot not available", 404
# read snapshot from disk
with open(os.path.join(CLIPS_DIR, f"{event.camera}-{id}.jpg"), 'rb') as image_file:
jpg_bytes = image_file.read()
except DoesNotExist:
# see if the object is currently being tracked
try:
for camera_state in current_app.detected_frames_processor.camera_states.values():
if id in camera_state.tracked_objects:
tracked_obj = camera_state.tracked_objects.get(id)
if not tracked_obj is None:
jpg_bytes = tracked_obj.get_jpg_bytes(
timestamp=request.args.get('timestamp', type=int),
bounding_box=request.args.get('bbox', type=int),
crop=request.args.get('crop', type=int),
height=request.args.get('h', type=int)
)
except:
return "Event not found", 404
except:
return "Event not found", 404
response = make_response(jpg_bytes)
response.headers['Content-Type'] = 'image/jpg'
return response
@bp.route('/events')
def events():
limit = request.args.get('limit', 100)
camera = request.args.get('camera')
label = request.args.get('label')
zone = request.args.get('zone')
after = request.args.get('after', type=float)
before = request.args.get('before', type=float)
has_clip = request.args.get('has_clip', type=int)
has_snapshot = request.args.get('has_snapshot', type=int)
include_thumbnails = request.args.get('include_thumbnails', default=1, type=int)
clauses = []
excluded_fields = []
if camera:
clauses.append((Event.camera == camera))
if label:
clauses.append((Event.label == label))
if zone:
clauses.append((Event.zones.cast('text') % f"*\"{zone}\"*"))
if after:
clauses.append((Event.start_time >= after))
if before:
clauses.append((Event.start_time <= before))
if not has_clip is None:
clauses.append((Event.has_clip == has_clip))
if not has_snapshot is None:
clauses.append((Event.has_snapshot == has_snapshot))
if not include_thumbnails:
excluded_fields.append(Event.thumbnail)
if len(clauses) == 0:
clauses.append((1 == 1))
events = (Event.select()
.where(reduce(operator.and_, clauses))
.order_by(Event.start_time.desc())
.limit(limit))
return jsonify([model_to_dict(e, exclude=excluded_fields) for e in events])
@bp.route('/config')
def config():
return jsonify(current_app.frigate_config.to_dict())
@bp.route('/version')
def version():
return VERSION
@bp.route('/stats')
def stats():
stats = stats_snapshot(current_app.stats_tracking)
return jsonify(stats)
@bp.route('/<camera_name>/<label>/best.jpg')
def best(camera_name, label):
if camera_name in current_app.frigate_config.cameras:
best_object = current_app.detected_frames_processor.get_best(camera_name, label)
best_frame = best_object.get('frame')
if best_frame is None:
best_frame = np.zeros((720,1280,3), np.uint8)
else:
best_frame = cv2.cvtColor(best_frame, cv2.COLOR_YUV2BGR_I420)
crop = bool(request.args.get('crop', 0, type=int))
if crop:
box = best_object.get('box', (0,0,300,300))
region = calculate_region(best_frame.shape, box[0], box[1], box[2], box[3], 1.1)
best_frame = best_frame[region[1]:region[3], region[0]:region[2]]
height = int(request.args.get('h', str(best_frame.shape[0])))
width = int(height*best_frame.shape[1]/best_frame.shape[0])
best_frame = cv2.resize(best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
ret, jpg = cv2.imencode('.jpg', best_frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
response = make_response(jpg.tobytes())
response.headers['Content-Type'] = 'image/jpg'
return response
else:
return "Camera named {} not found".format(camera_name), 404
@bp.route('/<camera_name>')
def mjpeg_feed(camera_name):
fps = int(request.args.get('fps', '3'))
height = int(request.args.get('h', '360'))
draw_options = {
'bounding_boxes': request.args.get('bbox', type=int),
'timestamp': request.args.get('timestamp', type=int),
'zones': request.args.get('zones', type=int),
'mask': request.args.get('mask', type=int),
'motion_boxes': request.args.get('motion', type=int),
'regions': request.args.get('regions', type=int),
}
if camera_name in current_app.frigate_config.cameras:
# return a multipart response
return Response(imagestream(current_app.detected_frames_processor, camera_name, fps, height, draw_options),
mimetype='multipart/x-mixed-replace; boundary=frame')
else:
return "Camera named {} not found".format(camera_name), 404
@bp.route('/<camera_name>/latest.jpg')
def latest_frame(camera_name):
draw_options = {
'bounding_boxes': request.args.get('bbox', type=int),
'timestamp': request.args.get('timestamp', type=int),
'zones': request.args.get('zones', type=int),
'mask': request.args.get('mask', type=int),
'motion_boxes': request.args.get('motion', type=int),
'regions': request.args.get('regions', type=int),
}
if camera_name in current_app.frigate_config.cameras:
# max out at specified FPS
frame = current_app.detected_frames_processor.get_current_frame(camera_name, draw_options)
if frame is None:
frame = np.zeros((720,1280,3), np.uint8)
height = int(request.args.get('h', str(frame.shape[0])))
width = int(height*frame.shape[1]/frame.shape[0])
frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
ret, jpg = cv2.imencode('.jpg', frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
response = make_response(jpg.tobytes())
response.headers['Content-Type'] = 'image/jpg'
return response
else:
return "Camera named {} not found".format(camera_name), 404
@bp.route('/recordings/<camera_name>')
def list_recordings(camera_name):
levels = 3
outFiles = []
outDirs = []
for subdir, dirs, files, curLevel in dirwalklevel(RECORD_DIR, level=levels):
if curLevel == levels:
for file in files:
if pathBelongsToCamera(camera_name, subdir, file):
outFiles.append(sanitiseRecordingPath(subdir, file))
for dir in dirs:
if pathBelongsToCamera(camera_name, subdir, dir):
outDirs.append(sanitiseRecordingPath(subdir, dir))
return jsonify({
"files": outFiles,
"directories": outDirs
})
@bp.route('/recordings/<year_month>/<day>/<hour>/<camera_name>')
def list_recording_files(year_month, day, hour, camera_name):
outFiles = []
try:
levels = 1
pathParts = [RECORD_DIR, year_month, day, hour]
recPath = os.path.join(*pathParts)
for subdir, dirs, files, curLevel in dirwalklevel(recPath, level=levels):
if curLevel == levels:
for file in files:
if pathBelongsToCamera(camera_name, subdir, file):
outFiles.append(sanitiseRecordingPath(subdir, file))
except Exception as e:
print(e)
return jsonify({
"files": outFiles
})
def dirwalklevel(some_dir, level=1):
some_dir = some_dir.rstrip(os.path.sep)
assert os.path.isdir(some_dir)
num_sep = some_dir.count(os.path.sep)
for root, dirs, files in os.walk(some_dir):
num_sep_this = root.count(os.path.sep)
curLevel = (num_sep_this - num_sep)
yield root, dirs, files, curLevel
if num_sep + level <= num_sep_this:
del dirs[:]
def pathBelongsToCamera(camera_name, subdir, file):
return camera_name == "all" or camera_name in file or camera_name in subdir
def sanitiseRecordingPath(subdir, file):
return os.path.join(subdir, file).replace(RECORD_DIR, "")
def imagestream(detected_frames_processor, camera_name, fps, height, draw_options):
while True:
# max out at specified FPS
gevent.sleep(1/fps)
frame = detected_frames_processor.get_current_frame(camera_name, draw_options)
if frame is None:
frame = np.zeros((height,int(height*16/9),3), np.uint8)
width = int(height*frame.shape[1]/frame.shape[0])
frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_LINEAR)
ret, jpg = cv2.imencode('.jpg', frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + jpg.tobytes() + b'\r\n\r\n')
@ws.route('/ws')
def echo_socket(socket):
current_app.mqtt_backend.register(socket)
while not socket.closed:
# Sleep to prevent *constant* context-switches.
gevent.sleep(0.1)
message = socket.receive()
if message:
current_app.mqtt_backend.publish(message)
|
the-stack_0_5422 | # pylint: skip-file
def main():
'''
ansible git module for committing
'''
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', type='str', choices=['present']),
msg=dict(default=None, required=True, type='str'),
path=dict(default=None, required=True, type='str'),
author=dict(default=None, required=False, type='str'),
commit_files=dict(default=None, required=False, type='list'),
),
supports_check_mode=False,
)
git = GitCommit(module.params['msg'],
module.params['path'],
module.params['commit_files'],
module.params['author'],
)
state = module.params['state']
if state == 'present':
results = git.commit()
if results['returncode'] != 0:
module.fail_json(msg=results)
        if 'no_commits' in results:
module.exit_json(changed=False, results=results, state="present")
module.exit_json(changed=True, results=results, state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. This are required
if __name__ == '__main__':
from ansible.module_utils.basic import *
main()
|
the-stack_0_5423 | from __future__ import absolute_import
from fobi.base import form_element_plugin_registry
from .base import ContentTextPlugin
__title__ = 'fobi.contrib.plugins.form_elements.content.content_text.' \
'fobi_form_elements'
__author__ = 'Artur Barseghyan <[email protected]>'
__copyright__ = '2014-2017 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = ('ContentTextPlugin',)
form_element_plugin_registry.register(ContentTextPlugin)
|
the-stack_0_5424 | import spacy
from spacy.lemmatizer import Lemmatizer
from spacy.lang.en import LEMMA_INDEX, LEMMA_EXC, LEMMA_RULES
import random
import swda
import string
class feature_extractor(object):
def __init__(self):
self.nlp = spacy.load('en_core_web_sm', disable = ['ner', 'textcat'])
self.lemmatizer = spacy.lemmatizer.Lemmatizer(LEMMA_INDEX, LEMMA_EXC, LEMMA_RULES)
def find_features(self, utterance, n = 9, is_previous_speaker = None, previous_da = None):
features = {}
doc = self.nlp(utterance)
lemmatized = [self.lemmatizer(token.text, token.pos_)[0] for token in doc]
features['is_question'] = ('?' in [token.text for token in doc]) #question mark
for i in range(n): #first n tokens
try:
features['word'+str(i+1)] = lemmatized[i]
except IndexError:
features['word'+str(i+1)] = ''
for i in range(n): #first n pos-tags
try:
features['word'+str(i+1)+'_pos_tag'] = doc[i].pos_
except IndexError:
features['word'+str(i+1)+'_pos_tag'] = ''
if is_previous_speaker != None: #previous speaker
features['is_previous_speaker'] = is_previous_speaker
else:
features['is_previous_speaker'] = ''
if previous_da: #previous dialogue act
features['previous_da'] = previous_da
else:
features['previous_da'] = ''
try: #predicate verb
predicate, predicate_pos = [(token.text, token.pos_) for token in doc if token.dep_ == 'ROOT'][0]
features['predicate'] = self.lemmatizer(predicate, predicate_pos)[0]
except IndexError:
features['predicate'] = ''
try: #subject
subject, subject_pos = [(token.text, token.pos_) for token in doc if (token.dep_ == 'nsubj') or (token.dep_ == 'nsubjpass') or (token.dep_ == 'csubj')][0]
features['subject'] = self.lemmatizer(subject, subject_pos)[0]
except IndexError:
features['subject'] = ''
try: #object
object, object_pos = [(token.text, token.pos_) for token in doc if (token.dep_ == 'iobj') or (token.dep_ == 'obj') or (token.dep_ == 'dobj') or (token.dep_ == 'pobj')][0]
features['object'] = self.lemmatizer(object, object_pos)[0]
except IndexError:
features['object'] = ''
return features
def create_featuresets(self, max_transcripts = 20, n = 9):
corpus = swda.CorpusReader('swda')
utterances = []
i = 1
for trans in corpus.iter_transcripts(display_progress = True):
if i > max_transcripts:
break
previous_tag = None
previous_caller = None
for utt in trans.utterances:
                if utt.act_tag not in ('x', 't3', '%', '+'):  # discard non-verbal, uninterpretable and third-party-talk dialogue acts as well as continued sentences
try:
previous_tag = utterances[-1][1]
previous_caller = utterances[-1][2]
except IndexError:
pass
utterances.append((self.clean_utterance(utt.text), utt.act_tag, utt.caller, previous_tag, previous_caller))
i += 1
print('\nProcessing {} utterances... this will take some time.'.format(str(len(utterances))))
random.shuffle(utterances)
featuresets = [(self.find_features(text, n, is_previous_speaker = (caller == previous_caller), previous_da = previous_tag), tag) for (text, tag, caller, previous_tag, previous_caller) in utterances]
return featuresets
def clean_utterance(self, utterance):
ttable = dict((ord(char), None) for char in string.punctuation)
for key in '?!.,':
if ord(key) in ttable: del ttable[ord(key)]
for key in 'CDEFG':
ttable[ord(key)] = None
utterance = (utterance.translate(ttable)).replace(' ', ' ')
if utterance[0] == ' ':
utterance = utterance[1:]
return utterance
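# Hedged usage sketch, not part of the original script: extracting features for a
# single utterance without going through the Switchboard corpus. The dialogue-act
# tag 'sd' (statement) passed as previous_da is illustrative.
if __name__ == '__main__':
    fe = feature_extractor()
    feats = fe.find_features("Are you going to the meeting tomorrow?",
                             n=9, is_previous_speaker=False, previous_da='sd')
    print(feats['is_question'], feats['predicate'], feats['subject'])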
|
the-stack_0_5425 | import unittest
from unittest.mock import Mock
from rastervision.augmentor import (Augmentor, AugmentorConfig,
AugmentorConfigBuilder)
from rastervision.protos.augmentor_pb2 import AugmentorConfig as AugmentorConfigMsg
from tests.mock import SupressDeepCopyMixin
MOCK_AUGMENTOR = 'MOCK_AUGMENTOR'
class MockAugmentor(Augmentor):
def __init__(self):
self.mock = Mock()
self.mock.process.return_value = None
def process(self, training_data, tmp_dir):
result = self.mock.process(training_data, tmp_dir)
if result is None:
return training_data
else:
return result
class MockAugmentorConfig(SupressDeepCopyMixin, AugmentorConfig):
def __init__(self):
super().__init__(MOCK_AUGMENTOR)
self.mock = Mock()
self.mock.to_proto.return_value = None
self.mock.create_augmentor.return_value = None
self.mock.update_for_command.return_value = None
def to_proto(self):
result = self.mock.to_proto()
if result is None:
return AugmentorConfigMsg(
augmentor_type=self.augmentor_type, custom_config={})
else:
return result
def create_augmentor(self):
result = self.mock.create_augmentor()
if result is None:
return MockAugmentor()
else:
return result
def update_for_command(self, command_type, experiment_config,
context=None):
super().update_for_command(command_type, experiment_config, context)
self.mock.update_for_command(command_type, experiment_config, context)
def report_io(self, command_type, io_def):
self.mock.report_io(command_type, io_def)
class MockAugmentorConfigBuilder(SupressDeepCopyMixin, AugmentorConfigBuilder):
def __init__(self, prev=None):
super().__init__(MockAugmentorConfig, {})
self.mock = Mock()
self.mock.from_proto = None
def from_proto(self, msg):
result = self.mock.from_proto(msg)
if result is None:
return self
else:
return result
if __name__ == '__main__':
unittest.main()
|
the-stack_0_5427 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from django import http
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
from horizon import messages
from horizon import middleware
from horizon.test import helpers as test
class MessageTests(test.TestCase):
def test_middleware_header(self):
req = self.request
string = "Giant ants are attacking San Francisco!"
expected = ["error", force_text(string), ""]
self.assertIn("async_messages", req.horizon)
self.assertItemsEqual(req.horizon['async_messages'], [])
req.META['HTTP_X_REQUESTED_WITH'] = 'XMLHttpRequest'
messages.error(req, string)
self.assertItemsEqual(req.horizon['async_messages'], [expected])
res = http.HttpResponse()
res = middleware.HorizonMiddleware('dummy_get_response') \
._process_response(req, res)
self.assertEqual(json.dumps([expected]),
res['X-Horizon-Messages'])
def test_error_message(self):
req = self.request
string = mark_safe("We are now safe from ants! Go <a>here</a>!")
expected = ["error", force_text(string), " safe"]
self.assertIn("async_messages", req.horizon)
self.assertItemsEqual(req.horizon['async_messages'], [])
req.META['HTTP_X_REQUESTED_WITH'] = 'XMLHttpRequest'
messages.error(req, string)
self.assertItemsEqual(req.horizon['async_messages'], [expected])
res = http.HttpResponse()
res = middleware.HorizonMiddleware('dummy_get_response') \
._process_response(req, res)
self.assertEqual(json.dumps([expected]),
res['X-Horizon-Messages'])
|
the-stack_0_5428 | import logging
import pytest
from dvc.exceptions import (
NoMetricsFoundError,
NoMetricsParsedError,
OverlappingOutputPathsError,
)
from dvc.path_info import PathInfo
from dvc.utils.fs import remove
from dvc.utils.serialize import dump_yaml, modify_yaml
from tests.func.metrics.utils import _write_json
@pytest.mark.parametrize(
"diff, metric_value",
(
(
lambda repo, target, rev: repo.metrics.diff(
targets=[target], a_rev=rev
),
{"m": 1},
),
(
lambda repo, target, rev: repo.plots.diff(
targets=[target], revs=[rev]
),
[{"m": 1}, {"m": 2}],
),
),
)
def test_diff_no_file_on_target_rev(
tmp_dir, scm, dvc, caplog, diff, metric_value
):
with tmp_dir.branch("new_branch", new=True):
_write_json(tmp_dir, metric_value, "metric.json")
with caplog.at_level(logging.WARNING, "dvc"):
diff(dvc, "metric.json", "master")
assert "'metric.json' was not found at: 'master'." in caplog.text
@pytest.mark.parametrize(
"show, malformed_metric",
(
(lambda repo, target: repo.metrics.show(targets=[target]), '{"m": 1'),
(
lambda repo, target: repo.plots.show(targets=[target]),
'[{"m": 1}, {"m": 2}',
),
),
)
def test_show_malformed_metric(
tmp_dir, scm, dvc, caplog, show, malformed_metric
):
tmp_dir.gen("metric.json", malformed_metric)
with pytest.raises(NoMetricsParsedError):
show(dvc, "metric.json")
@pytest.mark.parametrize(
"show",
(lambda repo: repo.metrics.show(), lambda repo: repo.plots.show(),),
)
def test_show_no_metrics_files(tmp_dir, dvc, caplog, show):
with pytest.raises(NoMetricsFoundError):
show(dvc)
@pytest.mark.parametrize("clear_before_run", [True, False])
@pytest.mark.parametrize("typ", ["metrics", "plots"])
def test_metrics_show_overlap(
tmp_dir, dvc, run_copy_metrics, clear_before_run, typ
):
data_dir = PathInfo("data")
(tmp_dir / data_dir).mkdir()
outs = {typ: [str(data_dir / "m1.yaml")]}
dump_yaml(data_dir / "m1_temp.yaml", {"a": {"b": {"c": 2, "d": 1}}})
run_copy_metrics(
str(data_dir / "m1_temp.yaml"),
str(data_dir / "m1.yaml"),
single_stage=False,
commit=f"add m1 {typ}",
name="cp-m1",
**outs,
)
with modify_yaml("dvc.yaml") as d:
# trying to make an output overlaps error
d["stages"]["corrupted-stage"] = {
"cmd": "mkdir data",
"outs": ["data"],
}
# running by clearing and not clearing stuffs
# so as it works even for optimized cases
if clear_before_run:
remove(data_dir)
remove(dvc.cache.local.cache_dir)
dvc._reset()
show = dvc.metrics.show if typ == "metrics" else dvc.plots.show
with pytest.raises(OverlappingOutputPathsError):
show()
|
the-stack_0_5429 | """Tests for lr_scheduler.py"""
from distutils.version import LooseVersion
from unittest.mock import Mock
import numpy as np
import pytest
import torch
from sklearn.base import clone
from torch.optim import SGD
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.optim.lr_scheduler import ExponentialLR
from torch.optim.lr_scheduler import LambdaLR
from torch.optim.lr_scheduler import MultiStepLR
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.lr_scheduler import StepLR
from torch.optim.lr_scheduler import CyclicLR as TorchCyclicLR
from skorch import NeuralNetClassifier
from skorch.callbacks.lr_scheduler import WarmRestartLR, LRScheduler
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
class TestLRCallbacks:
@pytest.mark.parametrize('policy', [StepLR, 'StepLR'])
def test_simulate_lrs_epoch_step(self, policy):
lr_sch = LRScheduler(policy, step_size=2)
lrs = lr_sch.simulate(6, 1)
expected = np.array([1.0, 1.0, 0.1, 0.1, 0.01, 0.01])
assert np.allclose(expected, lrs)
@pytest.mark.parametrize('policy', [TorchCyclicLR])
def test_simulate_lrs_batch_step(self, policy):
lr_sch = LRScheduler(
policy, base_lr=1, max_lr=5, step_size_up=4, step_every='batch')
lrs = lr_sch.simulate(11, 1)
expected = np.array([1, 2, 3, 4, 5, 4, 3, 2, 1, 2, 3])
assert np.allclose(expected, lrs)
@pytest.mark.parametrize('policy, instance, kwargs', [
('LambdaLR', LambdaLR, {'lr_lambda': (lambda x: 1e-1)}),
('StepLR', StepLR, {'step_size': 30}),
('MultiStepLR', MultiStepLR, {'milestones': [30, 90]}),
('ExponentialLR', ExponentialLR, {'gamma': 0.1}),
('ReduceLROnPlateau', ReduceLROnPlateau, {}),
('WarmRestartLR', WarmRestartLR, {}),
('CosineAnnealingLR', CosineAnnealingLR, {'T_max': 5, 'eta_min': 1e-3}),
(WarmRestartLR, WarmRestartLR, {}),
])
def test_lr_callback_init_policies(
self,
classifier_module,
classifier_data,
policy,
instance,
kwargs,
):
X, y = classifier_data
lr_policy = LRScheduler(policy, **kwargs)
net = NeuralNetClassifier(
classifier_module, max_epochs=2, callbacks=[lr_policy]
)
net.fit(X, y)
assert any(list(map(
lambda x: isinstance(
getattr(x[1], 'lr_scheduler_', None), instance),
net.callbacks_
)))
@pytest.mark.parametrize('policy, kwargs', [
('LambdaLR', {'lr_lambda': (lambda x: 1e-1)}),
('StepLR', {'step_size': 30}),
('MultiStepLR', {'milestones': [30, 90]}),
('ExponentialLR', {'gamma': 0.1}),
('ReduceLROnPlateau', {}),
('WarmRestartLR', {}),
('CosineAnnealingLR', {'T_max': 3}),
])
def test_lr_callback_steps_correctly(
self,
classifier_module,
classifier_data,
policy,
kwargs,
):
max_epochs = 2
X, y = classifier_data
lr_policy = LRScheduler(policy, **kwargs)
net = NeuralNetClassifier(
classifier_module(),
max_epochs=max_epochs,
batch_size=16,
callbacks=[lr_policy],
)
net.fit(X, y)
# pylint: disable=protected-access
assert lr_policy.lr_scheduler_.last_epoch == max_epochs
@pytest.mark.parametrize('policy, kwargs', [
(TorchCyclicLR, {'base_lr': 1e-3, 'max_lr': 6e-3, 'step_every': 'batch'}),
])
def test_lr_callback_batch_steps_correctly(
self,
classifier_module,
classifier_data,
policy,
kwargs,
):
batch_size = 100
max_epochs = 2
X, y = classifier_data
num_examples = len(X)
lr_policy = LRScheduler(policy, **kwargs)
net = NeuralNetClassifier(classifier_module(), max_epochs=max_epochs,
batch_size=batch_size, callbacks=[lr_policy])
net.fit(X, y)
total_iterations_per_epoch = num_examples / batch_size
# 80% of sample used for training by default
total_training_iterations_per_epoch = 0.8 * total_iterations_per_epoch
expected = int(total_training_iterations_per_epoch * max_epochs)
# pylint: disable=protected-access
assert lr_policy.batch_idx_ == expected
@pytest.mark.parametrize('policy, kwargs', [
(TorchCyclicLR, {'base_lr': 1e-3, 'max_lr': 6e-3, 'step_every': 'batch'}),
])
def test_lr_callback_batch_steps_correctly_fallback(
self,
classifier_module,
classifier_data,
policy,
kwargs,
):
batch_size = 100
max_epochs = 2
X, y = classifier_data
num_examples = len(X)
lr_policy = LRScheduler(policy, **kwargs)
net = NeuralNetClassifier(classifier_module(), max_epochs=max_epochs,
batch_size=batch_size, callbacks=[lr_policy])
net.fit(X, y)
# Removes batch count information in the last two epochs
for i in range(max_epochs):
del net.history[i]["train_batch_count"]
del net.history[i]["valid_batch_count"]
net.partial_fit(X, y)
total_iterations_per_epoch = num_examples / batch_size
# batch_counts were removed thus the total iterations of the last
# epoch is used
total_iterations_fit_run = total_iterations_per_epoch * max_epochs
# 80% of sample used for training by default
total_iterations_partial_fit_run = (
0.8 * total_iterations_per_epoch * max_epochs)
# called fit AND partial_fit
total_iterations = (total_iterations_fit_run +
total_iterations_partial_fit_run)
# Failback to using both valid and training batches counts on
# second run
expected = int(total_iterations)
# pylint: disable=protected-access
assert lr_policy.batch_idx_ == expected
def test_lr_scheduler_cloneable(self):
# reproduces bug #271
scheduler = LRScheduler(WarmRestartLR, base_lr=123)
clone(scheduler) # does not raise
def test_lr_scheduler_set_params(self, classifier_module, classifier_data):
scheduler = LRScheduler(
TorchCyclicLR, base_lr=123, max_lr=999, step_every='batch')
net = NeuralNetClassifier(
classifier_module,
max_epochs=0,
callbacks=[('scheduler', scheduler)],
)
net.set_params(callbacks__scheduler__base_lr=456)
net.fit(*classifier_data) # we need to trigger on_train_begin
assert net.callbacks[0][1].lr_scheduler_.base_lrs[0] == 456
@pytest.mark.parametrize('policy,kwargs', [
(StepLR, {'gamma': 0.9, 'step_size': 1})
])
@pytest.mark.skipif(
LooseVersion(torch.__version__) < '1.4',
reason="Feature isn't supported with this torch version."
)
def test_lr_scheduler_record_epoch_step(self,
classifier_module,
classifier_data,
policy,
kwargs):
epochs = 3
scheduler = LRScheduler(policy, **kwargs)
lrs = scheduler.simulate(epochs, initial_lr=123.)
net = NeuralNetClassifier(
classifier_module,
max_epochs=epochs,
lr=123.,
callbacks=[('scheduler', scheduler)]
)
net.fit(*classifier_data)
assert np.all(net.history[:, 'event_lr'] == lrs)
@pytest.mark.skipif(
LooseVersion(torch.__version__) < '1.4',
reason="Feature isn't supported with this torch version."
)
def test_lr_scheduler_record_batch_step(self, classifier_module, classifier_data):
X, y = classifier_data
batch_size = 128
scheduler = LRScheduler(
TorchCyclicLR,
base_lr=1,
max_lr=5,
step_size_up=4,
step_every='batch'
)
net = NeuralNetClassifier(
classifier_module,
max_epochs=1,
lr=123.,
batch_size=batch_size,
callbacks=[('scheduler', scheduler)]
)
net.fit(X, y)
new_lrs = scheduler.simulate(
net.history[-1, 'train_batch_count'],
initial_lr=123.,
)
assert np.all(net.history[-1, 'batches', :, 'event_lr'] == new_lrs)
def test_cyclic_lr_with_epoch_step_warning(self,
classifier_module,
classifier_data):
msg = ("The LRScheduler now makes a step every epoch by default. "
"To have the cyclic lr scheduler update "
"every batch set step_every='batch'")
with pytest.warns(FutureWarning, match=msg) as record:
scheduler = LRScheduler(
TorchCyclicLR, base_lr=123, max_lr=999)
net = NeuralNetClassifier(
classifier_module,
max_epochs=0,
callbacks=[('scheduler', scheduler)],
)
net.initialize()
assert len(record) == 1
class TestReduceLROnPlateau:
def get_net_with_mock(
self, classifier_data, classifier_module, monitor='train_loss'):
"""Returns a net with a mocked lr policy that allows to check what
it's step method was called with.
"""
X, y = classifier_data
net = NeuralNetClassifier(
classifier_module,
callbacks=[
('scheduler', LRScheduler(ReduceLROnPlateau, monitor=monitor)),
],
max_epochs=1,
).fit(X, y)
# mock the policy
policy = dict(net.callbacks_)['scheduler'].lr_scheduler_
mock_step = Mock(side_effect=policy.step)
policy.step = mock_step
# make sure that mocked policy is set
scheduler = dict(net.callbacks_)['scheduler']
# pylint: disable=protected-access
scheduler._get_scheduler = lambda *args, **kwargs: policy
net.partial_fit(X, y)
return net, mock_step
@pytest.mark.parametrize('monitor', ['train_loss', 'valid_loss', 'epoch'])
def test_reduce_lr_monitor_with_string(
self, monitor, classifier_data, classifier_module):
# step should be called with the 2nd to last value from that
# history entry
net, mock_step = self.get_net_with_mock(
classifier_data, classifier_module, monitor=monitor)
score = mock_step.call_args_list[0][0][0]
np.isclose(score, net.history[-2, monitor])
def test_reduce_lr_monitor_with_callable(
self, classifier_data, classifier_module):
# step should always be called with the return value from the
# callable, 55
_, mock_step = self.get_net_with_mock(
classifier_data, classifier_module, monitor=lambda x: 55)
score = mock_step.call_args_list[0][0][0]
assert score == 55
@pytest.mark.parametrize('mode,score', [
('min', np.inf),
('max', -np.inf)
])
def test_reduce_lr_monitor_max(
self, classifier_data, classifier_module, mode, score):
X, y = classifier_data
net = NeuralNetClassifier(
classifier_module,
callbacks=[
('scheduler', LRScheduler(
ReduceLROnPlateau, monitor='train_loss', mode=mode)),
],
max_epochs=1,
)
net.fit(X, y)
policy = dict(net.callbacks_)['scheduler'].lr_scheduler_
assert policy.best == score
class TestWarmRestartLR():
def assert_lr_correct(
self, optimizer, targets, epochs, min_lr, max_lr, base_period,
period_mult):
"""Test that learning rate was set correctly."""
targets = [targets] if len(optimizer.param_groups) == 1 else targets
scheduler = WarmRestartLR(
optimizer, min_lr, max_lr, base_period, period_mult
)
for epoch in range(epochs):
optimizer.step() # suppress warning about .step call order
scheduler.step(epoch)
for param_group, target in zip(optimizer.param_groups, targets):
assert param_group['lr'] == pytest.approx(target[epoch])
def _single_period_targets(self, epochs, min_lr, max_lr, period):
targets = 1 + np.cos(np.arange(epochs) * np.pi / period)
targets = min_lr + 0.5 * (max_lr - min_lr) * targets
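        # i.e. lr(t) = min_lr + (max_lr - min_lr) * (1 + cos(pi * t / period)) / 2,
        # the standard cosine-annealing curve these tests compare against.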
return targets.tolist()
# pylint: disable=missing-docstring
def _multi_period_targets(
self, epochs, min_lr, max_lr, base_period, period_mult):
remaining_epochs = epochs
current_period = base_period
targets = list()
while remaining_epochs > 0:
period_epochs = min(remaining_epochs, current_period + 1)
remaining_epochs -= period_epochs
targets += self._single_period_targets(
period_epochs, min_lr, max_lr, current_period
)
current_period = current_period * period_mult
return targets
@pytest.fixture()
def init_optimizer(self, classifier_module):
return SGD(classifier_module().parameters(), lr=0.05)
def test_raise_incompatible_len_on_min_lr_err(self, init_optimizer):
with pytest.raises(ValueError) as excinfo:
WarmRestartLR(init_optimizer, min_lr=[1e-1, 1e-2])
assert 'min_lr' in str(excinfo.value)
def test_raise_incompatible_len_on_max_lr_err(self, init_optimizer):
with pytest.raises(ValueError) as excinfo:
WarmRestartLR(init_optimizer, max_lr=[1e-1, 1e-2])
assert 'max_lr' in str(excinfo.value)
def test_single_period(self, init_optimizer):
optimizer = init_optimizer
epochs = 3
min_lr = 5e-5
max_lr = 5e-2
base_period = 3
period_mult = 1
targets = self._single_period_targets(
epochs, min_lr, max_lr, base_period)
self.assert_lr_correct(
optimizer,
targets,
epochs,
min_lr,
max_lr,
base_period,
period_mult
)
def test_multi_period_with_restart(self, init_optimizer):
optimizer = init_optimizer
epochs = 9
min_lr = 5e-5
max_lr = 5e-2
base_period = 2
period_mult = 2
targets = self._multi_period_targets(
epochs, min_lr, max_lr, base_period, period_mult
)
self.assert_lr_correct(
optimizer,
targets,
epochs,
min_lr,
max_lr,
base_period,
period_mult
)
def test_restarts_with_multiple_groups(self, classifier_module):
classifier = classifier_module()
optimizer = SGD(
[
{'params': classifier.sequential[0].parameters(), 'lr': 1e-3},
{'params': classifier.sequential[1].parameters(), 'lr': 1e-2},
{'params': classifier.sequential[2].parameters(), 'lr': 1e-1},
]
)
epochs = 9
min_lr_group = [1e-5, 1e-4, 1e-3]
max_lr_group = [1e-3, 1e-2, 1e-1]
base_period = 2
period_mult = 2
targets = list()
for min_lr, max_lr in zip(min_lr_group, max_lr_group):
targets.append(
self._multi_period_targets(
epochs, min_lr, max_lr, base_period, period_mult
)
)
self.assert_lr_correct(
optimizer,
targets,
epochs,
min_lr_group,
max_lr_group,
base_period,
period_mult
)
|
the-stack_0_5430 | import os
import sys
import re
def terminal(cmd):
return os.popen(cmd).read()
def run(clauses, literals, num_vars):
terminal(f'python3 gen_random_SAT.py {clauses} {literals} {num_vars}')
output = terminal('./kissat_gb/build/kissat random_SAT.cnf | grep process-time:')
    match = re.match(r'c process-time:\s+[^\s]+\s+([0-9\.]+)', output)
t1 = float(match.group(1))
t2 = 1000
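    # t2 looks like a hard-coded placeholder for the "BRUTE_FORCE Time" column;
    # only the kissat run above is actually timed.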
return (t1, t2)
def header():
line = 'Clauses,'
line += 'Literals per clause,'
line += 'Variables,'
line += 'KISSAT_GB Time (s),'
line += 'BRUTE_FORCE Time (s),'
return line
def log(clauses, literals, num_vars, t1, t2):
line = str(clauses) + ','
line += str(literals) + ','
line += str(num_vars) + ','
line += str(t1) + ','
line += str(t2) + ','
return line
output = open('experiment_output.csv', 'w')
output.write(header() + '\n')
total_clauses = 1000
total_literals = 1000
total_variables = 1000
step = 100
count = 0
num_samples = (total_clauses / step) * (total_literals / step) * (total_variables / step)
for clauses in range(step, total_clauses, step):
for literals in range(step, total_literals, step):
for num_vars in range(step, total_variables, step):
if(count % 10 == 0): print(f'Progress: {count / num_samples}')
count += 1
(t1, t2) = run(clauses, literals, num_vars)
output.write(log(clauses, literals, num_vars, t1, t2) + '\n')
output.close()
print('SUCCESS!') |
the-stack_0_5431 | """
To understand why this file is here, please read:
http://cookiecutter-django.readthedocs.io/en/latest/faq.html#why-is-there-a-django-contrib-sites-directory-in-cookiecutter-django
"""
from django.conf import settings
from django.db import migrations
def update_site_forward(apps, schema_editor):
"""Set site domain and name."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
"domain": "apros.com",
"name": "V2X_Solution",
},
)
def update_site_backward(apps, schema_editor):
"""Revert site domain and name to default."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID, defaults={"domain": "example.com", "name": "example.com"}
)
class Migration(migrations.Migration):
dependencies = [("sites", "0002_alter_domain_unique")]
operations = [migrations.RunPython(update_site_forward, update_site_backward)]
|
the-stack_0_5439 | from warnings import warn
from django.conf import settings
from wagtail.utils.deprecation import RemovedInWagtail50Warning
def get_admin_base_url(context=None):
"""
Gets the base URL for the wagtail admin site. This is set in `settings.WAGTAILADMIN_BASE_URL`,
which was previously `settings.BASE_URL`.
If setting is omitted and this is called in a request context, falls back to
`request.site.root_url` or next the host_name of the request.
"""
admin_base_url = getattr(settings, "WAGTAILADMIN_BASE_URL", None)
if admin_base_url is None and hasattr(settings, "BASE_URL"):
warn(
"settings.BASE_URL has been renamed to settings.WAGTAILADMIN_BASE_URL",
category=RemovedInWagtail50Warning,
)
admin_base_url = settings.BASE_URL
if admin_base_url is None and context is not None:
request = context["request"]
admin_base_url = getattr(request.site, "root_url", None)
if admin_base_url is None:
admin_base_url = request.get_host()
secure_prefix = "http"
if request.is_secure():
secure_prefix = "https"
admin_base_url = secure_prefix + "://" + admin_base_url
return admin_base_url
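# Usage sketch (illustrative, not part of this module): with
#   WAGTAILADMIN_BASE_URL = "https://admin.example.com"
# in Django settings, get_admin_base_url() returns that value directly; without
# the setting, a request context is needed so the host-based fallback above applies.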
|
the-stack_0_5441 | # -*- coding: utf-8 -*-
"""
Class definition of YOLO_v3 style detection model on image and video
"""
import colorsys
import os
from timeit import default_timer as timer
import numpy as np
from keras import backend as K
from keras.models import load_model
from keras.layers import Input
from PIL import Image, ImageFont, ImageDraw
from yolo3.model import yolo_eval, yolo_body, tiny_yolo_body
from yolo3.utils import letterbox_image
import os
from keras.utils import multi_gpu_model
class YOLO(object):
_defaults = {
"model_path": 'model_data/trained_weights_stage_1.h5',
"anchors_path": 'model_data/yolo_anchors.txt',
"classes_path": 'model_data/AND_classes.txt',
"score" : 0.3,
"iou" : 0.45,
"model_image_size" : (416, 416),
"gpu_num" : 1,
}
@classmethod
def get_defaults(cls, n):
if n in cls._defaults:
return cls._defaults[n]
else:
return "Unrecognized attribute name '" + n + "'"
def __init__(self, **kwargs):
self.__dict__.update(self._defaults) # set up default values
self.__dict__.update(kwargs) # and update with user overrides
self.class_names = self._get_class()
self.anchors = self._get_anchors()
self.sess = K.get_session()
self.boxes, self.scores, self.classes = self.generate()
def _get_class(self):
classes_path = os.path.expanduser(self.classes_path)
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def _get_anchors(self):
anchors_path = os.path.expanduser(self.anchors_path)
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
def generate(self):
model_path = os.path.expanduser(self.model_path)
assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
# Load model, or construct model and load weights.
num_anchors = len(self.anchors)
num_classes = len(self.class_names)
is_tiny_version = num_anchors==6 # default setting
try:
self.yolo_model = load_model(model_path, compile=False)
except:
self.yolo_model = tiny_yolo_body(Input(shape=(None,None,3)), num_anchors//2, num_classes) \
if is_tiny_version else yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes)
self.yolo_model.load_weights(self.model_path) # make sure model, anchors and classes match
else:
assert self.yolo_model.layers[-1].output_shape[-1] == \
num_anchors/len(self.yolo_model.output) * (num_classes + 5), \
'Mismatch between model and given anchor and class sizes'
print('{} model, anchors, and classes loaded.'.format(model_path))
# Generate colors for drawing bounding boxes.
hsv_tuples = [(x / len(self.class_names), 1., 1.)
for x in range(len(self.class_names))]
self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
self.colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
self.colors))
np.random.seed(10101) # Fixed seed for consistent colors across runs.
np.random.shuffle(self.colors) # Shuffle colors to decorrelate adjacent classes.
np.random.seed(None) # Reset seed to default.
# Generate output tensor targets for filtered bounding boxes.
self.input_image_shape = K.placeholder(shape=(2, ))
if self.gpu_num>=2:
self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)
boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
len(self.class_names), self.input_image_shape,
score_threshold=self.score, iou_threshold=self.iou)
return boxes, scores, classes
def detect_image(self, image):
start = timer()
if self.model_image_size != (None, None):
assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'
assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'
boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
else:
new_image_size = (image.width - (image.width % 32),
image.height - (image.height % 32))
boxed_image = letterbox_image(image, new_image_size)
image_data = np.array(boxed_image, dtype='float32')
print(image_data.shape)
image_data /= 255.
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
out_boxes, out_scores, out_classes = self.sess.run(
[self.boxes, self.scores, self.classes],
feed_dict={
self.yolo_model.input: image_data,
self.input_image_shape: [image.size[1], image.size[0]],
K.learning_phase(): 0
})
print('Found {} boxes for {}'.format(len(out_boxes), 'img'))
font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 300
for i, c in reversed(list(enumerate(out_classes))):
predicted_class = self.class_names[c]
box = out_boxes[i]
score = out_scores[i]
label = '{} {:.2f}'.format(predicted_class, score)
draw = ImageDraw.Draw(image)
label_size = draw.textsize(label, font)
top, left, bottom, right = box
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
print(label, (left, top), (right, bottom))
if top - label_size[1] >= 0:
text_origin = np.array([left, top - label_size[1]])
else:
text_origin = np.array([left, top + 1])
# My kingdom for a good redistributable image drawing library.
for i in range(thickness):
draw.rectangle(
[left + i, top + i, right - i, bottom - i],
outline=(255))
draw.rectangle(
[tuple(text_origin), tuple(text_origin + label_size)],
fill=None)
draw.text(text_origin, label, fill=(0), font=font)
del draw
end = timer()
print(end - start)
return image
def close_session(self):
self.sess.close()
def detect_video(yolo, video_path, output_path=""):
import cv2
vid = cv2.VideoCapture(video_path)
if not vid.isOpened():
raise IOError("Couldn't open webcam or video")
video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
video_fps = vid.get(cv2.CAP_PROP_FPS)
video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
isOutput = True if output_path != "" else False
if isOutput:
print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
accum_time = 0
curr_fps = 0
fps = "FPS: ??"
prev_time = timer()
while True:
return_value, frame = vid.read()
image = Image.fromarray(frame)
image = yolo.detect_image(image)
result = np.asarray(image)
curr_time = timer()
exec_time = curr_time - prev_time
prev_time = curr_time
accum_time = accum_time + exec_time
curr_fps = curr_fps + 1
if accum_time > 1:
accum_time = accum_time - 1
fps = "FPS: " + str(curr_fps)
curr_fps = 0
cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.50, color=(255, 0, 0), thickness=2)
cv2.namedWindow("result", cv2.WINDOW_NORMAL)
cv2.imshow("result", result)
if isOutput:
out.write(result)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
yolo.close_session()
|
the-stack_0_5442 | from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Type,
)
from requests.exceptions import (
ConnectionError,
HTTPError,
Timeout,
TooManyRedirects,
)
from web3.types import (
RPCEndpoint,
RPCResponse,
)
if TYPE_CHECKING:
from web3 import Web3 # noqa: F401
whitelist = [
'admin',
'shh',
'miner',
'net',
    'txpool',
    'testing',
'evm',
'eth_protocolVersion',
'eth_syncing',
'eth_coinbase',
'eth_mining',
'eth_hashrate',
'eth_gasPrice',
'eth_accounts',
'eth_blockNumber',
'eth_getBalance',
'eth_getStorageAt',
'eth_getProof',
'eth_getCode',
'eth_getBlockByNumber',
'eth_getBlockByHash',
'eth_getBlockTransactionCountByNumber',
'eth_getBlockTransactionCountByHash',
'eth_getUncleCountByBlockNumber',
'eth_getUncleCountByBlockHash',
'eth_getTransactionByHash',
'eth_getTransactionByBlockHashAndIndex',
'eth_getTransactionByBlockNumberAndIndex',
'eth_getTransactionReceipt',
'eth_getTransactionCount',
'eth_call',
'eth_estimateGas',
'eth_newBlockFilter',
'eth_newPendingTransactionFilter',
'eth_newFilter',
'eth_getFilterChanges',
'eth_getFilterLogs',
'eth_getLogs',
'eth_uninstallFilter',
'eth_getCompilers',
'eth_getWork',
'eth_sign',
'eth_signTypedData',
'eth_sendRawTransaction',
'personal_importRawKey',
'personal_newAccount',
'personal_listAccounts',
'personal_lockAccount',
'personal_unlockAccount',
'personal_ecRecover',
'personal_sign',
'personal_signTypedData',
]
def check_if_retry_on_failure(method: RPCEndpoint) -> bool:
root = method.split('_')[0]
if root in whitelist:
return True
elif method in whitelist:
return True
else:
return False
def exception_retry_middleware(
make_request: Callable[[RPCEndpoint, Any], RPCResponse],
web3: "Web3",
errors: Collection[Type[BaseException]],
retries: int=5,
) -> Callable[[RPCEndpoint, Any], RPCResponse]:
"""
Creates middleware that retries failed HTTP requests. Is a default
middleware for HTTPProvider.
"""
def middleware(method: RPCEndpoint, params: Any) -> RPCResponse:
if check_if_retry_on_failure(method):
for i in range(retries):
try:
return make_request(method, params)
# https://github.com/python/mypy/issues/5349
except errors: # type: ignore
if i < retries - 1:
continue
else:
raise
return None
else:
return make_request(method, params)
return middleware
def http_retry_request_middleware(
make_request: Callable[[RPCEndpoint, Any], Any], web3: "Web3"
) -> Callable[[RPCEndpoint, Any], Any]:
return exception_retry_middleware(
make_request,
web3,
(ConnectionError, HTTPError, Timeout, TooManyRedirects)
)
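# Usage sketch (assumes the standard web3.py middleware-onion API):
#   w3 = Web3(HTTPProvider(endpoint_uri))
#   w3.middleware_onion.add(http_retry_request_middleware)
# Per the docstring above, HTTPProvider already installs this middleware by
# default, so manual wiring is only needed for custom provider setups.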
|
the-stack_0_5443 | def hexal_to_decimal(s):
""" s in form 0X< hexal digits>
returns int in decimal"""
s = s[2:]
s = s[::-1]
s = list(s)
for i, e in enumerate(s):
if s[i] == "A": s[i] = "10"
if s[i] == "B": s[i] = "11"
if s[i] == "C": s[i] = "12"
if s[i] == "D": s[i] = "13"
if s[i] == "E": s[i] = "14"
if s[i] == "F": s[i] = "15"
sum = 0
for i, e in enumerate(s):
sum += (16 ** i) * int(e)
return sum
def octal_to_decimal(s):
"""s in form 0X<octal digits>
returns int in decimal"""
s = s[1:]
s = s[::-1] # reverse
sum = 0
for i, e in enumerate(s):
sum += (8 ** i) * int(e)
return sum
print(hexal_to_decimal("0XCC"))
print(octal_to_decimal("010"))
|
the-stack_0_5447 | from six import string_types
import numpy as np
import os
import h5py
from bmtk.simulator.core.io_tools import io
from .simulation_config import SimulationConfig
from bmtk.simulator.core.node_sets import NodeSet, NodeSetAll
from bmtk.simulator.core import sonata_reader
class SimNetwork(object):
def __init__(self):
self._components = {}
self._io = io
self._node_adaptors = {}
self._edge_adaptors = {}
self._register_adaptors()
self._node_populations = {}
self._node_sets = {}
self._edge_populations = []
self._gap_juncs = {}
@property
def io(self):
return self._io
@property
def node_populations(self):
return self._node_populations.values()
@property
def recurrent_edges(self):
return [ep for ep in self._edge_populations if ep.recurrent_connections]
@property
def py_function_caches(self):
return None
def _register_adaptors(self):
self._node_adaptors['sonata'] = sonata_reader.NodeAdaptor
self._edge_adaptors['sonata'] = sonata_reader.EdgeAdaptor
def get_node_adaptor(self, name):
return self._node_adaptors[name]
def get_edge_adaptor(self, name):
return self._edge_adaptors[name]
def add_component(self, name, path):
self._components[name] = path
def get_component(self, name):
if name not in self._components:
self.io.log_exception('No network component set with name {}'.format(name))
else:
return self._components[name]
def has_component(self, name):
return name in self._components
def get_node_population(self, name):
return self._node_populations[name]
def get_node_populations(self):
return self._node_populations.values()
def add_node_set(self, name, node_set):
self._node_sets[name] = node_set
def get_node_set(self, node_set):
if isinstance(node_set, string_types) and node_set in self._node_sets:
return self._node_sets[node_set]
elif isinstance(node_set, (dict, list)):
return NodeSet(node_set, self)
else:
self.io.log_exception('Unable to load or find node_set "{}"'.format(node_set))
def add_nodes(self, node_population):
pop_name = node_population.name
if pop_name in self._node_populations:
# Make sure their aren't any collisions
self.io.log_exception('There are multiple node populations with name {}.'.format(pop_name))
node_population.initialize(self)
self._node_populations[pop_name] = node_population
if node_population.mixed_nodes:
# We'll allow a population to have virtual and non-virtual nodes but it is not ideal
self.io.log_warning(('Node population {} contains both virtual and non-virtual nodes which can cause ' +
'memory and build-time inefficency. Consider separating virtual nodes into their ' +
'own population').format(pop_name))
# Used in inputs/reports when needed to get all gids belonging to a node population
self._node_sets[pop_name] = NodeSet({'population': pop_name}, self)
def node_properties(self, populations=None):
if populations is None:
selected_pops = self.node_populations
elif isinstance(populations, string_types):
selected_pops = [pop for pop in self.node_populations if pop.name == populations]
else:
selected_pops = [pop for pop in self.node_populations if pop.name in populations]
all_nodes_df = None
for node_pop in selected_pops:
node_pop_df = node_pop.nodes_df()
if 'population' not in node_pop_df:
node_pop_df['population'] = node_pop.name
node_pop_df = node_pop_df.set_index(['population', node_pop_df.index.astype(dtype=np.uint64)])
if all_nodes_df is None:
all_nodes_df = node_pop_df
else:
all_nodes_df = all_nodes_df.append(node_pop_df)
return all_nodes_df
def get_node_groups(self, populations=None):
if populations is None:
selected_pops = self.node_populations
elif isinstance(populations, string_types):
selected_pops = [pop for pop in self.node_populations if pop.name == populations]
else:
selected_pops = [pop for pop in self.node_populations if pop.name in populations]
all_nodes_df = None
for node_pop in selected_pops:
node_pop_df = node_pop.nodes_df(index_by_id=False)
if 'population' not in node_pop_df:
node_pop_df['population'] = node_pop.name
if all_nodes_df is None:
all_nodes_df = node_pop_df
else:
all_nodes_df = all_nodes_df.append(node_pop_df, sort=False)
return all_nodes_df
def get_node_sets(self, populations=None, groupby=None, **filterby):
selected_nodes_df = self.node_properties(populations)
        for k, v in filterby.items():
            if isinstance(v, (np.ndarray, list, tuple)):
                selected_nodes_df = selected_nodes_df[selected_nodes_df[k].isin(v)]
            else:
                selected_nodes_df = selected_nodes_df[selected_nodes_df[k] == v]
if groupby is not None:
return {k: v.tolist() for k, v in selected_nodes_df.groupby(groupby).groups.items()}
else:
return selected_nodes_df.index.tolist()
def add_edges(self, edge_population):
edge_population.initialize(self)
pop_name = edge_population.name
# Check that source_population exists
src_pop_name = edge_population.source_nodes
if src_pop_name not in self._node_populations:
self.io.log_exception('Source node population {} not found. Please update {} edges'.format(src_pop_name,
pop_name))
# Check that the target population exists and contains non-virtual nodes (we cannot synapse onto virt nodes)
trg_pop_name = edge_population.target_nodes
if trg_pop_name not in self._node_populations or self._node_populations[trg_pop_name].virtual_nodes_only:
self.io.log_exception(('Node population {} does not exists (or consists of only virtual nodes). ' +
'{} edges cannot create connections.').format(trg_pop_name, pop_name))
edge_population.set_connection_type(src_pop=self._node_populations[src_pop_name],
trg_pop = self._node_populations[trg_pop_name])
self._edge_populations.append(edge_population)
def load_gap_junc_files(self, gj_dic):
for p in gj_dic:
path = p['gap_juncs_file']
f_name = os.path.basename(path)
network = f_name[:f_name.find("_gap_juncs.h5")]
self._gap_juncs[network] = {}
with h5py.File(path, 'r') as f:
for key in ['source_ids', 'target_ids', 'src_gap_ids', 'trg_gap_ids']:
self._gap_juncs[network][key] = f[key][()]
def build(self):
self.build_nodes()
self.build_recurrent_edges()
def build_nodes(self):
raise NotImplementedError()
def build_recurrent_edges(self, **opts):
raise NotImplementedError()
def build_virtual_connections(self):
raise NotImplementedError()
@classmethod
def from_config(cls, conf, **properties):
"""Generates a graph structure from a json config file or dictionary.
:param conf: name of json config file, or a dictionary with config parameters
:param properties: optional properties.
:return: A graph object of type cls
"""
network = cls(**properties)
# The simulation run script should create a config-dict since it's likely to vary based on the simulator engine,
# however in the case the user doesn't we will try a generic conversion from dict/json to ConfigDict
if isinstance(conf, SimulationConfig):
config = conf
else:
try:
config = SimulationConfig.load(conf)
except Exception as e:
network.io.log_exception('Could not convert {} (type "{}") to json.'.format(conf, type(conf)))
if not config.with_networks:
network.io.log_exception('Could not find any network files. Unable to build network.')
# TODO: These are simulator specific
network.spike_threshold = config.spike_threshold
network.dL = config.dL
# load components
for name, value in config.components.items():
network.add_component(name, value)
# load nodes
gid_map = config.gid_mappings
node_adaptor = network.get_node_adaptor('sonata')
for node_dict in config.nodes:
nodes = sonata_reader.load_nodes(node_dict['nodes_file'], node_dict['node_types_file'], gid_map,
adaptor=node_adaptor)
for node_pop in nodes:
network.add_nodes(node_pop)
# TODO: Raise a warning if more than one internal population and no gids (node_id collision)
# load edges
edge_adaptor = network.get_edge_adaptor('sonata')
for edge_dict in config.edges:
if not edge_dict.get('enabled', True):
continue
edges = sonata_reader.load_edges(edge_dict['edges_file'], edge_dict['edge_types_file'],
adaptor=edge_adaptor)
for edge_pop in edges:
network.add_edges(edge_pop)
network.load_gap_junc_files(config.gap_juncs)
# Add nodeset section
network.add_node_set('all', NodeSetAll(network))
for ns_name, ns_filter in config.node_sets.items():
network.add_node_set(ns_name, NodeSet(ns_filter, network))
return network
@classmethod
def from_manifest(cls, manifest_json):
# TODO: Add adaptors to build a simulation network from model files downloaded celltypes.brain-map.org
raise NotImplementedError()
@classmethod
def from_builder(cls, network):
# TODO: Add adaptors to build a simulation network from a bmtk.builder Network object
raise NotImplementedError()
|
the-stack_0_5448 | import re
from pygments import highlight
from pygments.formatters.html import HtmlFormatter
from pygments.lexers import data
from yapf.yapflib.yapf_api import FormatCode
from ..core import format_json
from ..model.app_data import ExchangeRequest, ExchangeResponse, ApiCall, HttpExchange
internal_var_selector = re.compile(r"\$\{(\w+)\}")
def highlight_format_json(plain_text, formatter=HtmlFormatter()):
if not plain_text:
return ""
return highlight(format_json(plain_text), data.JsonLexer(), formatter)
def api_request_body_highlighted(api_call: ApiCall):
return highlight_format_json(api_call.http_request_body)
def request_body_highlighted(http_request: ExchangeRequest):
return highlight_format_json(http_request.request_body)
def response_body_highlighted(http_response: ExchangeResponse):
return highlight_format_json(http_response.response_body)
def encode_json_string(json_string):
return json_string.replace('"', '\\"')
def get_base_url(api_call: ApiCall):
return api_call.http_url
def get_function_name(api_call: ApiCall):
norm_title = api_call.title.lower().strip()
rgx = r"[^a-zA-Z]"
return re.sub(rgx, "", norm_title)
def dict_formatter(dict_items, form, splitter=","):
return splitter.join([form.format(**locals()) for k, v in dict_items])
def extract_uri(url, servers):
matched_server = next(
(server for server in servers if url.startswith(server)), None
)
if matched_server:
return url.replace(matched_server, "")
return url
def to_curl(api_call: ApiCall, exchange: HttpExchange, compressed=False, verify=True):
if api_call:
http_method = api_call.http_method
http_url = api_call.http_url
req_headers = api_call.enabled_headers()
req_qp = api_call.enabled_query_params()
req_body = api_call.request_body_without_comments()
if exchange.response.http_status_code != 0:
http_method = exchange.request.http_method
http_url = exchange.request.http_url
req_qp = exchange.request.query_params
req_headers = exchange.request.headers
req_body = exchange.request.request_body
elif not api_call:
raise ValueError(
"Unable to make curl request as api_call is null and exchange response is {}".format(
exchange.response
)
)
if req_qp:
http_url = http_url + "?" + "&".join([f"{k}={v}" for k, v in req_qp.items()])
parts = [("curl", None), ("-X", http_method)]
for k, v in sorted(req_headers.items()):
parts += [("-H", "{0}: {1}".format(k, v))]
if req_body:
body = req_body
if isinstance(body, bytes):
body = body.decode("utf-8")
parts += [("-d", body)]
if compressed:
parts += [("--compressed", None)]
if not verify:
parts += [("--insecure", None)]
parts += [(None, http_url)]
flat_parts = []
for k, v in parts:
if k:
flat_parts.append(k)
if v:
flat_parts.append("'{0}'".format(v))
return " ".join(flat_parts)
def format_python_code(unformatted_code):
return FormatCode(unformatted_code, style_config="pep8")
|
the-stack_0_5449 | # Copyright 2021 Chuwei Chen [email protected]
# Copyright 2021 Zhaozhong Qi [email protected]
# ===========START OF STUDENT'S CODE================
"2021FALL EC602 HW5"
def left_rotate(string, num):
"left rotate a string by num (CounterClockwise)"
return string[num:] + string[:num]
def right_rotate(string, num):
"right rotate a string by num (Clockwise)"
return string[-num:] + string[:-num]
def linear(orig: str, modified: str) -> bool:
"Check if one arrangement is linear, i.e. barriers at the ends"
orig = list(orig)
modified = list(modified)
for i in orig:
if abs(orig.index(i) - modified.index(i)) > 1:
return False
return True
def valid(orig: str, modified: str) -> bool:
"Check if one arrangement is valid, i.e. follow the Wedding seating rules"
orig = list(orig)
modified = list(modified)
for i in orig:
index_diff = abs(orig.index(i) - modified.index(i))
if index_diff > 1 and index_diff != (len(orig) - 1):
return False
return True
def find_linears(guests: str) -> list:
"Find all the linear arranges for the given str"
# If only one or empty guests, return it or None
if len(guests) == 1:
return [guests[0]]
if len(guests) == 0:
return None
# Initialize some data structures
linear_list = [guests[0]]
# From left to right, iterate through them cumulatively,
# i.e. 'a ab abc abcd'
for i in range(1, len(guests)):
buffer = []
orig = guests[0:i] + guests[i]
# Linear_list contains all prev. linear arranges
for j in linear_list:
new_arranges = add_person(j, guests[i])
# Get rid of non-linear arranges.
for k in new_arranges:
if linear(orig, k):
buffer.append(k)
linear_list = list(set(buffer))
return sorted(linear_list)
def add_person(orig: str, adder: str) -> list:
"Based on the original arrangement, \
return all new arranges when a new person seats in"
# 1. stay
one = orig + adder
# 2. swap w/ tail
two = list(one)
two[len(two)-1], two[len(two)-2] = two[len(two)-2], two[len(two)-1]
two = "".join(two)
# 3. swap w/ head
thr = list(one)
thr[len(thr)-1], thr[0] = thr[0], thr[len(thr)-1]
thr = "".join(thr)
# 4. swap w/ head & cw rotate
four = list(one)
four = right_rotate(four, 1)
four = "".join(four)
# 5. swap w/ tail & ccw rotate
five = list(one)
five = left_rotate(five, 1)
five = "".join(five)
ans = [one, two, thr, four, five]
return ans
def divide_str(guests: str, bars: list) -> list:
"Divide a string up between barriers"
divided = []
for i, val in enumerate(bars):
if i != len(bars) - 1:
divided.append(guests[val:bars[i+1]])
else:
divided.append(guests[val:]+guests[:bars[0]])
return divided
def countem(upper_limit: list, values: list):
"Return all the permutations"
current = [0] * len(upper_limit)
while True:
temp_string = ""
for i, val in enumerate(current):
temp_string = temp_string + values[i][val]
yield temp_string
j = 0
current[j] += 1
while current[j] == upper_limit[j]:
current[j] = 0
j += 1
if j == len(upper_limit):
return
current[j] += 1
class Wedding:
"The assignment: wedding class"
def __init__(self):
pass
def shuffle(self, guests: str) -> list:
"Return all possible seating arrangements"
# If only one or empty guests, return it or None
if len(guests) == 1:
return [guests[0]]
if len(guests) == 0:
return None
arranges = []
# Find prev. linear arranges
linear_list = find_linears(guests[0:len(guests)-1])
# For each prev. linear arranges, add the last person
for j in linear_list:
new_arranges = add_person(j, guests[len(guests)-1])
# Get rid of invalid arranges.
for k in new_arranges:
if valid(guests, k):
arranges.append(k)
return sorted(list(set(arranges)))
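        # Worked check: shuffle("abc") returns all 6 orderings, because with three
        # circular seats every index shift is 0, 1, or len-1 (the wrap-around
        # neighbour), so valid() accepts every candidate produced above.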
def barriers(self, guests: str, bars: list) -> list:
"Return all possible seating arrangements w/ barriers"
# Initialize some data structures
arranges = []
divided_linear = []
permutations = []
upper_limit = []
# Divide guests up and find their linears
divided = divide_str(guests, bars)
for i in divided:
divided_linear.append(find_linears(i))
# Find upper limit (len of each element) of divided_linear
for i in divided_linear:
upper_limit.append(len(i))
# Find permutations in divided_linear
for i in countem(upper_limit, divided_linear):
permutations.append(i)
# Format adjusting
for i in permutations:
i = right_rotate(i, bars[0])
offset = 0
for j in bars:
i = i[:j+offset] + '|' + i[j+offset:]
offset += 1
arranges.append(i)
return arranges
# ===========END OF STUDENT'S CODE================
def show_result(v, partial=False, ind=None):
v.sort()
if not partial:
print("", len(v), "\n".join(v), sep="\n")
else:
print("", len(v), v[ind], sep="\n")
def standard_tests():
standard = Wedding()
res = standard.shuffle("abc")
show_result(res)
res = standard.shuffle("WXYZ")
show_result(res)
res = standard.barriers("xyz", [0])
show_result(res)
res = standard.shuffle("abc")
show_result(res)
res = standard.shuffle("abcdefXY")
show_result(res)
res = standard.barriers("abcDEFxyz", [2, 5, 7])
show_result(res)
res = standard.barriers("ABCDef", [4])
show_result(res)
res = standard.barriers("bgywqa", [0, 1, 2, 4, 5])
show_result(res)
res = standard.barriers("n", [0])
show_result(res)
res = standard.shuffle("hi")
show_result(res)
def main():
print("""Type quit to exit.
Commands:
tests
s guests
b guests n barriers
sp guests ind
bp guests n barriers ind
""")
w = Wedding()
while True:
asktype = input().split()
if asktype[0] == "quit":
break
elif asktype[0] == "tests":
standard_tests()
elif asktype[0] == "s":
guests = asktype[1]
r = w.shuffle(guests)
show_result(r)
elif asktype[0] == "b":
guests, nbar, bars = asktype[1], asktype[2], asktype[3:]
r = w.barriers(guests, [int(x) for x in bars])
show_result(r)
elif asktype[0] == "sp":
guests, ind = asktype[1:]
r = w.shuffle(guests)
show_result(r, True, int(ind))
elif asktype[0] == "bp":
guests, nbar, bars, ind = asktype[1], \
asktype[2], asktype[3:-1], asktype[-1]
r = w.barriers(guests, [int(x) for x in bars])
show_result(r, True, int(ind))
if __name__ == '__main__':
main()
|
the-stack_0_5450 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import pytest
from backend.dashboard.examples.utils import load_demo_manifest
from backend.tests.conftest import TEST_NAMESPACE
from backend.tests.dashboard.conftest import DASHBOARD_API_URL_COMMON_PREFIX as DAU_PREFIX
from backend.utils.basic import getitems
pytestmark = pytest.mark.django_db
class TestIngress:
""" 测试 Ingress 相关接口 """
manifest = load_demo_manifest('networks/simple_ingress')
create_url = f'{DAU_PREFIX}/networks/ingresses/'
list_url = f'{DAU_PREFIX}/namespaces/{TEST_NAMESPACE}/networks/ingresses/'
inst_url = f"{list_url}{getitems(manifest, 'metadata.name')}/"
def test_create(self, api_client):
""" 测试创建资源接口 """
response = api_client.post(self.create_url, data={'manifest': self.manifest})
assert response.json()['code'] == 0
def test_list(self, api_client):
""" 测试获取资源列表接口 """
response = api_client.get(self.list_url)
assert response.json()['code'] == 0
assert response.data['manifest']['kind'] == 'IngressList'
def test_update(self, api_client):
""" 测试更新资源接口 """
self.manifest['metadata']['annotations'] = {'t_key': 't_val'}
response = api_client.put(self.inst_url, data={'manifest': self.manifest})
assert response.json()['code'] == 0
def test_retrieve(self, api_client):
""" 测试获取单个资源接口 """
response = api_client.get(self.inst_url)
assert response.json()['code'] == 0
assert response.data['manifest']['kind'] == 'Ingress'
assert getitems(response.data, 'manifest.metadata.annotations.t_key') == 't_val'
def test_destroy(self, api_client):
""" 测试删除单个资源 """
response = api_client.delete(self.inst_url)
assert response.json()['code'] == 0
|
the-stack_0_5451 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TPU Strategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import atexit
import collections
import contextlib
import copy
import weakref
from absl import logging
import numpy as np
from tensorflow.compiler.xla.experimental.xla_sharding import xla_sharding
from tensorflow.python.autograph.core import ag_ctx as autograph_ctx
from tensorflow.python.autograph.impl import api as autograph
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribute_utils
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import numpy_dataset
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import tpu_values
from tensorflow.python.distribute import values
from tensorflow.python.distribute.cluster_resolver import TPUClusterResolver
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device_spec
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.tpu import device_assignment as device_assignment_lib # pylint: disable=unused-import
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_strategy_util
from tensorflow.python.tpu import training_loop
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
_XLA_OP_BY_OP_INPUTS_LIMIT = 200
@contextlib.contextmanager
def maybe_init_scope():
if ops.executing_eagerly_outside_functions():
yield
else:
with ops.init_scope():
yield
def validate_run_function(fn):
"""Validate the function passed into strategy.run."""
# We allow three types of functions/objects passed into TPUStrategy
# run in eager mode:
# 1. a user annotated tf.function
# 2. a ConcreteFunction, this is mostly what you get from loading a saved
# model.
# 3. a callable object and the `__call__` method itself is a tf.function.
#
# Otherwise we return an error, because we don't support eagerly running
# run in TPUStrategy.
if context.executing_eagerly() \
and not isinstance(fn, def_function.Function) \
and not isinstance(fn, function.ConcreteFunction) \
and not (callable(fn) and isinstance(fn.__call__, def_function.Function)):
raise NotImplementedError(
"TPUStrategy.run(fn, ...) does not support pure eager "
"execution. please make sure the function passed into "
"`strategy.run` is a `tf.function` or "
"`strategy.run` is called inside a `tf.function` if "
"eager behavior is enabled.")
@tf_export("distribute.TPUStrategy", v1=[])
class TPUStrategyV2(distribute_lib.Strategy):
"""Synchronous training on TPUs and TPU Pods.
To construct a TPUStrategy object, you need to run the
initialization code as below:
>>> resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
>>> tf.config.experimental_connect_to_cluster(resolver)
>>> tf.tpu.experimental.initialize_tpu_system(resolver)
>>> strategy = tf.distribute.TPUStrategy(resolver)
While using distribution strategies, the variables created within the
strategy's scope will be replicated across all the replicas and can be kept in
sync using all-reduce algorithms.
To run TF2 programs on TPUs, you can either use `.compile` and
`.fit` APIs in `tf.keras` with TPUStrategy, or write your own customized
training loop by calling `strategy.run` directly. Note that
TPUStrategy doesn't support pure eager execution, so please make sure the
function passed into `strategy.run` is a `tf.function` or
`strategy.run` is called inside a `tf.function` if eager
behavior is enabled. See more details in https://www.tensorflow.org/guide/tpu.
`distribute_datasets_from_function` and
`experimental_distribute_dataset` APIs can be used to distribute the dataset
across the TPU workers when writing your own training loop. If you are using
`fit` and `compile` methods available in `tf.keras.Model`, then Keras will
handle the distribution for you.
An example of writing customized training loop on TPUs:
>>> with strategy.scope():
... model = tf.keras.Sequential([
... tf.keras.layers.Dense(2, input_shape=(5,)),
... ])
... optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
>>> def dataset_fn(ctx):
... x = np.random.random((2, 5)).astype(np.float32)
... y = np.random.randint(2, size=(2, 1))
... dataset = tf.data.Dataset.from_tensor_slices((x, y))
... return dataset.repeat().batch(1, drop_remainder=True)
>>> dist_dataset = strategy.distribute_datasets_from_function(
... dataset_fn)
>>> iterator = iter(dist_dataset)
>>> @tf.function()
... def train_step(iterator):
...
... def step_fn(inputs):
... features, labels = inputs
... with tf.GradientTape() as tape:
... logits = model(features, training=True)
... loss = tf.keras.losses.sparse_categorical_crossentropy(
... labels, logits)
...
... grads = tape.gradient(loss, model.trainable_variables)
... optimizer.apply_gradients(zip(grads, model.trainable_variables))
...
... strategy.run(step_fn, args=(next(iterator),))
>>> train_step(iterator)
For the advanced use cases like model parallelism, you can set
`experimental_device_assignment` argument when creating TPUStrategy to specify
number of replicas and number of logical devices. Below is an example to
initialize TPU system with 2 logical devices and 1 replica.
>>> resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
>>> tf.config.experimental_connect_to_cluster(resolver)
>>> topology = tf.tpu.experimental.initialize_tpu_system(resolver)
>>> device_assignment = tf.tpu.experimental.DeviceAssignment.build(
... topology,
... computation_shape=[1, 1, 1, 2],
... num_replicas=1)
>>> strategy = tf.distribute.TPUStrategy(
... resolver, experimental_device_assignment=device_assignment)
Then you can run a `tf.add` operation only on logical device 0.
>>> @tf.function()
... def step_fn(inputs):
... features, _ = inputs
... output = tf.add(features, features)
...
... # Add operation will be executed on logical device 0.
... output = strategy.experimental_assign_to_logical_device(output, 0)
... return output
>>> dist_dataset = strategy.distribute_datasets_from_function(
... dataset_fn)
>>> iterator = iter(dist_dataset)
>>> strategy.run(step_fn, args=(next(iterator),))
"""
def __init__(self,
tpu_cluster_resolver=None,
experimental_device_assignment=None):
"""Synchronous training in TPU donuts or Pods.
Args:
tpu_cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver,
which provides information about the TPU cluster. If None, it will
assume running on a local TPU worker.
experimental_device_assignment: Optional
`tf.tpu.experimental.DeviceAssignment` to specify the placement of
replicas on the TPU cluster.
"""
super(TPUStrategyV2, self).__init__(TPUExtended(
self, tpu_cluster_resolver,
device_assignment=experimental_device_assignment))
distribute_lib.distribution_strategy_gauge.get_cell("V2").set("TPUStrategy")
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_workers").set(self.extended.num_hosts)
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_replicas_per_worker").set(self.extended.num_replicas_per_host)
# Packed variable is used to reduce the overhead of function execution.
# For a DistributedVariable, only one variable handle is captured into a
# function graph. It's only supported in eager mode.
self._enable_packed_variable_in_eager_mode = True
def run(self, fn, args=(), kwargs=None, options=None):
"""Run the computation defined by `fn` on each TPU replica.
Executes ops specified by `fn` on each replica. If `args` or `kwargs` have
`tf.distribute.DistributedValues`, such as those produced by a
`tf.distribute.DistributedDataset` from
`tf.distribute.Strategy.experimental_distribute_dataset` or
`tf.distribute.Strategy.distribute_datasets_from_function`,
when `fn` is executed on a particular replica, it will be executed with the
component of `tf.distribute.DistributedValues` that correspond to that
replica.
`fn` may call `tf.distribute.get_replica_context()` to access members such
as `all_reduce`.
All arguments in `args` or `kwargs` should either be nest of tensors or
`tf.distribute.DistributedValues` containing tensors or composite tensors.
Example usage:
>>> resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
>>> tf.config.experimental_connect_to_cluster(resolver)
>>> tf.tpu.experimental.initialize_tpu_system(resolver)
>>> strategy = tf.distribute.TPUStrategy(resolver)
>>> @tf.function
... def run():
... def value_fn(value_context):
... return value_context.num_replicas_in_sync
... distributed_values = (
... strategy.experimental_distribute_values_from_function(value_fn))
... def replica_fn(input):
... return input * 2
... return strategy.run(replica_fn, args=(distributed_values,))
>>> result = run()
Args:
fn: The function to run. The output must be a `tf.nest` of `Tensor`s.
args: (Optional) Positional arguments to `fn`.
kwargs: (Optional) Keyword arguments to `fn`.
options: (Optional) An instance of `tf.distribute.RunOptions` specifying
the options to run `fn`.
Returns:
Merged return value of `fn` across replicas. The structure of the return
value is the same as the return value from `fn`. Each element in the
structure can either be `tf.distribute.DistributedValues`, `Tensor`
objects, or `Tensor`s (for example, if running on a single replica).
"""
validate_run_function(fn)
# Note: the target function is converted to graph even when in Eager mode,
# so autograph is on by default here.
fn = autograph.tf_convert(fn, autograph_ctx.control_status_ctx())
options = options or distribute_lib.RunOptions()
return self.extended.tpu_run(fn, args, kwargs, options)
def experimental_assign_to_logical_device(self, tensor, logical_device_id):
"""Adds annotation that `tensor` will be assigned to a logical device.
This adds an annotation to `tensor` specifying that operations on
`tensor` will be invoked on logical core device id `logical_device_id`.
When model parallelism is used, the default behavior is that all ops
are placed on zero-th logical device.
```python
# Initializing TPU system with 2 logical devices and 4 replicas.
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
tf.config.experimental_connect_to_cluster(resolver)
topology = tf.tpu.experimental.initialize_tpu_system(resolver)
device_assignment = tf.tpu.experimental.DeviceAssignment.build(
topology,
computation_shape=[1, 1, 1, 2],
num_replicas=4)
strategy = tf.distribute.TPUStrategy(
resolver, experimental_device_assignment=device_assignment)
iterator = iter(inputs)
@tf.function()
def step_fn(inputs):
output = tf.add(inputs, inputs)
# Add operation will be executed on logical device 0.
output = strategy.experimental_assign_to_logical_device(output, 0)
return output
strategy.run(step_fn, args=(next(iterator),))
```
Args:
tensor: Input tensor to annotate.
logical_device_id: Id of the logical core to which the tensor will be
assigned.
Raises:
ValueError: The logical device id presented is not consistent with total
number of partitions specified by the device assignment.
Returns:
Annotated tensor with identical value as `tensor`.
"""
num_logical_devices_per_replica = self.extended._tpu_devices.shape[1] # pylint: disable=protected-access
if (logical_device_id < 0 or
logical_device_id >= num_logical_devices_per_replica):
      raise ValueError("`logical_device_id` to assign must be lower than the "
                       "total number of logical devices per replica. Received "
                       "logical device id {} but there are only a total of {} "
                       "logical devices per replica.".format(
                           logical_device_id, num_logical_devices_per_replica))
return xla_sharding.assign_device(
tensor, logical_device_id, use_sharding_op=True)
def experimental_split_to_logical_devices(self, tensor, partition_dimensions):
"""Adds annotation that `tensor` will be split across logical devices.
    This adds an annotation to tensor `tensor` specifying that operations on
    `tensor` will be split among multiple logical devices. Tensor `tensor`
    will be split across dimensions specified by `partition_dimensions`.
    The dimensions of `tensor` must be divisible by the corresponding value in
    `partition_dimensions`.
    For example, for a system with 8 logical devices, if `tensor` is an image
    tensor with shape (batch_size, width, height, channel) and
    `partition_dimensions` is [1, 2, 4, 1], then `tensor` will be split
    2 ways in the width dimension and 4 ways in the height dimension and the
    split tensor values will be fed into 8 logical devices.
```python
# Initializing TPU system with 8 logical devices and 1 replica.
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
tf.config.experimental_connect_to_cluster(resolver)
topology = tf.tpu.experimental.initialize_tpu_system(resolver)
device_assignment = tf.tpu.experimental.DeviceAssignment.build(
topology,
computation_shape=[1, 2, 2, 2],
num_replicas=1)
strategy = tf.distribute.TPUStrategy(
resolver, experimental_device_assignment=device_assignment)
iterator = iter(inputs)
@tf.function()
def step_fn(inputs):
inputs = strategy.experimental_split_to_logical_devices(
inputs, [1, 2, 4, 1])
# model() function will be executed on 8 logical devices with `inputs`
# split 2 * 4 ways.
output = model(inputs)
return output
strategy.run(step_fn, args=(next(iterator),))
```
Args:
tensor: Input tensor to annotate.
partition_dimensions: An unnested list of integers with the size equal to
rank of `tensor` specifying how `tensor` will be partitioned. The
product of all elements in `partition_dimensions` must be equal to the
total number of logical devices per replica.
Raises:
      ValueError: 1) If the size of `partition_dimensions` does not equal the
        rank of `tensor`, or 2) if the product of the elements of
        `partition_dimensions` does not match the number of logical devices per
        replica defined by the implementing DistributionStrategy's device
        specification, or 3) if a known size of `tensor` is not divisible by
        the corresponding value in `partition_dimensions`.
Returns:
Annotated tensor with identical value as `tensor`.
"""
num_logical_devices_per_replica = self.extended._tpu_devices.shape[1] # pylint: disable=protected-access
num_partition_splits = np.prod(partition_dimensions)
input_shape = tensor.shape
tensor_rank = len(input_shape)
if tensor_rank != len(partition_dimensions):
      raise ValueError("Length of `partition_dimensions` ({}) must be "
                       "equal to the rank of `tensor` ({}).".format(
                           len(partition_dimensions), tensor_rank))
for dim_index, dim_size in enumerate(input_shape):
if dim_size is None:
continue
split_size = partition_dimensions[dim_index]
if dim_size % split_size != 0:
raise ValueError("Tensor shape at dimension ({}) must be "
"divisible by corresponding value specified "
"by `partition_dimensions` ({}).".format(
dim_index, split_size))
if num_partition_splits != num_logical_devices_per_replica:
raise ValueError("Number of logical devices ({}) does not match the "
"number of partition splits specified ({}).".format(
num_logical_devices_per_replica,
num_partition_splits))
tile_assignment = np.arange(num_partition_splits).reshape(
partition_dimensions)
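    # For example, partition_dimensions=[1, 2, 4, 1] builds
    # np.arange(8).reshape([1, 2, 4, 1]), so the tile at (width, height)
    # position (w, h) is assigned to logical device w * 4 + h.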
return xla_sharding.tile(tensor, tile_assignment, use_sharding_op=True)
def experimental_replicate_to_logical_devices(self, tensor):
"""Adds annotation that `tensor` will be replicated to all logical devices.
This adds an annotation to tensor `tensor` specifying that operations on
`tensor` will be invoked on all logical devices.
```python
    # Initializing TPU system with 8 logical devices and 1 replica.
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
tf.config.experimental_connect_to_cluster(resolver)
topology = tf.tpu.experimental.initialize_tpu_system(resolver)
device_assignment = tf.tpu.experimental.DeviceAssignment.build(
topology,
        computation_shape=[1, 2, 2, 2],
        num_replicas=1)
strategy = tf.distribute.TPUStrategy(
resolver, experimental_device_assignment=device_assignment)
iterator = iter(inputs)
@tf.function()
def step_fn(inputs):
images, labels = inputs
      images = strategy.experimental_split_to_logical_devices(
          images, [1, 2, 4, 1])
      # model() function will be executed on 8 logical devices with `images`
      # split 2 * 4 ways.
      output = model(images)
# For loss calculation, all logical devices share the same logits
# and labels.
labels = strategy.experimental_replicate_to_logical_devices(labels)
output = strategy.experimental_replicate_to_logical_devices(output)
loss = loss_fn(labels, output)
return loss
strategy.run(step_fn, args=(next(iterator),))
```
Args:
tensor: Input tensor to annotate.
Returns:
Annotated tensor with identical value as `tensor`.
"""
return xla_sharding.replicate(tensor, use_sharding_op=True)
@tf_export("distribute.experimental.TPUStrategy", v1=[])
@deprecation.deprecated_endpoints("distribute.experimental.TPUStrategy")
class TPUStrategy(distribute_lib.Strategy):
"""Synchronous training on TPUs and TPU Pods.
To construct a TPUStrategy object, you need to run the
initialization code as below:
>>> resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
>>> tf.config.experimental_connect_to_cluster(resolver)
>>> tf.tpu.experimental.initialize_tpu_system(resolver)
>>> strategy = tf.distribute.experimental.TPUStrategy(resolver)
While using distribution strategies, the variables created within the
strategy's scope will be replicated across all the replicas and can be kept in
sync using all-reduce algorithms.
To run TF2 programs on TPUs, you can either use `.compile` and
`.fit` APIs in `tf.keras` with TPUStrategy, or write your own customized
training loop by calling `strategy.run` directly. Note that
TPUStrategy doesn't support pure eager execution, so please make sure the
function passed into `strategy.run` is a `tf.function` or
`strategy.run` is called inside a `tf.function` if eager
behavior is enabled.
"""
def __init__(self,
tpu_cluster_resolver=None,
device_assignment=None):
"""Synchronous training in TPU donuts or Pods.
Args:
tpu_cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver,
which provides information about the TPU cluster.
device_assignment: Optional `tf.tpu.experimental.DeviceAssignment` to
specify the placement of replicas on the TPU cluster.
"""
    logging.warning(
        "`tf.distribute.experimental.TPUStrategy` is deprecated, please use "
        "the non-experimental symbol `tf.distribute.TPUStrategy` instead.")
super(TPUStrategy, self).__init__(TPUExtended(
self, tpu_cluster_resolver, device_assignment=device_assignment))
distribute_lib.distribution_strategy_gauge.get_cell("V2").set("TPUStrategy")
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_workers").set(self.extended.num_hosts)
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_replicas_per_worker").set(self.extended.num_replicas_per_host)
# Packed variable is used to reduce the overhead of function execution.
# For a DistributedVariable, only one variable handle is captured into a
# function graph. It's only supported in eager mode.
self._enable_packed_variable_in_eager_mode = True
# TODO(cjfj): Modify `_call_for_each_replica` in `TPUExtended` such that this
# can use the default implementation.
# This implementation runs a single step. It does not use infeed or outfeed.
def run(self, fn, args=(), kwargs=None, options=None):
"""See base class."""
validate_run_function(fn)
# Note: the target function is converted to graph even when in Eager mode,
# so autograph is on by default here.
fn = autograph.tf_convert(fn, autograph_ctx.control_status_ctx())
options = options or distribute_lib.RunOptions()
return self.extended.tpu_run(fn, args, kwargs, options)
@property
def cluster_resolver(self):
"""Returns the cluster resolver associated with this strategy.
`tf.distribute.experimental.TPUStrategy` provides the
associated `tf.distribute.cluster_resolver.ClusterResolver`. If the user
provides one in `__init__`, that instance is returned; if the user does
not, a default
`tf.distribute.cluster_resolver.TPUClusterResolver` is provided.
"""
return self.extended._tpu_cluster_resolver # pylint: disable=protected-access
@tf_export(v1=["distribute.experimental.TPUStrategy"])
class TPUStrategyV1(distribute_lib.StrategyV1):
"""TPU distribution strategy implementation."""
def __init__(self,
tpu_cluster_resolver=None,
steps_per_run=None,
device_assignment=None):
"""Initializes the TPUStrategy object.
Args:
tpu_cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver,
which provides information about the TPU cluster.
steps_per_run: Number of steps to run on device before returning to the
host. Note that this can have side-effects on performance, hooks,
metrics, summaries etc.
This parameter is only used when Distribution Strategy is used with
estimator or keras.
device_assignment: Optional `tf.tpu.experimental.DeviceAssignment` to
specify the placement of replicas on the TPU cluster. Currently only
        supports the use case of using a single core within a TPU cluster.
"""
super(TPUStrategyV1, self).__init__(TPUExtended(
self, tpu_cluster_resolver, steps_per_run, device_assignment))
distribute_lib.distribution_strategy_gauge.get_cell("V1").set("TPUStrategy")
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_workers").set(self.extended.num_hosts)
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_replicas_per_worker").set(self.extended.num_replicas_per_host)
# Packed variable is used to reduce the overhead of function execution.
# For a DistributedVariable, only one variable handle is captured into a
# function graph. It's only supported in eager mode.
self._enable_packed_variable_in_eager_mode = True
@property
def steps_per_run(self):
"""DEPRECATED: use .extended.steps_per_run instead."""
return self._extended.steps_per_run
# TODO(cjfj): Modify `_call_for_each_replica` in `TPUExtended` such that this
# can use the default implementation.
# This implementation runs a single step. It does not use infeed or outfeed.
def run(self, fn, args=(), kwargs=None, options=None):
"""Run `fn` on each replica, with the given arguments.
Executes ops specified by `fn` on each replica. If `args` or `kwargs` have
"per-replica" values, such as those produced by a "distributed `Dataset`",
when `fn` is executed on a particular replica, it will be executed with the
component of those "per-replica" values that correspond to that replica.
`fn` may call `tf.distribute.get_replica_context()` to access members such
as `all_reduce`.
All arguments in `args` or `kwargs` should either be nest of tensors or
per-replica objects containing tensors or composite tensors.
Users can pass strategy specific options to `options` argument. An example
to enable bucketizing dynamic shapes in `TPUStrategy.run`
is:
>>> resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
>>> tf.config.experimental_connect_to_cluster(resolver)
>>> tf.tpu.experimental.initialize_tpu_system(resolver)
>>> strategy = tf.distribute.experimental.TPUStrategy(resolver)
>>> options = tf.distribute.RunOptions(
... experimental_bucketizing_dynamic_shape=True)
>>> dataset = tf.data.Dataset.range(
... strategy.num_replicas_in_sync, output_type=dtypes.float32).batch(
... strategy.num_replicas_in_sync, drop_remainder=True)
>>> input_iterator = iter(strategy.experimental_distribute_dataset(dataset))
>>> @tf.function()
... def step_fn(inputs):
... output = tf.reduce_sum(inputs)
... return output
>>> strategy.run(step_fn, args=(next(input_iterator),), options=options)
Args:
fn: The function to run. The output must be a `tf.nest` of `Tensor`s.
args: (Optional) Positional arguments to `fn`.
kwargs: (Optional) Keyword arguments to `fn`.
options: (Optional) An instance of `tf.distribute.RunOptions` specifying
the options to run `fn`.
Returns:
Merged return value of `fn` across replicas. The structure of the return
value is the same as the return value from `fn`. Each element in the
structure can either be "per-replica" `Tensor` objects or `Tensor`s
(for example, if running on a single replica).
"""
validate_run_function(fn)
fn = autograph.tf_convert(fn, autograph_ctx.control_status_ctx())
options = options or distribute_lib.RunOptions()
return self.extended.tpu_run(fn, args, kwargs, options)
# TODO(josh11b): Switch to V2 when we no longer need to support tf.compat.v1.
class TPUExtended(distribute_lib.StrategyExtendedV1):
"""Implementation of TPUStrategy."""
def __init__(self,
container_strategy,
tpu_cluster_resolver=None,
steps_per_run=None,
device_assignment=None):
super(TPUExtended, self).__init__(container_strategy)
if tpu_cluster_resolver is None:
tpu_cluster_resolver = TPUClusterResolver("")
if steps_per_run is None:
# TODO(frankchn): Warn when we are being used by DS/Keras and this is
# not specified.
steps_per_run = 1
# `self._tpu_function_cache` is a dict of `tf.function`s, thus if a
# `tf.function` is passed into `strategy.run` in eager mode, the
# `tf.function` won't get retraced.
self._tpu_function_cache = weakref.WeakKeyDictionary()
self._tpu_cluster_resolver = tpu_cluster_resolver
self._tpu_metadata = self._tpu_cluster_resolver.get_tpu_system_metadata()
self._device_assignment = device_assignment
tpu_devices_flat = [
d.name for d in self._tpu_metadata.devices if "device:TPU:" in d.name]
# `self._tpu_devices` is a two-dimensional NumPy array of strings. It is
# indexed using `[replica_id][logical_device_id]`.
if device_assignment is None:
self._tpu_devices = np.array(
[[d] for d in tpu_devices_flat], dtype=object)
else:
job_name = device_spec.DeviceSpecV2.from_string(tpu_devices_flat[0]).job
tpu_devices = []
for replica_id in range(device_assignment.num_replicas):
replica_devices = []
for logical_core in range(device_assignment.num_cores_per_replica):
replica_devices.append(
device_util.canonicalize(
device_assignment.tpu_device(
replica=replica_id,
logical_core=logical_core,
job=job_name)))
tpu_devices.append(replica_devices)
self._tpu_devices = np.array(tpu_devices, dtype=object)
self._host_device = device_util.get_host_for_device(self._tpu_devices[0][0])
# Preload the data onto the TPUs. Currently we always preload onto logical
# device 0 for each replica.
# TODO(cjfj): Create `InputWorkers` lazily, allowing users to place the
# input onto a different logical device?
self._device_input_worker_devices = collections.OrderedDict()
self._host_input_worker_devices = collections.OrderedDict()
for tpu_device in self._tpu_devices[:, 0]:
host_device = device_util.get_host_for_device(tpu_device)
self._device_input_worker_devices.setdefault(host_device, [])
self._device_input_worker_devices[host_device].append(tpu_device)
self._host_input_worker_devices.setdefault(host_device, [])
self._host_input_worker_devices[host_device].append(host_device)
# TODO(sourabhbajaj): Remove this once performance of running one step
# at a time is comparable to multiple steps.
self.steps_per_run = steps_per_run
self._require_static_shapes = True
self.experimental_enable_get_next_as_optional = True
self._logical_device_stack = [0]
if context.executing_eagerly():
# In async remote eager, we want to sync the executors before exiting the
# program.
def async_wait():
if context.context()._context_handle is not None: # pylint: disable=protected-access
context.async_wait()
atexit.register(async_wait)
# Flag to turn on VariablePolicy
self._use_var_policy = False
def _validate_colocate_with_variable(self, colocate_with_variable):
    distribute_utils.validate_colocate(colocate_with_variable, self)
def _make_dataset_iterator(self, dataset):
"""Make iterators for each of the TPU hosts."""
input_workers = input_lib.InputWorkers(
tuple(self._device_input_worker_devices.items()))
return input_lib.DatasetIterator(
dataset,
input_workers,
self._container_strategy(),
split_batch_by=self._num_replicas_in_sync)
def _make_input_fn_iterator(
self,
input_fn,
replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):
input_contexts = []
input_workers = input_lib.InputWorkers(
tuple(self._device_input_worker_devices.items()))
num_workers = input_workers.num_workers
for i in range(num_workers):
input_contexts.append(distribute_lib.InputContext(
num_input_pipelines=num_workers,
input_pipeline_id=i,
num_replicas_in_sync=self._num_replicas_in_sync))
return input_lib.InputFunctionIterator(
input_fn,
input_workers,
input_contexts,
self._container_strategy())
def _experimental_make_numpy_dataset(self, numpy_input, session):
return numpy_dataset.one_host_numpy_dataset(
numpy_input, numpy_dataset.SingleDevice(self._host_device),
session)
def _get_input_workers(self, options):
if not options or options.experimental_prefetch_to_device:
return input_lib.InputWorkers(
tuple(self._device_input_worker_devices.items()))
else:
return input_lib.InputWorkers(
tuple(self._host_input_worker_devices.items()))
def _check_spec(self, element_spec):
if isinstance(element_spec, values.PerReplicaSpec):
element_spec = element_spec._component_specs # pylint: disable=protected-access
specs = nest.flatten_with_joined_string_paths(element_spec)
for path, spec in specs:
if isinstance(spec, (sparse_tensor.SparseTensorSpec,
ragged_tensor.RaggedTensorSpec)):
        raise ValueError(
            "Found tensor {} with spec {}. TPUStrategy does not support "
            "distributed datasets with device prefetch when using sparse or "
            "ragged tensors. If you intend to use sparse or ragged tensors, "
            "please pass a tf.distribute.InputOptions object with "
            "experimental_prefetch_to_device set to False to your dataset "
            "distribution function.".format(path, type(spec)))
def _experimental_distribute_dataset(self, dataset, options):
if options is None or options.experimental_prefetch_to_device:
self._check_spec(dataset.element_spec)
return input_lib.get_distributed_dataset(
dataset,
self._get_input_workers(options),
self._container_strategy(),
split_batch_by=self._num_replicas_in_sync)
def _distribute_datasets_from_function(self, dataset_fn, options):
input_workers = self._get_input_workers(options)
input_contexts = []
num_workers = input_workers.num_workers
for i in range(num_workers):
input_contexts.append(distribute_lib.InputContext(
num_input_pipelines=num_workers,
input_pipeline_id=i,
num_replicas_in_sync=self._num_replicas_in_sync))
distributed_dataset = input_lib.get_distributed_datasets_from_function(
dataset_fn,
input_workers,
input_contexts,
self._container_strategy())
# We can only check after the dataset_fn is called.
if options is None or options.experimental_prefetch_to_device:
self._check_spec(distributed_dataset.element_spec)
return distributed_dataset
def _experimental_distribute_values_from_function(self, value_fn):
per_replica_values = []
for replica_id in range(self._num_replicas_in_sync):
per_replica_values.append(
value_fn(distribute_lib.ValueContext(replica_id,
self._num_replicas_in_sync)))
return distribute_utils.regroup(per_replica_values, always_wrap=True)
# TODO(priyag): Deal with OutOfRange errors once b/111349762 is fixed.
# TODO(sourabhbajaj): Remove the initial_loop_values parameter when we have
# a mechanism to infer the outputs of `fn`. Pending b/110550782.
def _experimental_run_steps_on_iterator(
self, fn, multi_worker_iterator, iterations, initial_loop_values=None):
# Wrap `fn` for repeat.
if initial_loop_values is None:
initial_loop_values = {}
initial_loop_values = nest.flatten(initial_loop_values)
ctx = input_lib.MultiStepContext()
def run_fn(inputs):
"""Single step on the TPU device."""
fn_result = fn(ctx, inputs)
flat_last_step_outputs = nest.flatten(ctx.last_step_outputs)
if flat_last_step_outputs:
with ops.control_dependencies([fn_result]):
return [array_ops.identity(f) for f in flat_last_step_outputs]
else:
return fn_result
# We capture the control_flow_context at this point, before we run `fn`
# inside a while_loop and TPU replicate context. This is useful in cases
# where we might need to exit these contexts and get back to the outer
    # context to do some things, e.g. to create an op which should be
# evaluated only once at the end of the loop on the host. One such usage
# is in creating metrics' value op.
self._outer_control_flow_context = (
ops.get_default_graph()._get_control_flow_context()) # pylint: disable=protected-access
def rewrite_fn(*args):
"""The rewritten step fn running on TPU."""
del args
per_replica_inputs = multi_worker_iterator.get_next()
replicate_inputs = []
for replica_id in range(self._num_replicas_in_sync):
select_replica = lambda x: distribute_utils.select_replica( # pylint: disable=g-long-lambda
replica_id, x) # pylint: disable=cell-var-from-loop
replicate_inputs.append((nest.map_structure(
select_replica, per_replica_inputs),))
replicate_outputs = tpu.replicate(
run_fn,
replicate_inputs,
device_assignment=self._device_assignment,
xla_options=tpu.XLAOptions(use_spmd_for_xla_partitioning=False))
      # If run_fn has tensor outputs, tpu.replicate returns a list of lists. We
# will flatten it in this case. If run_fn has no tensor outputs,
# tpu.replicate returns a list of no_ops, we will keep the output as it
# is.
if isinstance(replicate_outputs[0], list):
replicate_outputs = nest.flatten(replicate_outputs)
return replicate_outputs
# TODO(sourabhbajaj): The input to while loop should be based on the
# output type of the step_fn
assert isinstance(initial_loop_values, list)
initial_loop_values = initial_loop_values * self._num_replicas_in_sync
# Put the while loop op on TPU host 0.
with ops.device(self._host_device):
if self.steps_per_run == 1:
replicate_outputs = rewrite_fn()
else:
replicate_outputs = training_loop.repeat(iterations, rewrite_fn,
initial_loop_values)
del self._outer_control_flow_context
ctx.run_op = control_flow_ops.group(replicate_outputs)
if isinstance(replicate_outputs, list):
# Filter out any ops from the outputs, typically this would be the case
# when there were no tensor outputs.
last_step_tensor_outputs = [
x for x in replicate_outputs if not isinstance(x, ops.Operation)
]
# Outputs are currently of the structure (flattened)
# [output0_device0, output1_device0, output2_device0,
# output0_device1, output1_device1, output2_device1,
# ...]
# Convert this to the following structure instead: (grouped by output)
# [[output0_device0, output0_device1],
# [output1_device0, output1_device1],
# [output2_device0, output2_device1]]
output_num = len(last_step_tensor_outputs) // self._num_replicas_in_sync
last_step_tensor_outputs = [
last_step_tensor_outputs[i::output_num] for i in range(output_num)
]
else:
# no tensors returned.
last_step_tensor_outputs = []
_set_last_step_outputs(ctx, last_step_tensor_outputs)
return ctx
def _call_for_each_replica(self, fn, args, kwargs):
# TODO(jhseu): Consider making it so call_for_each_replica implies that
# we're in a tpu.rewrite(), and update TPUMirroredVariable accordingly.
with _TPUReplicaContext(self._container_strategy()):
return fn(*args, **kwargs)
@contextlib.contextmanager
def experimental_logical_device(self, logical_device_id):
"""Places variables and ops on the specified logical device."""
num_logical_devices_per_replica = self._tpu_devices.shape[1]
if logical_device_id >= num_logical_devices_per_replica:
raise ValueError(
"`logical_device_id` not in range (was {}, but there are only {} "
"logical devices per replica).".format(
logical_device_id, num_logical_devices_per_replica))
self._logical_device_stack.append(logical_device_id)
try:
if tpu_values.enclosing_tpu_context() is None:
yield
else:
with ops.device(tpu.core(logical_device_id)):
yield
finally:
self._logical_device_stack.pop()
def _experimental_initialize_system(self):
"""Experimental method added to be used by Estimator.
This is a private method only to be used by Estimator. Other frameworks
    should call `tf.tpu.experimental.initialize_tpu_system` directly.
"""
tpu_strategy_util.initialize_tpu_system(self._tpu_cluster_resolver)
def _create_variable(self, next_creator, **kwargs):
"""Create a TPUMirroredVariable. See `DistributionStrategy.scope`."""
if kwargs.pop("skip_mirrored_creator", False):
return next_creator(**kwargs)
colocate_with = kwargs.pop("colocate_with", None)
if colocate_with is None:
devices = self._tpu_devices[:, self._logical_device_stack[-1]]
elif isinstance(colocate_with, numpy_dataset.SingleDevice):
with ops.device(colocate_with.device):
return next_creator(**kwargs)
else:
devices = colocate_with._devices # pylint: disable=protected-access
def _real_mirrored_creator(**kwargs): # pylint: disable=g-missing-docstring
initial_value = None
value_list = []
for i, d in enumerate(devices):
with ops.device(d):
if i == 0:
initial_value = kwargs["initial_value"]
# Note: some v1 code expects variable initializer creation to happen
# inside a init_scope.
with maybe_init_scope():
initial_value = initial_value() if callable(
initial_value) else initial_value
if i > 0:
# Give replicas meaningful distinct names:
var0name = value_list[0].name.split(":")[0]
# We append a / to variable names created on replicas with id > 0 to
# ensure that we ignore the name scope and instead use the given
# name as the absolute name of the variable.
kwargs["name"] = "%s/replica_%d/" % (var0name, i)
kwargs["initial_value"] = initial_value
with context.device_policy(context.DEVICE_PLACEMENT_SILENT):
v = next_creator(**kwargs)
assert not isinstance(v, tpu_values.TPUMirroredVariable)
value_list.append(v)
return value_list
return distribute_utils.create_mirrored_variable(
self._container_strategy(), _real_mirrored_creator,
distribute_utils.TPU_VARIABLE_CLASS_MAPPING,
distribute_utils.TPU_VARIABLE_POLICY_MAPPING, **kwargs)
def _reduce_to(self, reduce_op, value, destinations, experimental_hints):
if (isinstance(value, values.DistributedValues) or
tensor_util.is_tensor(value)
) and tpu_values.enclosing_tpu_context() is not None:
if reduce_op == reduce_util.ReduceOp.MEAN:
# TODO(jhseu): Revisit once we support model-parallelism.
value *= (1. / self._num_replicas_in_sync)
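        # Pre-scaling by 1/num_replicas turns the cross_replica_sum below into
        # a mean.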
elif reduce_op != reduce_util.ReduceOp.SUM:
raise NotImplementedError(
"Currently only support sum & mean in TPUStrategy.")
return tpu_ops.cross_replica_sum(value)
if not isinstance(value, values.DistributedValues):
# This function handles reducing values that are not PerReplica or
# Mirrored values. For example, the same value could be present on all
# replicas in which case `value` would be a single value or value could
# be 0.
return cross_device_ops_lib.reduce_non_distributed_value(
reduce_op, value, destinations, self._num_replicas_in_sync)
value_list = value.values
# pylint: disable=protected-access
if isinstance(
value,
values.DistributedVariable) and value._packed_variable is not None:
value_list = tuple(
value._packed_variable.on_device(d)
for d in value._packed_variable.devices)
# pylint: enable=protected-access
# Currently XLA op by op mode has a limit for the number of inputs for a
# single op, thus we break one `add_n` op into a group of `add_n` ops to
# work around the constraint.
# TODO(cjfj): Detect when it is possible to use `cross_replica_sum`.
if len(value.values) <= _XLA_OP_BY_OP_INPUTS_LIMIT:
output = math_ops.add_n(value_list)
else:
output = array_ops.zeros_like(value_list[0], dtype=value_list[0].dtype)
for i in range(0, len(value_list), _XLA_OP_BY_OP_INPUTS_LIMIT):
output += math_ops.add_n(value_list[i:i + _XLA_OP_BY_OP_INPUTS_LIMIT])
if reduce_op == reduce_util.ReduceOp.MEAN:
output *= (1. / len(value_list))
devices = cross_device_ops_lib.get_devices_from(destinations)
if len(devices) == 1:
# If necessary, copy to requested destination.
dest_canonical = device_util.canonicalize(devices[0])
host_canonical = device_util.canonicalize(self._host_device)
if dest_canonical != host_canonical:
with ops.device(dest_canonical):
output = array_ops.identity(output)
else:
output = cross_device_ops_lib.simple_broadcast(output, destinations)
return output
def _update(self, var, fn, args, kwargs, group):
assert isinstance(var, tpu_values.TPUVariableMixin) or isinstance(
var, resource_variable_ops.BaseResourceVariable)
if tpu_values.enclosing_tpu_context() is not None:
if group:
return fn(var, *args, **kwargs)
else:
return (fn(var, *args, **kwargs),)
# Otherwise, we revert to MirroredStrategy behavior and update the variable
# on each replica directly.
updates = []
values_and_devices = []
packed_var = var._packed_variable # pylint: disable=protected-access
if packed_var is not None:
for device in packed_var.devices:
values_and_devices.append((packed_var, device))
else:
for value in var.values:
values_and_devices.append((value, value.device))
for i, value_and_device in enumerate(values_and_devices):
value = value_and_device[0]
device = value_and_device[1]
name = "update_%d" % i
with ops.device(device), \
distribute_lib.UpdateContext(i), \
ops.name_scope(name):
# If args and kwargs are not mirrored, the value is returned as is.
updates.append(
fn(value, *distribute_utils.select_replica_mirrored(i, args),
**distribute_utils.select_replica_mirrored(i, kwargs)))
return distribute_utils.update_regroup(self, updates, group)
def read_var(self, var):
assert isinstance(var, tpu_values.TPUVariableMixin) or isinstance(
var, resource_variable_ops.BaseResourceVariable)
return var.read_value()
def _local_results(self, val):
if isinstance(val, values.DistributedValues):
return val.values
return (val,)
def value_container(self, value):
return value
def _broadcast_to(self, tensor, destinations):
del destinations
# This is both a fast path for Python constants, and a way to delay
# converting Python values to a tensor until we know what type it
# should be converted to. Otherwise we have trouble with:
# global_step.assign_add(1)
# since the `1` gets broadcast as an int32 but global_step is int64.
if isinstance(tensor, (float, int)):
return tensor
if tpu_values.enclosing_tpu_context() is not None:
broadcast_tensor = [tensor for _ in range(self._num_replicas_in_sync)]
result = tpu_ops.all_to_all(
broadcast_tensor,
concat_dimension=0,
split_dimension=0,
split_count=self._num_replicas_in_sync)
# This uses the broadcasted value from the first replica because the only
# caller of this is for ONLY_FIRST_REPLICA variables aggregation.
return result[0]
return tensor
@property
def num_hosts(self):
if self._device_assignment is None:
return self._tpu_metadata.num_hosts
return len(set([self._device_assignment.host_device(r)
for r in range(self._device_assignment.num_replicas)]))
@property
def num_replicas_per_host(self):
if self._device_assignment is None:
return self._tpu_metadata.num_of_cores_per_host
    # TODO(sourabhbajaj): Remove this method once we use inputs and remove
    # infeed, as the computation of num_replicas_per_host is not a constant
# when using device_assignment. This is a temporary workaround to support
# StatefulRNN as everything is 1 in that case.
# This method needs to take host_id as input for correct computation.
max_models_per_host = (self._tpu_metadata.num_of_cores_per_host //
self._device_assignment.num_cores_per_replica)
return min(self._device_assignment.num_replicas, max_models_per_host)
@property
def _num_replicas_in_sync(self):
if self._device_assignment is None:
return self._tpu_metadata.num_cores
return self._device_assignment.num_replicas
@property
def experimental_between_graph(self):
return False
@property
def experimental_should_init(self):
return True
@property
def should_checkpoint(self):
return True
@property
def should_save_summary(self):
return True
@property
def worker_devices(self):
return tuple(self._tpu_devices[:, self._logical_device_stack[-1]])
@property
def parameter_devices(self):
return self.worker_devices
def non_slot_devices(self, var_list):
return self._host_device
def _update_non_slot(self, colocate_with, fn, args, kwargs, group):
del colocate_with
with ops.device(self._host_device), distribute_lib.UpdateContext(None):
result = fn(*args, **kwargs)
if group:
return result
else:
return nest.map_structure(self._local_results, result)
def _configure(self,
session_config=None,
cluster_spec=None,
task_type=None,
task_id=None):
del cluster_spec, task_type, task_id
if session_config:
session_config.CopyFrom(self._update_config_proto(session_config))
def _update_config_proto(self, config_proto):
updated_config = copy.deepcopy(config_proto)
updated_config.isolate_session_state = True
cluster_spec = self._tpu_cluster_resolver.cluster_spec()
if cluster_spec:
updated_config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
return updated_config
# TODO(priyag): Delete this once all strategies use global batch size.
@property
def _global_batch_size(self):
"""`make_dataset_iterator` and `make_numpy_iterator` use global batch size.
`make_input_fn_iterator` assumes per-replica batching.
Returns:
Boolean.
"""
return True
def tpu_run(self, fn, args, kwargs, options=None):
func = self._tpu_function_creator(fn, options)
return func(args, kwargs)
def _tpu_function_creator(self, fn, options):
if context.executing_eagerly() and fn in self._tpu_function_cache:
return self._tpu_function_cache[fn]
strategy = self._container_strategy()
def tpu_function(args, kwargs):
"""TF Function used to replicate the user computation."""
if kwargs is None:
kwargs = {}
# Remove None at the end of args as they are not replicatable
# If there are None in the middle we can't do anything about it
# so let those cases fail.
# For example when Keras model predict is used they pass the targets as
# None. We want to handle it here so all client libraries don't have to
# do this as other strategies can handle None values better.
while args and args[-1] is None:
args = args[:-1]
# Used to re-structure flattened output tensors from `tpu.replicate()`
# into a structured format.
result = [[]]
def replicated_fn(replica_id, replica_args, replica_kwargs):
"""Wraps user function to provide replica ID and `Tensor` inputs."""
with _TPUReplicaContext(strategy, replica_id_in_sync_group=replica_id):
result[0] = fn(*replica_args, **replica_kwargs)
return result[0]
replicate_inputs = [] # By replica.
for i in range(strategy.num_replicas_in_sync):
replicate_inputs.append(
[constant_op.constant(i, dtype=dtypes.int32),
distribute_utils.select_replica(i, args),
distribute_utils.select_replica(i, kwargs)])
# Construct and pass `maximum_shapes` so that we could support dynamic
# shapes using dynamic padder.
if options.experimental_enable_dynamic_batch_size and replicate_inputs:
maximum_shapes = []
flattened_list = nest.flatten(replicate_inputs[0])
for input_tensor in flattened_list:
if tensor_util.is_tensor(input_tensor):
rank = input_tensor.shape.rank
else:
rank = np.ndim(input_tensor)
maximum_shape = tensor_shape.TensorShape([None] * rank)
maximum_shapes.append(maximum_shape)
maximum_shapes = nest.pack_sequence_as(replicate_inputs[0],
maximum_shapes)
else:
maximum_shapes = None
if options.experimental_bucketizing_dynamic_shape:
padding_spec = tpu.PaddingSpec.POWER_OF_TWO
else:
padding_spec = None
with strategy.scope():
replicate_outputs = tpu.replicate(
replicated_fn,
replicate_inputs,
device_assignment=self._device_assignment,
maximum_shapes=maximum_shapes,
padding_spec=padding_spec,
xla_options=tpu.XLAOptions(use_spmd_for_xla_partitioning=False))
      # Remove all no-ops that may have been added during 'tpu.replicate()'.
if isinstance(result[0], list):
result[0] = [
output for output in result[0] if not isinstance(
output, ops.Operation)
]
# Workaround for `tpu.replicate` behaviour when single `Tensor` returned.
if result[0] is None or isinstance(result[0], ops.Operation):
replicate_outputs = [None] * len(replicate_outputs)
else:
replicate_outputs = [
nest.pack_sequence_as(result[0], nest.flatten(replica_output))
for replica_output in replicate_outputs
]
return distribute_utils.regroup(replicate_outputs)
if context.executing_eagerly():
tpu_function = def_function.function(tpu_function)
self._tpu_function_cache[fn] = tpu_function
return tpu_function
def _in_multi_worker_mode(self):
"""Whether this strategy indicates working in multi-worker settings."""
    # TPUStrategy has a different distributed training structure in that the
    # whole cluster should be treated as a single worker from a higher-level
    # (e.g. Keras) library's point of view.
# TODO(rchao): Revisit this as we design a fault-tolerance solution for
# TPUStrategy.
return False
class _TPUReplicaContext(distribute_lib.ReplicaContext):
"""Replication Context class for TPU Strategy."""
# TODO(sourabhbajaj): Call for each replica should be updating this.
# TODO(b/118385803): Always properly initialize replica_id.
def __init__(self, strategy, replica_id_in_sync_group=0):
distribute_lib.ReplicaContext.__init__(
self, strategy, replica_id_in_sync_group=replica_id_in_sync_group)
@property
def devices(self):
distribute_lib.require_replica_context(self)
ds = self._strategy
replica_id = tensor_util.constant_value(self.replica_id_in_sync_group)
if replica_id is None: # Non-constant `Tensor` inside `tpu.replicate`.
# TODO(cjfj): Return other devices when model parallelism is supported.
return (tpu.core(0),)
else:
return (ds.extended.worker_devices[replica_id],)
def experimental_logical_device(self, logical_device_id):
"""Places variables and ops on the specified logical device."""
return self.strategy.extended.experimental_logical_device(logical_device_id)
def _set_last_step_outputs(ctx, last_step_tensor_outputs):
"""Sets the last step outputs on the given context."""
# Convert replicate_outputs to the original dict structure of
# last_step_outputs.
last_step_tensor_outputs_dict = nest.pack_sequence_as(
ctx.last_step_outputs, last_step_tensor_outputs)
for name, reduce_op in ctx._last_step_outputs_reduce_ops.items(): # pylint: disable=protected-access
output = last_step_tensor_outputs_dict[name]
# For outputs that aren't reduced, return a PerReplica of all values. Else
# take the first value from the list as each value should be the same.
if reduce_op is None:
last_step_tensor_outputs_dict[name] = values.PerReplica(output)
else:
# TODO(priyag): Should this return the element or a list with 1 element
last_step_tensor_outputs_dict[name] = output[0]
ctx._set_last_step_outputs(last_step_tensor_outputs_dict) # pylint: disable=protected-access
the-stack_0_5452
config = {
"interfaces": {
"google.ads.googleads.v1.services.FeedPlaceholderViewService": {
"retry_codes": {
"idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
"non_idempotent": []
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 100,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 20000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 20000,
"total_timeout_millis": 600000
}
},
"methods": {
"GetFeedPlaceholderView": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
}
}
}
}
}
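
# Illustrative sketch (not part of the generated config above): one way a
# client could expand the "default" retry_params into a schedule of retry
# delays. The function below and its capping behaviour are assumptions for
# illustration, not the actual google-ads client implementation.
def example_backoff_schedule(params, attempts=5):
    """Return the successive retry delays (in ms) implied by a retry_params dict."""
    delay = params["initial_retry_delay_millis"]
    schedule = []
    for _ in range(attempts):
        schedule.append(delay)
        # Grow the delay geometrically, but never beyond the configured cap.
        delay = min(delay * params["retry_delay_multiplier"],
                    params["max_retry_delay_millis"])
    return schedule

# For the "default" retry_params above this yields approximately
# [100, 130, 169, 220, 286].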
the-stack_0_5453
import os
import io
import numpy as np
import librosa
import soundfile as sf
import tensorflow as tf
from scipy.signal import butter, lfilter
from scipy import signal
import copy
def read_raw_audio(audio, sample_rate=16000):
if isinstance(audio, str):
wave, _ = librosa.load(os.path.expanduser(audio), sr=sample_rate)
elif isinstance(audio, bytes):
wave, sr = sf.read(io.BytesIO(audio))
wave = np.asfortranarray(wave)
if sr != sample_rate:
wave = librosa.resample(wave, sr, sample_rate)
elif isinstance(audio, np.ndarray):
return audio
else:
raise ValueError("input audio must be either a path or bytes")
return wave
def normalize_audio_feature(audio_feature: np.ndarray, per_feature=False):
""" Mean and variance normalization """
axis = 0 if per_feature else None
mean = np.mean(audio_feature, axis=axis)
std_dev = np.std(audio_feature, axis=axis) + 1e-9
normalized = (audio_feature - mean) / std_dev
return normalized
def normalize_signal(signal: np.ndarray):
    """ Normalize signal to [-1, 1] range """
gain = 1.0 / (np.max(np.abs(signal)) + 1e-9)
return signal * gain
class SpeechFeaturizer:
def __init__(self, speech_config: dict):
# Samples
        self.speech_config = speech_config
self.sample_rate = speech_config["sample_rate"]
self.hop_size = int(self.sample_rate * (speech_config["hop_size"]))
self.win_size = int(self.sample_rate * (speech_config["win_size"]))
# Features
self.num_mels = speech_config["num_mels"]
self.preemphasis = speech_config["preemphasis"]
# Normalization
def smooth_energe(self,wav, sr):
factor = 5
cutoff = 20
nyq = 0.5 * sr
order = 3 # set low-pass filter order
normal_cutoff = cutoff / nyq
b, a = butter(order, normal_cutoff, btype='low', analog=False)
envelop = lfilter(b, a, abs(wav)) # filter low frequency part as signal's envelop
envelop = envelop / np.abs(envelop).max()
envelop = envelop * factor + 1
wav = np.divide(wav, envelop)
wav /= np.abs(wav).max()
return wav
    def load_wav(self, path):
        wav = read_raw_audio(path, self.sample_rate)
        wav = librosa.effects.preemphasis(wav)
        wav = self.smooth_energe(wav, self.sample_rate)
        wav = librosa.effects.trim(wav, top_db=20)[0]
        return wav
def pad_signal(self,wavs,max_length):
wavs = tf.keras.preprocessing.sequence.pad_sequences(wavs, max_length, 'float32', 'post', 'post')
return wavs
    def melspectrogram(self, wav):
        D = librosa.stft(y=wav, n_fft=self.speech_config['n_fft'],
                         hop_length=self.hop_size, win_length=self.win_size)
        assert self.speech_config['fmax'] <= self.sample_rate // 2
        mel_basis = librosa.filters.mel(self.sample_rate, self.speech_config['n_fft'],
                                        n_mels=self.num_mels,
                                        fmin=self.speech_config['fmin'],
                                        fmax=self.speech_config['fmax'])
        D = np.dot(mel_basis, np.abs(D))
        min_level = np.exp(self.speech_config['min_level_db'] / 20 * np.log(10))
        D = 20 * np.log10(np.maximum(min_level, D))
        S = D - self.speech_config['ref_level_db']
        S = np.clip(
            (2 * self.speech_config['max_abs_value'])
            * ((S - self.speech_config['min_level_db']) / (-self.speech_config['min_level_db']))
            - self.speech_config['max_abs_value'],
            -self.speech_config['max_abs_value'], self.speech_config['max_abs_value'])
        return S.T
def preemphasis(self,wav):
return np.append(wav[0], wav[1:] - 0.97 * wav[:-1])
def inv_preemphasis(self,wav):
return signal.lfilter([1], [1, -0.97], wav)
def inv_mel_spectrogram(self,mel_spectrogram):
'''Converts mel spectrogram to waveform using librosa'''
mel_spectrogram *= self.speech_config['power']
D=(((np.clip(mel_spectrogram, -self.speech_config['max_abs_value'],
self.speech_config['max_abs_value']) + self.speech_config['max_abs_value']) * -self.speech_config['min_level_db'] / (
2 * self.speech_config['max_abs_value']))+ self.speech_config['min_level_db'])
D=np.power(10.0, (D) * 0.05)
mel_basis = librosa.filters.mel(self.sample_rate, self.speech_config['n_fft'],
n_mels=self.num_mels
, fmin=self.speech_config['fmin'], fmax=self.speech_config['fmax'])
_inv_mel_basis = np.linalg.pinv(mel_basis)
S= np.maximum(1e-10, np.dot(_inv_mel_basis, D))
spectro = copy.deepcopy(S)
for i in range(self.speech_config['griffin_lim_iters']):
estimated_wav = librosa.istft(spectro, hop_length=self.hop_size, win_length=self.win_size)
est_stft = librosa.stft(y=estimated_wav, n_fft=self.speech_config['n_fft'], hop_length=self.hop_size,
win_length=self.win_size)
phase = est_stft / np.maximum(1e-8, np.abs(est_stft))
spectro = S * phase
estimated_wav = librosa.istft(spectro, hop_length=self.hop_size, win_length=self.win_size)
result = np.real(estimated_wav)
return self.inv_preemphasis(result)
def _compute_pitch_feature(self, signal: np.ndarray) -> np.ndarray:
pitches, _ = librosa.core.piptrack(
y=signal, sr=self.sample_rate,
n_fft=self.speech_config['n_fft'], hop_length=self.hop_size,
fmin=0, fmax=int(self.sample_rate / 2), win_length=self.win_size, center=True
)
pitches = pitches.T
        assert self.num_mels <= self.speech_config['n_fft'] // 2 + 1, \
            "num_mels for the spectrogram should be <= (n_fft // 2 + 1)"
return pitches[:, :self.num_mels]
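
# Minimal usage sketch. The config values below are assumptions chosen only to
# satisfy the keys read by SpeechFeaturizer above (sample_rate, hop_size,
# win_size, n_fft, ...); they are not shipped defaults, and "sample.wav" is a
# placeholder path.
if __name__ == "__main__":
    example_config = {
        "sample_rate": 16000,
        "hop_size": 0.0125,   # seconds -> 200 samples at 16 kHz
        "win_size": 0.05,     # seconds -> 800 samples at 16 kHz
        "num_mels": 80,
        "preemphasis": 0.97,
        "n_fft": 1024,
        "fmin": 0,
        "fmax": 8000,
        "min_level_db": -100,
        "ref_level_db": 20,
        "max_abs_value": 4.0,
        "power": 1.5,
        "griffin_lim_iters": 60,
    }
    featurizer = SpeechFeaturizer(example_config)
    wav = featurizer.load_wav("sample.wav")             # placeholder audio file
    mel = featurizer.melspectrogram(wav)                # shape: (frames, num_mels)
    recovered = featurizer.inv_mel_spectrogram(mel.T)   # Griffin-Lim inversion
    print(mel.shape, recovered.shape)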
the-stack_0_5454
import cv2 as cv
import os
i = 1
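# `i` is a global output-frame counter so that frames extracted from all videos
# share a single numbering. With the default interval of 450 frames, roughly
# one frame is kept every 15 seconds if the footage is ~30 fps (an assumption
# about the source videos).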
def capture(file, interval=450):
cap = cv.VideoCapture(file)
length = int(cap.get(cv.CAP_PROP_FRAME_COUNT))
global i
j = 0
while (cap.isOpened() and j < length):
cap.set(1, j)
ret, frame = cap.read()
        if not ret:
break
cv.imwrite("./trainingDataPreprocessing/img_y/" + "{:04d}".format(i) + ".png", frame)
i += 1
j += interval
cap.release()
cv.destroyAllWindows()
for dirpath, dirnames, files in os.walk('./trainingDataPreprocessing/vid', topdown=False):
for file_name in files:
capture("./trainingDataPreprocessing/vid/" + file_name)
the-stack_0_5455
from chatnoir_api import Index
DEFAULT_START = 0
DEFAULT_SIZE = 10
DEFAULT_SLOP = 0
DEFAULT_INDEX = {
Index.ClueWeb09,
Index.ClueWeb12,
Index.CommonCrawl1511,
}
DEFAULT_MINIMAL = False
DEFAULT_EXPLAIN = False
DEFAULT_RETRIES = 5
DEFAULT_BACKOFF_SECONDS = 1
the-stack_0_5456
# coding: utf-8
"""Constants used by Home Assistant components."""
MAJOR_VERSION = 0
MINOR_VERSION = 88
PATCH_VERSION = '0.dev0'
__short_version__ = '{}.{}'.format(MAJOR_VERSION, MINOR_VERSION)
__version__ = '{}.{}'.format(__short_version__, PATCH_VERSION)
REQUIRED_PYTHON_VER = (3, 5, 3)
# Format for platform files
PLATFORM_FORMAT = '{platform}.{domain}'
# Can be used to specify a catch all when registering state or event listeners.
MATCH_ALL = '*'
# Entity target all constant
ENTITY_MATCH_ALL = 'all'
# If no name is specified
DEVICE_DEFAULT_NAME = 'Unnamed Device'
# Sun events
SUN_EVENT_SUNSET = 'sunset'
SUN_EVENT_SUNRISE = 'sunrise'
# #### CONFIG ####
CONF_ABOVE = 'above'
CONF_ACCESS_TOKEN = 'access_token'
CONF_ADDRESS = 'address'
CONF_AFTER = 'after'
CONF_ALIAS = 'alias'
CONF_API_KEY = 'api_key'
CONF_API_VERSION = 'api_version'
CONF_AT = 'at'
CONF_AUTHENTICATION = 'authentication'
CONF_AUTH_MFA_MODULES = 'auth_mfa_modules'
CONF_AUTH_PROVIDERS = 'auth_providers'
CONF_BASE = 'base'
CONF_BEFORE = 'before'
CONF_BELOW = 'below'
CONF_BINARY_SENSORS = 'binary_sensors'
CONF_BLACKLIST = 'blacklist'
CONF_BRIGHTNESS = 'brightness'
CONF_CODE = 'code'
CONF_COLOR_TEMP = 'color_temp'
CONF_COMMAND = 'command'
CONF_COMMAND_CLOSE = 'command_close'
CONF_COMMAND_OFF = 'command_off'
CONF_COMMAND_ON = 'command_on'
CONF_COMMAND_OPEN = 'command_open'
CONF_COMMAND_STATE = 'command_state'
CONF_COMMAND_STOP = 'command_stop'
CONF_CONDITION = 'condition'
CONF_COVERS = 'covers'
CONF_CURRENCY = 'currency'
CONF_CUSTOMIZE = 'customize'
CONF_CUSTOMIZE_DOMAIN = 'customize_domain'
CONF_CUSTOMIZE_GLOB = 'customize_glob'
CONF_DELAY_TIME = 'delay_time'
CONF_DEVICE = 'device'
CONF_DEVICE_CLASS = 'device_class'
CONF_DEVICES = 'devices'
CONF_DISARM_AFTER_TRIGGER = 'disarm_after_trigger'
CONF_DISCOVERY = 'discovery'
CONF_DISKS = 'disks'
CONF_DISPLAY_CURRENCY = 'display_currency'
CONF_DISPLAY_OPTIONS = 'display_options'
CONF_DOMAIN = 'domain'
CONF_DOMAINS = 'domains'
CONF_EFFECT = 'effect'
CONF_ELEVATION = 'elevation'
CONF_EMAIL = 'email'
CONF_ENTITIES = 'entities'
CONF_ENTITY_ID = 'entity_id'
CONF_ENTITY_NAMESPACE = 'entity_namespace'
CONF_ENTITY_PICTURE_TEMPLATE = 'entity_picture_template'
CONF_EVENT = 'event'
CONF_EXCLUDE = 'exclude'
CONF_FILE_PATH = 'file_path'
CONF_FILENAME = 'filename'
CONF_FOR = 'for'
CONF_FORCE_UPDATE = 'force_update'
CONF_FRIENDLY_NAME = 'friendly_name'
CONF_FRIENDLY_NAME_TEMPLATE = 'friendly_name_template'
CONF_HEADERS = 'headers'
CONF_HOST = 'host'
CONF_HOSTS = 'hosts'
CONF_HS = 'hs'
CONF_ICON = 'icon'
CONF_ICON_TEMPLATE = 'icon_template'
CONF_INCLUDE = 'include'
CONF_ID = 'id'
CONF_IP_ADDRESS = 'ip_address'
CONF_LATITUDE = 'latitude'
CONF_LONGITUDE = 'longitude'
CONF_LIGHTS = 'lights'
CONF_MAC = 'mac'
CONF_METHOD = 'method'
CONF_MAXIMUM = 'maximum'
CONF_MINIMUM = 'minimum'
CONF_MODE = 'mode'
CONF_MONITORED_CONDITIONS = 'monitored_conditions'
CONF_MONITORED_VARIABLES = 'monitored_variables'
CONF_NAME = 'name'
CONF_OFFSET = 'offset'
CONF_OPTIMISTIC = 'optimistic'
CONF_PACKAGES = 'packages'
CONF_PASSWORD = 'password'
CONF_PATH = 'path'
CONF_PAYLOAD = 'payload'
CONF_PAYLOAD_OFF = 'payload_off'
CONF_PAYLOAD_ON = 'payload_on'
CONF_PENDING_TIME = 'pending_time'
CONF_PIN = 'pin'
CONF_PLATFORM = 'platform'
CONF_PORT = 'port'
CONF_PREFIX = 'prefix'
CONF_PROFILE_NAME = 'profile_name'
CONF_PROTOCOL = 'protocol'
CONF_PROXY_SSL = 'proxy_ssl'
CONF_QUOTE = 'quote'
CONF_RADIUS = 'radius'
CONF_RECIPIENT = 'recipient'
CONF_REGION = 'region'
CONF_RESOURCE = 'resource'
CONF_RESOURCES = 'resources'
CONF_RGB = 'rgb'
CONF_ROOM = 'room'
CONF_SCAN_INTERVAL = 'scan_interval'
CONF_SENDER = 'sender'
CONF_SENSOR_TYPE = 'sensor_type'
CONF_SENSORS = 'sensors'
CONF_SHOW_ON_MAP = 'show_on_map'
CONF_SLAVE = 'slave'
CONF_SOURCE = 'source'
CONF_SSL = 'ssl'
CONF_STATE = 'state'
CONF_STATE_TEMPLATE = 'state_template'
CONF_STRUCTURE = 'structure'
CONF_SWITCHES = 'switches'
CONF_TEMPERATURE_UNIT = 'temperature_unit'
CONF_TIME_ZONE = 'time_zone'
CONF_TIMEOUT = 'timeout'
CONF_TOKEN = 'token'
CONF_TRIGGER_TIME = 'trigger_time'
CONF_TTL = 'ttl'
CONF_TYPE = 'type'
CONF_UNIT_OF_MEASUREMENT = 'unit_of_measurement'
CONF_UNIT_SYSTEM = 'unit_system'
CONF_UPDATE_INTERVAL = 'update_interval'
CONF_URL = 'url'
CONF_USERNAME = 'username'
CONF_VALUE_TEMPLATE = 'value_template'
CONF_VERIFY_SSL = 'verify_ssl'
CONF_WEBHOOK_ID = 'webhook_id'
CONF_WEEKDAY = 'weekday'
CONF_WHITELIST = 'whitelist'
CONF_WHITELIST_EXTERNAL_DIRS = 'whitelist_external_dirs'
CONF_WHITE_VALUE = 'white_value'
CONF_XY = 'xy'
CONF_ZONE = 'zone'
# #### EVENTS ####
EVENT_HOMEASSISTANT_START = 'homeassistant_start'
EVENT_HOMEASSISTANT_STOP = 'homeassistant_stop'
EVENT_HOMEASSISTANT_CLOSE = 'homeassistant_close'
EVENT_STATE_CHANGED = 'state_changed'
EVENT_TIME_CHANGED = 'time_changed'
EVENT_CALL_SERVICE = 'call_service'
EVENT_PLATFORM_DISCOVERED = 'platform_discovered'
EVENT_COMPONENT_LOADED = 'component_loaded'
EVENT_SERVICE_REGISTERED = 'service_registered'
EVENT_SERVICE_REMOVED = 'service_removed'
EVENT_LOGBOOK_ENTRY = 'logbook_entry'
EVENT_THEMES_UPDATED = 'themes_updated'
EVENT_TIMER_OUT_OF_SYNC = 'timer_out_of_sync'
EVENT_AUTOMATION_TRIGGERED = 'automation_triggered'
EVENT_SCRIPT_STARTED = 'script_started'
# #### DEVICE CLASSES ####
DEVICE_CLASS_BATTERY = 'battery'
DEVICE_CLASS_HUMIDITY = 'humidity'
DEVICE_CLASS_ILLUMINANCE = 'illuminance'
DEVICE_CLASS_TEMPERATURE = 'temperature'
DEVICE_CLASS_TIMESTAMP = 'timestamp'
DEVICE_CLASS_PRESSURE = 'pressure'
# #### STATES ####
STATE_ON = 'on'
STATE_OFF = 'off'
STATE_HOME = 'home'
STATE_NOT_HOME = 'not_home'
STATE_UNKNOWN = 'unknown'
STATE_OPEN = 'open'
STATE_OPENING = 'opening'
STATE_CLOSED = 'closed'
STATE_CLOSING = 'closing'
STATE_PLAYING = 'playing'
STATE_PAUSED = 'paused'
STATE_IDLE = 'idle'
STATE_STANDBY = 'standby'
STATE_ALARM_DISARMED = 'disarmed'
STATE_ALARM_ARMED_HOME = 'armed_home'
STATE_ALARM_ARMED_AWAY = 'armed_away'
STATE_ALARM_ARMED_NIGHT = 'armed_night'
STATE_ALARM_ARMED_CUSTOM_BYPASS = 'armed_custom_bypass'
STATE_ALARM_PENDING = 'pending'
STATE_ALARM_ARMING = 'arming'
STATE_ALARM_DISARMING = 'disarming'
STATE_ALARM_TRIGGERED = 'triggered'
STATE_LOCKED = 'locked'
STATE_UNLOCKED = 'unlocked'
STATE_UNAVAILABLE = 'unavailable'
STATE_OK = 'ok'
STATE_PROBLEM = 'problem'
# #### STATE AND EVENT ATTRIBUTES ####
# Attribution
ATTR_ATTRIBUTION = 'attribution'
# Credentials
ATTR_CREDENTIALS = 'credentials'
# Contains time-related attributes
ATTR_NOW = 'now'
ATTR_DATE = 'date'
ATTR_TIME = 'time'
ATTR_SECONDS = 'seconds'
# Contains domain, service for a SERVICE_CALL event
ATTR_DOMAIN = 'domain'
ATTR_SERVICE = 'service'
ATTR_SERVICE_DATA = 'service_data'
# IDs
ATTR_ID = 'id'
# Name
ATTR_NAME = 'name'
# Contains one string or a list of strings, each being an entity id
ATTR_ENTITY_ID = 'entity_id'
# String with a friendly name for the entity
ATTR_FRIENDLY_NAME = 'friendly_name'
# A picture to represent entity
ATTR_ENTITY_PICTURE = 'entity_picture'
# Icon to use in the frontend
ATTR_ICON = 'icon'
# The unit of measurement if applicable
ATTR_UNIT_OF_MEASUREMENT = 'unit_of_measurement'
CONF_UNIT_SYSTEM_METRIC = 'metric' # type: str
CONF_UNIT_SYSTEM_IMPERIAL = 'imperial' # type: str
# Electrical attributes
ATTR_VOLTAGE = 'voltage'
# Contains the information that is discovered
ATTR_DISCOVERED = 'discovered'
# Location of the device/sensor
ATTR_LOCATION = 'location'
ATTR_BATTERY_CHARGING = 'battery_charging'
ATTR_BATTERY_LEVEL = 'battery_level'
ATTR_WAKEUP = 'wake_up_interval'
# For devices which support a code attribute
ATTR_CODE = 'code'
ATTR_CODE_FORMAT = 'code_format'
# For calling a device specific command
ATTR_COMMAND = 'command'
# For devices which support an armed state
ATTR_ARMED = 'device_armed'
# For devices which support a locked state
ATTR_LOCKED = 'locked'
# For sensors that support 'tripping', e.g. motion and door sensors
ATTR_TRIPPED = 'device_tripped'
# For sensors that support 'tripping' this holds the most recent
# time the device was tripped
ATTR_LAST_TRIP_TIME = 'last_tripped_time'
# For all entities, this holds whether or not it should be hidden
ATTR_HIDDEN = 'hidden'
# Location of the entity
ATTR_LATITUDE = 'latitude'
ATTR_LONGITUDE = 'longitude'
# Accuracy of location in meters
ATTR_GPS_ACCURACY = 'gps_accuracy'
# If state is assumed
ATTR_ASSUMED_STATE = 'assumed_state'
ATTR_STATE = 'state'
ATTR_OPTION = 'option'
# Bitfield of supported component features for the entity
ATTR_SUPPORTED_FEATURES = 'supported_features'
# Class of device within its domain
ATTR_DEVICE_CLASS = 'device_class'
# Temperature attribute
ATTR_TEMPERATURE = 'temperature'
# #### UNITS OF MEASUREMENT ####
# Temperature units
TEMP_CELSIUS = '°C'
TEMP_FAHRENHEIT = '°F'
# Length units
LENGTH_CENTIMETERS = 'cm' # type: str
LENGTH_METERS = 'm' # type: str
LENGTH_KILOMETERS = 'km' # type: str
LENGTH_INCHES = 'in' # type: str
LENGTH_FEET = 'ft' # type: str
LENGTH_YARD = 'yd' # type: str
LENGTH_MILES = 'mi' # type: str
# Volume units
VOLUME_LITERS = 'L' # type: str
VOLUME_MILLILITERS = 'mL' # type: str
VOLUME_GALLONS = 'gal' # type: str
VOLUME_FLUID_OUNCE = 'fl. oz.' # type: str
# Mass units
MASS_GRAMS = 'g' # type: str
MASS_KILOGRAMS = 'kg' # type: str
MASS_OUNCES = 'oz' # type: str
MASS_POUNDS = 'lb' # type: str
# UV Index units
UNIT_UV_INDEX = 'UV index' # type: str
# #### SERVICES ####
SERVICE_HOMEASSISTANT_STOP = 'stop'
SERVICE_HOMEASSISTANT_RESTART = 'restart'
SERVICE_TURN_ON = 'turn_on'
SERVICE_TURN_OFF = 'turn_off'
SERVICE_TOGGLE = 'toggle'
SERVICE_RELOAD = 'reload'
SERVICE_VOLUME_UP = 'volume_up'
SERVICE_VOLUME_DOWN = 'volume_down'
SERVICE_VOLUME_MUTE = 'volume_mute'
SERVICE_VOLUME_SET = 'volume_set'
SERVICE_MEDIA_PLAY_PAUSE = 'media_play_pause'
SERVICE_MEDIA_PLAY = 'media_play'
SERVICE_MEDIA_PAUSE = 'media_pause'
SERVICE_MEDIA_STOP = 'media_stop'
SERVICE_MEDIA_NEXT_TRACK = 'media_next_track'
SERVICE_MEDIA_PREVIOUS_TRACK = 'media_previous_track'
SERVICE_MEDIA_SEEK = 'media_seek'
SERVICE_SHUFFLE_SET = 'shuffle_set'
SERVICE_ALARM_DISARM = 'alarm_disarm'
SERVICE_ALARM_ARM_HOME = 'alarm_arm_home'
SERVICE_ALARM_ARM_AWAY = 'alarm_arm_away'
SERVICE_ALARM_ARM_NIGHT = 'alarm_arm_night'
SERVICE_ALARM_ARM_CUSTOM_BYPASS = 'alarm_arm_custom_bypass'
SERVICE_ALARM_TRIGGER = 'alarm_trigger'
SERVICE_LOCK = 'lock'
SERVICE_UNLOCK = 'unlock'
SERVICE_OPEN = 'open'
SERVICE_CLOSE = 'close'
SERVICE_CLOSE_COVER = 'close_cover'
SERVICE_CLOSE_COVER_TILT = 'close_cover_tilt'
SERVICE_OPEN_COVER = 'open_cover'
SERVICE_OPEN_COVER_TILT = 'open_cover_tilt'
SERVICE_SET_COVER_POSITION = 'set_cover_position'
SERVICE_SET_COVER_TILT_POSITION = 'set_cover_tilt_position'
SERVICE_STOP_COVER = 'stop_cover'
SERVICE_STOP_COVER_TILT = 'stop_cover_tilt'
SERVICE_SELECT_OPTION = 'select_option'
# #### API / REMOTE ####
SERVER_PORT = 8123
URL_ROOT = '/'
URL_API = '/api/'
URL_API_STREAM = '/api/stream'
URL_API_CONFIG = '/api/config'
URL_API_DISCOVERY_INFO = '/api/discovery_info'
URL_API_STATES = '/api/states'
URL_API_STATES_ENTITY = '/api/states/{}'
URL_API_EVENTS = '/api/events'
URL_API_EVENTS_EVENT = '/api/events/{}'
URL_API_SERVICES = '/api/services'
URL_API_SERVICES_SERVICE = '/api/services/{}/{}'
URL_API_COMPONENTS = '/api/components'
URL_API_ERROR_LOG = '/api/error_log'
URL_API_LOG_OUT = '/api/log_out'
URL_API_TEMPLATE = '/api/template'
HTTP_OK = 200
HTTP_CREATED = 201
HTTP_MOVED_PERMANENTLY = 301
HTTP_BAD_REQUEST = 400
HTTP_UNAUTHORIZED = 401
HTTP_NOT_FOUND = 404
HTTP_METHOD_NOT_ALLOWED = 405
HTTP_UNPROCESSABLE_ENTITY = 422
HTTP_TOO_MANY_REQUESTS = 429
HTTP_INTERNAL_SERVER_ERROR = 500
HTTP_SERVICE_UNAVAILABLE = 503
HTTP_BASIC_AUTHENTICATION = 'basic'
HTTP_DIGEST_AUTHENTICATION = 'digest'
HTTP_HEADER_HA_AUTH = 'X-HA-access'
HTTP_HEADER_X_REQUESTED_WITH = 'X-Requested-With'
CONTENT_TYPE_JSON = 'application/json'
CONTENT_TYPE_MULTIPART = 'multipart/x-mixed-replace; boundary={}'
CONTENT_TYPE_TEXT_PLAIN = 'text/plain'
# The exit code to send to request a restart
RESTART_EXIT_CODE = 100
UNIT_NOT_RECOGNIZED_TEMPLATE = '{} is not a recognized {} unit.' # type: str
LENGTH = 'length' # type: str
MASS = 'mass' # type: str
VOLUME = 'volume' # type: str
TEMPERATURE = 'temperature' # type: str
SPEED_MS = 'speed_ms' # type: str
ILLUMINANCE = 'illuminance' # type: str
WEEKDAYS = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
# The degree of precision for platforms
PRECISION_WHOLE = 1
PRECISION_HALVES = 0.5
PRECISION_TENTHS = 0.1
# Static list of entities that will never be exposed to
# cloud, alexa, or google_home components
CLOUD_NEVER_EXPOSED_ENTITIES = ['group.all_locks']
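# Usage note: these constants look like Home Assistant's shared `const` module;
# integrations typically import them rather than hard-coding strings, e.g.
#   from homeassistant.const import CONF_HOST, STATE_ON, TEMP_CELSIUS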
|
the-stack_0_5457 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras index lookup preprocessing layer."""
# pylint: disable=g-classes-have-attributes
# pylint: disable=g-direct-tensorflow-import
import collections
from keras import backend
from keras.engine import base_layer_utils
from keras.engine import base_preprocessing_layer
from keras.layers.preprocessing import preprocessing_utils as utils
from keras.saving.saved_model import layer_serialization
from keras.utils import layer_utils
from keras.utils import tf_utils
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow.python.platform import tf_logging as logging
INT = utils.INT
MULTI_HOT = utils.MULTI_HOT
ONE_HOT = utils.ONE_HOT
COUNT = utils.COUNT
TF_IDF = utils.TF_IDF
_VOCAB_NAME = "vocab"
_IDF_WEIGHTS_NAME = "idf_weights"
class NullInitializer(tf.lookup.KeyValueTensorInitializer):
"""A placeholder initializer for restoring this layer from a SavedModel."""
def __init__(self, key_dtype, value_dtype):
"""Construct a table initializer object.
Args:
key_dtype: Type of the table keys.
value_dtype: Type of the table values.
"""
self._key_dtype = key_dtype
self._value_dtype = value_dtype
@property
def key_dtype(self):
"""The expected table key dtype."""
return self._key_dtype
@property
def value_dtype(self):
"""The expected table value dtype."""
return self._value_dtype
def initialize(self, table):
"""Returns the table initialization op."""
pass
class VocabWeightHandler(base_layer_utils.TrackableWeightHandler):
"""Adds the vocabulary as a layer weight during serialization."""
def __init__(self, lookup_layer):
self._layer = lookup_layer
self._dtype = lookup_layer.vocabulary_dtype
self._distribute_strategy = tf.distribute.get_strategy()
@property
def num_tensors(self):
return 1
def set_weights(self, weights):
tokens = tf.convert_to_tensor(weights[0], self._dtype)
self._layer.lookup_table = self._layer._lookup_table_from_tokens(tokens) # pylint: disable=protected-access
def get_tensors(self):
# Just save the non-config part of the vocab (no special tokens).
tokens = self._layer.get_vocabulary(include_special_tokens=False)
tokens = tf.convert_to_tensor(tokens, self._dtype)
return [tokens]
class IndexLookup(base_preprocessing_layer.PreprocessingLayer):
"""Maps values from a vocabulary to integer indices.
This layer translates a set of arbitrary hashables into an integer output via
a table-based lookup, with optional out-of-vocabulary handling. This is the
basis layer for both IntegerLookup and StringLookup; it holds the common
logic but is not intended to be exported as part of the Keras API.
Args:
max_tokens: The maximum size of the vocabulary for this layer. If None,
there is no cap on the size of the vocabulary. Note that this size
includes the OOV and mask tokens.
num_oov_indices: The number of out-of-vocabulary tokens to use. If this
value is more than 1, OOV inputs are hashed to determine their OOV value.
If this value is 0, OOV inputs will cause an error when calling the layer.
mask_token: A token that represents masked inputs. When `output_mode` is
`"int"`, the token is included in vocabulary and mapped to index 0. In
other output modes, the token will not appear in the vocabulary and
instances of the mask token in the input will be dropped. If set to None,
no mask term will be added.
oov_token: Only used when `invert` is True. The token to return for OOV
indices.
vocabulary: Optional. Either an array or a string path to a text file. If
passing an array, can pass a tuple, list, 1D numpy array, or 1D tensor
containing the vocabulary terms. If passing a file path, the file should
contain one line per term in the vocabulary. If this argument is set,
there is no need to `adapt` the layer.
vocabulary_dtype: The dtype of the vocabulary terms. For example, `"int64"`
or `"string"`.
idf_weights: Only valid when `output_mode` is `"tf_idf"`. A tuple, list, 1D
numpy array, or 1D tensor of the same length as the vocabulary, containing
the floating point inverse document frequency weights, which will be
multiplied by per sample term counts for the final `tf_idf` weight. If the
`vocabulary` argument is set, and `output_mode` is `"tf_idf"`, this
argument must be supplied.
invert: Only valid when `output_mode` is `"int"`. If True, this layer will
map indices to vocabulary items instead of mapping vocabulary items to
indices. Defaults to False.
output_mode: Specification for the output of the layer. Defaults to `"int"`.
Values can be `"int"`, `"one_hot"`, `"multi_hot"`, `"count"`, or
`"tf_idf"` configuring the layer as follows:
- `"int"`: Return the raw integer indices of the input tokens.
- `"one_hot"`: Encodes each individual element in the input into an
array the same size as the vocabulary, containing a 1 at the element
index. If the last dimension is size 1, will encode on that dimension.
If the last dimension is not size 1, will append a new dimension for
the encoded output.
- `"multi_hot"`: Encodes each sample in the input into a single array
the same size as the vocabulary, containing a 1 for each vocabulary
term present in the sample. Treats the last dimension as the sample
dimension, if input shape is (..., sample_length), output shape will
be (..., num_tokens).
- `"count"`: As `"multi_hot"`, but the int array contains a count of the
number of times the token at that index appeared in the sample.
- `"tf_idf"`: As `"multi_hot"`, but the TF-IDF algorithm is applied to
find the value in each token slot.
pad_to_max_tokens: Only valid when `output_mode` is `"multi_hot"`,
`"count"`, or `"tf_idf"`. If True, the output will have its feature axis
padded to `max_tokens` even if the number of unique tokens in the
vocabulary is less than max_tokens, resulting in a tensor of shape
[batch_size, max_tokens] regardless of vocabulary size. Defaults to False.
sparse: Boolean. Only applicable to `"one_hot"`, `"multi_hot"`, `"count"`
and `"tf-idf"` output modes. If True, returns a `SparseTensor` instead of
a dense `Tensor`. Defaults to False.
"""
def __init__(self,
max_tokens,
num_oov_indices,
mask_token,
oov_token,
vocabulary_dtype,
vocabulary=None,
idf_weights=None,
invert=False,
output_mode="int",
sparse=False,
pad_to_max_tokens=False,
**kwargs):
# If max_tokens is set, the value must be greater than 1 - otherwise we
# are creating a 0-element vocab, which doesn't make sense.
if max_tokens is not None and max_tokens <= 1:
raise ValueError(f"If set, `max_tokens` must be greater than 1. "
f"Received: max_tokens={max_tokens}")
if pad_to_max_tokens and max_tokens is None:
raise ValueError(f"If pad_to_max_tokens is True, must set `max_tokens`. "
f"Received: max_tokens={max_tokens}")
if num_oov_indices < 0:
raise ValueError(f"`num_oov_indices` must be greater than or equal to 0. "
f"Received: num_oov_indices={num_oov_indices}")
# Support deprecated names for output_modes.
if output_mode == "binary":
output_mode = MULTI_HOT
if output_mode == "tf-idf":
output_mode = TF_IDF
# 'output_mode' must be one of (INT, ONE_HOT, MULTI_HOT, COUNT, TF_IDF)
layer_utils.validate_string_arg(
output_mode,
allowable_strings=(INT, ONE_HOT, MULTI_HOT, COUNT, TF_IDF),
layer_name=self.__class__.__name__,
arg_name="output_mode")
if invert and output_mode != INT:
raise ValueError(f"`output_mode` must be `'int'` when `invert` is true. "
f"Received: output_mode={output_mode}")
if sparse and output_mode == INT:
raise ValueError(f"`sparse` may only be true if `output_mode` is "
f"`'one_hot'`, `'multi_hot'`, `'count'` or `'tf_idf'`. "
f"Received: sparse={sparse} and "
f"output_mode={output_mode}")
if idf_weights is not None and output_mode != TF_IDF:
raise ValueError(f"`idf_weights` should only be set if `output_mode` is "
f"`'tf_idf'`. Received: idf_weights={idf_weights} and "
f"output_mode={output_mode}")
self.invert = invert
self.max_tokens = max_tokens
self.num_oov_indices = num_oov_indices
self.mask_token = mask_token
self.oov_token = oov_token
self.output_mode = output_mode
self.sparse = sparse
self.pad_to_max_tokens = pad_to_max_tokens
self.vocabulary_dtype = vocabulary_dtype
self._frozen_vocab_size = None
self.input_vocabulary = vocabulary
self.input_idf_weights = idf_weights
# VocabularySavedModelSaver will clear the config vocabulary to restore the
# lookup table ops directly. We persist this hidden option to record the
# fact that we have a non-adaptable layer with a manually set vocab.
self._has_input_vocabulary = kwargs.pop("has_input_vocabulary",
(vocabulary is not None))
# Drop deprecated config options.
kwargs.pop("vocabulary_size", None)
kwargs.pop("has_static_table", None)
# By default, output int64 when output_mode='int' and floats otherwise.
if "dtype" not in kwargs:
kwargs["dtype"] = tf.int64 if output_mode == INT else backend.floatx()
super().__init__(**kwargs)
# Check dtype only after base layer parses it; dtype parsing is complex.
if output_mode == INT and not tf.as_dtype(self.compute_dtype).is_integer:
input_dtype = kwargs["dtype"]
raise ValueError("When `output_mode='int'`, `dtype` should be an integer "
f"type. Received: dtype={input_dtype}")
if invert:
self._key_dtype = self.dtype if output_mode == INT else tf.int64
self._value_dtype = tf.as_dtype(self.vocabulary_dtype)
mask_key = 0
mask_value = mask_token
self._default_value = self.oov_token
else:
self._key_dtype = tf.as_dtype(self.vocabulary_dtype)
self._value_dtype = self.dtype if output_mode == INT else tf.int64
mask_key = mask_token
# Masks should map to 0 for int output and be dropped otherwise. Max ints
# will be dropped from the bincount op.
mask_value = 0 if self.output_mode == INT else self._value_dtype.max
if self.num_oov_indices == 0:
# If there are no OOV indices, we map OOV tokens to -1 and error out
# during call if we find a negative index.
self._default_value = -1
elif self.num_oov_indices == 1:
# If there is only one OOV index, we can set that index as the default
# value of the index_lookup table.
self._default_value = self._oov_start_index()
else:
# If we have multiple OOV values, we need to do a further hashing step;
# to make this easier, we set the OOV value to -1. (This lets us do a
# vectorized add and cast to boolean to determine locations where we
# need to do extra hashing.)
self._default_value = -1
if self.mask_token is not None:
self._mask_key = tf.convert_to_tensor(mask_key, self._key_dtype)
self._mask_value = tf.convert_to_tensor(mask_value, self._value_dtype)
if self.output_mode == TF_IDF:
self.idf_weights = tf.Variable(
[0] * self._token_start_index(),
shape=(None,),
dtype=self.compute_dtype,
trainable=False)
self.idf_weights_const = self.idf_weights.value()
if vocabulary is not None:
self.set_vocabulary(vocabulary, idf_weights)
else:
# When restoring from a keras SavedModel, the loading code will expect to
# find and restore a lookup_table attribute on the layer. This table needs
# to be uninitialized as a StaticHashTable cannot be initialized twice.
self.lookup_table = self._uninitialized_lookup_table()
# Only set up adapt state if we did not receive a vocab on construction.
if not self._has_input_vocabulary:
# Add a custom weight handler to return the layer's vocab as its weight.
self._add_trackable(VocabWeightHandler(self), False)
# Set adapt state.
self.token_counts = tf.lookup.experimental.MutableHashTable(
key_dtype=vocabulary_dtype, value_dtype=tf.int64, default_value=0)
if self.output_mode == TF_IDF:
self.token_document_counts = tf.lookup.experimental.MutableHashTable(
key_dtype=vocabulary_dtype, value_dtype=tf.int64, default_value=0)
self.num_documents = tf.Variable(0, dtype=tf.int64, trainable=False)
def compute_output_shape(self, input_shape):
if self.output_mode == INT:
return input_shape
depth = (
self.max_tokens if self.pad_to_max_tokens else self._frozen_vocab_size)
return tf.TensorShape([input_shape[0], depth])
def compute_output_signature(self, input_spec):
output_shape = self.compute_output_shape(input_spec.shape.as_list())
output_dtype = self.vocabulary_dtype if self.invert else self.compute_dtype
return tf.TensorSpec(shape=output_shape, dtype=output_dtype)
def get_vocabulary(self, include_special_tokens=True):
"""Returns the current vocabulary of the layer.
Args:
include_special_tokens: If True, the returned vocabulary will include mask
and OOV tokens, and a term's index in the vocabulary will equal the
term's index when calling the layer. If False, the returned vocabulary
will not include any mask or OOV tokens.
"""
# The lookup table data will not be sorted, so we will create an inverted
# lookup here, and use that to look up a range of indices [0, vocab_size).
if self.lookup_table.size() == 0:
vocab, indices = [], []
else:
keys, values = self.lookup_table.export()
vocab, indices = (values, keys) if self.invert else (keys, values)
vocab, indices = (self._tensor_vocab_to_numpy(vocab), indices.numpy())
lookup = collections.defaultdict(lambda: self.oov_token,
zip(indices, vocab))
vocab = [lookup[x] for x in range(self.vocabulary_size())]
if self.mask_token is not None and self.output_mode == INT:
vocab[0] = self.mask_token
if not include_special_tokens:
vocab = vocab[self._token_start_index():]
return vocab
def vocabulary_size(self):
"""Gets the current size of the layer's vocabulary.
Returns:
The integer size of the vocabulary, including optional mask and OOV indices.
"""
return int(self.lookup_table.size().numpy()) + self._token_start_index()
def vocab_size(self):
logging.warning("vocab_size is deprecated, please use vocabulary_size.")
return self.vocabulary_size()
def get_config(self):
config = {
"invert": self.invert,
"max_tokens": self.max_tokens,
"num_oov_indices": self.num_oov_indices,
"oov_token": self.oov_token,
"mask_token": self.mask_token,
"output_mode": self.output_mode,
"pad_to_max_tokens": self.pad_to_max_tokens,
"vocabulary": utils.listify_tensors(self.input_vocabulary),
"vocabulary_dtype": self.vocabulary_dtype,
"idf_weights": utils.listify_tensors(self.input_idf_weights),
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def set_vocabulary(self, vocabulary, idf_weights=None):
"""Sets vocabulary (and optionally document frequency) data for this layer.
This method sets the vocabulary and idf weights for this layer directly,
instead of analyzing a dataset through `adapt`. It should be used whenever
the vocab (and optionally document frequency) information is already known.
If vocabulary data is already present in the layer, this method will replace
it.
Args:
vocabulary: Either an array or a string path to a text file. If passing an
array, can pass a tuple, list, 1D numpy array, or 1D tensor containing
the vocabulary terms. If passing a file path, the file should contain one
line per term in the vocabulary.
idf_weights: A tuple, list, 1D numpy array, or 1D tensor of inverse
document frequency weights with equal length to vocabulary. Must be set
if `output_mode` is `"tf_idf"`. Should not be set otherwise.
Raises:
ValueError: If there are too many inputs, the inputs do not match, or
input data is missing.
RuntimeError: If the vocabulary cannot be set when this function is
called. This happens in `"multi_hot"`, `"count"`, and `"tf_idf"`
modes if `pad_to_max_tokens` is False and the layer itself has already
been called.
RuntimeError: If a tensor vocabulary is passed outside of eager execution.
"""
if self.output_mode != TF_IDF and idf_weights is not None:
raise ValueError(f"`idf_weights` should only be set if output_mode is "
f"`'tf_idf'`. Received: output_mode={self.output_mode} "
f"and idf_weights={idf_weights}")
if isinstance(vocabulary, str):
if not tf.io.gfile.exists(vocabulary):
raise ValueError(
"Vocabulary file {} does not exist.".format(vocabulary))
if self.output_mode == TF_IDF:
raise ValueError("output_mode `'tf_idf'` does not support loading a "
"vocabulary from file.")
self.lookup_table = self._lookup_table_from_file(vocabulary)
return
if not tf.executing_eagerly() and (tf.is_tensor(vocabulary) or
tf.is_tensor(idf_weights)):
raise RuntimeError(
"Cannot set a tensor vocabulary on {} layer {} when not executing "
"eagerly. Create this layer or call `set_vocabulary` outside of "
"any `tf.function`s and with eager execution enabled.".format(
self.__class__.__name__, self.name))
# TODO(mattdangerw): for better performance we should rewrite this entire
# function to operate on tensors and convert vocabulary to a tensor here.
if tf.is_tensor(vocabulary):
vocabulary = self._tensor_vocab_to_numpy(vocabulary)
elif isinstance(vocabulary, (list, tuple)):
vocabulary = np.array(vocabulary)
if tf.is_tensor(idf_weights):
idf_weights = idf_weights.numpy()
elif isinstance(idf_weights, (list, tuple)):
idf_weights = np.array(idf_weights)
if vocabulary.size == 0:
raise ValueError(
"Cannot set an empty vocabulary, you passed {}.".format(vocabulary))
oov_start = self._oov_start_index()
token_start = self._token_start_index()
special_tokens = (
[self.mask_token] * oov_start + [self.oov_token] * self.num_oov_indices)
found_special_tokens = np.array_equal(
special_tokens, vocabulary[:token_start])
if found_special_tokens:
tokens = vocabulary[token_start:]
else:
tokens = vocabulary
repeated_tokens = self._find_repeated_tokens(tokens)
if repeated_tokens:
raise ValueError("The passed vocabulary has at least one repeated "
"term. Please uniquify your dataset. The repeated terms "
"are {}".format(repeated_tokens))
if self.mask_token is not None and self.mask_token in tokens:
mask_index = np.argwhere(vocabulary == self.mask_token)[-1]
raise ValueError(
"Found reserved mask token at unexpected location in `vocabulary`. "
"Note that passed `vocabulary` does not need to include the OOV and "
"mask tokens. Either remove all mask and OOV tokens, or include them "
"only at the start of the vocabulary in precisely this order: "
f"{special_tokens}. Received: mask_token={self.mask_token} at "
f"vocabulary index {mask_index}")
# Only error out for oov_token when invert=True. When invert=False,
# oov_token is unused during lookup.
if self.oov_token is not None and self.invert and self.oov_token in tokens:
oov_index = np.argwhere(vocabulary == self.oov_token)[-1]
raise ValueError(
"Found reserved OOV token at unexpected location in `vocabulary`. "
"Note that passed `vocabulary` does not need to include the OOV and "
"mask tokens. Either remove all mask and OOV tokens, or include them "
"only at the start of the vocabulary in precisely this order: "
f"{special_tokens}. Received: oov_token={self.oov_token} at "
f"vocabulary index {oov_index}")
new_vocab_size = token_start + len(tokens)
if self.max_tokens is not None and (new_vocab_size > self.max_tokens):
raise ValueError(
"Attempted to set a vocabulary larger than the maximum vocab size. "
"Passed vocab size is {}, max vocab size is {}.".format(
new_vocab_size, self.max_tokens))
self.lookup_table = self._lookup_table_from_tokens(tokens)
if self.output_mode == TF_IDF:
if idf_weights is None:
raise ValueError("`idf_weights` must be set if output_mode is TF_IDF")
if len(vocabulary) != len(idf_weights):
raise ValueError("`idf_weights` must be the same length as vocabulary. "
"len(idf_weights) is {}, len(vocabulary) is {}".format(
len(vocabulary), len(idf_weights)))
idf_weights = self._convert_to_ndarray(idf_weights)
if idf_weights.ndim != 1:
raise ValueError(
"TF-IDF data must be a 1-index array, but received {}".format(
type(idf_weights)))
# If the passed vocabulary has no special tokens, we need to pad the front
# of idf_weights. We don't have real document frequencies for these tokens
# so we will use an average of all idf_weights passed in as a reasonable
# default.
if found_special_tokens:
front_padding = 0
front_padding_value = 0
else:
front_padding = token_start
front_padding_value = np.average(idf_weights)
# If pad_to_max_tokens is true, and max_tokens is greater than our total
# vocab size, we need to pad the back of idf_weights with zeros as well.
back_padding_value = 0
if self.pad_to_max_tokens and self.max_tokens is not None:
back_padding = self.max_tokens - front_padding - len(idf_weights)
else:
back_padding = 0
weights = np.pad(
idf_weights, (front_padding, back_padding),
"constant",
constant_values=(front_padding_value, back_padding_value))
weights = tf.convert_to_tensor(weights, dtype=backend.floatx())
self.idf_weights.assign(weights)
self.idf_weights_const = self.idf_weights.value()
def update_state(self, data):
if self._has_input_vocabulary:
raise ValueError(
"Cannot adapt {} layer after setting a static vocabulary via init "
"argument or `set_vocabulary`.".format(self.__class__.__name__))
data = self._standardize_inputs(data, self.vocabulary_dtype)
if data.shape.rank == 0:
data = tf.expand_dims(data, -1)
if data.shape.rank == 1:
data = tf.expand_dims(data, -1)
tokens, counts = self._num_tokens(data)
self.token_counts.insert(tokens, counts + self.token_counts.lookup(tokens))
if self.output_mode == TF_IDF:
# Dedupe each row of our dataset.
deduped_doc_data = tf.map_fn(lambda x: tf.unique(x)[0], data)
# Flatten and count tokens.
tokens, doc_counts = self._num_tokens(deduped_doc_data)
self.token_document_counts.insert(
tokens, doc_counts + self.token_document_counts.lookup(tokens))
if tf_utils.is_ragged(data):
self.num_documents.assign_add(data.nrows())
else:
self.num_documents.assign_add(tf.shape(data, out_type=tf.int64)[0])
def finalize_state(self):
if self._has_input_vocabulary or tf.equal(self.token_counts.size(), 0):
# Finalize idf_weights to a const for call even if we don't need to
# compute a new vocabulary.
if self.output_mode == TF_IDF:
self.idf_weights_const = self.idf_weights.value()
return
# Remove special tokens from our counts.
if self.mask_token is not None:
self.token_counts.remove(
tf.convert_to_tensor([self.mask_token], self.vocabulary_dtype))
if self.oov_token is not None:
self.token_counts.remove(
tf.convert_to_tensor([self.oov_token], self.vocabulary_dtype))
tokens, counts = self.token_counts.export()
# To keep vocabs deterministic, we sort our tokens by count and break ties
# by sorting the tokens themselves. Tensorflow has no ops for sorting
# strings, so we need to use numpy for the sort.
sorted_indices = np.lexsort((tokens.numpy(), counts.numpy()))[::-1]
token_start = self._token_start_index()
if self.max_tokens:
max_learned_tokens = self.max_tokens - token_start
sorted_indices = sorted_indices[:max_learned_tokens]
tokens = tf.gather(tokens, sorted_indices)
self.lookup_table = self._lookup_table_from_tokens(tokens)
if self.output_mode == TF_IDF:
token_document_counts = self.token_document_counts.lookup(tokens)
idf_weights = self._inverse_document_frequency(token_document_counts,
self.num_documents)
idf_weights = tf.cast(idf_weights, backend.floatx())
# Pad the front of idf_weights with the average idf weight for OOV tokens.
# We cannot compute the real idf weight of OOV in a single pass.
idf_weights = tf.pad(
idf_weights, [[self._token_start_index(), 0]],
constant_values=tf.reduce_mean(idf_weights))
self.idf_weights.assign(idf_weights)
self.idf_weights_const = self.idf_weights.value()
# We call this here to save memory, now that we've built our vocabulary, we
# don't want to keep every token we've seen in separate lookup tables.
self.reset_state()
def reset_state(self): # pylint: disable=method-hidden
if self._has_input_vocabulary:
return
self.token_counts.remove(self.token_counts.export()[0])
if self.output_mode == TF_IDF:
self.token_document_counts.remove(self.token_document_counts.export()[0])
self.num_documents.assign(0)
def call(self, inputs):
self._maybe_freeze_vocab_size()
inputs = self._standardize_inputs(inputs, self._key_dtype)
original_shape = inputs.shape
# Some ops will not handle scalar input, so uprank to rank 1.
if inputs.shape.rank == 0:
inputs = self._expand_dims(inputs, -1)
if tf_utils.is_sparse(inputs):
lookups = tf.SparseTensor(inputs.indices,
self._lookup_dense(inputs.values),
inputs.dense_shape)
elif tf_utils.is_ragged(inputs):
lookups = tf.ragged.map_flat_values(self._lookup_dense, inputs)
else:
lookups = self._lookup_dense(inputs)
if self.output_mode == INT:
# If we received a scalar input, downrank back to a scalar.
if original_shape.rank == 0:
lookups = tf.squeeze(lookups, -1)
return lookups
depth = (
self.max_tokens if self.pad_to_max_tokens else self._frozen_vocab_size)
idf_weights = self.idf_weights_const if self.output_mode == TF_IDF else None
return utils.encode_categorical_inputs(
lookups,
output_mode=self.output_mode,
depth=depth,
dtype=self.compute_dtype,
sparse=self.sparse,
idf_weights=idf_weights)
def _lookup_dense(self, inputs):
"""Lookup table values for a dense Tensor, handling masking and OOV."""
# When executing eagerly and tracing keras.Inputs, do not call lookup. This
# is critical for restoring SavedModel, which will first trace layer.call
# and then attempt to restore the table. We need the table to be uninitialized
# for the restore to work, but calling the table uninitialized would error.
if tf.executing_eagerly() and backend.is_keras_tensor(inputs):
lookups = tf.zeros_like(inputs, dtype=self._value_dtype)
else:
lookups = self.lookup_table.lookup(inputs)
if self.mask_token is not None:
mask_locations = tf.equal(inputs, self._mask_key)
lookups = tf.where(mask_locations, self._mask_value, lookups)
if self.invert:
return lookups
lookup_checks = []
if self.num_oov_indices == 0:
# If we have zero oov indices, we need to check for oov inputs.
oov_indices = tf.where(tf.equal(lookups, -1))
oov_inputs = tf.gather_nd(inputs, oov_indices)
msg = tf.strings.format(
"When `num_oov_indices=0` all inputs should be in vocabulary, "
"found OOV values {}, consider setting `num_oov_indices=1`.",
(oov_inputs,))
assertion = tf.Assert(tf.equal(tf.size(oov_indices), 0), [msg])
lookup_checks.append(assertion)
elif self.num_oov_indices > 1:
# If we have multiple oov indices, we need a further hashing step.
if self._key_dtype.is_integer:
oov_indices = tf.math.floormod(inputs, self.num_oov_indices)
else:
oov_indices = tf.strings.to_hash_bucket_fast(
inputs, num_buckets=self.num_oov_indices)
oov_indices = oov_indices + self._oov_start_index()
oov_locations = tf.equal(lookups, self._default_value)
lookups = tf.where(oov_locations, oov_indices, lookups)
with tf.control_dependencies(lookup_checks):
return tf.identity(lookups)
def _uninitialized_lookup_table(self):
with tf.init_scope():
initializer = NullInitializer(self._key_dtype, self._value_dtype)
return tf.lookup.StaticHashTable(initializer, self._default_value)
def _lookup_table_from_tokens(self, tokens):
with tf.init_scope():
token_start = self._token_start_index()
token_end = token_start + tf.size(tokens)
indices_dtype = self._key_dtype if self.invert else self._value_dtype
indices = tf.range(token_start, token_end, dtype=indices_dtype)
keys, values = (indices, tokens) if self.invert else (tokens, indices)
initializer = tf.lookup.KeyValueTensorInitializer(keys, values,
self._key_dtype,
self._value_dtype)
return tf.lookup.StaticHashTable(initializer, self._default_value)
def _lookup_table_from_file(self, filename):
if self.invert:
key_index = tf.lookup.TextFileIndex.LINE_NUMBER
value_index = tf.lookup.TextFileIndex.WHOLE_LINE
else:
key_index = tf.lookup.TextFileIndex.WHOLE_LINE
value_index = tf.lookup.TextFileIndex.LINE_NUMBER
with tf.init_scope():
initializer = tf.lookup.TextFileInitializer(
filename=filename,
key_dtype=self._key_dtype,
key_index=key_index,
value_dtype=self._value_dtype,
value_index=value_index,
value_index_offset=self._token_start_index())
return tf.lookup.StaticHashTable(initializer, self._default_value)
def _standardize_inputs(self, inputs, dtype):
if not isinstance(inputs, (tf.Tensor, tf.RaggedTensor, tf.SparseTensor)):
inputs = tf.convert_to_tensor(inputs, dtype)
elif inputs.dtype != dtype:
inputs = tf.cast(inputs, dtype)
return inputs
def _convert_to_ndarray(self, x):
return np.array(x) if isinstance(x, (list, tuple)) else x
def _expand_dims(self, inputs, axis):
if tf_utils.is_sparse(inputs):
return tf.sparse.expand_dims(inputs, axis)
else:
return tf.expand_dims(inputs, axis)
def _oov_start_index(self):
return 1 if self.mask_token is not None and self.output_mode == INT else 0
def _token_start_index(self):
return self._oov_start_index() + self.num_oov_indices
def _maybe_freeze_vocab_size(self):
if self.output_mode == INT or self.pad_to_max_tokens:
return
with tf.init_scope():
if not tf.executing_eagerly():
raise RuntimeError(
"When using `output_mode={}` eager execution must be enabled."
.format(self.output_mode))
new_vocab_size = self.vocabulary_size()
if new_vocab_size == self._token_start_index():
raise RuntimeError(
"When using `output_mode={}` and `pad_to_max_tokens=False`, you "
"must set the layer's vocabulary before calling it. Either pass "
"a `vocabulary` argument to the layer, or call `adapt` with some "
"sample data.".format(self.output_mode))
elif (self._frozen_vocab_size is not None and
new_vocab_size != self._frozen_vocab_size):
raise RuntimeError(
"When using `output_mode={}` and `pad_to_max_tokens=False`, the "
"vocabulary size cannot be changed after the layer is called. "
"Vocab size is {}, new vocab size is {}".format(
self.output_mode, self._frozen_vocab_size, new_vocab_size))
self._frozen_vocab_size = new_vocab_size
def _find_repeated_tokens(self, vocabulary):
"""Return all repeated tokens in a vocabulary."""
vocabulary_set = set(vocabulary)
if len(vocabulary) != len(vocabulary_set):
return [
item for item, count in collections.Counter(vocabulary).items()
if count > 1
]
else:
return []
def _num_tokens(self, data):
"""Count the number of tokens in a ragged, sparse or dense tensor."""
if tf_utils.is_sparse(data):
flat_values = data.values
elif tf_utils.is_ragged(data):
flat_values = data.flat_values
else:
flat_values = tf.reshape(data, [-1])
tokens, _, counts = tf.unique_with_counts(flat_values, out_idx=tf.int64)
return tokens, counts
def _inverse_document_frequency(self, token_document_counts, num_documents):
"""Computes the inverse-document-frequency (IDF) component of "tf_idf".
Uses the default weighting scheme described in
https://en.wikipedia.org/wiki/Tf%E2%80%93idf.
Args:
token_document_counts: An array of the # of documents each token appears
in.
num_documents: An int representing the total number of documents
Returns:
An array of "inverse document frequency" weights.
"""
return tf.math.log(1 + num_documents / (1 + token_document_counts))
@property
def _trackable_saved_model_saver(self):
return layer_serialization.VocabularySavedModelSaver(self)
# Override points for IntegerLookup and StringLookup.
def _tensor_vocab_to_numpy(self, vocabulary):
"""Converts a tensor vocabulary to a numpy vocabulary."""
return vocabulary.numpy()
|
the-stack_0_5460 | """
1249. Minimum Remove to Make Valid Parentheses
Given a string s of '(' , ')' and lowercase English characters.
Your task is to remove the minimum number of parentheses ( '(' or ')', in any positions ) so that the resulting parentheses string is valid and return any valid string.
Formally, a parentheses string is valid if and only if:
It is the empty string, contains only lowercase characters, or
It can be written as AB (A concatenated with B), where A and B are valid strings, or
It can be written as (A), where A is a valid string.
Example 1:
Input: s = "lee(t(c)o)de)"
Output: "lee(t(c)o)de"
Explanation: "lee(t(co)de)" , "lee(t(c)ode)" would also be accepted.
Example 2:
Input: s = "a)b(c)d"
Output: "ab(c)d"
Example 3:
Input: s = "))(("
Output: ""
Explanation: An empty string is also valid.
Example 4:
Input: s = "(a(b(c)d)"
Output: "a(b(c)d)"
Constraints:
1 <= s.length <= 10^5
s[i] is one of '(' , ')' and lowercase English letters.
"""
class Solution:
def minRemoveToMakeValid(self, s: str) -> str:
stack, i, invalids = [], 0, set()
while i < len(s):
if s[i] in ['(', ')']:
invalids.add(i)
if s[i] == '(':
stack.append(i)
else:
if stack:
invalids.remove(stack.pop())
invalids.remove(i)
i += 1
res = []
for i in range(len(s)):
if i not in invalids:
res.append(s[i])
return ''.join(res)
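# Quick sanity checks against the examples in the problem statement above:
#   Solution().minRemoveToMakeValid("lee(t(c)o)de)")  ->  "lee(t(c)o)de"
#   Solution().minRemoveToMakeValid("))((")           ->  ""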
class Solution:
def minRemoveToMakeValid(self, s: str) -> str:
right = s.count(')')
left = 0
result = ""
for item in s:
if item == '(':
if right == 0:
continue
else:
left += 1
right -= 1
if item == ')':
if left == 0:
right -= 1
continue
else:
left -= 1
result += item
return result |
the-stack_0_5462 | print('='*30)
print('     ANALYZING VALUES')
print('='*30)
op = 0
while op != 5:
    n1 = float(input('Enter the 1st value: '))
    n2 = float(input('Enter the 2nd value: '))
    op = int(input('''[1] ADD \n[2] MULTIPLY \n[3] GREATER \n[4] NEW NUMBERS \n[5] QUIT \nDesired option: '''))
    if op == 4:
        while op == 4:
            n1 = float(input('Enter the 1st value: '))
            n2 = float(input('Enter the 2nd value: '))
            op = int(input('Choose a new option: '))
    print('')
    while op < 1 or op > 5:
        op = int(input('Choose a valid option: '))
    if op == 1:
        print(f'The sum of {n1} and {n2} is {n1+n2}')
    if op == 2:
        print(f'The product of {n1} and {n2} is {n1 * n2}')
    if op == 3:
        if n1 == n2:
            print('The numbers entered are equal')
        else:
            if n1 > n2:
                print(f'The greater number is {n1}')
            else:
                print(f'The greater number is {n2}')
    print('')
|
the-stack_0_5464 | from .base_entity import BaseEntity
from psutil import net_io_counters, net_connections
class Network(BaseEntity):
""" A simple object to return network usage """
@property
def get_usage(self):
return self.__get_net_usage()
def __get_net_usage(self):
n = net_io_counters()
nc = net_connections(kind='all')
ret = {
"bytes_sent": n.bytes_sent,
"bytes_recv": n.bytes_recv,
"packets_sent": n.packets_sent,
"packets_recv": n.packets_recv,
"dropin": n.dropin,
"dropout": n.dropout,
"errin": n.errin,
"errout": n.errout,
"num_connections": len(nc),
}
return ret
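# Usage sketch: `Network().get_usage` returns a dict like
# {'bytes_sent': ..., 'bytes_recv': ..., 'num_connections': ..., ...}, assuming
# the inherited BaseEntity constructor requires no extra arguments.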
|
the-stack_0_5466 | from datadog import initialize, api
from datadog.api.constants import CheckStatus
options = {'api_key': '<YOUR_API_KEY>',
'app_key': '<YOUR_APP_KEY>'}
initialize(**options)
check = 'app.ok'
host = 'app1'
status = CheckStatus.OK # equals 0
api.ServiceCheck.check(check=check, host_name=host, status=status,
message='Response: 200 OK')
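# The call above submits a service check named 'app.ok' for host 'app1' with an
# OK (0) status and the message 'Response: 200 OK' to the Datadog API.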
|
the-stack_0_5467 | #!/usr/bin/env python
#
# tournament.py -- implementation of a Swiss-system tournament
#
# Allows recording of tied matches.
# Matches opponents of relative standings.
# Pairs players in unique matches.
#
# TODO: implement match byes
# TODO: implement pairing for odd number of players
# TODO: implement Opponent match win tie breaker algorithm
# TODO: implement tournament tracking
#
import psycopg2
def connect():
"""Connect to the PostgreSQL database. Returns a database connection."""
return psycopg2.connect("dbname=tournament")
def deleteMatches():
"""Remove all the match records from the database."""
dbh = connect()
sth = dbh.cursor()
sth.execute("TRUNCATE TABLE matches")
dbh.commit()
dbh.close()
def deletePlayers():
"""Remove all the player records from the database."""
dbh = connect()
sth = dbh.cursor()
sth.execute("TRUNCATE TABLE players CASCADE")
dbh.commit()
dbh.close()
def countPlayers():
"""Returns the number of players currently registered."""
dbh = connect()
sth = dbh.cursor()
sth.execute("SELECT count(players) FROM players")
result = sth.fetchone()
dbh.commit()
dbh.close()
return result[0]
def registerPlayer(name):
"""Adds a player to the tournament database.
The database assigns a unique serial id number for the player. (This
should be handled by your SQL database schema, not in your Python code.)
Args:
name: the player's full name (need not be unique).
"""
dbh = connect()
sth = dbh.cursor()
query = "INSERT INTO players (name) VALUES (%s)"
values = [name]
sth.execute(query, values)
dbh.commit()
dbh.close()
def playerStandings():
"""Returns a list of the players and their win records, sorted by wins.
The first entry in the list should be the player in first place, or a player
tied for first place if there is currently a tie.
Returns:
A list of tuples, each of which contains (id, name, wins, matches):
id: the player's unique id (assigned by the database)
name: the player's full name (as registered)
wins: the number of matches the player has won
matches: the number of matches the player has played
"""
dbh = connect()
sth = dbh.cursor()
query = '''
SELECT * FROM standings
'''
sth.execute(query)
result = sth.fetchall()
dbh.commit()
dbh.close()
return result
def reportMatch(winner, challenger, tied=None):
"""Records the outcome of a single match between two players.
Args:
winner: the id number of the player who won (or the first player, if tied)
challenger: the id number of the player who lost (or the second player, if tied)
tied: optional; pass True to record the match as a tie (stored in the tie column)
"""
dbh = connect()
sth = dbh.cursor()
query = "INSERT INTO matches (winner_id, challenger_id, tie) VALUES (%s, %s, %s)"
values = [winner, challenger, tied]
sth.execute(query, values)
dbh.commit()
dbh.close()
def getPlayerOpponents():
"""Returns list of opponents for all players
Returns:
A list of tuples, each of which contains (id, list)
id: player's unique id
list: list of opponent id
"""
dbh = connect()
sth = dbh.cursor()
query = '''
SELECT
opponents.id,
array_agg(challenger_id) AS challenger_id_list
FROM opponents
GROUP BY opponents.id
'''
sth.execute(query)
result = sth.fetchall()
dbh.commit()
dbh.close()
return result
def getStandingGroups():
"""Returns a list of standings grouped by win, tie, loss
Assuming standings are provided ordered by (win, match, tie), each standings
group contains players with equivalent standings
Returns:
A list of sets of tuples, each of which contains (id, name)
id: player's unique ID
name: player's name
"""
standings = playerStandings()
standings_groups = []
group = set()
# set initial standings
(win, match, tie) = standings[0][2:5]
for player in standings:
# test if player standings does not match current standings
if ((win, match, tie) != player[2:5]):
# append current player group to the standings group
standings_groups.append(group.copy())
# set new standings
(win, match, tie) = player[2:5]
# reset group
group.clear()
# add (player id, player name) to group of players
group.add(player[0:2])
# add last group to standings_groups
standings_groups.append(group.copy())
return standings_groups
def swissPairings():
"""Returns a list of pairs of players for the next round of a match.
Assuming that there are an even number of players registered, each player
appears exactly once in the pairings. Each player is paired with another
player with an equal or nearly-equal win record, that is, a player adjacent
to him or her in the standings.
Returns:
A list of tuples, each of which contains (id1, name1, id2, name2)
id1: the first player's unique id
name1: the first player's name
id2: the second player's unique id
name2: the second player's name
"""
# reduce opponents to a dictionary of player_id and the set of their
# previously played opponent_id
opponents = {}
for (id, cid_list) in getPlayerOpponents():
opponents[id] = set(cid_list)
standings_groups = getStandingGroups()
pending_players = set()
pending_players.update(set(standings_groups.pop(0)))
pairs = []
player = None
challenger = None
while len(pending_players) > 0:
player = pending_players.pop()
# if no more pending players add players from next group
if len(pending_players) == 0 and len(standings_groups) > 0:
pending_players.update(set(standings_groups.pop(0)))
challenger = pending_players.pop()
if len(pending_players) == 0 and len(standings_groups) > 0:
pending_players.update(set(standings_groups.pop(0)))
if challenger[0] in opponents[player[0]]:
new_challenger = pending_players.pop()
pending_players.add(challenger)
challenger = new_challenger
pairs.append((player[0], player[1], challenger[0], challenger[1]))
return pairs
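# End-to-end sketch (illustrative; assumes an empty `tournament` database and
# hypothetical player ids):
#   registerPlayer("Ada Lovelace"); registerPlayer("Grace Hopper")
#   reportMatch(ada_id, grace_id)               # Ada beats Grace
#   reportMatch(ada_id, grace_id, tied=True)    # rematch recorded as a tie
#   swissPairings()  ->  [(id1, name1, id2, name2)]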
|
the-stack_0_5468 | #
# ARCADIA Mocks
#
# Copyright (C) 2017 SINTEF Digital
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
#
from requests import Request, Session
from requests.exceptions import ConnectionError
from time import sleep
from arcadiamock.adapters import XMLParser, XMLPrinter
class Client(object):
DEFAULT_HOST_NAME = "localhost"
DEFAULT_PORT = 5000
def __init__(self, hostname=None, port=None):
self._hostname = hostname or self.DEFAULT_HOST_NAME
self._port = port or self.DEFAULT_PORT
self._headers = {
"accept": "application/xml"
}
self._parse = XMLParser()
self._formatter = XMLPrinter()
def register_service_graph(self, service_graph):
xml = service_graph.accept(self._formatter)
response = self._fetch(resource=self._url_of("/register"),
method="POST",
payload=xml.as_text())
response.raise_for_status()
def component_with_CNID(self, cnid):
resource = "/components/{0}".format(cnid)
url = self._url_of(resource)
response = self._fetch(url)
return self._parse.component_from(response.text)
def service_graphs(self):
response = self._fetch(self._url_of("/service_graphs"))
return self._parse.service_graphs_from(response.text)
def components(self):
response = self._fetch(self._url_of("/components"))
return self._parse.components_from(response.text)
def register_component(self, component):
xml = component.accept(self._formatter)
response = self._fetch(resource=self._url_of("/register_component"),
method="POST",
payload=xml.as_text())
response.raise_for_status()
def about(self):
response = self._fetch(self._url_of("/about"))
return self._parse.about_from(response.text)
def _url_of(self, page):
return self._base_url + page
@property
def _base_url(self):
URL = "http://{hostname}:{port}"
return URL.format(
hostname=self._hostname,
port=self._port)
def _fetch(self, resource, method="GET", payload=None):
attempt = self.MAX_ATTEMPTS
        while attempt > 0:
try:
attempt -= 1
request = Request(method, resource, headers=self._headers, data=payload)
return Session().send(request.prepare())
except ConnectionError:
sleep(self.DELAY)
        message = self.ERROR_CANNOT_GET_PAGE.format(page=resource,
attempts=self.MAX_ATTEMPTS)
raise RuntimeError(message)
MAX_ATTEMPTS = 3
DELAY = 5
ERROR_CANNOT_GET_PAGE = "Cannot access '{page}' ({attempts} attempts)."
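# Minimal usage sketch (hostname and port shown are the class defaults):
#   client = Client()                 # talks to http://localhost:5000
#   info = client.about()
#   graphs = client.service_graphs()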
|
the-stack_0_5470 | # ---------------------------
# Alexander Camuto, Matthew Willetts -- 2019
# The University of Oxford, The Alan Turing Institute
# contact: [email protected], [email protected]
# ---------------------------
"""Functions to preprocess SVHN data
"""
import numpy as np
import tensorflow as tf
import os
import sys
import shutil
import zipfile
import scipy.misc
import scipy.io as sio
import pickle as Pkl
import gzip, tarfile
import re, string, fnmatch
import urllib.request
def data_generator_train(x, y, batch_size):
"""
Generates an infinite sequence of data
Args:
x: training data
y: training labels
batch_size: batch size to yield
Yields:
tuples of x,y pairs each of size batch_size
"""
num = x.shape[0]
while True:
# --- Randomly select batch_size elements from the training set
idx = np.random.choice(list(range(num)), batch_size, replace=False)
# idx = np.random.randint(0, num, batch_size)
x_batch = x[idx]
y_batch = y[idx]
# --- Now yield
yield (x_batch, y_batch)
def data_generator_eval(x, y, batch_size):
"""
Generates an infinite sequence of test data
Args:
x: test data
y: test labels
batch_size: batch size to yield
Yields:
tuples of x,y pairs each of size batch_size
"""
num = x.shape[0]
n_batches = int(num / batch_size)
for i in range(n_batches):
idx = list(range(i * batch_size, (i + 1) * batch_size))
x_batch = x[idx]
y_batch = y[idx]
yield (x_batch, y_batch)
def build_input_fns(params, extra=False):
"""Builds an Iterator switching between train and heldout data."""
x_train, y_train, x_test, y_test = load_svhn(dataset=params["data_dir"],
extra=extra)
#
# x_train, y_train = x_train[:params["B"]], y_train[:params[
# "batch_size"]]
def gen_train():
return data_generator_train(x_train, y_train, params["B"])
def gen_eval():
return data_generator_eval(x_test, y_test, params["B"])
def train_input_fn():
# Build an iterator over training batches.
dataset = tf.data.Dataset.from_generator(
gen_train, (tf.float32, tf.int32),
(tf.TensorShape([params["B"], 32, 32, 3
]), tf.TensorShape([params["B"], 10])))
dataset = dataset.prefetch(1)
return dataset.make_one_shot_iterator().get_next()
def eval_input_fn():
# Build an iterator over training batches.
dataset = tf.data.Dataset.from_generator(
gen_eval, (tf.float32, tf.int32),
(tf.TensorShape([params["B"], 32, 32, 3
]), tf.TensorShape([params["B"], 10])))
dataset = dataset.prefetch(1)
return dataset.make_one_shot_iterator().get_next()
# Build an iterator over the heldout set.
return train_input_fn, eval_input_fn, x_train.shape[0]
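# Illustrative call (parameter keys as used above; the values are hypothetical):
#   train_fn, eval_fn, n_train = build_input_fns({"data_dir": "./data/svhn/", "B": 32})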
def _get_datafolder_path():
full_path = os.path.abspath('.')
path = full_path + '/data'
return path
def _unpickle(f):
    # Note: the original imported cPickle (Python 2 only) but never used it;
    # the module-level `pickle as Pkl` import is what actually loads the file.
    fo = open(f, 'rb')
    d = Pkl.load(fo)
    fo.close()
    return d
def load_svhn(dataset=_get_datafolder_path() + '/svhn/',
normalize=True,
dequantify=True,
extra=False):
'''
:param dataset:
:param normalize:
:param dequantify: Add uniform noise to dequantify the data following
Uria et al. 2013
"RNADE: The real-valued neural autoregressive density-estimator"
:param extra: include extra svhn samples
:return:
'''
if not os.path.isfile(dataset + 'svhn_train.pkl'):
datasetfolder = os.path.dirname(dataset + 'svhn_train.pkl')
if not os.path.exists(datasetfolder):
os.makedirs(datasetfolder)
_download_svhn(dataset, extra=False)
with open(dataset + 'svhn_train.pkl', 'rb') as f:
train_x, train_y = Pkl.load(f)
with open(dataset + 'svhn_test.pkl', 'rb') as f:
test_x, test_y = Pkl.load(f)
if extra:
if not os.path.isfile(dataset + 'svhn_extra.pkl'):
datasetfolder = os.path.dirname(dataset + 'svhn_train.pkl')
if not os.path.exists(datasetfolder):
os.makedirs(datasetfolder)
_download_svhn(dataset, extra=True)
with open(dataset + 'svhn_extra.pkl', 'rb') as f:
extra_x, extra_y = Pkl.load(f)
train_x = np.concatenate([train_x, extra_x])
train_y = np.concatenate([train_y, extra_y])
train_x = train_x.astype('float32')
test_x = test_x.astype('float32')
train_y = tf.keras.utils.to_categorical(train_y.astype('int32'), 10)
test_y = tf.keras.utils.to_categorical(test_y.astype('int32'), 10)
if dequantify:
train_x += np.random.uniform(0, 1,
size=train_x.shape).astype('float32')
test_x += np.random.uniform(0, 1, size=test_x.shape).astype('float32')
if normalize:
normalizer = train_x.max().astype('float32')
train_x = train_x / normalizer
test_x = test_x / normalizer
return train_x, train_y, test_x, test_y
def _download_svhn(dataset, extra):
"""
Download the SVHN dataset
"""
from scipy.io import loadmat
print('Downloading data from http://ufldl.stanford.edu/housenumbers/, ' \
'this may take a while...')
if extra:
print("Downloading extra data...")
urllib.request.urlretrieve(
'http://ufldl.stanford.edu/housenumbers/extra_32x32.mat',
dataset + 'extra_32x32.mat')
extra = loadmat(dataset + 'extra_32x32.mat')
extra_x = extra['X'].swapaxes(2, 3).swapaxes(1, 2).swapaxes(0, 1)
extra_y = extra['y'].reshape((-1)) - 1
print("Saving extra data")
with open(dataset + 'svhn_extra.pkl', 'wb') as f:
Pkl.dump([extra_x, extra_y], f, protocol=Pkl.HIGHEST_PROTOCOL)
os.remove(dataset + 'extra_32x32.mat')
else:
print("Downloading train data...")
urllib.request.urlretrieve(
'http://ufldl.stanford.edu/housenumbers/train_32x32.mat',
dataset + 'train_32x32.mat')
print("Downloading test data...")
urllib.request.urlretrieve(
'http://ufldl.stanford.edu/housenumbers/test_32x32.mat',
dataset + 'test_32x32.mat')
train = loadmat(dataset + 'train_32x32.mat')
train_x = train['X'].swapaxes(2, 3).swapaxes(1, 2).swapaxes(0, 1)
train_y = train['y'].reshape((-1)) - 1
test = loadmat(dataset + 'test_32x32.mat')
test_x = test['X'].swapaxes(2, 3).swapaxes(1, 2).swapaxes(0, 1)
test_y = test['y'].reshape((-1)) - 1
print("Saving train data")
with open(dataset + 'svhn_train.pkl', 'wb') as f:
Pkl.dump([train_x, train_y], f, protocol=Pkl.HIGHEST_PROTOCOL)
print("Saving test data")
with open(dataset + 'svhn_test.pkl', 'wb') as f:
Pkl.dump([test_x, test_y], f, protocol=Pkl.HIGHEST_PROTOCOL)
os.remove(dataset + 'train_32x32.mat')
os.remove(dataset + 'test_32x32.mat')
|
the-stack_0_5471 |
# Write results to this file
OUTFILE = 'runs/10KB/src2-tgt1/seq-nobro-iter06000.result.csv'
# Source computers for the requests
SOURCE = ['10.0.0.1', '10.0.0.3']
# Should Bro be enabled on the source machines?
SOURCE_BRO = [False, False]
# Target machines for the requests (aka server)
TARGET = ['10.0.0.2']
# Should Bro be enabled on the target machines?
TARGET_BRO = [False]
# Connection mode (par = parallel, seq = sequential)
MODE = 'seq'
# Number of evaluation repetitions to run
EPOCHS = 100
# Number of iterations to be run in each evaluation repetition
ITER = 6000
# Size of the file to be downloaded from target (in Bytes * 10^SIZE)
SIZE = 4
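# With SIZE = 4 the requested payload is 10**4 bytes (roughly 10 KB), which
# matches the 'runs/10KB' prefix of OUTFILE above.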
|
the-stack_0_5474 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from .common import DistributedOperatorImplContainer
from .common import DistributedOperatorImpl
from .common import register_distributed_operator_impl_container
from .common import register_distributed_operator_impl, is_parameter_related
from ..utils import is_dim_shard
from ..utils import is_dim_replicate
from ..utils import is_valid_list_index
from ..utils import compute_compatible_dim_mapping
from ..utils import compute_compatible_dims_mapping
from ..utils import compute_compatible_and_update_dim_mapping
from ..utils import set_dist_op_desc_original_id
from ..dist_attribute import OperatorDistributedAttribute
from paddle.fluid import core, unique_name
from paddle.fluid.framework import in_dygraph_mode
from paddle.fluid.framework import Program, Parameter, Variable, program_guard
from paddle.fluid.data_feeder import check_variable_and_dtype, check_dtype
from paddle.distributed.fleet.meta_optimizers.common import OpRole, OP_ROLE_KEY, OP_ROLE_VAR_KEY
from ..process_group import new_process_group
from ..utils import _get_comm_group, _get_corresponding_rank
class DistributedDefault(DistributedOperatorImplContainer):
def __init__(self, name):
super(DistributedDefault, self).__init__()
self._name = name
register_distributed_operator_impl_container("default",
DistributedDefault("default"))
# Replicated Default
class DistributedDefaultImpl0(DistributedOperatorImpl):
def __init__(self, name):
super(DistributedDefaultImpl0, self).__init__()
self._name = name
self._forward_implemented = True
self._backward_implemented = True
def is_input_compatible(self, dist_op):
raise NotImplementedError("Please Implement this method.")
def is_output_compatible(self, dist_op):
raise NotImplementedError("Please Implement this method.")
def update_dims_mapping(self, dist_op):
raise NotImplementedError("Please Implement this method.")
@staticmethod
def forward(ctx, *args, **kwargs):
dist_op_context = ctx.dist_op_context
main_block = dist_op_context.get_dst_main_program().global_block()
startup_block = dist_op_context.get_dst_startup_program().global_block()
src_op = dist_op_context.get_cur_src_op()
rank_id = dist_op_context.get_rank_id()
# check validation of inputs / outputs
for input_name in src_op.desc.input_names():
assert input_name in kwargs, "input [{}] is not given".format(
input_name)
assert len(kwargs[input_name]) == len(
src_op.desc.input(input_name)
), "number of tensor for input [{}] is not match".format(input_name)
for output_name in src_op.desc.output_names():
            assert output_name in kwargs, "output [{}] is not given".format(
                output_name)
            assert len(kwargs[output_name]) == len(
                src_op.desc.output(output_name)
            ), "number of tensors for output [{}] does not match".format(
                output_name)
# replicate op in dist program
dist_op_desc = main_block.desc.append_op()
dist_op_desc.copy_from(src_op.desc)
set_dist_op_desc_original_id(dist_op_desc, src_op.desc, ctx)
for input_name in src_op.desc.input_names():
dist_op_desc.set_input(input_name, kwargs[input_name])
for output_name in src_op.desc.output_names():
dist_op_desc.set_output(output_name, kwargs[output_name])
main_block._sync_with_cpp()
# param initialization sync
for varname in dist_op_desc.input_arg_names():
if startup_block.has_var(varname) and startup_block.var(
varname
).is_parameter and varname not in dist_op_context.already_init_sync_vars:
dist_op_context.already_init_sync_vars.add(varname)
param = startup_block.var(varname)
param_dist_attr = ctx.get_tensor_dist_attr_for_program(param)
process_mesh = param_dist_attr.process_mesh
dims_mapping = param_dist_attr.dims_mapping
# FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
if rank_id not in process_mesh.processes:
rank_id = _get_corresponding_rank(ctx, process_mesh,
rank_id)
                # NOTE: all non-split axes should be present in the mesh
for axis, size in enumerate(process_mesh.topology):
if size <= 1 or axis in dims_mapping:
pass
else:
group_ranks = _get_comm_group(process_mesh.processes,
process_mesh.topology,
axis, rank_id)
sync_group = new_process_group(group_ranks)
new_op = startup_block.append_op(
type='c_broadcast',
inputs={'X': param},
outputs={'Out': param},
attrs={
'ring_id': sync_group.id,
'root': 0,
'use_calc_stream': True,
OP_ROLE_KEY: OpRole.Forward
})
# set distributed attribute
op_attr = OperatorDistributedAttribute()
op_attr.process_mesh = process_mesh
op_attr.set_output_dims_mapping(param.name,
dims_mapping)
op_attr.set_input_dims_mapping(param.name, dims_mapping)
ctx.set_op_dist_attr_for_program(new_op, op_attr)
startup_block._sync_with_cpp()
@staticmethod
def backward(ctx, *args, **kwargs):
# by now the backward function only insert the gradient allreduce for dist op itself
dist_op_context = ctx.dist_op_context
main_block = dist_op_context.get_dst_main_program().global_block()
backward_op = dist_op_context.get_cur_src_op()
dist_attr = ctx.get_op_dist_attr_for_program(backward_op)
        assert dist_attr is not None, "backward op [{}] doesn't have dist attribute!".format(
str(backward_op))
rank_id = dist_op_context.get_rank_id()
# check validation of inputs / outputs
for input_name in backward_op.desc.input_names():
assert input_name in kwargs, "input [{}] is not given".format(
input_name)
assert len(kwargs[input_name]) == len(
backward_op.desc.input(input_name)
), "number of tensor for input [{}] is not match".format(input_name)
for output_name in backward_op.desc.output_names():
            assert output_name in kwargs, "output [{}] is not given".format(
                output_name)
            assert len(kwargs[output_name]) == len(
                backward_op.desc.output(output_name)
            ), "number of tensors for output [{}] does not match".format(
                output_name)
# replicate op in dist program
dist_op_desc = main_block.desc.append_op()
dist_op_desc.copy_from(backward_op.desc)
# Refer to the related dist op
set_dist_op_desc_original_id(dist_op_desc, backward_op.desc, ctx)
for input_name in backward_op.desc.input_names():
dist_op_desc.set_input(input_name, kwargs[input_name])
for output_name in backward_op.desc.output_names():
dist_op_desc.set_output(output_name, kwargs[output_name])
main_block._sync_with_cpp()
# check if need gradient allreduce
        # if there is a non-gradient & non-parameter input and its batch dimension is split,
# we need insert gradient allreduce for the gradient of parameter in its output
need_gradient_allreduce = False
for input_name in backward_op.desc.input_names():
for varname in backward_op.desc.input(input_name):
if "@GRAD" not in varname and not is_parameter_related(
varname, main_block):
# NOTE input var's dim_mapping of backward op should be the same with input var instead of corresponding varname of forward op
process_mesh = dist_attr.process_mesh
var_dim_mapping = dist_attr.get_input_dims_mapping(varname)
# FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
if rank_id not in process_mesh.processes:
rank_id = _get_corresponding_rank(ctx, process_mesh,
rank_id)
mesh_shape = process_mesh.topology
batch_size_axis = var_dim_mapping[0]
if batch_size_axis > -1 and mesh_shape[batch_size_axis] > 1:
need_gradient_allreduce = True
group_ranks = _get_comm_group(process_mesh.processes,
process_mesh.topology,
batch_size_axis, rank_id)
dp_degree = len(group_ranks)
dp_group = new_process_group(group_ranks)
break
if need_gradient_allreduce:
allreduce_vars = []
for input_name in backward_op.desc.input_names():
for varname in backward_op.desc.input(input_name):
if "@GRAD" not in varname and is_parameter_related(
varname, main_block):
assert len(
backward_op.desc.input(input_name)
) == 1, "parameter input to grad op should be length 1, but got [{}]".format(
backward_op.desc.input(input_name))
assert varname + "@GRAD" in backward_op.desc.output_arg_names(
), "parameter's grad [{}] not found in the grad op's output".format(
varname + "@GRAD")
assert len(
backward_op.desc.output(input_name + "@GRAD")
) == 1, "parameter grad of grad op should be length 1, but got [{}]".format(
backward_op.desc.output(input_name + "@GRAD"))
allreduce_vars.append(
backward_op.desc.output(input_name + "@GRAD")[0])
if len(allreduce_vars) > 0:
for varname in allreduce_vars:
grad_var = main_block.var(varname)
allreduce_op = main_block.append_op(
type='c_allreduce_sum',
inputs={'X': [grad_var]},
outputs={'Out': [grad_var]},
attrs={
'ring_id': dp_group.id,
'use_calc_stream': True,
OP_ROLE_KEY: OpRole.Backward
})
scale_op = main_block.append_op(
type='scale',
inputs={'X': grad_var},
outputs={'Out': grad_var},
attrs={
'scale': 1.0 / dp_degree,
OP_ROLE_KEY: OpRole.Backward
})
dims_mapping = ctx.get_tensor_dist_attr_for_program(
grad_var).dims_mapping
process_mesh = dist_attr.process_mesh
for op in [allreduce_op, scale_op]:
op_attr = OperatorDistributedAttribute()
op_attr.process_mesh = process_mesh
op_attr.set_output_dims_mapping(grad_var.name,
dims_mapping)
op_attr.set_input_dims_mapping(grad_var.name,
dims_mapping)
ctx.set_op_dist_attr_for_program(op, op_attr)
main_block._sync_with_cpp()
register_distributed_operator_impl(
"default", DistributedDefaultImpl0("replicate_parallel"))
|
the-stack_0_5475 | # coding=utf-8
# Copyright 2020 The Trax Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization class for model Reformer."""
import os
from shutil import copyfile
from typing import Optional, Tuple
from ...file_utils import is_sentencepiece_available
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if is_sentencepiece_available():
from .tokenization_reformer import ReformerTokenizer
else:
ReformerTokenizer = None
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
####################################################
# Mapping from the keyword arguments names of Tokenizer `__init__`
# to file names for serializing Tokenizer instances
####################################################
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
####################################################
# Mapping from the keyword arguments names of Tokenizer `__init__`
# to pretrained vocabulary URL for all the model ids.
####################################################
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/reformer-crime-and-punishment": "https://cdn.huggingface.co/google/reformer-crime-and-punishment/spiece.model"
},
"tokenizer_file": {
"google/reformer-crime-and-punishment": "https://cdn.huggingface.co/google/reformer-crime-and-punishment/tokenizer.json"
},
}
####################################################
# Mapping from model ids to max length of inputs
####################################################
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/reformer-crime-and-punishment": 524288,
}
class ReformerTokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" Reformer tokenizer (backed by HuggingFace's `tokenizers` library). Based on `SentencePiece
<https://github.com/google/sentencepiece>`__ .
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizerFast` which contains most of the main
methods. Users should refer to this superclass for more information regarding those methods.
Args:
vocab_file (:obj:`str`):
`SentencePiece <https://github.com/google/sentencepiece>`__ file (generally has a `.spm` extension) that
contains the vocabulary necessary to instantiate a tokenizer.
eos_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
The end of sequence token.
.. note::
When building a sequence using special tokens, this is not the token that is used for the end of
sequence. The token used is the :obj:`sep_token`.
unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
additional_special_tokens (:obj:`List[str]`, `optional`):
Additional special tokens used by the tokenizer.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["attention_mask"]
slow_tokenizer_class = ReformerTokenizer
def __init__(
self,
vocab_file,
tokenizer_file=None,
eos_token="</s>",
unk_token="<unk>",
additional_special_tokens=[],
**kwargs
):
super().__init__(
vocab_file,
tokenizer_file=tokenizer_file,
eos_token=eos_token,
unk_token=unk_token,
additional_special_tokens=additional_special_tokens,
**kwargs,
)
self.vocab_file = vocab_file
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(save_directory):
logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,)
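# Editor's note: a hedged usage sketch (not part of the upstream file); it assumes
# network access and that the checkpoint name below is still hosted.
def _example_reformer_fast_usage():  # illustrative only, never called here
    tok = ReformerTokenizerFast.from_pretrained("google/reformer-crime-and-punishment")
    encoding = tok("Crime and Punishment")   # token ids + attention mask
    tok.save_vocabulary(".")                 # copies spiece.model into the cwd
    return encoding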
|
the-stack_0_5477 | import numpy as np
import scipy.io as sio
import xarray as xr
import pkg_resources
from .nortek import read_nortek
from .nortek2 import read_signature
from .rdi import read_rdi
from .base import _create_dataset
from ..rotate.base import _set_coords
from ..time import epoch2date, date2epoch, date2matlab, matlab2date
# time variables stored as data variables (as opposed to coordinates)
t_additional = ['hdwtime_gps', ]
def read(fname, userdata=True, nens=None):
"""Read a binary Nortek (e.g., .VEC, .wpr, .ad2cp, etc.) or RDI
(.000, .PD0, .ENX, etc.) data file.
Parameters
----------
filename : string
Filename of instrument file to read.
userdata : True, False, or string of userdata.json filename (default ``True``)
Whether to read the '<base-filename>.userdata.json' file.
nens : None (default: read entire file), int, or 2-element tuple (start, stop)
Number of pings or ensembles to read from the file
Returns
-------
ds : xarray.Dataset
An xarray dataset from instrument datafile.
"""
# Loop over binary readers until we find one that works.
for func in [read_nortek, read_signature, read_rdi]:
try:
ds = func(fname, userdata=userdata, nens=nens)
except:
continue
else:
return ds
raise Exception(
"Unable to find a suitable reader for file {}.".format(fname))
def read_example(name, **kwargs):
"""Read an ADCP or ADV datafile from the examples directory.
Parameters
----------
name : str
A few available files:
AWAC_test01.wpr
BenchFile01.ad2cp
RDI_test01.000
burst_mode01.VEC
vector_data01.VEC
vector_data_imu01.VEC
winriver01.PD0
winriver02.PD0
Returns
-------
ds : xarray.Dataset
An xarray dataset from the binary instrument data.
"""
filename = pkg_resources.resource_filename(
'dolfyn',
'example_data/' + name)
return read(filename, **kwargs)
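# Editor's note: minimal usage sketch based on the docstring above; the example
# file name is taken from the list documented in read_example.
def _example_read():  # illustrative only
    ds = read_example('vector_data01.VEC')
    return ds  # xarray.Dataset with velocity data and instrument metadata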
def save(dataset, filename):
"""Save xarray dataset as netCDF (.nc).
Parameters
----------
dataset : xarray.Dataset
filename : str
Filename and/or path with the '.nc' extension
Notes
-----
Drops 'config' lines.
"""
if '.' in filename:
assert filename.endswith('nc'), 'File extension must be of the type nc'
else:
filename += '.nc'
# Dropping the detailed configuration stats because netcdf can't save it
for key in list(dataset.attrs.keys()):
if 'config' in key:
dataset.attrs.pop(key)
# Handling complex values for netCDF4
dataset.attrs['complex_vars'] = []
for var in dataset.data_vars:
if np.iscomplexobj(dataset[var]):
dataset[var+'_real'] = dataset[var].real
dataset[var+'_imag'] = dataset[var].imag
dataset = dataset.drop(var)
dataset.attrs['complex_vars'].append(var)
# Keeping time in raw file's time instance, unaware of timezone
t_list = [t for t in dataset.coords if 'time' in t]
for ky in t_list:
dt = epoch2date(dataset[ky])
dataset = dataset.assign_coords({ky: dt})
t_data = [t for t in dataset.data_vars if t in t_additional]
for ky in t_data:
dt = epoch2date(dataset[ky])
dataset = dataset.drop_vars(ky) # must do b/c of netcdf encoding error
dataset[ky] = xr.DataArray(dt, coords={'time_gps': dataset.time_gps})
dataset.to_netcdf(filename, format='NETCDF4', engine='netcdf4')
def load(filename):
"""Load xarray dataset from netCDF (.nc)
Parameters
----------
filename : str
Filename and/or path with the '.nc' extension
Returns
-------
ds : xarray.Dataset
An xarray dataset from the binary instrument data.
"""
if '.' in filename:
assert filename.endswith('nc'), 'File extension must be of the type nc'
else:
filename += '.nc'
ds = xr.load_dataset(filename, engine='netcdf4')
# Single item lists were saved as 'int' or 'str'
if hasattr(ds, 'rotate_vars') and len(ds.rotate_vars[0]) == 1:
ds.attrs['rotate_vars'] = [ds.rotate_vars]
# Python lists were saved as numpy arrays
if hasattr(ds, 'rotate_vars') and type(ds.rotate_vars) is not list:
ds.attrs['rotate_vars'] = list(ds.rotate_vars)
# Rejoin complex numbers
if hasattr(ds, 'complex_vars') and len(ds.complex_vars):
if len(ds.complex_vars[0]) == 1:
ds.attrs['complex_vars'] = [ds.complex_vars]
for var in ds.complex_vars:
ds[var] = ds[var+'_real'] + ds[var+'_imag'] * 1j
ds = ds.drop_vars([var+'_real', var+'_imag'])
ds.attrs.pop('complex_vars')
# Reload raw file's time instance since the timezone is unknown
t_list = [t for t in ds.coords if 'time' in t]
for ky in t_list:
dt = ds[ky].values.astype('datetime64[us]').tolist()
ds = ds.assign_coords({ky: date2epoch(dt)})
ds[ky].attrs['description'] = 'seconds since 1970-01-01 00:00:00'
# Time data variables
t_data = [t for t in ds.data_vars if t in t_additional]
for ky in t_data:
dt = ds[ky].values.astype('datetime64[us]').tolist()
ds[ky].data = date2epoch(dt)
ds[ky].attrs['description'] = 'seconds since 1970-01-01 00:00:00'
return ds
def save_mat(dataset, filename, datenum=True):
"""Save xarray dataset as a MATLAB (.mat) file
Parameters
----------
dataset : xarray.Dataset
Data to save
filename : str
Filename and/or path with the '.mat' extension
datenum : bool
Converts epoch time into MATLAB datenum
Notes
-----
The xarray data format is saved as a MATLAB structure with the fields
'vars, coords, config, units'
See Also
--------
scipy.io.savemat()
"""
if '.' in filename:
assert filename.endswith(
'mat'), 'File extension must be of the type mat'
else:
filename += '.mat'
# Convert from epoch time to datenum
if datenum:
t_list = [t for t in dataset.coords if 'time' in t]
for ky in t_list:
dt = date2matlab(epoch2date(dataset[ky]))
dataset = dataset.assign_coords({ky: dt})
t_data = [t for t in dataset.data_vars if t in t_additional]
for ky in t_data:
dt = date2matlab(epoch2date(dataset[ky]))
dataset[ky].data = dt
# Save xarray structure with more descriptive structure names
matfile = {'vars': {}, 'coords': {}, 'config': {}, 'units': {}}
for key in dataset.data_vars:
matfile['vars'][key] = dataset[key].values
if hasattr(dataset[key], 'units'):
matfile['units'][key] = dataset[key].units
for key in dataset.coords:
matfile['coords'][key] = dataset[key].values
matfile['config'] = dataset.attrs
sio.savemat(filename, matfile)
def load_mat(filename, datenum=True):
"""Load xarray dataset from MATLAB (.mat) file, complimentary to `save_mat()`
A .mat file must contain the fields: {vars, coords, config, units},
where 'coords' contain the dimensions of all variables in 'vars'.
Parameters
----------
filename : str
Filename and/or path with the '.mat' extension
datenum : bool
Converts MATLAB datenum into epoch time
Returns
-------
ds : xarray.Dataset
An xarray dataset from the binary instrument data.
See Also
--------
scipy.io.loadmat()
"""
if '.' in filename:
assert filename.endswith(
'mat'), 'File extension must be of the type mat'
else:
filename += '.mat'
data = sio.loadmat(filename, struct_as_record=False, squeeze_me=True)
ds_dict = {'vars': {}, 'coords': {}, 'config': {}, 'units': {}}
for nm in ds_dict:
key_list = data[nm]._fieldnames
for ky in key_list:
ds_dict[nm][ky] = getattr(data[nm], ky)
ds_dict['data_vars'] = ds_dict.pop('vars')
ds_dict['attrs'] = ds_dict.pop('config')
# Recreate dataset
ds = _create_dataset(ds_dict)
ds = _set_coords(ds, ds.coord_sys)
# Convert datenum time back into epoch time
if datenum:
t_list = [t for t in ds.coords if 'time' in t]
for ky in t_list:
dt = date2epoch(matlab2date(ds[ky].values))
ds = ds.assign_coords({ky: dt})
ds[ky].attrs['description'] = 'seconds since 1970-01-01 00:00:00'
t_data = [t for t in ds.data_vars if t in t_additional]
for ky in t_data:
dt = date2epoch(matlab2date(ds[ky].values))
ds[ky].data = dt
ds[ky].attrs['description'] = 'seconds since 1970-01-01 00:00:00'
# Restore 'rotate vars" to a proper list
if hasattr(ds, 'rotate_vars') and len(ds.rotate_vars[0]) == 1:
ds.attrs['rotate_vars'] = [ds.rotate_vars]
else:
ds.attrs['rotate_vars'] = [x.strip(' ') for x in list(ds.rotate_vars)]
return ds
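# Editor's note: hedged round-trip sketch using the functions defined above;
# the file names are placeholders and `ds` is assumed to come from read().
def _example_round_trip(ds):  # illustrative only
    save(ds, 'my_data.nc')            # netCDF round trip
    ds_nc = load('my_data.nc')
    save_mat(ds_nc, 'my_data.mat')    # MATLAB round trip (datenum-encoded time)
    ds_mat = load_mat('my_data.mat')
    return ds_nc, ds_mat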
|
the-stack_0_5480 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import copy
import importlib
import logging
import re
import six
from saml2_tophat import saml
from saml2_tophat import xmlenc
from saml2_tophat.attribute_converter import from_local, ac_factory
from saml2_tophat.attribute_converter import get_local_name
from saml2_tophat.s_utils import assertion_factory
from saml2_tophat.s_utils import factory
from saml2_tophat.s_utils import sid
from saml2_tophat.s_utils import MissingValue
from saml2_tophat.saml import NAME_FORMAT_URI
from saml2_tophat.time_util import instant
from saml2_tophat.time_util import in_a_while
logger = logging.getLogger(__name__)
def _filter_values(vals, vlist=None, must=False):
""" Removes values from *vals* that does not appear in vlist
:param vals: The values that are to be filtered
:param vlist: required or optional value
:param must: Whether the allowed values must appear
:return: The set of values after filtering
"""
if not vlist: # No value specified equals any value
return vals
if isinstance(vlist, six.string_types):
vlist = [vlist]
res = []
for val in vlist:
if val in vals:
res.append(val)
if must:
if res:
return res
else:
raise MissingValue("Required attribute value missing")
else:
return res
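# Editor's note: illustrative behaviour of the helper above.
# _filter_values(["a", "b"], ["b"])        returns ["b"]
# _filter_values(["a", "b"])               returns ["a", "b"] (no vlist => any value)
# _filter_values(["a"], ["b"], must=True)  raises MissingValue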
def _match(attr, ava):
if attr in ava:
return attr
_la = attr.lower()
if _la in ava:
return _la
for _at in ava.keys():
if _at.lower() == _la:
return _at
return None
def filter_on_attributes(ava, required=None, optional=None, acs=None,
fail_on_unfulfilled_requirements=True):
""" Filter
:param ava: An attribute value assertion as a dictionary
:param required: list of RequestedAttribute instances defined to be
required
:param optional: list of RequestedAttribute instances defined to be
optional
:param fail_on_unfulfilled_requirements: If required attributes
are missing fail or fail not depending on this parameter.
:return: The modified attribute value assertion
"""
def _match_attr_name(attr, ava):
local_name = None
for a in ['name_format', 'friendly_name']:
_val = attr.get(a)
if _val:
if a == 'name_format':
local_name = get_local_name(acs, attr['name'], _val)
else:
local_name = _val
break
if local_name:
_fn = _match(local_name, ava)
else:
_fn = None
if not _fn: # In the unlikely case that someone has provided us with
# URIs as attribute names
_fn = _match(attr["name"], ava)
return _fn
def _apply_attr_value_restrictions(attr, res, must=False):
try:
values = [av["text"] for av in attr["attribute_value"]]
except KeyError:
values = []
try:
res[_fn].extend(_filter_values(ava[_fn], values))
except KeyError:
res[_fn] = _filter_values(ava[_fn], values)
return _filter_values(ava[_fn], values, must)
res = {}
if required is None:
required = []
for attr in required:
_fn = _match_attr_name(attr, ava)
if _fn:
_apply_attr_value_restrictions(attr, res, True)
elif fail_on_unfulfilled_requirements:
desc = "Required attribute missing: '%s'" % (attr["name"])
raise MissingValue(desc)
if optional is None:
optional = []
for attr in optional:
_fn = _match_attr_name(attr, ava)
if _fn:
_apply_attr_value_restrictions(attr, res, False)
return res
def filter_on_demands(ava, required=None, optional=None):
""" Never return more than is needed. Filters out everything
the server is prepared to return but the receiver doesn't ask for
:param ava: Attribute value assertion as a dictionary
:param required: Required attributes
:param optional: Optional attributes
:return: The possibly reduced assertion
"""
# Is all what's required there:
if required is None:
required = {}
lava = dict([(k.lower(), k) for k in ava.keys()])
for attr, vals in required.items():
attr = attr.lower()
if attr in lava:
if vals:
for val in vals:
if val not in ava[lava[attr]]:
raise MissingValue(
"Required attribute value missing: %s,%s" % (attr,
val))
else:
raise MissingValue("Required attribute missing: %s" % (attr,))
if optional is None:
optional = {}
oka = [k.lower() for k in required.keys()]
oka.extend([k.lower() for k in optional.keys()])
# OK, so I can imaging releasing values that are not absolutely necessary
# but not attributes that are not asked for.
for attr in lava.keys():
if attr not in oka:
del ava[lava[attr]]
return ava
def filter_on_wire_representation(ava, acs, required=None, optional=None):
"""
:param ava: A dictionary with attributes and values
:param acs: List of tuples (Attribute Converter name,
Attribute Converter instance)
:param required: A list of saml.Attributes
:param optional: A list of saml.Attributes
:return: Dictionary of expected/wanted attributes and values
"""
acsdic = dict([(ac.name_format, ac) for ac in acs])
if required is None:
required = []
if optional is None:
optional = []
res = {}
for attr, val in ava.items():
done = False
for req in required:
try:
_name = acsdic[req.name_format]._to[attr]
if _name == req.name:
res[attr] = val
done = True
except KeyError:
pass
if done:
continue
for opt in optional:
try:
_name = acsdic[opt.name_format]._to[attr]
if _name == opt.name:
res[attr] = val
break
except KeyError:
pass
return res
def filter_attribute_value_assertions(ava, attribute_restrictions=None):
""" Will weed out attribute values and values according to the
rules defined in the attribute restrictions. If filtering results in
an attribute without values, then the attribute is removed from the
assertion.
:param ava: The incoming attribute value assertion (dictionary)
:param attribute_restrictions: The rules that govern which attributes
and values that are allowed. (dictionary)
:return: The modified attribute value assertion
"""
if not attribute_restrictions:
return ava
for attr, vals in list(ava.items()):
_attr = attr.lower()
try:
_rests = attribute_restrictions[_attr]
except KeyError:
del ava[attr]
else:
if _rests is None:
continue
if isinstance(vals, six.string_types):
vals = [vals]
rvals = []
for restr in _rests:
for val in vals:
if restr.match(val):
rvals.append(val)
if rvals:
ava[attr] = list(set(rvals))
else:
del ava[attr]
return ava
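# Editor's note: small illustrative example of the filtering above; restriction
# keys are lower-cased attribute names mapped to lists of compiled patterns.
# ava = {"givenName": ["Derek", "Dave"], "mail": ["derek@example.com"]}
# restrictions = {"givenname": [re.compile("^Da")]}
# filter_attribute_value_assertions(ava, restrictions)
#   => {"givenName": ["Dave"]}   ("mail" is dropped, "Derek" is filtered out)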
def restriction_from_attribute_spec(attributes):
restr = {}
for attribute in attributes:
restr[attribute.name] = {}
for val in attribute.attribute_value:
if not val.text:
restr[attribute.name] = None
break
else:
restr[attribute.name] = re.compile(val.text)
return restr
def post_entity_categories(maps, **kwargs):
restrictions = {}
try:
required = [d['friendly_name'].lower() for d in kwargs['required']]
except (KeyError, TypeError):
required = []
if kwargs["mds"]:
if "sp_entity_id" in kwargs:
ecs = kwargs["mds"].entity_categories(kwargs["sp_entity_id"])
for ec_map in maps:
for key, (atlist, only_required) in ec_map.items():
if key == "": # always released
attrs = atlist
elif isinstance(key, tuple):
if only_required:
attrs = [a for a in atlist if a in required]
else:
attrs = atlist
for _key in key:
try:
assert _key in ecs
except AssertionError:
attrs = []
break
elif key in ecs:
if only_required:
attrs = [a for a in atlist if a in required]
else:
attrs = atlist
else:
attrs = []
for attr in attrs:
restrictions[attr] = None
else:
for ec_map in maps:
for attr in ec_map[""]:
restrictions[attr] = None
return restrictions
class Policy(object):
""" handles restrictions on assertions """
def __init__(self, restrictions=None):
if restrictions:
self.compile(restrictions)
else:
self._restrictions = None
self.acs = []
def compile(self, restrictions):
""" This is only for IdPs or AAs, and it's about limiting what
is returned to the SP.
In the configuration file, restrictions on which values that
can be returned are specified with the help of regular expressions.
This function goes through and pre-compiles the regular expressions.
:param restrictions:
:return: The assertion with the string specification replaced with
a compiled regular expression.
"""
self._restrictions = copy.deepcopy(restrictions)
for who, spec in self._restrictions.items():
if spec is None:
continue
try:
items = spec["entity_categories"]
except KeyError:
pass
else:
ecs = []
for cat in items:
_mod = importlib.import_module(
"saml2_tophat.entity_category.%s" % cat)
_ec = {}
for key, items in _mod.RELEASE.items():
alist = [k.lower() for k in items]
try:
_only_required = _mod.ONLY_REQUIRED[key]
except (AttributeError, KeyError):
_only_required = False
_ec[key] = (alist, _only_required)
ecs.append(_ec)
spec["entity_categories"] = ecs
try:
restr = spec["attribute_restrictions"]
except KeyError:
continue
if restr is None:
continue
_are = {}
for key, values in restr.items():
if not values:
_are[key.lower()] = None
continue
_are[key.lower()] = [re.compile(value) for value in values]
spec["attribute_restrictions"] = _are
logger.debug("policy restrictions: %s", self._restrictions)
return self._restrictions
def get(self, attribute, sp_entity_id, default=None, post_func=None,
**kwargs):
"""
:param attribute:
:param sp_entity_id:
:param default:
:param post_func:
:return:
"""
if not self._restrictions:
return default
try:
try:
val = self._restrictions[sp_entity_id][attribute]
except KeyError:
try:
val = self._restrictions["default"][attribute]
except KeyError:
val = None
except KeyError:
val = None
if val is None:
return default
elif post_func:
return post_func(val, sp_entity_id=sp_entity_id, **kwargs)
else:
return val
def get_nameid_format(self, sp_entity_id):
""" Get the NameIDFormat to used for the entity id
:param: The SP entity ID
:retur: The format
"""
return self.get("nameid_format", sp_entity_id,
saml.NAMEID_FORMAT_TRANSIENT)
def get_name_form(self, sp_entity_id):
""" Get the NameFormat to used for the entity id
:param: The SP entity ID
:retur: The format
"""
return self.get("name_form", sp_entity_id, NAME_FORMAT_URI)
def get_lifetime(self, sp_entity_id):
""" The lifetime of the assertion
:param sp_entity_id: The SP entity ID
        :return: lifetime as a dictionary
"""
# default is a hour
return self.get("lifetime", sp_entity_id, {"hours": 1})
def get_attribute_restrictions(self, sp_entity_id):
""" Return the attribute restriction for SP that want the information
:param sp_entity_id: The SP entity ID
:return: The restrictions
"""
return self.get("attribute_restrictions", sp_entity_id)
def get_fail_on_missing_requested(self, sp_entity_id):
""" Return the whether the IdP should should fail if the SPs
requested attributes could not be found.
:param sp_entity_id: The SP entity ID
:return: The restrictions
"""
return self.get("fail_on_missing_requested", sp_entity_id, True)
def entity_category_attributes(self, ec):
if not self._restrictions:
return None
ec_maps = self._restrictions["default"]["entity_categories"]
for ec_map in ec_maps:
try:
return ec_map[ec]
except KeyError:
pass
return []
def get_entity_categories(self, sp_entity_id, mds, required):
"""
:param sp_entity_id:
:param mds: MetadataStore instance
:return: A dictionary with restrictions
"""
kwargs = {"mds": mds, 'required': required}
return self.get("entity_categories", sp_entity_id, default={},
post_func=post_entity_categories, **kwargs)
def not_on_or_after(self, sp_entity_id):
""" When the assertion stops being valid, should not be
used after this time.
:param sp_entity_id: The SP entity ID
:return: String representation of the time
"""
return in_a_while(**self.get_lifetime(sp_entity_id))
def filter(self, ava, sp_entity_id, mdstore, required=None, optional=None):
""" What attribute and attribute values returns depends on what
the SP has said it wants in the request or in the metadata file and
what the IdP/AA wants to release. An assumption is that what the SP
asks for overrides whatever is in the metadata. But of course the
IdP never releases anything it doesn't want to.
:param ava: The information about the subject as a dictionary
:param sp_entity_id: The entity ID of the SP
:param mdstore: A Metadata store
:param required: Attributes that the SP requires in the assertion
:param optional: Attributes that the SP regards as optional
:return: A possibly modified AVA
"""
_ava = None
if not self.acs: # acs MUST have a value, fall back to default.
self.acs = ac_factory()
_rest = self.get_entity_categories(sp_entity_id, mdstore, required)
if _rest:
_ava = filter_attribute_value_assertions(ava.copy(), _rest)
elif required or optional:
logger.debug("required: %s, optional: %s", required, optional)
_ava = filter_on_attributes(
ava.copy(), required, optional, self.acs,
self.get_fail_on_missing_requested(sp_entity_id))
_rest = self.get_attribute_restrictions(sp_entity_id)
if _rest:
if _ava is None:
_ava = ava.copy()
_ava = filter_attribute_value_assertions(_ava, _rest)
elif _ava is None:
_ava = ava.copy()
if _ava is None:
return {}
else:
return _ava
def restrict(self, ava, sp_entity_id, metadata=None):
""" Identity attribute names are expected to be expressed in
the local lingo (== friendlyName)
:return: A filtered ava according to the IdPs/AAs rules and
the list of required/optional attributes according to the SP.
If the requirements can't be met an exception is raised.
"""
if metadata:
spec = metadata.attribute_requirement(sp_entity_id)
if spec:
return self.filter(ava, sp_entity_id, metadata,
spec["required"], spec["optional"])
return self.filter(ava, sp_entity_id, metadata, [], [])
def conditions(self, sp_entity_id):
""" Return a saml.Condition instance
:param sp_entity_id: The SP entity ID
:return: A saml.Condition instance
"""
return factory(saml.Conditions,
not_before=instant(),
# How long might depend on who's getting it
not_on_or_after=self.not_on_or_after(sp_entity_id),
audience_restriction=[factory(
saml.AudienceRestriction,
audience=[factory(saml.Audience,
text=sp_entity_id)])])
def get_sign(self, sp_entity_id):
"""
Possible choices
"sign": ["response", "assertion", "on_demand"]
:param sp_entity_id:
:return:
"""
return self.get("sign", sp_entity_id, [])
class EntityCategories(object):
pass
def _authn_context_class_ref(authn_class, authn_auth=None):
"""
    Construct the authn context with an authn context class reference
:param authn_class: The authn context class reference
:param authn_auth: Authenticating Authority
:return: An AuthnContext instance
"""
cntx_class = factory(saml.AuthnContextClassRef, text=authn_class)
if authn_auth:
return factory(saml.AuthnContext,
authn_context_class_ref=cntx_class,
authenticating_authority=factory(
saml.AuthenticatingAuthority, text=authn_auth))
else:
return factory(saml.AuthnContext,
authn_context_class_ref=cntx_class)
def _authn_context_decl(decl, authn_auth=None):
"""
    Construct the authn context with an authn context declaration
:param decl: The authn context declaration
:param authn_auth: Authenticating Authority
:return: An AuthnContext instance
"""
return factory(saml.AuthnContext,
authn_context_decl=decl,
authenticating_authority=factory(
saml.AuthenticatingAuthority, text=authn_auth))
def _authn_context_decl_ref(decl_ref, authn_auth=None):
"""
    Construct the authn context with an authn context declaration reference
:param decl_ref: The authn context declaration reference
:param authn_auth: Authenticating Authority
:return: An AuthnContext instance
"""
return factory(saml.AuthnContext,
authn_context_decl_ref=decl_ref,
authenticating_authority=factory(
saml.AuthenticatingAuthority, text=authn_auth))
def authn_statement(authn_class=None, authn_auth=None,
authn_decl=None, authn_decl_ref=None, authn_instant="",
subject_locality="", session_not_on_or_after=None):
"""
Construct the AuthnStatement
:param authn_class: Authentication Context Class reference
:param authn_auth: Authenticating Authority
:param authn_decl: Authentication Context Declaration
:param authn_decl_ref: Authentication Context Declaration reference
:param authn_instant: When the Authentication was performed.
Assumed to be seconds since the Epoch.
:param subject_locality: Specifies the DNS domain name and IP address
for the system from which the assertion subject was apparently
authenticated.
:return: An AuthnContext instance
"""
if authn_instant:
_instant = instant(time_stamp=authn_instant)
else:
_instant = instant()
if authn_class:
res = factory(
saml.AuthnStatement,
authn_instant=_instant,
session_index=sid(),
session_not_on_or_after=session_not_on_or_after,
authn_context=_authn_context_class_ref(
authn_class, authn_auth))
elif authn_decl:
res = factory(
saml.AuthnStatement,
authn_instant=_instant,
session_index=sid(),
session_not_on_or_after=session_not_on_or_after,
authn_context=_authn_context_decl(authn_decl, authn_auth))
elif authn_decl_ref:
res = factory(
saml.AuthnStatement,
authn_instant=_instant,
session_index=sid(),
session_not_on_or_after=session_not_on_or_after,
authn_context=_authn_context_decl_ref(authn_decl_ref,
authn_auth))
else:
res = factory(
saml.AuthnStatement,
authn_instant=_instant,
session_index=sid(),
session_not_on_or_after=session_not_on_or_after)
if subject_locality:
res.subject_locality = saml.SubjectLocality(text=subject_locality)
return res
def do_subject_confirmation(policy, sp_entity_id, key_info=None, **treeargs):
"""
:param policy: Policy instance
:param sp_entity_id: The entityid of the SP
:param subject_confirmation_method: How was the subject confirmed
:param address: The network address/location from which an attesting entity
can present the assertion.
:param key_info: Information of the key used to confirm the subject
:param in_response_to: The ID of a SAML protocol message in response to
which an attesting entity can present the assertion.
:param recipient: A URI specifying the entity or location to which an
attesting entity can present the assertion.
:param not_before: A time instant before which the subject cannot be
confirmed. The time value MUST be encoded in UTC.
:return:
"""
_sc = factory(saml.SubjectConfirmation, **treeargs)
_scd = _sc.subject_confirmation_data
_scd.not_on_or_after = policy.not_on_or_after(sp_entity_id)
if _sc.method == saml.SCM_HOLDER_OF_KEY:
_scd.add_extension_element(key_info)
return _sc
def do_subject(policy, sp_entity_id, name_id, **farg):
#
specs = farg['subject_confirmation']
if isinstance(specs, list):
res = [do_subject_confirmation(policy, sp_entity_id, **s) for s in
specs]
else:
res = [do_subject_confirmation(policy, sp_entity_id, **specs)]
return factory(saml.Subject, name_id=name_id, subject_confirmation=res)
class Assertion(dict):
""" Handles assertions about subjects """
def __init__(self, dic=None):
dict.__init__(self, dic)
self.acs = []
def construct(self, sp_entity_id, attrconvs, policy, issuer, farg,
authn_class=None, authn_auth=None, authn_decl=None,
encrypt=None, sec_context=None, authn_decl_ref=None,
authn_instant="", subject_locality="", authn_statem=None,
name_id=None, session_not_on_or_after=None):
""" Construct the Assertion
:param sp_entity_id: The entityid of the SP
:param in_response_to: An identifier of the message, this message is
a response to
:param name_id: An NameID instance
:param attrconvs: AttributeConverters
:param policy: The policy that should be adhered to when replying
:param issuer: Who is issuing the statement
:param authn_class: The authentication class
:param authn_auth: The authentication instance
:param authn_decl: An Authentication Context declaration
:param encrypt: Whether to encrypt parts or all of the Assertion
:param sec_context: The security context used when encrypting
:param authn_decl_ref: An Authentication Context declaration reference
:param authn_instant: When the Authentication was performed
:param subject_locality: Specifies the DNS domain name and IP address
for the system from which the assertion subject was apparently
authenticated.
:param authn_statem: A AuthnStatement instance
:return: An Assertion instance
"""
if policy:
_name_format = policy.get_name_form(sp_entity_id)
else:
_name_format = NAME_FORMAT_URI
attr_statement = saml.AttributeStatement(attribute=from_local(
attrconvs, self, _name_format))
if encrypt == "attributes":
for attr in attr_statement.attribute:
enc = sec_context.encrypt(text="%s" % attr)
encd = xmlenc.encrypted_data_from_string(enc)
encattr = saml.EncryptedAttribute(encrypted_data=encd)
attr_statement.encrypted_attribute.append(encattr)
attr_statement.attribute = []
# start using now and for some time
conds = policy.conditions(sp_entity_id)
if authn_statem:
_authn_statement = authn_statem
elif authn_auth or authn_class or authn_decl or authn_decl_ref:
_authn_statement = authn_statement(authn_class, authn_auth,
authn_decl, authn_decl_ref,
authn_instant,
subject_locality,
session_not_on_or_after=session_not_on_or_after)
else:
_authn_statement = None
subject = do_subject(policy, sp_entity_id, name_id,
**farg['subject'])
_ass = assertion_factory(issuer=issuer, conditions=conds,
subject=subject)
if _authn_statement:
_ass.authn_statement = [_authn_statement]
if not attr_statement.empty():
_ass.attribute_statement = [attr_statement]
return _ass
def apply_policy(self, sp_entity_id, policy, metadata=None):
""" Apply policy to the assertion I'm representing
:param sp_entity_id: The SP entity ID
:param policy: The policy
:param metadata: Metadata to use
:return: The resulting AVA after the policy is applied
"""
policy.acs = self.acs
ava = policy.restrict(self, sp_entity_id, metadata)
for key, val in list(self.items()):
if key in ava:
self[key] = ava[key]
else:
del self[key]
return ava
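# Editor's note: a hedged configuration sketch for the Policy class above; the
# SP entity id and the restriction patterns are placeholders.
# policy = Policy({
#     "default": {
#         "lifetime": {"minutes": 15},
#         "attribute_restrictions": None,   # None => release everything
#         "name_form": NAME_FORMAT_URI,
#     },
#     "https://sp.example.com/saml": {
#         "attribute_restrictions": {"givenName": ["^D.*"]},
#     },
# })
# policy.get_lifetime("https://sp.example.com/saml")   # -> {"minutes": 15}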
|
the-stack_0_5482 | from __future__ import absolute_import
import unittest
import yaml
from attrdict import AttrDict
from pyswitch.device import Device
class InterfaceISISTestCase(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(InterfaceISISTestCase, self).__init__(*args, **kwargs)
with open('config.yaml') as fileobj:
cfg = AttrDict(yaml.safe_load(fileobj))
switch = cfg.InterfaceISISTestCase.switch
self.switch_ip = switch.ip
self.switch_username = switch.username
self.switch_pasword = switch.password
self.intf_name = str(switch.intf_name)
self.intf_type = str(switch.intf_type)
self.conn = (self.switch_ip, '22')
self.auth = (self.switch_username, self.switch_pasword)
def setUp(self):
with Device(conn=self.conn, auth=self.auth) as dev:
dev.services.isis()
            op = dev.services.isis(get=True)
            # original called assertEqual() with a single argument, which raises
            # TypeError; assertIsNotNone keeps the smoke-test intent runnable
            self.assertIsNotNone(op)
            dev.isis.address_family_ipv4_unicast()
            op = dev.isis.address_family_ipv4_unicast(get=True)
            self.assertIsNotNone(op)
            dev.isis.log_adjacency()
            op = dev.isis.log_adjacency(get=True)
            self.assertIsNotNone(op)
            dev.isis.net_address(net='49.0001.0100.1001.0006.00')
            op = dev.isis.net_address(get=True)
            self.assertIsNotNone(op)
def tearDown(self):
with Device(conn=self.conn, auth=self.auth) as dev:
dev.services.isis(enable=False)
dev.isis.address_family_ipv4_unicast(delete=True)
dev.isis.log_adjacency(delete=True)
dev.isis.net_address(delete=True)
def test_enable_isis_on_intf(self):
with Device(conn=self.conn, auth=self.auth) as dev:
dev.interface.ip_router_isis(
intf_type='loopback',
intf_name='11')
op = dev.interface.ip_ospf(
intf_type='loopback',
intf_name='11',
get=True)
            self.assertIsNotNone(op)
dev.interface.ip_ospf(
intf_type='loopback',
intf_name='11',
delete=True)
|
the-stack_0_5487 | # Copyright (c) 2017 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from datetime import datetime, timezone, timedelta
from collections import namedtuple
import errno
import os
import time
import sys
if 'require' in globals():
git = require('./git')
else:
from . import git
BRANCH = 'worklog'
now = datetime.now
time_fmt = '%d/%b/%Y:%H:%M:%S %z'
CheckinData = namedtuple('CheckinData', 'name time')
CheckoutData = namedtuple('CheckoutData', 'name begin end interval message')
Log = namedtuple('Log', 'begin end message')
def makedirs(path):
if not os.path.isdir(path):
os.makedirs(path)
def now():
tz = timezone(timedelta(hours=-time.timezone/3600))
return datetime.now().replace(tzinfo=tz)
def strftime(time, fmt=None):
return time.strftime(fmt or time_fmt)
def strptime(value, fmt=None):
return datetime.strptime(value, fmt or time_fmt)
def splittimedelta(tdelta, components='DHMS'):
l = {'D': 86400, 'H': 3600, 'M': 60, 'S': 1}
r = []
rem = int(tdelta.total_seconds())
for k in components:
d, rem = divmod(rem, l[k])
r.append(d)
return r
def strftimedelta(tdelta, components='DHMS'):
parts = []
for i, val in enumerate(splittimedelta(tdelta, components)):
if val > 0:
parts.append('{}{}'.format(val, components[i].lower()))
return ', '.join(parts)
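# Editor's note: worked examples of the two helpers above.
# splittimedelta(timedelta(days=1, hours=2, minutes=5))  -> [1, 2, 5, 0]
# strftimedelta(timedelta(days=1, hours=2, minutes=5))   -> '1d, 2h, 5m'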
def parse_time(value, dt=None):
"""
Parses a time string in multiple possible variants and otherwise applies
the defaults from *dt*. If *dt* is not specified, the result of #now() is
used.
"""
# Intentionally leaving out microseconds.
fields = ['year', 'month', 'day', 'hour', 'minute', 'second', 'tzinfo']
formats = [
(time_fmt, []),
('%H:%M', ['hour', 'minute']),
('%H:%M:%S', ['hour', 'minute', 'second']),
('%H-%M', ['hour', 'minute']),
('%H-%M-%S', ['hour', 'minute', 'second']),
('%d/%H:%M', ['day', 'hour', 'minute']),
('%d/%H:%M:%S', ['day', 'hour', 'minute', 'second']),
('%d', ['day', '#0daytime']),
('%d/%b', ['day', 'month', '#0daytime']),
('%m/%d/%H:%M', ['month', 'day', 'hour', 'minute']),
('%m/%d/%H:%M:%S', ['month', 'day', 'hour', 'minute', 'second']),
]
for fmt, filled_fields in formats:
try:
result = datetime.strptime(value, fmt)
break
except ValueError:
pass
else:
raise ValueError('invalid time string: {!r}'.format(value))
# Update the values that haven't been parsed.
if dt is None:
dt = now()
kwargs = {k: getattr(dt, k) for k in fields if k not in filled_fields}
if '#0daytime' in filled_fields:
kwargs['hour'] = 0
kwargs['minute'] = 0
kwargs['second'] = 0
return result.replace(**kwargs)
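# Editor's note: illustrative calls; fields that a format does not parse fall
# back to `dt` (or to now() when `dt` is omitted).
# parse_time("14:30")        -> today at 14:30 (seconds taken from `dt`)
# parse_time("03/14/09:00")  -> March 14 of the current year at 09:00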
def parse_sheet(data):
"""
Parses a timetable sheet and returns a list of #Log entries.
"""
result = []
for line in data.split('\n'):
cols = line.split('\t', 3)
cols[0] = strptime(cols[0])
cols[1] = strptime(cols[1])
result.append(Log(*cols))
return result
class NoCheckinAvailable(Exception):
pass
def get_checkin_file(fatal=True):
return os.path.join(git.dir(fatal=fatal), 'worklog', 'checkin')
def get_commit_repo_and_branch():
# Check if we should check-in to a different repository.
target_repo = git.config('worklog.repository')
if target_repo:
if not os.path.isdir(target_repo):
print('fatal: worklog.repository={}'.format(target_repo), file=sys.stderr)
print(' the specified directory does not exist.')
sys.exit(128)
target_branch = git.config('worklog.project')
if not target_branch:
print('fatal: worklog.repository is set but worklog.project is not', file=sys.stderr)
print(' please do `git config worklog.project <projectname>` first', file=sys.stderr)
sys.exit(128)
else:
target_branch = git.config('worklog.branch') or BRANCH
return target_repo or None, target_branch
def set_checkin(name, time=None):
time = time or now()
filename = get_checkin_file()
makedirs(os.path.dirname(filename))
with open(filename, 'w') as fp:
fp.write('{}\n{}\n'.format(name, strftime(time)))
return CheckinData(name, time)
def get_checkin():
filename = get_checkin_file()
if not os.path.isfile(filename):
raise NoCheckinAvailable(filename)
with open(filename) as fp:
name = fp.readline().rstrip()
time = fp.readline().rstrip()
time = strptime(time)
if not name or fp.read().strip():
raise ValueError('invalid check-in file at {!r}'.format(filename))
return CheckinData(name, time)
def rem_checkin():
filename = get_checkin_file()
try:
os.remove(filename)
except OSError as exc:
if exc.errno != errno.ENOENT:
raise
def add_checkout(name, begin, end, message=None):
interval = end - begin
if not message:
message = 'Checkout ' + str(interval)
repo, branch = get_commit_repo_and_branch()
# Read the contents of the timetable file for this user.
filename = name + '.tsv'
try:
contents = git.show('{}:{}'.format(branch, filename), cwd=repo)
except git.DoesNotExist:
contents = ''
# Add an entry to the file.
if not contents.endswith('\n'):
contents += '\n'
contents += '{}\t{}\t{}\n'.format(strftime(begin), strftime(end), message or '')
# Create a commit to add the line to the timetable.
commit = git.Commit()
commit.head(branch, message)
commit.add_file_contents(contents, filename)
git.fast_import(commit.getvalue(), date_format='raw', quiet=True, cwd=repo)
return CheckoutData(name, begin, end, interval, message)
|
the-stack_0_5488 | import datetime
import time
import pytz
import pandas as pd
import json
import urllib.request
import requests
from tzwhere import tzwhere
from darksky import forecast
import numpy as np
from helpers import okta_to_percent, granularity_to_freq
def get_temperature_cloudcover(start_time=None, end_time=None,
granularity=None,latitude=None, longitude=None, source='weather_underground', timezone='US/Eastern', darksky_api_key=None):
if (source == 'weather_underground' or darksky_api_key == None):
# create a pandas datetimeindex
        df = pd.date_range(start_time - datetime.timedelta(days=1), end_time, freq='D')
print(df)
# convert it into a simple dataframe and rename the column
df = df.to_frame(index=False)
df.columns = ['time']
# convert it into required format for weather underground
df['time'] = df['time'].dt.strftime('%Y%m%d')
temp_cloud_df = pd.DataFrame()
for _ , row in df.iterrows():
# print(row['time'])
try:
url = "https://api.weather.com/v1/geocode/{}/{}/observations/historical.json?apiKey=6532d6454b8aa370768e63d6ba5a832e&startDate={}&endDate={}&units=e".format(latitude, longitude, row['time'], row['time'])
data = urllib.request.urlopen(url).read()
output = json.loads(data)
output= pd.DataFrame(output['observations'])
output = output[['valid_time_gmt', 'temp', 'clds', 'wx_phrase']]
output.columns = ['time', 'temperature', 'clds', 'wx_phrase']
temp_cloud_df = temp_cloud_df.append(output, ignore_index=True)
except urllib.error.HTTPError as e:
# print(e)
pass
# time.sleep(0.01)
# convert to datetime and set the correct timezone
temp_cloud_df['time_s'] = temp_cloud_df['time']
temp_cloud_df['time'] = pd.to_datetime(temp_cloud_df['time'],unit='s').dt.tz_localize('utc').dt.tz_convert(timezone)
# temp_cloud_df['time'] = temp_cloud_df['time'].dt.round("H")
# resample the data to desired granularity
temp_cloud_df = temp_cloud_df.set_index(temp_cloud_df['time'])
temp_cloud_df = temp_cloud_df.resample(granularity_to_freq(granularity)).ffill()
temp_cloud_df = temp_cloud_df[['temperature', 'clds']]
temp_cloud_df = temp_cloud_df.reset_index()
# chnage to C from F
temp_cloud_df['temperature'] = (temp_cloud_df['temperature'] - 32) * 5/9
# cloud okta code to percent
temp_cloud_df['clouds'] = pd.to_numeric(temp_cloud_df['clds'].apply(lambda x: okta_to_percent(x)))
# keep only relevant columns
temp_cloud_df = temp_cloud_df[['time', 'temperature', 'clouds', 'clds']]
######################### future release ############################
# # create a pandas datetimeindex
# df = pd.date_range(start_time, end_time, freq=granularity_to_freq(granularity), tz=timezone)
# # convert it into a simple dataframe and rename the column
# df = df.to_frame(index=False)
# df.columns = ['time']
# # combine both df and temperature_df
# temp_cloud_df = df.join(temp_cloud_df.set_index('time'), on='time')
####################################################################
# temp_cloud_df['time'] = temp_cloud_df['time'].dt.tz_localize('utc').dt.tz_convert(timezone)
temp_cloud_df['time'] = temp_cloud_df['time'].dt.tz_localize(None)
# print(temp_cloud_df)
    elif (source == 'darksky' and darksky_api_key is not None):
time = []
temperature = []
cloudcover = []
summary = []
# localizing the datetime based on the timezone
start: datetime.datetime = timezone.localize(start_time)
end: datetime.datetime = timezone.localize(end_time)
while start <= end:
day = int(start.timestamp())
start = start + datetime.timedelta(days=1)
response = urllib.request.urlopen('https://api.darksky.net/forecast/{}/{},{},{}?exclude=currently,daily,flags'.format(darksky_api_key, latitude, longitude, day)).read()
output = json.loads(response)['hourly']['data']
for item in output:
time.append(item['time'])
temperature.append(item['temperature'])
cloudcover.append(item['cloudCover'])
summary.append(item['summary'])
temp_cloud_df = pd.DataFrame({'time':time, 'temperature':temperature,'clouds':cloudcover,'clds':summary})
temp_cloud_df['time'] = pd.to_datetime(temp_cloud_df['time'], unit='s').dt.tz_localize('utc').dt.tz_convert(timezone).dt.tz_localize(None)
temp_cloud_df['temperature'] = (temp_cloud_df['temperature'] - 32) * 5/9
else:
print('Sorry, {} source has not been implemented yet.'.format(source))
return temp_cloud_df |
the-stack_0_5489 | # ========================================================================
# Copyright (C) 2019 The MITRE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
# This file contains the implementation for Grover's algorithm.
# It's an intriguing approach at "reversing a function" - essentially
# if you have a function f(x) = y, Grover's algorithm figures out
# x given f and y. If you're interested in trying all possible inputs of a
# fixed search space (like, say, finding the password for for an encrypted
# file or something), Grover can do it in O(√N) steps instead of brute
# force searching the entire space (which is O(N)).
from projectq import MainEngine
from projectq.ops import *
from projectq.meta import Dagger, Control
from utility import reset
from oracle_utility import run_flip_marker_as_phase_marker
import oracles
def grover_iteration(oracle, qubits, oracle_args):
"""
Runs a single iteration of the main loop in Grover's algorithm,
which is the oracle followed by the diffusion operator.
Parameters:
oracle (function): The oracle that flags the correct answer to
the problem being solved (essentially, this should just
implement the function as a quantum program)
qubits (Qureg): The register to run the oracle on
oracle_args (anything): An oracle-specific argument object to pass to the
oracle during execution
"""
# Run the oracle on the input to see if it was a correct result
run_flip_marker_as_phase_marker(oracle, qubits, oracle_args)
# Run the diffusion operator
All(H) | qubits
run_flip_marker_as_phase_marker(oracles.check_if_all_zeros, qubits, None)
All(H) | qubits
def grover_search(oracle, qubits, oracle_args):
"""
Runs Grover's algorithm on the provided oracle, turning the input into
a superposition where the correct answer's state has a very large amplitude
relative to all of the other states.
Parameters:
oracle (function): The oracle that flags the correct answer to
the problem being solved (essentially, this should just
implement the function as a quantum program)
qubits (Qureg): The register to run the oracle on
oracle_args (anything): An oracle-specific argument object to pass to the
oracle during execution
"""
    # Prepare the uniform superposition, then run the oracle + diffusion
    # loop for √N iterations (the theoretical optimum is about π/4·√N).
    All(H) | qubits
iterations = round(2 ** (len(qubits) / 2))
for i in range(0, iterations):
grover_iteration(oracle, qubits, oracle_args)
def run_grover_search(number_of_qubits, oracle, oracle_args):
"""
Uses Grover's quantum search to find the single answer to a problem
with high probability.
Parameters:
number_of_qubits (int): The number of qubits that the oracle expects
(the number of qubits that the answer will contain)
oracle (function): The oracle that flags the correct answer to
the problem being solved (essentially, this should just
implement the function as a quantum program)
oracle_args (anything): An oracle-specific argument object to pass to the
oracle during execution
Returns:
A list[int] that represents the discovered answer as a bit string
"""
# Build the engine and run the search
engine = MainEngine()
qubits = engine.allocate_qureg(number_of_qubits)
grover_search(oracle, qubits, oracle_args)
# Measure the potential solution and return it
solution = []
for qubit in qubits:
Measure | qubit
solution += [int(qubit)]
return solution
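# ------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original file). It assumes the
# `oracles` module exposes `check_if_all_zeros` with the same
# (qubits, oracle_args) calling convention used by
# run_flip_marker_as_phase_marker above; substitute any oracle with that
# signature for a real search problem.
if __name__ == "__main__":
    # Search a 3-qubit space; with the all-zeros oracle the flagged answer is |000>.
    answer = run_grover_search(3, oracles.check_if_all_zeros, None)
    print("Measured candidate answer:", answer)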
|
the-stack_0_5491 | import fnmatch
import functools
from collections import OrderedDict
from datetime import datetime
import pytest
from django.db.models import F
from django.utils import timezone
from pontoon.base.models import TranslatedResource, Translation
from pontoon.tags.models import Tag
from .site import _factory
def tag_factory():
def instance_attrs(instance, i):
if not instance.slug:
instance.slug = "factorytag%s" % i
if not instance.name:
instance.name = "Factory Tag %s" % i
return functools.partial(
_factory, Model=Tag, instance_attrs=instance_attrs
)
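# Editor's note (illustrative, not part of the original fixtures): the partial
# returned by tag_factory() is invoked like the other site factories, e.g.
#   tag_factory()(args=[{"priority": None}, {"priority": 1}])
# would create two Tag instances with auto-generated slugs and names.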
def _assert_tags(expected, data):
assert len(expected) == len(data)
results = dict((d['slug'], d) for d in data)
attrs = [
"pk",
"name",
# "last_change",
"total_strings",
"approved_strings",
"unreviewed_strings",
"fuzzy_strings",
]
for slug, stats in results.items():
_exp = expected[slug]
for attr in attrs:
assert _exp[attr] == stats[attr]
@pytest.fixture
def assert_tags():
"""This fixture provides a function for comparing calculated
tag stats against those provided by the tags tool
"""
return _assert_tags
def _calculate_resource_tags(**kwargs):
# returns the tags associated with a given resource, filters
# on priority if given
priority = kwargs.get("priority", None)
resource_tags = {}
tags_through = Tag.resources.through.objects.values_list(
"resource",
"tag",
"tag__slug",
"tag__name",
)
if priority is not None:
if priority is True:
tags_through = tags_through.exclude(tag__priority__isnull=True)
elif priority is False:
tags_through = tags_through.exclude(tag__priority__isnull=False)
else:
tags_through = tags_through.filter(tag__priority=priority)
for resource, tag, _slug, name in tags_through.iterator():
resource_tags[resource] = (
resource_tags.get(resource, []) + [(tag, _slug, name)]
)
return resource_tags
def _tag_iterator(things, **kwargs):
# for given qs.values() (`things`) and **kwargs to filter on, this will
# find and iterate matching tags.
# `things` can be either translations or translated_resources, but
# they must have their `locale`, `project`, `resource` and
# `resource.path` denormalized where required.
locales = list(l.id for l in kwargs.get("locales", []))
projects = list(p.id for p in kwargs.get("projects", []))
slug = kwargs.get("slug", None)
path = kwargs.get("path", None)
resource_tags = _calculate_resource_tags(**kwargs)
for thing in things.iterator():
if locales and thing["locale"] not in locales:
continue
if projects and thing["project"] not in projects:
continue
if path and not fnmatch.fnmatch(thing['path'], path):
continue
for tag in resource_tags.get(thing['resource'], []):
__, _slug, __ = tag
if slug and not fnmatch.fnmatch(_slug, slug):
continue
yield thing, tag
def _calculate_tags(**kwargs):
# calculate what the stats per-tag with given **kwargs should be
# the long-hand way
trs = TranslatedResource.objects.all()
attrs = [
"approved_strings",
"unreviewed_strings",
"fuzzy_strings",
]
totals = {}
resource_attrs = [
"resource",
"locale",
"latest_translation__date",
]
annotations = dict(
total_strings=F('resource__total_strings'),
project=F('resource__project'),
path=F('resource__path'),
)
# this is a `values` of translated resources, with the project, path
# and total_strings denormalized to project/path/total_strings.
qs = trs.values(*resource_attrs + attrs).annotate(**annotations)
translated_resource_tags = _tag_iterator(qs, **kwargs)
attrs = ['total_strings'] + attrs
# iterate through associated tags for all matching translated resources
for tr, (_pk, _slug, _name) in translated_resource_tags:
if kwargs.get('groupby'):
key = tr[kwargs['groupby']]
else:
key = _slug
if key not in totals:
# create a totals[tag] with zeros for this tag
totals[key] = dict((attr, 0) for attr in attrs)
totals[key].update(dict(name=_name, pk=_pk, last_change=None))
for attr in attrs:
# add the total for this translated resource to the tags total
totals[key][attr] += tr[attr]
return totals
@pytest.fixture
def calculate_tags():
"""This fixture provides a function for calculating the tags and their
    expected stats, etc. currently in the database, after filtering for
    the provided **kwargs.
"""
return _calculate_tags
def _calculate_tags_latest(**kwargs):
# calculate what the latest events per-tag with given **kwargs should be
# the long-hand way
translations = Translation.objects.all()
latest_dates = {}
translation_attrs = [
"pk",
"date",
"locale",
]
annotations = dict(
resource=F('entity__resource'),
path=F('entity__resource__path'),
project=F('entity__resource__project'),
)
# this is a `values` of translations, with the resource, path and project
# denormalized to resource/path/project.
qs = translations.values(*translation_attrs).annotate(**annotations)
translation_tags = _tag_iterator(qs, **kwargs)
# iterate through associated tags for all matching translations
for translation, (tag, __, __) in translation_tags:
if kwargs.get('groupby'):
key = translation[kwargs['groupby']]
else:
key = tag
# get the current latest for this tag
_pk, _date = latest_dates.get(
key, (None, timezone.make_aware(datetime.min))
)
if translation['date'] > _date:
# set this translation if its newer than the current latest
# for this tag
latest_dates[key] = (translation['pk'], translation['date'])
return latest_dates
@pytest.fixture
def calculate_tags_latest():
"""This fixture provides a function for calculating the tags and their
expected latest changes currently in the database, after filtering for
    the provided **kwargs.
"""
return _calculate_tags_latest
@pytest.fixture
def tag_matrix(site_matrix):
"""This provides the `site_matrix` fixture but with added tags.
This fixture can be used in conjunction with the `calculate_tags`
fixture to test for tags using kwargs from the parametrized
`tag_test_kwargs` fixture
"""
factories = site_matrix['factories']
factories['tag'] = tag_factory()
# this creates 113 tags
# every 20th tag gets no priority
# the others get between 0-5
tags = factories['tag'](
args=[
{
'priority': (
None
if not i or not (i % 20)
else int(i / 20)
)
}
for i in range(0, 113)
]
)
# associate tags with resources
for i, resource in enumerate(site_matrix['resources']):
x = 0
indeces = []
# this distributes the tags amongst resources in
# a fairly arbitrary fashion
# every resource gets the tag with index of 0
while True:
idx = x * (i + 1)
if idx >= len(tags):
break
indeces.append(idx)
x = x + 1
# add tags to the resource's tag_set
resource.tag_set.add(*[tags[n] for n in indeces])
site_matrix['tags'] = tags
return site_matrix
_tag_kwargs = OrderedDict(
(('empty', dict()),
('project0_match', dict(projects=[0])),
('project1_match', dict(projects=[1])),
('locale_match', dict(locales=[0])),
('locale_and_project_match', dict(locales=[0], projects=[0])),
('locales_and_projects_match', dict(projects=[1, 2], locales=[0, 1])),
('priority_match', dict(priority=3)),
('priority_true_match', dict(priority=True)),
('priority_false_match', dict(priority=False)),
('path_no_match', dict(path="NOPATHSHERE")),
('path_match', dict(path=11)),
('path_glob', dict(path="resource[5-9]*")),
('slug_no_match', dict(slug="NOSLUGSHERE")),
('slug_exact', dict(slug=23)),
('slug_glob', dict(slug="factory*7")),
('party_glob',
dict(path="resource[1]*",
projects=[0, 2],
locales=[1, 2],
slug="factory*[2-5]"))))
@pytest.fixture(params=_tag_kwargs)
def tag_test_kwargs(request, tag_matrix):
"""This is a parametrized fixture that provides a range of possible
**kwargs for testing the TagsTool against tags in the `tag_matrix`
fixture.
    If the parameter value for `path` is an `int`, it is mangled to the
    `path` of the `resource` (in the site_matrix) with the corresponding
    index.
    If a `slug` is an `int`, it is likewise mangled to the `tag`.`slug` with
    a corresponding index.
`projects` and `locales` are similarly mangled to the corresponding
projects/locales in the site_matrix.
Parameters that are suffixed with `_match` expect at least 1 result
to be returned.
Parameters suffixed with `_exact` expect exactly 1 result.
Parameters suffixed with `_glob` expect more than 1 result.
Finally, parameters suffixed with `_no_match` expect 0 results.
"""
kwargs = _tag_kwargs.get(request.param).copy()
if kwargs.get("path"):
if isinstance(kwargs['path'], int):
kwargs['path'] = tag_matrix['resources'][kwargs['path']].path
if kwargs.get("slug"):
if isinstance(kwargs['slug'], int):
kwargs["slug"] = tag_matrix['tags'][kwargs['slug']].slug
for k in ['projects', 'locales']:
if kwargs.get(k):
kwargs[k] = [
tag_matrix[k][i]
for i
in kwargs[k]]
return request.param, kwargs
_tag_data_init_kwargs = OrderedDict(
(('no_args',
dict(annotations=None,
groupby=None,
locales=None,
path=None,
priority=None,
projects=None,
slug=None)),
('args',
dict(annotations=1,
groupby=2,
locales=3,
path=4,
priority=5,
projects=6,
slug=7))))
@pytest.fixture(params=_tag_data_init_kwargs)
def tag_data_init_kwargs(request):
"""This is a parametrized fixture that provides 2 sets
    of possible **kwargs to instantiate the TagsDataTools with.
    The first set of kwargs is all set to `None`, and the
    second contains numeric values for testing against.
"""
return _tag_data_init_kwargs.get(request.param).copy()
_tag_init_kwargs = OrderedDict(
(('no_args',
dict(locales=None,
path=None,
priority=None,
projects=None,
slug=None)),
('args',
dict(locales=1,
path=2,
priority=3,
projects=4,
slug=5))))
@pytest.fixture(params=_tag_init_kwargs)
def tag_init_kwargs(request):
"""This is a parametrized fixture that provides 2 sets
    of possible **kwargs to instantiate the TagsTool with.
    The first set of kwargs is all set to `None`, and the
    second contains numeric values for testing against.
"""
return _tag_init_kwargs.get(request.param).copy()
|
the-stack_0_5492 | from threading import Thread
from time import sleep
from tkinter import *
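# A minimal two-player chess clock built with tkinter: a background thread
# decrements whichever side's 20:00 countdown is active, and each Stop button
# hands the running clock over to the other player.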
def main():
global left_timer, right_timer
while True:
sleep(1)
if flag:
left_timer = left_timer - 1
m = int(left_timer / 60)
s = left_timer % 60
left['text'] = '{:02d}:{:02d}'.format(m, s)
else:
right_timer = right_timer - 1
m = int(right_timer / 60)
s = right_timer % 60
right['text'] = '{:02d}:{:02d}'.format(m, s)
def s():
    # run the countdown loop as a daemon thread so it exits with the GUI
    t = Thread(target=main, daemon=True)
t.start()
def stopl():
global flag
flag = False
def stopr():
global flag
flag = True
root = Tk()
# variables ##########################
left_timer = 1200
right_timer = 1200
flag = True
######################################
# Label ##############################
l1 = Label(root, text='Left Player',
font=('times', 20, 'italic'))
l1.grid(row=0, column=0)
l2 = Label(root, text='Right Player',
font=('times', 20, 'italic'))
l2.grid(row=0, column=1)
######################################
# Timer Label ########################
left = Label(root, text='20:00',
font=('courier', 20))
left.grid(row=1, column=0)
right = Label(root, text='20:00',
font=('courier', 20))
right.grid(row=1, column=1)
######################################
# Button #############################
b1 = Button(root,
text='Stop',
command=stopl,
font=('courier', 20))
b1.grid(row=2, column=0)
b2 = Button(root,
command=stopr,
text='Stop',
font=('courier', 20))
b2.grid(row=2, column=1)
b3 = Button(root,
text='Start',
command=s,
font=('courier', 20))
b3.grid(row=3, column=0)
######################################
root.mainloop() |
the-stack_0_5495 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
"""Unit Tests for internal methods."""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import unittest
from collections import namedtuple
import graphviz as gv
import numpy as np
from onnx import TensorProto
from onnx import helper, numpy_helper
import tensorflow as tf
from tf2onnx import utils
from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher
from tf2onnx.graph import GraphUtil
from common import unittest_main
# pylint: disable=missing-docstring
def onnx_to_graphviz(g):
"""Onnx graph as dot string."""
g2 = gv.Digraph()
for node in g.get_nodes():
kwarg = {}
attr = node.attr
if "shape" in attr:
kwarg["shape"] = str(attr["shape"].ints)
if "broadcast" in attr:
kwarg["broadcast"] = str(attr["broadcast"].i)
g2.node(node.name, op_type=node.type, **kwarg)
for node in g.get_nodes():
for i in node.input:
if i:
g2.edge(i, node.name)
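    # collapse all whitespace so the generated dot source compares as a single line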
return " ".join(g2.source.split())
def onnx_pretty(g, args=None):
"""Onnx graph pretty print."""
graph_proto = g.make_model("converted from {}".format(args.input))
return helper.printable_graph(graph_proto.graph)
class Tf2OnnxInternalTests(unittest.TestCase):
def setUp(self):
"""Setup test."""
# suppress log info of tensorflow so that result of test can be seen much easier
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.logging.set_verbosity(tf.logging.WARN)
utils.INTERNAL_NAME = 1
arg = namedtuple("Arg", "input inputs outputs verbose")
self._args0 = arg(input="test", inputs=[], outputs=["output:0"], verbose=False)
self._args1 = arg(input="test", inputs=["input:0"], outputs=["output:0"], verbose=False)
self._args2 = arg(input="test", inputs=["input1:0", "input2:0"], outputs=["output:0"], verbose=False)
self._args3 = arg(input="test", inputs=["input1:0", "input2:0", "prob:0"], outputs=["output:0"], verbose=False)
self._args4 = arg(input="test", inputs=["input1:0", "input2:0"], outputs=["output1:0", "output2:0"],
verbose=False)
@staticmethod
def sample_net():
n1 = helper.make_node("Abs", ["input"], ["n1:0"], name="n1")
n2 = helper.make_node("Abs", ["n1:0"], ["n2:0"], name="n2")
n3 = helper.make_node("Abs", ["n1:0"], ["n3:0"], name="n3")
n4 = helper.make_node("Add", ["n2:0", "n3:0"], ["n4:0"], name="n4")
n5 = helper.make_node("Abs", ["n4:0"], ["n5:0"], name="n5")
n6 = helper.make_node("Identity", ["n5:0"], ["n6:0"], name="n6")
graph_proto = helper.make_graph(
nodes=[n1, n2, n3, n4, n5, n6],
name="test",
inputs=[helper.make_tensor_value_info("input", TensorProto.FLOAT, [2, 2])],
outputs=[helper.make_tensor_value_info("n5:0", TensorProto.FLOAT, [2, 2])],
initializer=[]
)
return graph_proto
def test_insert_node1(self):
graph_proto = self.sample_net()
g = GraphUtil.create_graph_from_onnx_graph(graph_proto)
n2 = g.get_node_by_name("n2")
g.insert_new_node_on_input(n2, "Abs", "n1:0", name="n7")
ops = g.get_nodes()
g.topological_sort(ops)
result = onnx_to_graphviz(g)
expected = 'digraph { Placeholder__4 [op_type=Placeholder] ' \
'n1 [op_type=Abs] n7 [op_type=Abs] n2 [op_type=Abs] n3 [op_type=Abs] ' \
'n4 [op_type=Add] n5 [op_type=Abs] n5_graph_outputs_Identity__3 [op_type=Identity] ' \
'n6 [op_type=Identity] input -> n1 n1:0 -> n7 n7:0 -> n2 n1:0 -> n3 ' \
'n2:0 -> n4 n3:0 -> n4 n4:0 -> n5 n5_raw_output___2:0 -> n5_graph_outputs_Identity__3 ' \
'n5_raw_output___2:0 -> n6 }'
self.assertEqual(expected, result)
def test_insert_node2(self):
graph_proto = self.sample_net()
g = GraphUtil.create_graph_from_onnx_graph(graph_proto)
g.insert_new_node_on_output("Abs", "n1:0", name="n7")
ops = g.get_nodes()
g.topological_sort(ops)
result = onnx_to_graphviz(g)
expected = 'digraph { Placeholder__4 [op_type=Placeholder] n1 [op_type=Abs] n7 [op_type=Abs] ' \
'n3 [op_type=Abs] n2 [op_type=Abs] n4 [op_type=Add] n5 [op_type=Abs] ' \
'n5_graph_outputs_Identity__3 [op_type=Identity] n6 [op_type=Identity] ' \
'input -> n1 n1:0 -> n7 n7:0 -> n3 n7:0 -> n2 n2:0 -> n4 n3:0 -> n4 ' \
'n4:0 -> n5 n5_raw_output___2:0 -> n5_graph_outputs_Identity__3 n5_raw_output___2:0 -> n6 }'
self.assertEqual(expected, result)
def test_remove_input(self):
graph_proto = self.sample_net()
g = GraphUtil.create_graph_from_onnx_graph(graph_proto)
n4 = g.get_node_by_name("n4")
g.remove_input(n4, n4.input[1])
ops = g.get_nodes()
g.topological_sort(ops)
result = onnx_to_graphviz(g)
expected = 'digraph { Placeholder__4 [op_type=Placeholder] n1 [op_type=Abs] n3 [op_type=Abs] ' \
'n2 [op_type=Abs] n4 [op_type=Add] n5 [op_type=Abs] ' \
'n5_graph_outputs_Identity__3 [op_type=Identity] n6 [op_type=Identity] ' \
'input -> n1 n1:0 -> n3 n1:0 -> n2 n2:0 -> n4 n4:0 -> n5 ' \
'n5_raw_output___2:0 -> n5_graph_outputs_Identity__3 n5_raw_output___2:0 -> n6 }'
self.assertEqual(expected, result)
def test_rewrite_subgraph(self):
graph_proto = self.sample_net()
g = GraphUtil.create_graph_from_onnx_graph(graph_proto)
pattern = \
OpTypePattern('Abs', name='output', inputs=[
OpTypePattern('Add', name='input')
])
ops = g.get_nodes()
matcher = GraphMatcher(pattern)
match_results = list(matcher.match_ops(ops))
for match in match_results:
input_node = match.get_op('input')
output_node = match.get_op('output')
op_name = utils.make_name("ReplacedOp")
out_name = utils.port_name(op_name)
new_node = g.make_node("Sub", inputs=input_node.input, outputs=[out_name], name=op_name)
g.replace_all_inputs(ops, output_node.output[0], new_node.output[0])
for n in set(match.get_nodes()):
g.remove_node(n.name)
g.topological_sort(ops)
result = onnx_to_graphviz(g)
expected = 'digraph { Placeholder__4 [op_type=Placeholder] n1 [op_type=Abs] ' \
'n3 [op_type=Abs] n2 [op_type=Abs] ReplacedOp__5 [op_type=Sub] ' \
'n5_graph_outputs_Identity__3 [op_type=Identity] n6 [op_type=Identity] ' \
'input -> n1 n1:0 -> n3 n1:0 -> n2 n2:0 -> ReplacedOp__5 ' \
'n3:0 -> ReplacedOp__5 ReplacedOp__5:0 -> n5_graph_outputs_Identity__3 ' \
'ReplacedOp__5:0 -> n6 }'
self.assertEqual(expected, result)
def test_match_flipped(self):
n1 = helper.make_node("Sub", ["i1", "i1"], ["n1:0"], name="n1")
n2 = helper.make_node("Add", ["i2", "i2"], ["n2:0"], name="n2")
n3 = helper.make_node("Mul", ["n1:0", "n2:0"], ["n3:0"], name="n3")
graph_proto = helper.make_graph(
nodes=[n1, n2, n3],
name="test",
inputs=[helper.make_tensor_value_info("i1", TensorProto.FLOAT, [2, 2]),
helper.make_tensor_value_info("i2", TensorProto.FLOAT, [2, 2])],
outputs=[helper.make_tensor_value_info("n2:0", TensorProto.FLOAT, [2, 2])],
initializer=[]
)
g = GraphUtil.create_graph_from_onnx_graph(graph_proto)
pattern = OpTypePattern('Mul', inputs=[
OpTypePattern('Add'),
OpTypePattern('Sub')
])
ops = g.get_nodes()
matcher = GraphMatcher(pattern, allow_reorder=True)
match_results = list(matcher.match_ops(ops))
self.assertEqual(1, len(match_results))
def test_cmdarg_parse(self):
arg = "input/V-1_2:0,input/X:0[1,2,3],Y:1[4,5],Z:3,A:1,B"
expected_inputs = ['input/V-1_2:0', 'input/X:0', 'Y:1', 'Z:3', 'A:1', 'B']
expected_shape = {'Y:1': [4, 5], 'input/X:0': [1, 2, 3]}
inputs, shape_override = utils.split_nodename_and_shape(arg)
self.assertEqual(expected_inputs, inputs)
self.assertEqual(expected_shape, shape_override)
def test_shape_utils(self):
self.assertEqual(utils.merge_shapes(None, None), None)
self.assertEqual(utils.merge_shapes([], None), [])
self.assertEqual(utils.merge_shapes(None, [1, 2, 3]), [1, 2, 3])
self.assertEqual(utils.merge_shapes([1, 3], [None, 3]), [1, 3])
self.assertEqual(utils.merge_shapes([1, None, 3], (-1, 2, "unk")), [1, 2, 3])
self.assertTrue(utils.are_shapes_compatible(None, []))
self.assertTrue(utils.are_shapes_compatible([1, None, 3], (-1, 2, "unk")))
self.assertFalse(utils.are_shapes_compatible([1, 2, 3], (2, 3)))
self.assertFalse(utils.are_shapes_compatible([1, 2, 3], (4, 5, 6)))
self.assertTrue(utils.are_shapes_equal(None, None))
self.assertFalse(utils.are_shapes_equal(None, []))
self.assertTrue(utils.are_shapes_equal([1, 2, 3], (1, 2, 3)))
def test_data_format(self):
n1 = helper.make_node("Conv", ["X", "W"], ["Y"], name="n1", data_format="NHWC")
graph_proto = helper.make_graph(
nodes=[n1],
name="test",
inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, [2, 2]),
helper.make_tensor_value_info("W", TensorProto.FLOAT, [2, 2])],
outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, [2, 2])],
initializer=[]
)
g = GraphUtil.create_graph_from_onnx_graph(graph_proto)
n = g.get_node_by_name("n1")
self.assertEqual(n.data_format, "NHWC")
self.assertTrue(n.is_nhwc())
def test_node_attr_onnx(self):
n1 = helper.make_node("Conv", ["X", "W"], ["Y"], name="n1", my_attr="my_attr")
graph_proto = helper.make_graph(
nodes=[n1],
name="test",
inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, [2, 2]),
helper.make_tensor_value_info("W", TensorProto.FLOAT, [2, 2])],
outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, [2, 2])],
initializer=[]
)
g = GraphUtil.create_graph_from_onnx_graph(graph_proto)
n1 = g.get_node_by_name("n1")
self.assertTrue("my_attr" in n1.attr)
self.assertTrue("my_attr" not in n1.attr_onnx)
n1 = helper.make_node("Conv", ["X", "W"], ["Y"], name="n1", domain="my_domain", my_attr="my_attr")
graph_proto = helper.make_graph(
nodes=[n1],
name="test",
inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, [2, 2]),
helper.make_tensor_value_info("W", TensorProto.FLOAT, [2, 2])],
outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, [2, 2])],
initializer=[]
)
g = GraphUtil.create_graph_from_onnx_graph(graph_proto)
n1 = g.get_node_by_name("n1")
self.assertTrue("my_attr" in n1.attr)
self.assertTrue("my_attr" in n1.attr_onnx)
def test_tensor_data(self):
tensors = {
"empty_tensor": np.array([], dtype=np.float32),
"multi_dim_empty_tensor": np.array([[], []], dtype=np.float32),
"scalar": np.array(1., dtype=np.float32),
"one_item_array": np.array([1.], dtype=np.float32),
"normal_array": np.array([[1., 2.], [2., 3.]], dtype=np.float32)
}
tf.reset_default_graph()
with tf.Session() as sess:
for n, data in tensors.items():
tf.constant(data, dtype=tf.float32, name=n)
for tf_node in sess.graph.get_operations():
name = tf_node.name
self.assertTrue(name in tensors.keys())
self.assertTrue("value" in tf_node.node_def.attr)
# convert to onnx tensor value
tensor_value = utils.tf_to_onnx_tensor(
utils.get_tf_node_attr(tf_node, "value"),
name=utils.port_name(tf_node.name)
)
attr = helper.make_attribute("value", tensor_value)
# same as node.get_tensor_value(is_list=False)
actual = numpy_helper.to_array(helper.get_attribute_value(attr))
expected = tensors[name]
self.assertTrue(np.array_equal(expected, actual))
if __name__ == '__main__':
unittest_main()
|
the-stack_0_5496 | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import io
import json
import sys
import time
import typing
from pickle import dumps as p_dumps, loads as p_loads
import pika
# noinspection PyPackageRequirements
from pyspark import SparkContext, RDD
from fate_arch.common import conf_utils, file_utils
from fate_arch.abc import FederationABC, GarbageCollectionABC
from fate_arch.common import Party
from fate_arch.common.log import getLogger
from fate_arch.computing.spark import get_storage_level, Table
from fate_arch.computing.spark._materialize import materialize
from fate_arch.federation.rabbitmq._mq_channel import MQChannel
from fate_arch.federation.rabbitmq._rabbit_manager import RabbitManager
LOGGER = getLogger()
# default message max size in bytes = 1MB
DEFAULT_MESSAGE_MAX_SIZE = 1048576
NAME_DTYPE_TAG = "<dtype>"
_SPLIT_ = "^"
# Datastream is a wrapper around StringIO; it receives kv pairs and dumps them to a JSON string
class Datastream(object):
def __init__(self):
self._string = io.StringIO()
self._string.write("[")
def get_size(self):
return sys.getsizeof(self._string.getvalue())
def get_data(self):
self._string.write("]")
return self._string.getvalue()
def append(self, kv: dict):
# add ',' if not the first element
if self._string.getvalue() != "[":
self._string.write(",")
json.dump(kv, self._string)
def clear(self):
self._string.close()
self.__init__()
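# Editor's illustrative sketch (not part of the original module) of how
# Datastream is used by _partition_send further below: entries are appended
# until the JSON payload nears the configured maximum message size, then the
# payload is flushed and the stream cleared.
#
#   stream = Datastream()
#   stream.append({"k": "...", "v": "..."})
#   payload = stream.get_data()   # e.g. '[{"k": "...", "v": "..."}]'
#   stream.clear()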
class FederationDataType(object):
OBJECT = "obj"
TABLE = "Table"
class MQ(object):
def __init__(self, host, port, union_name, policy_id, route_table):
self.host = host
self.port = port
self.union_name = union_name
self.policy_id = policy_id
self.route_table = route_table
def __str__(self):
return (
f"MQ(host={self.host}, port={self.port}, union_name={self.union_name}, "
f"policy_id={self.policy_id}, route_table={self.route_table})"
)
def __repr__(self):
return self.__str__()
class _QueueNames(object):
def __init__(self, vhost, send, receive):
self.vhost = vhost
# self.union = union
self.send = send
self.receive = receive
_remote_history = set()
def _remote_tag_not_duplicate(name, tag, parties):
for party in parties:
if (name, tag, party) in _remote_history:
return False
_remote_history.add((name, tag, party))
return True
_get_history = set()
def _get_tag_not_duplicate(name, tag, party):
if (name, tag, party) in _get_history:
return False
_get_history.add((name, tag, party))
return True
class Federation(FederationABC):
@staticmethod
def from_conf(
federation_session_id: str,
party: Party,
runtime_conf: dict,
rabbitmq_config: dict,
):
LOGGER.debug(f"rabbitmq_config: {rabbitmq_config}")
host = rabbitmq_config.get("host")
port = rabbitmq_config.get("port")
mng_port = rabbitmq_config.get("mng_port")
base_user = rabbitmq_config.get("user")
base_password = rabbitmq_config.get("password")
federation_info = runtime_conf.get("job_parameters", {}).get(
"federation_info", {}
)
union_name = federation_info.get("union_name")
policy_id = federation_info.get("policy_id")
rabbitmq_run = runtime_conf.get("job_parameters", {}).get("rabbitmq_run", {})
LOGGER.debug(f"rabbitmq_run: {rabbitmq_run}")
max_message_size = rabbitmq_run.get(
"max_message_size", DEFAULT_MESSAGE_MAX_SIZE
)
LOGGER.debug(f"set max message size to {max_message_size} Bytes")
rabbit_manager = RabbitManager(
base_user, base_password, f"{host}:{mng_port}", rabbitmq_run
)
rabbit_manager.create_user(union_name, policy_id)
route_table_path = rabbitmq_config.get("route_table")
if route_table_path is None:
route_table_path = "conf/rabbitmq_route_table.yaml"
route_table = file_utils.load_yaml_conf(conf_path=route_table_path)
mq = MQ(host, port, union_name, policy_id, route_table)
return Federation(
federation_session_id, party, mq, rabbit_manager, max_message_size
)
def __init__(
self,
session_id,
party: Party,
mq: MQ,
rabbit_manager: RabbitManager,
max_message_size,
):
self._session_id = session_id
self._party = party
self._mq = mq
self._rabbit_manager = rabbit_manager
self._queue_map: typing.MutableMapping[_QueueKey, _QueueNames] = {}
self._channels_map: typing.MutableMapping[_QueueKey, MQChannel] = {}
self._vhost_set = set()
self._name_dtype_map = {}
self._message_cache = {}
self._max_message_size = max_message_size
def __getstate__(self):
pass
def get(
self, name: str, tag: str, parties: typing.List[Party], gc: GarbageCollectionABC
) -> typing.List:
log_str = f"[rabbitmq.get](name={name}, tag={tag}, parties={parties})"
LOGGER.debug(f"[{log_str}]start to get")
# for party in parties:
# if not _get_tag_not_duplicate(name, tag, party):
# raise ValueError(f"[{log_str}]get from {party} with duplicate tag")
_name_dtype_keys = [
_SPLIT_.join([party.role, party.party_id, name, tag, "get"])
for party in parties
]
if _name_dtype_keys[0] not in self._name_dtype_map:
mq_names = self._get_mq_names(parties, dtype=NAME_DTYPE_TAG)
channel_infos = self._get_channels(mq_names=mq_names)
rtn_dtype = []
for i, info in enumerate(channel_infos):
obj = self._receive_obj(
info, name, tag=_SPLIT_.join([tag, NAME_DTYPE_TAG])
)
rtn_dtype.append(obj)
LOGGER.debug(
f"[rabbitmq.get] _name_dtype_keys: {_name_dtype_keys}, dtype: {obj}"
)
for k in _name_dtype_keys:
if k not in self._name_dtype_map:
self._name_dtype_map[k] = rtn_dtype[0]
rtn_dtype = self._name_dtype_map[_name_dtype_keys[0]]
rtn = []
dtype = rtn_dtype.get("dtype", None)
partitions = rtn_dtype.get("partitions", None)
if dtype == FederationDataType.TABLE:
mq_names = self._get_mq_names(parties, name, partitions=partitions)
for i in range(len(mq_names)):
party = parties[i]
role = party.role
party_id = party.party_id
party_mq_names = mq_names[i]
receive_func = self._get_partition_receive_func(
name,
tag,
party_id,
role,
party_mq_names,
mq=self._mq,
connection_conf=self._rabbit_manager.runtime_config.get(
"connection", {}
),
)
sc = SparkContext.getOrCreate()
rdd = sc.parallelize(range(partitions), partitions)
rdd = rdd.mapPartitionsWithIndex(receive_func)
rdd = materialize(rdd)
table = Table(rdd)
rtn.append(table)
# add gc
gc.add_gc_action(tag, table, "__del__", {})
LOGGER.debug(
f"[{log_str}]received rdd({i + 1}/{len(parties)}), party: {parties[i]} "
)
else:
mq_names = self._get_mq_names(parties, name)
channel_infos = self._get_channels(mq_names=mq_names)
for i, info in enumerate(channel_infos):
obj = self._receive_obj(info, name, tag)
LOGGER.debug(
f"[{log_str}]received obj({i + 1}/{len(parties)}), party: {parties[i]} "
)
rtn.append(obj)
LOGGER.debug(f"[{log_str}]finish to get")
return rtn
def remote(
self,
v,
name: str,
tag: str,
parties: typing.List[Party],
gc: GarbageCollectionABC,
) -> typing.NoReturn:
log_str = f"[rabbitmq.remote](name={name}, tag={tag}, parties={parties})"
# if not _remote_tag_not_duplicate(name, tag, parties):
# raise ValueError(f"[{log_str}]remote to {parties} with duplicate tag")
_name_dtype_keys = [
_SPLIT_.join([party.role, party.party_id, name, tag, "remote"])
for party in parties
]
if _name_dtype_keys[0] not in self._name_dtype_map:
mq_names = self._get_mq_names(parties, dtype=NAME_DTYPE_TAG)
channel_infos = self._get_channels(mq_names=mq_names)
if isinstance(v, Table):
body = {"dtype": FederationDataType.TABLE, "partitions": v.partitions}
else:
body = {"dtype": FederationDataType.OBJECT}
LOGGER.debug(
f"[rabbitmq.remote] _name_dtype_keys: {_name_dtype_keys}, dtype: {body}"
)
self._send_obj(
name=name,
tag=_SPLIT_.join([tag, NAME_DTYPE_TAG]),
data=p_dumps(body),
channel_infos=channel_infos,
)
for k in _name_dtype_keys:
if k not in self._name_dtype_map:
self._name_dtype_map[k] = body
if isinstance(v, Table):
total_size = v.count()
partitions = v.partitions
LOGGER.debug(
f"[{log_str}]start to remote RDD, total_size={total_size}, partitions={partitions}"
)
mq_names = self._get_mq_names(parties, name, partitions=partitions)
# add gc
gc.add_gc_action(tag, v, "__del__", {})
send_func = self._get_partition_send_func(
name,
tag,
partitions,
mq_names,
mq=self._mq,
maximun_message_size=self._max_message_size,
connection_conf=self._rabbit_manager.runtime_config.get(
"connection", {}
),
)
# noinspection PyProtectedMember
v._rdd.mapPartitionsWithIndex(send_func).count()
else:
LOGGER.debug(f"[{log_str}]start to remote obj")
mq_names = self._get_mq_names(parties, name)
channel_infos = self._get_channels(mq_names=mq_names)
self._send_obj(
name=name, tag=tag, data=p_dumps(v), channel_infos=channel_infos
)
LOGGER.debug(f"[{log_str}]finish to remote")
def cleanup(self, parties):
LOGGER.debug("[rabbitmq.cleanup]start to cleanup...")
for party in parties:
vhost = self._get_vhost(party)
LOGGER.debug(f"[rabbitmq.cleanup]start to cleanup vhost {vhost}...")
self._rabbit_manager.clean(vhost)
LOGGER.debug(f"[rabbitmq.cleanup]cleanup vhost {vhost} done")
if self._mq.union_name:
LOGGER.debug(f"[rabbitmq.cleanup]clean user {self._mq.union_name}.")
self._rabbit_manager.delete_user(user=self._mq.union_name)
def _get_vhost(self, party):
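        # order the two (role, party_id) pairs so that both parties derive the
        # same vhost name for any given pairing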
low, high = (
(self._party, party) if self._party < party else (party, self._party)
)
vhost = (
f"{self._session_id}-{low.role}-{low.party_id}-{high.role}-{high.party_id}"
)
return vhost
def _get_mq_names(
self, parties: typing.List[Party], name=None, partitions=None, dtype=None
) -> typing.List:
mq_names = [
self._get_or_create_queue(party, name, partitions, dtype)
for party in parties
]
return mq_names
def _get_or_create_queue(
self, party: Party, name=None, partitions=None, dtype=None
) -> typing.Tuple:
queue_key_list = []
queue_infos = []
if dtype is not None:
queue_key = _SPLIT_.join([party.role, party.party_id, dtype, dtype])
queue_key_list.append(queue_key)
else:
if partitions is not None:
for i in range(partitions):
queue_key = _SPLIT_.join([party.role, party.party_id, name, str(i)])
queue_key_list.append(queue_key)
elif name is not None:
queue_key = _SPLIT_.join([party.role, party.party_id, name])
queue_key_list.append(queue_key)
else:
queue_key = _SPLIT_.join([party.role, party.party_id])
queue_key_list.append(queue_key)
for queue_key in queue_key_list:
if queue_key not in self._queue_map:
LOGGER.debug(
f"[rabbitmq.get_or_create_queue]queue: {queue_key} for party:{party} not found, start to create"
)
# gen names
vhost_name = self._get_vhost(party)
queue_key_splits = queue_key.split(_SPLIT_)
queue_suffix = "-".join(queue_key_splits[2:])
send_queue_name = f"send-{self._session_id}-{self._party.role}-{self._party.party_id}-{party.role}-{party.party_id}-{queue_suffix}"
receive_queue_name = f"receive-{self._session_id}-{party.role}-{party.party_id}-{self._party.role}-{self._party.party_id}-{queue_suffix}"
queue_names = _QueueNames(
vhost_name, send_queue_name, receive_queue_name
)
# initial vhost
if queue_names.vhost not in self._vhost_set:
self._rabbit_manager.create_vhost(queue_names.vhost)
self._rabbit_manager.add_user_to_vhost(
self._mq.union_name, queue_names.vhost
)
self._vhost_set.add(queue_names.vhost)
# initial send queue, the name is send-${vhost}
self._rabbit_manager.create_queue(queue_names.vhost, queue_names.send)
# initial receive queue, the name is receive-${vhost}
self._rabbit_manager.create_queue(
queue_names.vhost, queue_names.receive
)
upstream_uri = self._upstream_uri(party_id=party.party_id)
self._rabbit_manager.federate_queue(
upstream_host=upstream_uri,
vhost=queue_names.vhost,
send_queue_name=queue_names.send,
receive_queue_name=queue_names.receive,
)
self._queue_map[queue_key] = queue_names
# TODO: check federated queue status
LOGGER.debug(
f"[rabbitmq.get_or_create_queue]queue for queue_key: {queue_key}, party:{party} created"
)
queue_names = self._queue_map[queue_key]
queue_infos.append((queue_key, queue_names))
return queue_infos
def _upstream_uri(self, party_id):
host = self._mq.route_table.get(int(party_id)).get("host")
port = self._mq.route_table.get(int(party_id)).get("port")
upstream_uri = (
f"amqp://{self._mq.union_name}:{self._mq.policy_id}@{host}:{port}"
)
return upstream_uri
def _get_channel(
self, mq, queue_names: _QueueNames, party_id, role, connection_conf: dict
):
return MQChannel(
host=mq.host,
port=mq.port,
user=mq.union_name,
password=mq.policy_id,
vhost=queue_names.vhost,
send_queue_name=queue_names.send,
receive_queue_name=queue_names.receive,
party_id=party_id,
role=role,
extra_args=connection_conf,
)
def _get_channels(self, mq_names):
channel_infos = []
for e in mq_names:
for queue_key, queue_names in e:
queue_key_splits = queue_key.split(_SPLIT_)
role = queue_key_splits[0]
party_id = queue_key_splits[1]
info = self._channels_map.get(queue_key)
if info is None:
info = self._get_channel(
self._mq,
queue_names,
party_id=party_id,
role=role,
connection_conf=self._rabbit_manager.runtime_config.get(
"connection", {}
),
)
self._channels_map[queue_key] = info
channel_infos.append(info)
return channel_infos
# can't pickle _thread.lock objects
def _get_channels_index(self, index, mq_names, mq, connection_conf: dict):
channel_infos = []
for e in mq_names:
queue_key, queue_names = e[index]
queue_key_splits = queue_key.split(_SPLIT_)
role = queue_key_splits[0]
party_id = queue_key_splits[1]
info = self._get_channel(
mq,
queue_names,
party_id=party_id,
role=role,
connection_conf=connection_conf,
)
channel_infos.append(info)
return channel_infos
def _send_obj(self, name, tag, data, channel_infos):
for info in channel_infos:
properties = pika.BasicProperties(
content_type="text/plain",
app_id=info.party_id,
message_id=name,
correlation_id=tag,
delivery_mode=1,
)
LOGGER.debug(f"[rabbitmq._send_obj]properties:{properties}.")
info.basic_publish(body=data, properties=properties)
def _get_message_cache_key(self, name, tag, party_id, role):
cache_key = _SPLIT_.join([name, tag, str(party_id), role])
return cache_key
def _receive_obj(self, channel_info, name, tag):
party_id = channel_info._party_id
role = channel_info._role
wish_cache_key = self._get_message_cache_key(name, tag, party_id, role)
if wish_cache_key in self._message_cache:
return self._message_cache[wish_cache_key]
for method, properties, body in channel_info.consume():
LOGGER.debug(
f"[rabbitmq._receive_obj] method: {method}, properties: {properties}."
)
if properties.message_id != name or properties.correlation_id != tag:
# todo: fix this
LOGGER.warning(
f"[rabbitmq._receive_obj] require {name}.{tag}, got {properties.message_id}.{properties.correlation_id}"
)
cache_key = self._get_message_cache_key(
properties.message_id, properties.correlation_id, party_id, role
)
# object
if properties.content_type == "text/plain":
self._message_cache[cache_key] = p_loads(body)
channel_info.basic_ack(delivery_tag=method.delivery_tag)
if cache_key == wish_cache_key:
channel_info.cancel()
LOGGER.debug(
f"[rabbitmq._receive_obj] cache_key: {cache_key}, obj: {self._message_cache[cache_key]}"
)
return self._message_cache[cache_key]
else:
raise ValueError(
f"[rabbitmq._receive_obj] properties.content_type is {properties.content_type}, but must be text/plain"
)
def _send_kv(
self, name, tag, data, channel_infos, partition_size, partitions, message_key
):
headers = {
"partition_size": partition_size,
"partitions": partitions,
"message_key": message_key,
}
for info in channel_infos:
properties = pika.BasicProperties(
content_type="application/json",
app_id=info.party_id,
message_id=name,
correlation_id=tag,
headers=headers,
delivery_mode=1,
)
print(f"[rabbitmq._send_kv]info: {info}, properties: {properties}.")
info.basic_publish(body=data, properties=properties)
def _get_partition_send_func(
self,
name,
tag,
partitions,
mq_names,
mq,
maximun_message_size,
connection_conf: dict,
):
def _fn(index, kvs):
return self._partition_send(
index,
kvs,
name,
tag,
partitions,
mq_names,
mq,
maximun_message_size,
connection_conf,
)
return _fn
def _partition_send(
self,
index,
kvs,
name,
tag,
partitions,
mq_names,
mq,
maximun_message_size,
connection_conf: dict,
):
channel_infos = self._get_channels_index(
index=index, mq_names=mq_names, mq=mq, connection_conf=connection_conf
)
datastream = Datastream()
base_message_key = str(index)
message_key_idx = 0
count = 0
for k, v in kvs:
count += 1
el = {"k": p_dumps(k).hex(), "v": p_dumps(v).hex()}
            # roughly estimate the package size to avoid an extra serialization pass ;)
if (
datastream.get_size() + sys.getsizeof(el["k"]) + sys.getsizeof(el["v"])
>= maximun_message_size
):
print(
f"[rabbitmq._partition_send]The size of message is: {datastream.get_size()}"
)
message_key_idx += 1
message_key = base_message_key + "_" + str(message_key_idx)
self._send_kv(
name=name,
tag=tag,
data=datastream.get_data(),
channel_infos=channel_infos,
partition_size=-1,
partitions=partitions,
message_key=message_key,
)
datastream.clear()
datastream.append(el)
message_key_idx += 1
message_key = _SPLIT_.join([base_message_key, str(message_key_idx)])
self._send_kv(
name=name,
tag=tag,
data=datastream.get_data(),
channel_infos=channel_infos,
partition_size=count,
partitions=partitions,
message_key=message_key,
)
return [1]
def _get_partition_receive_func(
self, name, tag, party_id, role, party_mq_names, mq, connection_conf: dict
):
def _fn(index, kvs):
return self._partition_receive(
index,
kvs,
name,
tag,
party_id,
role,
party_mq_names,
mq,
connection_conf,
)
return _fn
def _partition_receive(
self,
index,
kvs,
name,
tag,
party_id,
role,
party_mq_names,
mq,
connection_conf: dict,
):
queue_names = party_mq_names[index][1]
channel_info = self._get_channel(
mq, queue_names, party_id, role, connection_conf
)
message_key_cache = set()
count = 0
partition_size = -1
all_data = []
for method, properties, body in channel_info.consume():
print(
f"[rabbitmq._partition_receive] method: {method}, properties: {properties}."
)
if properties.message_id != name or properties.correlation_id != tag:
# todo: fix this
channel_info.basic_ack(delivery_tag=method.delivery_tag)
print(
f"[rabbitmq._partition_receive]: require {name}.{tag}, got {properties.message_id}.{properties.correlation_id}"
)
continue
if properties.content_type == "application/json":
message_key = properties.headers["message_key"]
if message_key in message_key_cache:
print(
f"[rabbitmq._partition_receive] message_key : {message_key} is duplicated"
)
channel_info.basic_ack(delivery_tag=method.delivery_tag)
continue
message_key_cache.add(message_key)
if properties.headers["partition_size"] >= 0:
partition_size = properties.headers["partition_size"]
data = json.loads(body)
data_iter = (
(p_loads(bytes.fromhex(el["k"])), p_loads(bytes.fromhex(el["v"])))
for el in data
)
count += len(data)
print(f"[rabbitmq._partition_receive] count: {count}")
all_data.extend(data_iter)
channel_info.basic_ack(delivery_tag=method.delivery_tag)
if count == partition_size:
channel_info.cancel()
return all_data
else:
                raise ValueError(
                    f"[rabbitmq._partition_receive]properties.content_type is {properties.content_type}, but must be application/json"
                )
|
the-stack_0_5497 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import h5py
import json
import os
import scipy.misc
import sys
import cityscapesscripts.evaluation.instances2dict_with_polygons as cs
import utils.segms as segms_util
import utils.boxes as bboxs_util
def parse_args():
parser = argparse.ArgumentParser(description='Convert dataset')
parser.add_argument(
'--dataset', help="cocostuff, cityscapes", default=None, type=str)
parser.add_argument(
'--outdir', help="output dir for json files", default=None, type=str)
parser.add_argument(
'--datadir', help="data dir for annotations to be converted",
default=None, type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
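# Example invocation (editor's sketch; the script name and paths are placeholders):
#   python convert_datasets_to_coco.py --dataset cityscapes_instance_only \
#       --datadir /data/cityscapes --outdir /data/cityscapes/annotations_coco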
def convert_coco_stuff_mat(data_dir, out_dir):
"""Convert to png and save json with path. This currently only contains
the segmentation labels for objects+stuff in cocostuff - if we need to
combine with other labels from original COCO that will be a TODO."""
sets = ['train', 'val']
categories = []
json_name = 'coco_stuff_%s.json'
ann_dict = {}
for data_set in sets:
file_list = os.path.join(data_dir, '%s.txt')
images = []
with open(file_list % data_set) as f:
for img_id, img_name in enumerate(f):
img_name = img_name.replace('coco', 'COCO').strip('\n')
image = {}
mat_file = os.path.join(
data_dir, 'annotations/%s.mat' % img_name)
data = h5py.File(mat_file, 'r')
labelMap = data.get('S')
if len(categories) == 0:
labelNames = data.get('names')
for idx, n in enumerate(labelNames):
categories.append(
{"id": idx, "name": ''.join(chr(i) for i in data[
n[0]])})
ann_dict['categories'] = categories
scipy.misc.imsave(
os.path.join(data_dir, img_name + '.png'), labelMap)
image['width'] = labelMap.shape[0]
image['height'] = labelMap.shape[1]
image['file_name'] = img_name
image['seg_file_name'] = img_name
image['id'] = img_id
images.append(image)
ann_dict['images'] = images
print("Num images: %s" % len(images))
with open(os.path.join(out_dir, json_name % data_set), 'wb') as outfile:
outfile.write(json.dumps(ann_dict))
# for Cityscapes
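# Cityscapes encodes instance-level ids as labelId * 1000 + instance number;
# ids below 1000 are plain semantic label ids, which is what getLabelID recovers.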
def getLabelID(self, instID):
if (instID < 1000):
return instID
else:
return int(instID / 1000)
def convert_cityscapes_instance_only(
data_dir, out_dir):
"""Convert from cityscapes format to COCO instance seg format - polygons"""
sets = [
'gtFine_val',
# 'gtFine_train',
# 'gtFine_test',
# 'gtCoarse_train',
# 'gtCoarse_val',
# 'gtCoarse_train_extra'
]
ann_dirs = [
'gtFine_trainvaltest/gtFine/val',
# 'gtFine_trainvaltest/gtFine/train',
# 'gtFine_trainvaltest/gtFine/test',
# 'gtCoarse/train',
# 'gtCoarse/train_extra',
# 'gtCoarse/val'
]
json_name = 'instancesonly_filtered_%s.json'
ends_in = '%s_polygons.json'
img_id = 0
ann_id = 0
cat_id = 1
category_dict = {}
category_instancesonly = [
'person',
'rider',
'car',
'truck',
'bus',
'train',
'motorcycle',
'bicycle',
]
for data_set, ann_dir in zip(sets, ann_dirs):
print('Starting %s' % data_set)
ann_dict = {}
images = []
annotations = []
ann_dir = os.path.join(data_dir, ann_dir)
for root, _, files in os.walk(ann_dir):
for filename in files:
if filename.endswith(ends_in % data_set.split('_')[0]):
if len(images) % 50 == 0:
print("Processed %s images, %s annotations" % (
len(images), len(annotations)))
json_ann = json.load(open(os.path.join(root, filename)))
image = {}
image['id'] = img_id
img_id += 1
image['width'] = json_ann['imgWidth']
image['height'] = json_ann['imgHeight']
image['file_name'] = filename[:-len(
ends_in % data_set.split('_')[0])] + 'leftImg8bit.png'
image['seg_file_name'] = filename[:-len(
ends_in % data_set.split('_')[0])] + \
'%s_instanceIds.png' % data_set.split('_')[0]
images.append(image)
fullname = os.path.join(root, image['seg_file_name'])
objects = cs.instances2dict_with_polygons(
[fullname], verbose=False)[fullname]
for object_cls in objects:
if object_cls not in category_instancesonly:
continue # skip non-instance categories
for obj in objects[object_cls]:
if obj['contours'] == []:
print('Warning: empty contours.')
                                continue  # skip objects with empty contours
len_p = [len(p) for p in obj['contours']]
if min(len_p) <= 4:
print('Warning: invalid contours.')
                                continue  # skip objects with too few contour points
ann = {}
ann['id'] = ann_id
ann_id += 1
ann['image_id'] = image['id']
ann['segmentation'] = obj['contours']
if object_cls not in category_dict:
category_dict[object_cls] = cat_id
cat_id += 1
ann['category_id'] = category_dict[object_cls]
ann['iscrowd'] = 0
ann['area'] = obj['pixelCount']
ann['bbox'] = bboxs_util.xyxy_to_xywh(
segms_util.polys_to_boxes(
[ann['segmentation']])).tolist()[0]
annotations.append(ann)
ann_dict['images'] = images
categories = [{"id": category_dict[name], "name": name} for name in
category_dict]
ann_dict['categories'] = categories
ann_dict['annotations'] = annotations
print("Num categories: %s" % len(categories))
print("Num images: %s" % len(images))
print("Num annotations: %s" % len(annotations))
with open(os.path.join(out_dir, json_name % data_set), 'wb') as outfile:
outfile.write(json.dumps(ann_dict))
if __name__ == '__main__':
args = parse_args()
if args.dataset == "cityscapes_instance_only":
convert_cityscapes_instance_only(args.datadir, args.outdir)
elif args.dataset == "cocostuff":
convert_coco_stuff_mat(args.datadir, args.outdir)
else:
print("Dataset not supported: %s" % args.dataset)
|
the-stack_0_5498 | import json
from web3 import Web3
from config import NUM_TRANSACTIONS
from config import DEADBEEF
from config import SHARD_IDS
web3 = Web3()
alice_key = '0x4c0883a69102937d6231471b5dbb6204fe5129617082792ae468d01a3f362318'
alice_address = web3.eth.account.privateKeyToAccount(alice_key).address.lower()[2:]
abi = json.loads('[{"constant":false,"inputs":[{"name":"_shard_ID","type":"uint256"},{"name":"_sendGas","type":"uint256"},{"name":"_sendToAddress","type":"address"},{"name":"_data","type":"bytes"}],"name":"send","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"anonymous":false,"inputs":[{"indexed":true,"name":"shard_ID","type":"uint256"},{"indexed":false,"name":"sendGas","type":"uint256"},{"indexed":false,"name":"sendFromAddress","type":"address"},{"indexed":true,"name":"sendToAddress","type":"address"},{"indexed":false,"name":"value","type":"uint256"},{"indexed":false,"name":"data","type":"bytes"},{"indexed":true,"name":"base","type":"uint256"},{"indexed":false,"name":"TTL","type":"uint256"}],"name":"SentMessage","type":"event"}]')
contract = web3.eth.contract(address='0x000000000000000000000000000000000000002A', abi=abi)
def format_transaction(tx, signed):
if isinstance(tx["data"], bytes):
data = tx["data"].hex()
else:
data = tx["data"]
return {
"gas": hex(tx["gas"]),
"gasPrice": tx["gasPrice"],
"hash": signed["hash"].hex(),
"input": data,
"nonce": tx["nonce"],
"r": hex(signed["r"]),
"s": hex(signed["s"]),
"v": hex(signed["v"]),
"to": tx["to"],
"value": hex(tx["value"]),
}
# Alice sends cross shard transactions
def gen_cross_shard_tx(nonce, shard_ID):
cross_shard_tx = contract.functions.send(shard_ID, 300000, DEADBEEF, bytes(0)).buildTransaction({ "gas": 3000000, "gasPrice": "0x2", "nonce": hex(nonce), "value": 1})
cross_shard_tx_signed = web3.eth.account.signTransaction(cross_shard_tx, alice_key)
cross_shard_tx_formatted = format_transaction(cross_shard_tx, cross_shard_tx_signed)
return cross_shard_tx_formatted
'''
# Bob sends simple transfers between account in the same shard
def gen_in_shard_tx(nonce):
private_key_bob = '0x5c0883a69102937d6231471b5dbb6204fe5129617082792ae468d01a3f362318'
address_bob = web3.eth.account.privateKeyToAccount(private_key_bob).address.lower()[2:]
in_shard_tx = {
"gas": 3000000,
"gasPrice": "0x2",
"nonce": "0x0", # we will need to overwrite this by getting the nonce from the state
"to": "0x000000000000000000000000000000000000002F",
"value": 20,
"data": "0x",
}
in_shard_tx_signed = web3.eth.account.signTransaction(in_shard_tx, private_key_bob)
in_shard_tx_formatted = format_transaction(in_shard_tx, in_shard_tx_signed)
return in_shard_tx_formatted
def gen_payloads():
private_key_alice = '0x4c0883a69102937d6231471b5dbb6204fe5129617082792ae468d01a3f362318'
address_alice = web3.eth.account.privateKeyToAccount(private_key_alice).address.lower()[2:]
payloadA = {
"fromAddress": address_alice,
"toAddress": "0x000000000000000000000000000000000000002A",
"value": 100,
"data": cross_shard_tx["data"]
}
# MessagePayload(address_alice, "0x000000000000000000000000000000000000002A", 100, cross_shard_tx["data"])
tx = []
for x in range(0, 100):
tx.append(payloadA)
return tx
'''
def gen_alice_and_bob_tx(dest_shards = None):
tx = []
if dest_shards is None:
for x in range(0, NUM_TRANSACTIONS):
tx.append(gen_cross_shard_tx(x, SHARD_IDS[x%len(SHARD_IDS)]))
else:
for x in range(0, NUM_TRANSACTIONS):
tx.append(gen_cross_shard_tx(x, dest_shards[x % len(dest_shards)]))
return tx
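# Editor's illustrative sketch (not part of the original script): build the
# default batch of signed cross-shard transactions, cycling over SHARD_IDS.
if __name__ == "__main__":
    txs = gen_alice_and_bob_tx()
    print("generated {} cross-shard transactions".format(len(txs)))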
|
the-stack_0_5499 | #!/usr/bin/env python
import os
import sys
import django
from django.conf import settings
from django.test.utils import get_runner
from django_mfa import totp
if __name__ == "__main__":
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
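    # configure a minimal standalone Django settings module (sqlite backend,
    # app templates) so the django_mfa test suite can run without a full project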
settings.configure(
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
INSTALLED_APPS=(
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django_mfa',
),
MIDDLEWARE_CLASSES=(
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
),
ROOT_URLCONF='django_mfa.urls',
STATIC_URL='/static/',
TEMPLATES=[
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'django_mfa/templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
],
)
django.setup()
TestRunner = get_runner(settings)
test_runner = TestRunner()
failures = test_runner.run_tests(["django_mfa"])
sys.exit(bool(failures)) |
the-stack_0_5505 | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import timedelta
import pytest
import cirq
import cirq.neutral_atoms as neutral_atoms
import cirq.testing
def square_device(
width: int, height: int, holes=(), max_controls=2, use_timedelta=False
) -> neutral_atoms.NeutralAtomDevice:
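    """Build a `width` x `height` grid NeutralAtomDevice, omitting any qubits listed in `holes`."""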
us = cirq.Duration(nanos=10**3) if not use_timedelta else timedelta(microseconds=1)
ms = cirq.Duration(nanos=10**6) if not use_timedelta else timedelta(microseconds=1000)
return neutral_atoms.NeutralAtomDevice( # type: ignore
measurement_duration=50 * ms, # type: ignore
gate_duration=100 * us, # type: ignore
control_radius=1.5,
max_parallel_z=3,
max_parallel_xy=3,
max_parallel_c=max_controls,
qubits=[
cirq.GridQubit(row, col)
for col in range(width)
for row in range(height)
if cirq.GridQubit(row, col) not in holes
],
)
def test_init():
d = square_device(2, 2, holes=[cirq.GridQubit(1, 1)])
us = cirq.Duration(nanos=10**3)
ms = cirq.Duration(nanos=10**6)
q00 = cirq.GridQubit(0, 0)
q01 = cirq.GridQubit(0, 1)
q10 = cirq.GridQubit(1, 0)
assert d.qubits == {q10, q00, q01}
assert d.duration_of(cirq.GateOperation(cirq.IdentityGate(1), [q00])) == 100 * us
assert d.duration_of(cirq.measure(q00)) == 50 * ms
with pytest.raises(ValueError):
_ = d.duration_of(cirq.SingleQubitGate().on(q00))
def test_metadata():
d = square_device(2, 3)
assert d.metadata.qubit_set == frozenset(
{
cirq.GridQubit(0, 0),
cirq.GridQubit(0, 1),
cirq.GridQubit(1, 0),
cirq.GridQubit(1, 1),
cirq.GridQubit(2, 0),
cirq.GridQubit(2, 1),
}
)
assert len(d.metadata.nx_graph.edges()) == 7
def test_init_timedelta():
d = square_device(2, 2, holes=[cirq.GridQubit(1, 1)], use_timedelta=True)
us = cirq.Duration(nanos=10**3)
ms = cirq.Duration(nanos=10**6)
q00 = cirq.GridQubit(0, 0)
q01 = cirq.GridQubit(0, 1)
q10 = cirq.GridQubit(1, 0)
assert d.qubits == {q10, q00, q01}
assert d.duration_of(cirq.GateOperation(cirq.IdentityGate(1), [q00])) == 100 * us
assert d.duration_of(cirq.measure(q00)) == 50 * ms
with pytest.raises(ValueError):
_ = d.duration_of(cirq.SingleQubitGate().on(q00))
def test_init_errors():
line = cirq.LineQubit.range(3)
us = cirq.Duration(nanos=10**3)
ms = cirq.Duration(nanos=10**6)
with pytest.raises(ValueError, match="Unsupported qubit type"):
_ = neutral_atoms.NeutralAtomDevice(
measurement_duration=50 * ms,
gate_duration=100 * us,
control_radius=1.5,
max_parallel_z=3,
max_parallel_xy=3,
max_parallel_c=3,
qubits=line,
)
with pytest.raises(ValueError, match="max_parallel_c must be less"):
_ = neutral_atoms.NeutralAtomDevice(
measurement_duration=50 * ms,
gate_duration=100 * us,
control_radius=1.5,
max_parallel_z=3,
max_parallel_xy=3,
max_parallel_c=4,
qubits=[cirq.GridQubit(0, 0)],
)
def test_decompose_error_deprecated():
d = square_device(2, 2, holes=[cirq.GridQubit(1, 1)])
with cirq.testing.assert_deprecated('ConvertToNeutralAtomGates', deadline='v0.15'):
for op in d.decompose_operation((cirq.CCZ**1.5).on(*(d.qubit_list()))):
d.validate_operation(op)
def test_validate_gate_errors():
d = square_device(1, 1)
d.validate_gate(cirq.IdentityGate(4))
with pytest.raises(ValueError, match="controlled gates must have integer exponents"):
d.validate_gate(cirq.CNotPowGate(exponent=0.5))
with pytest.raises(ValueError, match="Unsupported gate"):
d.validate_gate(cirq.SingleQubitGate())
def test_validate_operation_errors():
d = square_device(3, 3)
class bad_op(cirq.Operation):
def bad_op(self):
pass
def qubits(self):
pass
def with_qubits(self, new_qubits):
pass
with pytest.raises(ValueError, match="Unsupported operation"):
d.validate_operation(bad_op())
not_on_device_op = cirq.parallel_gate_op(
cirq.X, *[cirq.GridQubit(row, col) for col in range(4) for row in range(4)]
)
with pytest.raises(ValueError, match="Qubit not on device"):
d.validate_operation(not_on_device_op)
with pytest.raises(ValueError, match="Too many qubits acted on in parallel by"):
d.validate_operation(cirq.CCX.on(*d.qubit_list()[0:3]))
with pytest.raises(ValueError, match="are too far away"):
d.validate_operation(cirq.CZ.on(cirq.GridQubit(0, 0), cirq.GridQubit(2, 2)))
with pytest.raises(ValueError, match="Unsupported operation"):
d.validate_operation(cirq.parallel_gate_op(cirq.Z, *d.qubits))
with pytest.raises(ValueError, match="Unsupported operation"):
d.validate_operation(cirq.parallel_gate_op(cirq.X, *d.qubit_list()[1:]))
with pytest.raises(ValueError, match="Unsupported operation"):
d.validate_operation(
cirq.ParallelGate(cirq.MeasurementGate(1, key='a'), 4)(*d.qubit_list()[:4])
)
def test_validate_moment_errors():
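    # Moments that mix non-identical simultaneous gates, exceed the parallelism
    # limits, or combine measurements with other operations must be rejected.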
d = square_device(3, 3)
q00 = cirq.GridQubit(0, 0)
q01 = cirq.GridQubit(0, 1)
q10 = cirq.GridQubit(1, 0)
q11 = cirq.GridQubit(1, 1)
q12 = cirq.GridQubit(1, 2)
q02 = cirq.GridQubit(0, 2)
q04 = cirq.GridQubit(0, 4)
q03 = cirq.GridQubit(0, 3)
q20 = cirq.GridQubit(2, 0)
q21 = cirq.GridQubit(2, 1)
m = cirq.Moment([cirq.Z.on(q00), (cirq.Z**2).on(q01)])
with pytest.raises(ValueError, match="Non-identical simultaneous "):
d.validate_moment(m)
m = cirq.Moment([cirq.X.on(q00), cirq.Y.on(q01)])
with pytest.raises(ValueError, match="Non-identical simultaneous "):
d.validate_moment(m)
m = cirq.Moment([cirq.CNOT.on(q00, q01), cirq.CZ.on(q12, q02)])
with pytest.raises(ValueError, match="Non-identical simultaneous "):
d.validate_moment(m)
m = cirq.Moment([cirq.CNOT.on(q00, q01), cirq.CNOT.on(q12, q02)])
with pytest.raises(ValueError, match="Too many qubits acted on by controlled gates"):
d.validate_moment(m)
m = cirq.Moment([cirq.CNOT.on(q00, q01), cirq.Z.on(q02)])
with pytest.raises(
ValueError,
match="Can't perform non-controlled operations at same time as controlled operations",
):
d.validate_moment(m)
m = cirq.Moment(cirq.Z.on_each(*d.qubits))
with pytest.raises(ValueError, match="Too many simultaneous Z gates"):
d.validate_moment(m)
m = cirq.Moment(cirq.X.on_each(*(d.qubit_list()[1:])))
with pytest.raises(ValueError, match="Bad number of simultaneous XY gates"):
d.validate_moment(m)
m = cirq.Moment([cirq.MeasurementGate(1, 'a').on(q00), cirq.Z.on(q01)])
with pytest.raises(
ValueError, match="Measurements can't be simultaneous with other operations"
):
d.validate_moment(m)
d.validate_moment(cirq.Moment([cirq.X.on(q00), cirq.Z.on(q01)]))
us = cirq.Duration(nanos=10**3)
ms = cirq.Duration(nanos=10**6)
d2 = neutral_atoms.NeutralAtomDevice(
measurement_duration=50 * ms,
gate_duration=100 * us,
control_radius=1.5,
max_parallel_z=4,
max_parallel_xy=4,
max_parallel_c=4,
qubits=[cirq.GridQubit(row, col) for col in range(2) for row in range(2)],
)
m = cirq.Moment([cirq.CNOT.on(q00, q01), cirq.CNOT.on(q10, q11)])
with pytest.raises(ValueError, match="Interacting controlled operations"):
d2.validate_moment(m)
d2 = neutral_atoms.NeutralAtomDevice(
measurement_duration=50 * ms,
gate_duration=100 * us,
control_radius=1.1,
max_parallel_z=6,
max_parallel_xy=6,
max_parallel_c=6,
qubits=[cirq.GridQubit(row, col) for col in range(5) for row in range(5)],
)
m = cirq.Moment([cirq.CZ.on(q00, q01), cirq.CZ.on(q03, q04), cirq.CZ.on(q20, q21)])
d2.validate_moment(m)
m = cirq.Moment([cirq.CZ.on(q00, q01), cirq.CZ.on(q02, q03), cirq.CZ.on(q10, q11)])
with pytest.raises(ValueError, match="Interacting controlled operations"):
d2.validate_moment(m)
def test_can_add_operation_into_moment_coverage_deprecated():
with cirq.testing.assert_deprecated('can_add_operation_into_moment', deadline='v0.15', count=3):
d = square_device(2, 2)
q00 = cirq.GridQubit(0, 0)
q01 = cirq.GridQubit(0, 1)
q10 = cirq.GridQubit(1, 0)
m = cirq.Moment([cirq.X.on(q00)])
assert not d.can_add_operation_into_moment(cirq.X.on(q00), m)
assert not d.can_add_operation_into_moment(cirq.CZ.on(q01, q10), m)
assert d.can_add_operation_into_moment(cirq.Z.on(q01), m)
def test_validate_circuit_errors():
d = square_device(2, 2, max_controls=3)
q00 = cirq.GridQubit(0, 0)
q01 = cirq.GridQubit(0, 1)
q10 = cirq.GridQubit(1, 0)
q11 = cirq.GridQubit(1, 1)
c = cirq.Circuit()
c.append(cirq.parallel_gate_op(cirq.X, *d.qubits))
c.append(cirq.CCZ.on(q00, q01, q10))
c.append(cirq.parallel_gate_op(cirq.Z, q00, q01, q10))
m = cirq.Moment(cirq.X.on_each(q00, q01) + cirq.Z.on_each(q10, q11))
c.append(m)
c.append(cirq.measure_each(*d.qubits))
d.validate_circuit(c)
c.append(cirq.Moment([cirq.X.on(q00)]))
with pytest.raises(ValueError, match="Non-empty moment after measurement"):
d.validate_circuit(c)
def test_repr():
d = square_device(1, 1)
cirq.testing.assert_equivalent_repr(d)
def test_str():
assert (
str(square_device(2, 2)).strip()
== """
(0, 0)───(0, 1)
│ │
│ │
(1, 0)───(1, 1)
""".strip()
)
def test_repr_pretty():
cirq.testing.assert_repr_pretty(
square_device(2, 2),
"""
(0, 0)───(0, 1)
│ │
│ │
(1, 0)───(1, 1)
""".strip(),
)
cirq.testing.assert_repr_pretty(square_device(2, 2), "cirq.NeutralAtomDevice(...)", cycle=True)
def test_qubit_set_deprecated():
with cirq.testing.assert_deprecated('qubit_set', deadline='v0.15'):
assert square_device(2, 2).qubit_set() == frozenset(cirq.GridQubit.square(2, 0, 0))
|
the-stack_0_5506 | # Copyright 2019 U.C. Berkeley RISE Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import time
from anna.client import AnnaTcpClient
from anna.zmq_util import SocketCache
import zmq
from cloudburst.server import utils as sutils
from cloudburst.server.executor import utils
from cloudburst.server.executor.call import exec_function, exec_dag_function
from cloudburst.server.executor.pin import pin, unpin
from cloudburst.server.executor.user_library import CloudburstUserLibrary
from cloudburst.shared.anna_ipc_client import AnnaIpcClient
from cloudburst.shared.proto.cloudburst_pb2 import (
DagSchedule,
DagTrigger,
MULTIEXEC # Cloudburst's execution types
)
from cloudburst.shared.proto.internal_pb2 import (
CPU, GPU, # Cloudburst's executor types
ExecutorStatistics,
ThreadStatus,
)
REPORT_THRESH = 5
BATCH_SIZE_MAX = 20
def executor(ip, mgmt_ip, schedulers, thread_id):
# logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(asctime)s %(message)s')
logging.basicConfig(filename='log_executor.txt', level=logging.INFO, filemode="w",
format='%(asctime)s %(message)s')
# Check what resources we have access to, set as an environment variable.
if os.getenv('EXECUTOR_TYPE', 'CPU') == 'GPU':
exec_type = GPU
else:
exec_type = CPU
context = zmq.Context(1)
poller = zmq.Poller()
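    # One PULL socket per message type; each binds to its base port offset by
    # thread_id, so every executor thread gets its own set of ports.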
pin_socket = context.socket(zmq.PULL)
pin_socket.bind(sutils.BIND_ADDR_TEMPLATE % (sutils.PIN_PORT + thread_id))
unpin_socket = context.socket(zmq.PULL)
unpin_socket.bind(sutils.BIND_ADDR_TEMPLATE % (sutils.UNPIN_PORT +
thread_id))
exec_socket = context.socket(zmq.PULL)
exec_socket.bind(sutils.BIND_ADDR_TEMPLATE % (sutils.FUNC_EXEC_PORT +
thread_id))
dag_queue_socket = context.socket(zmq.PULL)
dag_queue_socket.bind(sutils.BIND_ADDR_TEMPLATE % (sutils.DAG_QUEUE_PORT
+ thread_id))
dag_exec_socket = context.socket(zmq.PULL)
dag_exec_socket.bind(sutils.BIND_ADDR_TEMPLATE % (sutils.DAG_EXEC_PORT
+ thread_id))
self_depart_socket = context.socket(zmq.PULL)
self_depart_socket.bind(sutils.BIND_ADDR_TEMPLATE %
(sutils.SELF_DEPART_PORT + thread_id))
pusher_cache = SocketCache(context, zmq.PUSH)
poller = zmq.Poller()
poller.register(pin_socket, zmq.POLLIN)
poller.register(unpin_socket, zmq.POLLIN)
poller.register(exec_socket, zmq.POLLIN)
poller.register(dag_queue_socket, zmq.POLLIN)
poller.register(dag_exec_socket, zmq.POLLIN)
poller.register(self_depart_socket, zmq.POLLIN)
# If the management IP is set to None, that means that we are running in
# local mode, so we use a regular AnnaTcpClient rather than an IPC client.
has_ephe = False
if mgmt_ip:
if 'STORAGE_OR_DEFAULT' in os.environ and os.environ['STORAGE_OR_DEFAULT'] == '0':
client = AnnaTcpClient(os.environ['ROUTE_ADDR'], ip, local=False, offset=thread_id)
has_ephe = True
else:
client = AnnaIpcClient(thread_id, context)
# force_remote_anna = 1
# if 'FORCE_REMOTE' in os.environ:
# force_remote_anna = int(os.environ['FORCE_REMOTE'])
# if force_remote_anna == 0: # remote anna only
# client = AnnaTcpClient(os.environ['ROUTE_ADDR'], ip, local=False, offset=thread_id)
# elif force_remote_anna == 1: # anna cache
# client = AnnaIpcClient(thread_id, context)
# elif force_remote_anna == 2: # control both cache and remote anna
# remote_client = AnnaTcpClient(os.environ['ROUTE_ADDR'], ip, local=False, offset=thread_id)
# cache_client = AnnaIpcClient(thread_id, context)
# client = cache_client
# user_library = CloudburstUserLibrary(context, pusher_cache, ip, thread_id, (cache_client, remote_client))
local = False
else:
client = AnnaTcpClient('127.0.0.1', '127.0.0.1', local=True, offset=1)
local = True
user_library = CloudburstUserLibrary(context, pusher_cache, ip, thread_id, client, has_ephe=has_ephe)
status = ThreadStatus()
status.ip = ip
status.tid = thread_id
status.running = True
status.type = exec_type
utils.push_status(schedulers, pusher_cache, status)
departing = False
# Maintains a request queue for each function pinned on this executor. Each
# function will have a set of request IDs mapped to it, and this map stores
# a schedule for each request ID.
queue = {}
# Tracks the actual function objects that are pinned to this executor.
function_cache = {}
    # Tracks the runtime cost of executing a DAG function.
runtimes = {}
# If multiple triggers are necessary for a function, track the triggers as
# we receive them. This is also used if a trigger arrives before its
# corresponding schedule.
received_triggers = {}
# Tracks when we received a function request, so we can report end-to-end
    # latency for the whole execution.
receive_times = {}
# Tracks the number of requests we are finishing for each function pinned
# here.
exec_counts = {}
# Tracks the end-to-end runtime of each DAG request for which we are the
# sink function.
dag_runtimes = {}
# A map with KVS keys and their corresponding deserialized payloads.
cache = {}
# A map which tracks the most recent DAGs for which we have finished our
# work.
finished_executions = {}
    # Whether the pinned function(s) support batching. NOTE: in local mode
    # several functions may be pinned here -- in cluster mode, there will only
    # be one pinned function per executor.
batching = False
# Internal metadata to track thread utilization.
report_start = time.time()
event_occupancy = {'pin': 0.0,
'unpin': 0.0,
'func_exec': 0.0,
'dag_queue': 0.0,
'dag_exec': 0.0}
total_occupancy = 0.0
while True:
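        # Main event loop: poll all registered sockets with a 1s timeout and
        # dispatch on whichever are ready.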
socks = dict(poller.poll(timeout=1000))
if pin_socket in socks and socks[pin_socket] == zmq.POLLIN:
work_start = time.time()
batching = pin(pin_socket, pusher_cache, client, status,
function_cache, runtimes, exec_counts, user_library,
local, batching)
utils.push_status(schedulers, pusher_cache, status)
elapsed = time.time() - work_start
event_occupancy['pin'] += elapsed
total_occupancy += elapsed
if unpin_socket in socks and socks[unpin_socket] == zmq.POLLIN:
work_start = time.time()
unpin(unpin_socket, status, function_cache, runtimes,
exec_counts)
utils.push_status(schedulers, pusher_cache, status)
elapsed = time.time() - work_start
event_occupancy['unpin'] += elapsed
total_occupancy += elapsed
if exec_socket in socks and socks[exec_socket] == zmq.POLLIN:
work_start = time.time()
# logging.info(f'Executor timer. exec_socket recv: {work_start}')
exec_function(exec_socket, client, user_library, cache,
function_cache, has_ephe=has_ephe)
user_library.close()
utils.push_status(schedulers, pusher_cache, status)
elapsed = time.time() - work_start
event_occupancy['func_exec'] += elapsed
total_occupancy += elapsed
if dag_queue_socket in socks and socks[dag_queue_socket] == zmq.POLLIN:
work_start = time.time()
logging.info(f'Executor timer. dag_queue_socket recv: {work_start}')
# In order to effectively support batching, we have to make sure we
# dequeue lots of schedules in addition to lots of triggers. Right
# now, we're not going to worry about supporting batching here,
# just on the trigger dequeue side, but we still have to dequeue
# all schedules we've received. We just process them one at a time.
while True:
schedule = DagSchedule()
try:
msg = dag_queue_socket.recv(zmq.DONTWAIT)
except zmq.ZMQError as e:
if e.errno == zmq.EAGAIN:
break # There are no more messages.
else:
raise e # Unexpected error.
schedule.ParseFromString(msg)
fname = schedule.target_function
logging.info('Received a schedule for DAG %s (%s), function %s.' %
(schedule.dag.name, schedule.id, fname))
if fname not in queue:
queue[fname] = {}
queue[fname][schedule.id] = schedule
if (schedule.id, fname) not in receive_times:
receive_times[(schedule.id, fname)] = time.time()
# In case we receive the trigger before we receive the schedule, we
# can trigger from this operation as well.
trkey = (schedule.id, fname)
fref = None
# Check to see what type of execution this function is.
for ref in schedule.dag.functions:
if ref.name == fname:
fref = ref
if (trkey in received_triggers and
((len(received_triggers[trkey]) == len(schedule.triggers))
or (fref.type == MULTIEXEC))):
triggers = list(received_triggers[trkey].values())
if fname not in function_cache:
logging.error('%s not in function cache', fname)
utils.generate_error_response(schedule, client, fname)
continue
exec_start = time.time()
# logging.info(f'Executor timer. dag_queue_socket exec_dag: {exec_start}')
# We don't support actual batching for when we receive a
# schedule before a trigger, so everything is just a batch of
# size 1 if anything.
success = exec_dag_function(pusher_cache, client,
[triggers], function_cache[fname],
[schedule], user_library,
dag_runtimes, cache, schedulers,
batching)[0]
user_library.close()
del received_triggers[trkey]
if success:
del queue[fname][schedule.id]
fend = time.time()
fstart = receive_times[(schedule.id, fname)]
runtimes[fname].append(fend - work_start)
exec_counts[fname] += 1
finished_executions[(schedule.id, fname)] = time.time()
elapsed = time.time() - work_start
event_occupancy['dag_queue'] += elapsed
total_occupancy += elapsed
if dag_exec_socket in socks and socks[dag_exec_socket] == zmq.POLLIN:
work_start = time.time()
# logging.info(f'Executor timer. dag_exec_socket recv: {work_start}')
# How many messages to dequeue -- BATCH_SIZE_MAX or 1 depending on
# the function configuration.
if batching:
count = BATCH_SIZE_MAX
else:
count = 1
trigger_keys = set()
for _ in range(count): # Dequeue count number of messages.
trigger = DagTrigger()
try:
msg = dag_exec_socket.recv(zmq.DONTWAIT)
except zmq.ZMQError as e:
if e.errno == zmq.EAGAIN: # There are no more messages.
break
else:
raise e # Unexpected error.
trigger.ParseFromString(msg)
# We have received a repeated trigger for a function that has
# already finished executing.
if trigger.id in finished_executions:
continue
fname = trigger.target_function
logging.info('Received a trigger for schedule %s, function %s.' %
(trigger.id, fname))
key = (trigger.id, fname)
trigger_keys.add(key)
if key not in received_triggers:
received_triggers[key] = {}
if (trigger.id, fname) not in receive_times:
receive_times[(trigger.id, fname)] = time.time()
received_triggers[key][trigger.source] = trigger
# Only execute the functions for which we have received a schedule.
# Everything else will wait.
for tid, fname in list(trigger_keys):
if fname not in queue or tid not in queue[fname]:
trigger_keys.remove((tid, fname))
if len(trigger_keys) == 0:
continue
fref = None
schedule = queue[fname][list(trigger_keys)[0][0]] # Pick a random schedule to check.
# Check to see what type of execution this function is.
for ref in schedule.dag.functions:
if ref.name == fname:
fref = ref
break
# Compile a list of all the trigger sets for which we have
# enough triggers.
trigger_sets = []
schedules = []
for key in trigger_keys:
if (len(received_triggers[key]) == len(schedule.triggers)) or \
fref.type == MULTIEXEC:
if fref.type == MULTIEXEC:
triggers = [trigger]
else:
triggers = list(received_triggers[key].values())
if fname not in function_cache:
logging.error('%s not in function cache', fname)
utils.generate_error_response(schedule, client, fname)
continue
trigger_sets.append(triggers)
schedule = queue[fname][key[0]]
schedules.append(schedule)
exec_start = time.time()
# logging.info(f'Executor timer. dag_exec_socket exec_dag: {exec_start}')
# Pass all of the trigger_sets into exec_dag_function at once.
            # We also include the batching variable so that we know whether to
            # pass lists into the function or not.
if len(trigger_sets) > 0:
successes = exec_dag_function(pusher_cache, client,
trigger_sets,
function_cache[fname],
schedules, user_library,
dag_runtimes, cache,
schedulers, batching)
user_library.close()
del received_triggers[key]
for key, success in zip(trigger_keys, successes):
if success:
del queue[fname][key[0]] # key[0] is trigger.id.
fend = time.time()
fstart = receive_times[key]
average_time = (fend - work_start) / len(trigger_keys)
runtimes[fname].append(average_time)
exec_counts[fname] += 1
finished_executions[(schedule.id, fname)] = time.time()
elapsed = time.time() - work_start
event_occupancy['dag_exec'] += elapsed
total_occupancy += elapsed
if self_depart_socket in socks and socks[self_depart_socket] == \
zmq.POLLIN:
# This message does not matter.
self_depart_socket.recv()
logging.info('Preparing to depart. No longer accepting requests ' +
'and clearing all queues.')
status.ClearField('functions')
status.running = False
utils.push_status(schedulers, pusher_cache, status)
departing = True
# periodically report function occupancy
report_end = time.time()
if report_end - report_start > REPORT_THRESH:
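            # Trim the KVS payload cache back down to at most 100 entries
            # before compiling the report.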
if len(cache) > 100:
extra_keys = list(cache.keys())[:len(cache) - 100]
for key in extra_keys:
del cache[key]
utilization = total_occupancy / (report_end - report_start)
status.utilization = utilization
# Periodically report my status to schedulers with the utilization
# set.
utils.push_status(schedulers, pusher_cache, status)
logging.debug('Total thread occupancy: %.6f' % (utilization))
for event in event_occupancy:
occ = event_occupancy[event] / (report_end - report_start)
logging.debug('\tEvent %s occupancy: %.6f' % (event, occ))
event_occupancy[event] = 0.0
stats = ExecutorStatistics()
for fname in runtimes:
if exec_counts[fname] > 0:
fstats = stats.functions.add()
fstats.name = fname
fstats.call_count = exec_counts[fname]
fstats.runtime.extend(runtimes[fname])
runtimes[fname].clear()
exec_counts[fname] = 0
for dname in dag_runtimes:
dstats = stats.dags.add()
dstats.name = dname
dstats.runtimes.extend(dag_runtimes[dname])
dag_runtimes[dname].clear()
# If we are running in cluster mode, mgmt_ip will be set, and we
            # will report our status and statistics to it. Otherwise, we just
            # log the statistics locally.
if mgmt_ip:
sckt = pusher_cache.get(sutils.get_statistics_report_address
(mgmt_ip))
sckt.send(stats.SerializeToString())
sckt = pusher_cache.get(utils.get_util_report_address(mgmt_ip))
sckt.send(status.SerializeToString())
else:
logging.info(stats)
status.ClearField('utilization')
report_start = time.time()
total_occupancy = 0.0
# Periodically clear any old functions we have cached that we are
# no longer accepting requests for.
del_list = []
for fname in queue:
if len(queue[fname]) == 0 and fname not in status.functions:
del_list.append(fname)
del function_cache[fname]
del runtimes[fname]
del exec_counts[fname]
for fname in del_list:
del queue[fname]
del_list = []
for tid in finished_executions:
if (time.time() - finished_executions[tid]) > 10:
del_list.append(tid)
for tid in del_list:
del finished_executions[tid]
# If we are departing and have cleared our queues, let the
# management server know, and exit the process.
if departing and len(queue) == 0:
sckt = pusher_cache.get(utils.get_depart_done_addr(mgmt_ip))
sckt.send_string(ip)
# We specifically pass 1 as the exit code when ending our
# process so that the wrapper script does not restart us.
sys.exit(1)
if __name__ == '__main__':
if len(sys.argv) > 1:
conf_file = sys.argv[1]
else:
conf_file = 'conf/cloudburst-config.yml'
conf = sutils.load_conf(conf_file)
exec_conf = conf['executor']
executor(conf['ip'], conf['mgmt_ip'], exec_conf['scheduler_ips'],
int(exec_conf['thread_id']))
|
the-stack_0_5507 | #!/usr/bin/python2.7
"""Makes dictionary file from mozc files.
How to use this tool:
$ git clone https://github.com/google/mozc.git
$ tools/make_dictionary_file.py mozc/src/data/dictionary_oss/dictionary*.txt > app/japanese_name_location_dict.txt
"""
import sys
def make_dictionary(input_file_names, output_file_name, numbers):
"""Makes dictionary and writes it to output_file_name.
    Args:
      input_file_names: a list of input dictionary file names
      output_file_name: path of the dictionary file to write
      numbers: ids of the parts of speech to keep (e.g. given names, locations)
Output format:
kanji '\t' yomigana(hiragana) '\n'
kanji '\t' yomigana(hiragana) '\n' ...
"""
yomigana_list = []
for input_file_name in input_file_names:
with open(input_file_name, 'r') as input_file:
for line in input_file:
line = line.rstrip()
split_line = line.split('\t')
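                # Tab-separated columns: yomigana, id1, id2, (unused, presumably cost), kanji.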
id1 = int(split_line[1])
id2 = int(split_line[2])
# e.g. (id1 == id2 == 1845) means "given name"
if id1 in numbers and id1 == id2:
yomigana = split_line[0]
kanji = split_line[4]
yomigana_list.append(kanji + '\t' + yomigana + '\n')
with open(output_file_name, 'w') as output_file:
output_file.writelines(yomigana_list)
def make_jp_name_location_dictionary(input_file_names):
"""Makes japanese name and location dictionary."""
# 1845: id for given names in mozc dictionary
# 1846: id for family names in mozc dictionary
# 1847 ~ 1850: ids for location names in mozc dictionary
numbers = [1845, 1846, 1847, 1848, 1849, 1850]
make_dictionary(input_file_names, 'app/japanese_name_location_dict.txt', numbers)
def main():
dictionaries = sys.argv[1:]
make_jp_name_location_dictionary(dictionaries)
if __name__ == '__main__':
main()
|
the-stack_0_5509 | from os import path
import torch
from torch import tensor
import numpy as np
import string
import linecache
class data:
# Assume the data is of this form: SpeakerId Text|AddresseeId Text
def __init__(self, params, voc):
self.params = params
self.voc = voc
# EOS: End of source, start of target
self.EOS = 1
# EOT: End of target
self.EOT = 2
self.padding = 0 # Not used, just a reminder
        self.UNK = params.UNK+params.special_word  # self.UNK = 3
def encode(self, tokens, batch_size = 2, mode = "train"):
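        # Map tokens to vocabulary ids offset by the reserved special-word count;
        # raw words missing from the vocabulary fall back to UNK.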
ids = []
for token in tokens:
if mode == "decode" and batch_size == 1:
### For raw-word data:
try:
ids.append(self.voc[token]+self.params.special_word)
except KeyError:
ids.append(self.UNK)
###--------------------
else:
### For testing data (numbering starts from 1, not 0):
ids.append(int(token)-1+self.params.special_word)
### For data that is already tokenized and transferred to ids:
# ids.append(int(token)+self.params.special_word)
return ids
def read_batch(self, file, num, mode='train_or_test'):
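        # Assemble one zero-padded batch of source/target id matrices plus
        # speaker and addressee labels.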
origin = []
sources = np.zeros((self.params.batch_size, self.params.source_max_length+1)) #batch_size*50
targets = np.zeros((self.params.batch_size, self.params.source_max_length+1)) #batch_size*50
speaker_label = -np.ones(self.params.batch_size) #all speaker IDs are set to -1
addressee_label = -np.ones(self.params.batch_size)
l_s_set = set()
l_t_set = set()
END=0
a=0
for i in range(self.params.batch_size):
if mode == "decode" and self.params.batch_size == 1:
line = file.strip().split("|")
else:
line = linecache.getline(file,num*self.params.batch_size+i+1).strip().split("|")
i-=a #to adjust for skipped lines
if line == ['']:
END = 1
break
s = line[-2].split()[:self.params.source_max_length]
t = line[-1].split()[:self.params.target_max_length]
#skipping lines when Speaker or Addressee speech is empty
if s[1:]==[]: #if only one word in Source (i.e Speaker ID)
a+=1
continue
elif t[1:]==[] and mode!='decode': #if only one word in Target (i.e Addressee ID) AND mode!='decode'
a+=1
continue
if self.params.SpeakerMode or self.params.AddresseeMode:
source=self.encode(s[1:], self.params.batch_size, mode) #encoding speech of the speaker
target=[self.EOS]+self.encode(t[1:], self.params.batch_size, mode)+[self.EOT] #encoding speech of the addressee
else:
source=self.encode(s[0:], self.params.batch_size, mode) #encoding speech of the speaker
target=[self.EOS]+self.encode(t[0:], self.params.batch_size, mode)+[self.EOT] #encoding speech of the addressee
l_s=len(source) #length of Source
l_t=len(target) #length of Target
l_s_set.add(l_s)
l_t_set.add(l_t)
### If the data contains words, not numbers:
# origin.append(' '.join(s[1:]))
origin.append(source)
sources[i, :l_s]=source #last few elements will be 0
targets[i, :l_t]=target #last few elements will be 0
if mode!='decode':
try:
speaker_label[i]=int(s[0])-1 #speaker id (zero-indexed)
addressee_label[i]=int(t[0])-1 #addressee id (zero-indexed)
except:
print('Persona id cannot be transferred to numbers')
i+=1
try:
max_l_s=max(l_s_set) #length of longest Source sentence in the batch
max_l_t=max(l_t_set) #length of longest Target sentence in the batch
except ValueError:
return END,None,None,None,None,None,None,None
if max_l_s == 0:
return END,None,None,None,None,None,None,None
elif max_l_t == 2 and mode != 'decode':
return END,None,None,None,None,None,None,None
        sources=sources[:i, : max_l_s] #cutting everything beyond max_l_s
        targets=targets[:i, : max_l_t] #cutting everything beyond max_l_t
speaker_label=speaker_label[:i]
addressee_label=addressee_label[:i]
        length_s=(sources!=0).sum(1) #batch_size; each element is the number of non-padding tokens in that sample's source
mask_t=np.ones(targets.shape)*(targets!=0) # batch_size*max_l_t; 1 in place where the words exist in target, elsewhere 0
        token_num=mask_t[:,1:].sum() #total number of target tokens in the batch (excluding the leading EOS position)
return END,tensor(sources).long(),tensor(targets).long(),tensor(speaker_label).long(),tensor(addressee_label).long(),tensor(length_s).long(),token_num,origin
|
the-stack_0_5510 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2017-2019 The Raven Core developers
# Copyright (c) 2020-2021 The Hive Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet accounts properly when there is a double-spend conflict."""
from test_framework.test_framework import HiveTestFramework
from test_framework.util import disconnect_nodes, assert_equal, Decimal, sync_blocks, find_output, connect_nodes
class TxnMallTest(HiveTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.extra_args = [["-maxreorg=10000"], ["-maxreorg=10000"], ["-maxreorg=10000"], ["-maxreorg=10000"]]
def add_options(self, parser):
parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
# Start with split network:
super().setup_network()
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
def run_test(self):
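        # Split the network, double-spend the foo/bar funding on the isolated
        # side, then reconnect and check that balances reflect the double-spend.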
# All nodes should start with 125,000 HVN:
starting_balance = 125000
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
# Assign coins to foo and bar accounts:
node0_address_foo = self.nodes[0].getnewaddress("foo")
fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 121900)
fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
node0_address_bar = self.nodes[0].getnewaddress("bar")
fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 2900)
fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
assert_equal(self.nodes[0].getbalance(""),
starting_balance - 121900 - 2900 + fund_foo_tx["fee"] + fund_bar_tx["fee"])
# Coins are sent to node1_address
node1_address = self.nodes[1].getnewaddress("from0")
        # First: use raw transaction API to send 124000 HVN to node1_address,
# but don't broadcast:
doublespend_fee = Decimal('-.02')
rawtx_input_0 = {"txid": fund_foo_txid, "vout": find_output(self.nodes[0], fund_foo_txid, 121900)}
rawtx_input_1 = {"txid": fund_bar_txid, "vout": find_output(self.nodes[0], fund_bar_txid, 2900)}
inputs = [rawtx_input_0, rawtx_input_1]
change_address = self.nodes[0].getnewaddress()
outputs = {node1_address: 124000, change_address: 124800 - 124000 + doublespend_fee}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
doublespend = self.nodes[0].signrawtransaction(rawtx)
assert_equal(doublespend["complete"], True)
        # Create two spends from the foo and bar accounts (4000 and 2000 HVN)
txid1 = self.nodes[0].sendfrom("foo", node1_address, 4000, 0)
txid2 = self.nodes[0].sendfrom("bar", node1_address, 2000, 0)
# Have node0 mine a block:
if self.options.mine_block:
self.nodes[0].generate(1)
sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
        # Node0's balance should be starting balance, plus 5000 HVN for another
        # matured block, minus 4000, minus 2000, and minus transaction fees:
expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
if self.options.mine_block: expected += 5000
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
# foo and bar accounts should be debited:
assert_equal(self.nodes[0].getbalance("foo", 0), 121900+tx1["amount"]+tx1["fee"])
assert_equal(self.nodes[0].getbalance("bar", 0), 2900+tx2["amount"]+tx2["fee"])
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
# Node1's "from0" balance should be both transaction amounts:
assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"]+tx2["amount"]))
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Now give doublespend and its parents to miner:
self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
doublespend_txid = self.nodes[2].sendrawtransaction(doublespend["hex"])
# ... mine a block...
self.nodes[2].generate(1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].generate(1) # Mine another block to make sure we sync
sync_blocks(self.nodes)
assert_equal(self.nodes[0].gettransaction(doublespend_txid)["confirmations"], 2)
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Both transactions should be conflicted
assert_equal(tx1["confirmations"], -2)
assert_equal(tx2["confirmations"], -2)
        # Node0's total balance should be starting balance, plus 10000 HVN for
        # two more matured blocks, minus 124000 for the double-spend, plus fees (which are
# negative):
expected = starting_balance + 10000 - 124000 + fund_foo_tx["fee"] + fund_bar_tx["fee"] + doublespend_fee
assert_equal(self.nodes[0].getbalance(), expected)
assert_equal(self.nodes[0].getbalance("*"), expected)
# Final "" balance is starting_balance - amount moved to accounts - doublespend + subsidies +
# fees (which are negative)
assert_equal(self.nodes[0].getbalance("foo"), 121900)
assert_equal(self.nodes[0].getbalance("bar"), 2900)
assert_equal(self.nodes[0].getbalance(""), starting_balance
-121900
- 2900
-124000
+ 10000
+ fund_foo_tx["fee"]
+ fund_bar_tx["fee"]
+ doublespend_fee)
# Node1's "from0" account balance should be just the doublespend:
assert_equal(self.nodes[1].getbalance("from0"), 124000)
if __name__ == '__main__':
TxnMallTest().main()
|
the-stack_0_5511 | # Copyright (c) 2022, Rahib Hassan and Contributors
# See license.txt
import frappe
import unittest
def create_item(item_code):
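    # Return the existing Item if one exists; otherwise create it with opening
    # stock in the default warehouse.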
if frappe.db.exists('Item', item_code):
return frappe.get_doc('Item', item_code)
item = frappe.get_doc({
'doctype': 'Item',
'item_code': item_code,
'item_name': item_code,
'maintain_stock': 1,
'default_warehouse': 'All Warehouse',
'opening_stock': 100,
'valuation_rate': 200
}).insert()
return item
class TestItem(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
frappe.db.rollback()
def test_stock_entry_creation(self):
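        # Creating an Item with opening stock should produce a Material Receipt
        # stock entry that references it.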
item = create_item("Iron")
stock_entries = frappe.db.get_list('Stock Entry', {'stock_entry_type': 'Material Receipt'})
for d in stock_entries:
child_entry = frappe.db.get_list('Stock Entry Item', {'parent': d.name}, ['item_code'])
if child_entry[0].item_code == 'Iron':
return
frappe.throw("Stock Entry not created") |
the-stack_0_5512 | import torch
import torchvision.models
from torchvision.ops import MultiScaleRoIAlign
from torchvision.models.detection.rpn import AnchorGenerator, RPNHead, RegionProposalNetwork
from torchvision.models.detection.roi_heads import RoIHeads
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor, TwoMLPHead
import unittest
class Tester(unittest.TestCase):
def _make_empty_sample(self, add_masks=False, add_keypoints=False):
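        # Build one random image paired with an empty target (zero boxes),
        # optionally with empty masks or keypoints.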
images = [torch.rand((3, 100, 100), dtype=torch.float32)]
boxes = torch.zeros((0, 4), dtype=torch.float32)
negative_target = {"boxes": boxes,
"labels": torch.zeros(0, dtype=torch.int64),
"image_id": 4,
"area": (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0]),
"iscrowd": torch.zeros((0,), dtype=torch.int64)}
if add_masks:
negative_target["masks"] = torch.zeros(0, 100, 100, dtype=torch.uint8)
if add_keypoints:
negative_target["keypoints"] = torch.zeros(0, 17, 3, dtype=torch.float32)
targets = [negative_target]
return images, targets
def test_targets_to_anchors(self):
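        # With an empty target, every anchor should be labeled background and
        # matched to an all-zero ground-truth box.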
_, targets = self._make_empty_sample()
anchors = [torch.randint(-50, 50, (3, 4), dtype=torch.float32)]
anchor_sizes = ((32,), (64,), (128,), (256,), (512,))
aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)
rpn_anchor_generator = AnchorGenerator(
anchor_sizes, aspect_ratios
)
rpn_head = RPNHead(4, rpn_anchor_generator.num_anchors_per_location()[0])
head = RegionProposalNetwork(
rpn_anchor_generator, rpn_head,
0.5, 0.3,
256, 0.5,
2000, 2000, 0.7)
labels, matched_gt_boxes = head.assign_targets_to_anchors(anchors, targets)
self.assertEqual(labels[0].sum(), 0)
self.assertEqual(labels[0].shape, torch.Size([anchors[0].shape[0]]))
self.assertEqual(labels[0].dtype, torch.float32)
self.assertEqual(matched_gt_boxes[0].sum(), 0)
self.assertEqual(matched_gt_boxes[0].shape, anchors[0].shape)
self.assertEqual(matched_gt_boxes[0].dtype, torch.float32)
def test_assign_targets_to_proposals(self):
proposals = [torch.randint(-50, 50, (20, 4), dtype=torch.float32)]
gt_boxes = [torch.zeros((0, 4), dtype=torch.float32)]
gt_labels = [torch.tensor([[0]], dtype=torch.int64)]
box_roi_pool = MultiScaleRoIAlign(
featmap_names=['0', '1', '2', '3'],
output_size=7,
sampling_ratio=2)
resolution = box_roi_pool.output_size[0]
representation_size = 1024
box_head = TwoMLPHead(
4 * resolution ** 2,
representation_size)
representation_size = 1024
box_predictor = FastRCNNPredictor(
representation_size,
2)
roi_heads = RoIHeads(
# Box
box_roi_pool, box_head, box_predictor,
0.5, 0.5,
512, 0.25,
None,
0.05, 0.5, 100)
matched_idxs, labels = roi_heads.assign_targets_to_proposals(proposals, gt_boxes, gt_labels)
self.assertEqual(matched_idxs[0].sum(), 0)
self.assertEqual(matched_idxs[0].shape, torch.Size([proposals[0].shape[0]]))
self.assertEqual(matched_idxs[0].dtype, torch.int64)
self.assertEqual(labels[0].sum(), 0)
self.assertEqual(labels[0].shape, torch.Size([proposals[0].shape[0]]))
self.assertEqual(labels[0].dtype, torch.int64)
def test_forward_negative_sample_frcnn(self):
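        # A forward pass on a sample with no ground-truth boxes should yield
        # zero box-regression losses.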
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(
num_classes=2, min_size=100, max_size=100)
images, targets = self._make_empty_sample()
loss_dict = model(images, targets)
self.assertEqual(loss_dict["loss_box_reg"], torch.tensor(0.))
self.assertEqual(loss_dict["loss_rpn_box_reg"], torch.tensor(0.))
def test_forward_negative_sample_mrcnn(self):
model = torchvision.models.detection.maskrcnn_resnet50_fpn(
num_classes=2, min_size=100, max_size=100)
images, targets = self._make_empty_sample(add_masks=True)
loss_dict = model(images, targets)
self.assertEqual(loss_dict["loss_box_reg"], torch.tensor(0.))
self.assertEqual(loss_dict["loss_rpn_box_reg"], torch.tensor(0.))
self.assertEqual(loss_dict["loss_mask"], torch.tensor(0.))
def test_forward_negative_sample_krcnn(self):
model = torchvision.models.detection.keypointrcnn_resnet50_fpn(
num_classes=2, min_size=100, max_size=100)
images, targets = self._make_empty_sample(add_keypoints=True)
loss_dict = model(images, targets)
self.assertEqual(loss_dict["loss_box_reg"], torch.tensor(0.))
self.assertEqual(loss_dict["loss_rpn_box_reg"], torch.tensor(0.))
self.assertEqual(loss_dict["loss_keypoint"], torch.tensor(0.))
if __name__ == '__main__':
unittest.main()
|
the-stack_0_5514 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from mock import Mock
from mock import patch
from airflow import configuration
from airflow.contrib.hooks.jira_hook import JiraHook
from airflow import models
from airflow.utils import db
jira_client_mock = Mock(
name="jira_client"
)
class TestJiraHook(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
db.merge_conn(
models.Connection(
conn_id='jira_default', conn_type='jira',
host='https://localhost/jira/', port=443,
extra='{"verify": "False", "project": "AIRFLOW"}'))
@patch("airflow.contrib.hooks.jira_hook.JIRA", autospec=True,
return_value=jira_client_mock)
def test_jira_client_connection(self, jira_mock):
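        # Instantiating the hook should construct a JIRA client from the stored
        # jira_default connection.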
jira_hook = JiraHook()
assert jira_mock.called
self.assertIsInstance(jira_hook.client, Mock)
self.assertEqual(jira_hook.client.name, jira_mock.return_value.name)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_5516 | import unittest
from test import support
import gc
import weakref
import operator
import copy
import pickle
from random import randrange, shuffle
import warnings
import collections
import collections.abc
import itertools
class PassThru(Exception):
pass
def check_pass_thru():
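    # A generator that raises PassThru as soon as it is iterated; used to check
    # that exceptions propagate out of set operations.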
raise PassThru
yield 1
class BadCmp:
def __hash__(self):
return 1
def __eq__(self, other):
raise RuntimeError
class ReprWrapper:
'Used to test self-referential repr() calls'
def __repr__(self):
return repr(self.value)
class HashCountingInt(int):
'int-like object that counts the number of times __hash__ is called'
def __init__(self, *args):
self.hash_count = 0
def __hash__(self):
self.hash_count += 1
return int.__hash__(self)
class TestJointOps:
# Tests common to both set and frozenset
def setUp(self):
self.word = word = 'simsalabim'
self.otherword = 'madagascar'
self.letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
self.s = self.thetype(word)
self.d = dict.fromkeys(word)
def test_new_or_init(self):
self.assertRaises(TypeError, self.thetype, [], 2)
self.assertRaises(TypeError, set().__init__, a=1)
def test_uniquification(self):
actual = sorted(self.s)
expected = sorted(self.d)
self.assertEqual(actual, expected)
self.assertRaises(PassThru, self.thetype, check_pass_thru())
self.assertRaises(TypeError, self.thetype, [[]])
def test_len(self):
self.assertEqual(len(self.s), len(self.d))
def test_contains(self):
for c in self.letters:
self.assertEqual(c in self.s, c in self.d)
self.assertRaises(TypeError, self.s.__contains__, [[]])
s = self.thetype([frozenset(self.letters)])
self.assertIn(self.thetype(self.letters), s)
def test_union(self):
u = self.s.union(self.otherword)
for c in self.letters:
self.assertEqual(c in u, c in self.d or c in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(u), self.basetype)
self.assertRaises(PassThru, self.s.union, check_pass_thru())
self.assertRaises(TypeError, self.s.union, [[]])
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').union(C('cdc')), set('abcd'))
self.assertEqual(self.thetype('abcba').union(C('efgfe')), set('abcefg'))
self.assertEqual(self.thetype('abcba').union(C('ccb')), set('abc'))
self.assertEqual(self.thetype('abcba').union(C('ef')), set('abcef'))
self.assertEqual(self.thetype('abcba').union(C('ef'), C('fg')), set('abcefg'))
# Issue #6573
x = self.thetype()
self.assertEqual(x.union(set([1]), x, set([2])), self.thetype([1, 2]))
def test_or(self):
i = self.s.union(self.otherword)
self.assertEqual(self.s | set(self.otherword), i)
self.assertEqual(self.s | frozenset(self.otherword), i)
try:
self.s | self.otherword
except TypeError:
pass
else:
self.fail("s|t did not screen-out general iterables")
def test_intersection(self):
i = self.s.intersection(self.otherword)
for c in self.letters:
self.assertEqual(c in i, c in self.d and c in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(i), self.basetype)
self.assertRaises(PassThru, self.s.intersection, check_pass_thru())
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').intersection(C('cdc')), set('cc'))
self.assertEqual(self.thetype('abcba').intersection(C('efgfe')), set(''))
self.assertEqual(self.thetype('abcba').intersection(C('ccb')), set('bc'))
self.assertEqual(self.thetype('abcba').intersection(C('ef')), set(''))
self.assertEqual(self.thetype('abcba').intersection(C('cbcf'), C('bag')), set('b'))
s = self.thetype('abcba')
z = s.intersection()
if self.thetype == frozenset():
self.assertEqual(id(s), id(z))
else:
self.assertNotEqual(id(s), id(z))
def test_isdisjoint(self):
def f(s1, s2):
'Pure python equivalent of isdisjoint()'
return not set(s1).intersection(s2)
for larg in '', 'a', 'ab', 'abc', 'ababac', 'cdc', 'cc', 'efgfe', 'ccb', 'ef':
s1 = self.thetype(larg)
for rarg in '', 'a', 'ab', 'abc', 'ababac', 'cdc', 'cc', 'efgfe', 'ccb', 'ef':
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s2 = C(rarg)
actual = s1.isdisjoint(s2)
expected = f(s1, s2)
self.assertEqual(actual, expected)
self.assertTrue(actual is True or actual is False)
def test_and(self):
i = self.s.intersection(self.otherword)
self.assertEqual(self.s & set(self.otherword), i)
self.assertEqual(self.s & frozenset(self.otherword), i)
try:
self.s & self.otherword
except TypeError:
pass
else:
self.fail("s&t did not screen-out general iterables")
def test_difference(self):
i = self.s.difference(self.otherword)
for c in self.letters:
self.assertEqual(c in i, c in self.d and c not in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(i), self.basetype)
self.assertRaises(PassThru, self.s.difference, check_pass_thru())
self.assertRaises(TypeError, self.s.difference, [[]])
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').difference(C('cdc')), set('ab'))
self.assertEqual(self.thetype('abcba').difference(C('efgfe')), set('abc'))
self.assertEqual(self.thetype('abcba').difference(C('ccb')), set('a'))
self.assertEqual(self.thetype('abcba').difference(C('ef')), set('abc'))
self.assertEqual(self.thetype('abcba').difference(), set('abc'))
self.assertEqual(self.thetype('abcba').difference(C('a'), C('b')), set('c'))
def test_sub(self):
i = self.s.difference(self.otherword)
self.assertEqual(self.s - set(self.otherword), i)
self.assertEqual(self.s - frozenset(self.otherword), i)
try:
self.s - self.otherword
except TypeError:
pass
else:
self.fail("s-t did not screen-out general iterables")
def test_symmetric_difference(self):
i = self.s.symmetric_difference(self.otherword)
for c in self.letters:
self.assertEqual(c in i, (c in self.d) ^ (c in self.otherword))
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(i), self.basetype)
self.assertRaises(PassThru, self.s.symmetric_difference, check_pass_thru())
self.assertRaises(TypeError, self.s.symmetric_difference, [[]])
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').symmetric_difference(C('cdc')), set('abd'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('efgfe')), set('abcefg'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('ccb')), set('a'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('ef')), set('abcef'))
def test_xor(self):
i = self.s.symmetric_difference(self.otherword)
self.assertEqual(self.s ^ set(self.otherword), i)
self.assertEqual(self.s ^ frozenset(self.otherword), i)
try:
self.s ^ self.otherword
except TypeError:
pass
else:
self.fail("s^t did not screen-out general iterables")
def test_equality(self):
self.assertEqual(self.s, set(self.word))
self.assertEqual(self.s, frozenset(self.word))
self.assertEqual(self.s == self.word, False)
self.assertNotEqual(self.s, set(self.otherword))
self.assertNotEqual(self.s, frozenset(self.otherword))
self.assertEqual(self.s != self.word, True)
def test_setOfFrozensets(self):
t = map(frozenset, ['abcdef', 'bcd', 'bdcb', 'fed', 'fedccba'])
s = self.thetype(t)
self.assertEqual(len(s), 3)
def test_sub_and_super(self):
p, q, r = map(self.thetype, ['ab', 'abcde', 'def'])
self.assertTrue(p < q)
self.assertTrue(p <= q)
self.assertTrue(q <= q)
self.assertTrue(q > p)
self.assertTrue(q >= p)
self.assertFalse(q < r)
self.assertFalse(q <= r)
self.assertFalse(q > r)
self.assertFalse(q >= r)
self.assertTrue(set('a').issubset('abc'))
self.assertTrue(set('abc').issuperset('a'))
self.assertFalse(set('a').issubset('cbs'))
self.assertFalse(set('cbs').issuperset('a'))
def test_pickling(self):
for i in range(pickle.HIGHEST_PROTOCOL + 1):
p = pickle.dumps(self.s, i)
dup = pickle.loads(p)
self.assertEqual(self.s, dup, "%s != %s" % (self.s, dup))
if type(self.s) not in (set, frozenset):
self.s.x = 10
p = pickle.dumps(self.s, i)
dup = pickle.loads(p)
self.assertEqual(self.s.x, dup.x)
def test_iterator_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
itorg = iter(self.s)
data = self.thetype(self.s)
d = pickle.dumps(itorg, proto)
it = pickle.loads(d)
# Set iterators unpickle as list iterators due to the
# undefined order of set items.
# self.assertEqual(type(itorg), type(it))
self.assertIsInstance(it, collections.abc.Iterator)
self.assertEqual(self.thetype(it), data)
it = pickle.loads(d)
try:
drop = next(it)
except StopIteration:
continue
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(self.thetype(it), data - self.thetype((drop,)))
def test_deepcopy(self):
class Tracer:
def __init__(self, value):
self.value = value
def __hash__(self):
return self.value
def __deepcopy__(self, memo=None):
return Tracer(self.value + 1)
t = Tracer(10)
s = self.thetype([t])
dup = copy.deepcopy(s)
self.assertNotEqual(id(s), id(dup))
for elem in dup:
newt = elem
self.assertNotEqual(id(t), id(newt))
self.assertEqual(t.value + 1, newt.value)
def test_gc(self):
# Create a nest of cycles to exercise overall ref count check
class A:
pass
s = set(A() for i in range(1000))
for elem in s:
elem.cycle = s
elem.sub = elem
elem.set = set([elem])
def test_subclass_with_custom_hash(self):
# Bug #1257731
class H(self.thetype):
def __hash__(self):
return int(id(self) & 0x7fffffff)
s=H()
f=set()
f.add(s)
self.assertIn(s, f)
f.remove(s)
f.add(s)
f.discard(s)
def test_badcmp(self):
s = self.thetype([BadCmp()])
# Detect comparison errors during insertion and lookup
self.assertRaises(RuntimeError, self.thetype, [BadCmp(), BadCmp()])
self.assertRaises(RuntimeError, s.__contains__, BadCmp())
# Detect errors during mutating operations
if hasattr(s, 'add'):
self.assertRaises(RuntimeError, s.add, BadCmp())
self.assertRaises(RuntimeError, s.discard, BadCmp())
self.assertRaises(RuntimeError, s.remove, BadCmp())
def test_cyclical_repr(self):
w = ReprWrapper()
s = self.thetype([w])
w.value = s
if self.thetype == set:
self.assertEqual(repr(s), '{set(...)}')
else:
name = repr(s).partition('(')[0] # strip class name
self.assertEqual(repr(s), '%s({%s(...)})' % (name, name))
def test_cyclical_print(self):
w = ReprWrapper()
s = self.thetype([w])
w.value = s
fo = open(support.TESTFN, "w")
try:
fo.write(str(s))
fo.close()
fo = open(support.TESTFN, "r")
self.assertEqual(fo.read(), repr(s))
finally:
fo.close()
support.unlink(support.TESTFN)
def test_do_not_rehash_dict_keys(self):
n = 10
d = dict.fromkeys(map(HashCountingInt, range(n)))
self.assertEqual(sum(elem.hash_count for elem in d), n)
s = self.thetype(d)
self.assertEqual(sum(elem.hash_count for elem in d), n)
s.difference(d)
self.assertEqual(sum(elem.hash_count for elem in d), n)
if hasattr(s, 'symmetric_difference_update'):
s.symmetric_difference_update(d)
self.assertEqual(sum(elem.hash_count for elem in d), n)
d2 = dict.fromkeys(set(d))
self.assertEqual(sum(elem.hash_count for elem in d), n)
d3 = dict.fromkeys(frozenset(d))
self.assertEqual(sum(elem.hash_count for elem in d), n)
d3 = dict.fromkeys(frozenset(d), 123)
self.assertEqual(sum(elem.hash_count for elem in d), n)
self.assertEqual(d3, dict.fromkeys(d, 123))
def test_container_iterator(self):
# Bug #3680: tp_traverse was not implemented for set iterator object
class C(object):
pass
obj = C()
ref = weakref.ref(obj)
container = set([obj, 1])
obj.x = iter(container)
del obj, container
gc.collect()
self.assertTrue(ref() is None, "Cycle was not collected")
def test_free_after_iterating(self):
support.check_free_after_iterating(self, iter, self.thetype)
class TestSet(TestJointOps, unittest.TestCase):
thetype = set
basetype = set
def test_init(self):
s = self.thetype()
s.__init__(self.word)
self.assertEqual(s, set(self.word))
s.__init__(self.otherword)
self.assertEqual(s, set(self.otherword))
        self.assertRaises(TypeError, s.__init__, s, 2)
        self.assertRaises(TypeError, s.__init__, 1)
def test_constructor_identity(self):
s = self.thetype(range(3))
t = self.thetype(s)
self.assertNotEqual(id(s), id(t))
def test_set_literal(self):
s = set([1,2,3])
t = {1,2,3}
self.assertEqual(s, t)
def test_set_literal_insertion_order(self):
# SF Issue #26020 -- Expect left to right insertion
s = {1, 1.0, True}
self.assertEqual(len(s), 1)
stored_value = s.pop()
self.assertEqual(type(stored_value), int)
def test_set_literal_evaluation_order(self):
# Expect left to right expression evaluation
events = []
def record(obj):
events.append(obj)
s = {record(1), record(2), record(3)}
self.assertEqual(events, [1, 2, 3])
def test_hash(self):
self.assertRaises(TypeError, hash, self.s)
def test_clear(self):
self.s.clear()
self.assertEqual(self.s, set())
self.assertEqual(len(self.s), 0)
def test_copy(self):
dup = self.s.copy()
self.assertEqual(self.s, dup)
self.assertNotEqual(id(self.s), id(dup))
self.assertEqual(type(dup), self.basetype)
def test_add(self):
self.s.add('Q')
self.assertIn('Q', self.s)
dup = self.s.copy()
self.s.add('Q')
self.assertEqual(self.s, dup)
self.assertRaises(TypeError, self.s.add, [])
def test_remove(self):
self.s.remove('a')
self.assertNotIn('a', self.s)
self.assertRaises(KeyError, self.s.remove, 'Q')
self.assertRaises(TypeError, self.s.remove, [])
s = self.thetype([frozenset(self.word)])
self.assertIn(self.thetype(self.word), s)
s.remove(self.thetype(self.word))
self.assertNotIn(self.thetype(self.word), s)
self.assertRaises(KeyError, self.s.remove, self.thetype(self.word))
def test_remove_keyerror_unpacking(self):
# bug: www.python.org/sf/1576657
for v1 in ['Q', (1,)]:
try:
self.s.remove(v1)
except KeyError as e:
v2 = e.args[0]
self.assertEqual(v1, v2)
else:
self.fail()
def test_remove_keyerror_set(self):
key = self.thetype([3, 4])
try:
self.s.remove(key)
except KeyError as e:
self.assertTrue(e.args[0] is key,
"KeyError should be {0}, not {1}".format(key,
e.args[0]))
else:
self.fail()
def test_discard(self):
self.s.discard('a')
self.assertNotIn('a', self.s)
self.s.discard('Q')
self.assertRaises(TypeError, self.s.discard, [])
s = self.thetype([frozenset(self.word)])
self.assertIn(self.thetype(self.word), s)
s.discard(self.thetype(self.word))
self.assertNotIn(self.thetype(self.word), s)
s.discard(self.thetype(self.word))
def test_pop(self):
for i in range(len(self.s)):
elem = self.s.pop()
self.assertNotIn(elem, self.s)
self.assertRaises(KeyError, self.s.pop)
def test_update(self):
retval = self.s.update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
self.assertIn(c, self.s)
self.assertRaises(PassThru, self.s.update, check_pass_thru())
self.assertRaises(TypeError, self.s.update, [[]])
for p, q in (('cdc', 'abcd'), ('efgfe', 'abcefg'), ('ccb', 'abc'), ('ef', 'abcef')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.update(C(p)), None)
self.assertEqual(s, set(q))
for p in ('cdc', 'efgfe', 'ccb', 'ef', 'abcda'):
q = 'ahi'
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.update(C(p), C(q)), None)
self.assertEqual(s, set(s) | set(p) | set(q))
def test_ior(self):
self.s |= set(self.otherword)
for c in (self.word + self.otherword):
self.assertIn(c, self.s)
def test_intersection_update(self):
retval = self.s.intersection_update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
if c in self.otherword and c in self.word:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(PassThru, self.s.intersection_update, check_pass_thru())
self.assertRaises(TypeError, self.s.intersection_update, [[]])
for p, q in (('cdc', 'c'), ('efgfe', ''), ('ccb', 'bc'), ('ef', '')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.intersection_update(C(p)), None)
self.assertEqual(s, set(q))
ss = 'abcba'
s = self.thetype(ss)
t = 'cbc'
self.assertEqual(s.intersection_update(C(p), C(t)), None)
self.assertEqual(s, set('abcba')&set(p)&set(t))
def test_iand(self):
self.s &= set(self.otherword)
for c in (self.word + self.otherword):
if c in self.otherword and c in self.word:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_difference_update(self):
retval = self.s.difference_update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
if c in self.word and c not in self.otherword:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(PassThru, self.s.difference_update, check_pass_thru())
self.assertRaises(TypeError, self.s.difference_update, [[]])
self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])
for p, q in (('cdc', 'ab'), ('efgfe', 'abc'), ('ccb', 'a'), ('ef', 'abc')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.difference_update(C(p)), None)
self.assertEqual(s, set(q))
s = self.thetype('abcdefghih')
s.difference_update()
self.assertEqual(s, self.thetype('abcdefghih'))
s = self.thetype('abcdefghih')
s.difference_update(C('aba'))
self.assertEqual(s, self.thetype('cdefghih'))
s = self.thetype('abcdefghih')
s.difference_update(C('cdc'), C('aba'))
self.assertEqual(s, self.thetype('efghih'))
def test_isub(self):
self.s -= set(self.otherword)
for c in (self.word + self.otherword):
if c in self.word and c not in self.otherword:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_symmetric_difference_update(self):
retval = self.s.symmetric_difference_update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
if (c in self.word) ^ (c in self.otherword):
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(PassThru, self.s.symmetric_difference_update, check_pass_thru())
self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])
for p, q in (('cdc', 'abd'), ('efgfe', 'abcefg'), ('ccb', 'a'), ('ef', 'abcef')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.symmetric_difference_update(C(p)), None)
self.assertEqual(s, set(q))
def test_ixor(self):
self.s ^= set(self.otherword)
for c in (self.word + self.otherword):
if (c in self.word) ^ (c in self.otherword):
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_inplace_on_self(self):
t = self.s.copy()
t |= t
self.assertEqual(t, self.s)
t &= t
self.assertEqual(t, self.s)
t -= t
self.assertEqual(t, self.thetype())
t = self.s.copy()
t ^= t
self.assertEqual(t, self.thetype())
def test_weakref(self):
s = self.thetype('gallahad')
p = weakref.proxy(s)
self.assertEqual(str(p), str(s))
s = None
self.assertRaises(ReferenceError, str, p)
def test_rich_compare(self):
class TestRichSetCompare:
def __gt__(self, some_set):
self.gt_called = True
return False
def __lt__(self, some_set):
self.lt_called = True
return False
def __ge__(self, some_set):
self.ge_called = True
return False
def __le__(self, some_set):
self.le_called = True
return False
# This first tries the builtin rich set comparison, which doesn't know
# how to handle the custom object. Upon returning NotImplemented, the
# corresponding comparison on the right object is invoked.
myset = {1, 2, 3}
myobj = TestRichSetCompare()
myset < myobj
self.assertTrue(myobj.gt_called)
myobj = TestRichSetCompare()
myset > myobj
self.assertTrue(myobj.lt_called)
myobj = TestRichSetCompare()
myset <= myobj
self.assertTrue(myobj.ge_called)
myobj = TestRichSetCompare()
myset >= myobj
self.assertTrue(myobj.le_called)
@unittest.skipUnless(hasattr(set, "test_c_api"),
'C API test only available in a debug build')
def test_c_api(self):
self.assertEqual(set().test_c_api(), True)
class SetSubclass(set):
pass
class TestSetSubclass(TestSet):
thetype = SetSubclass
basetype = set
class SetSubclassWithKeywordArgs(set):
def __init__(self, iterable=[], newarg=None):
set.__init__(self, iterable)
class TestSetSubclassWithKeywordArgs(TestSet):
def test_keywords_in_subclass(self):
'SF bug #1486663 -- this used to erroneously raise a TypeError'
SetSubclassWithKeywordArgs(newarg=1)
class TestFrozenSet(TestJointOps, unittest.TestCase):
thetype = frozenset
basetype = frozenset
def test_init(self):
s = self.thetype(self.word)
s.__init__(self.otherword)
self.assertEqual(s, set(self.word))
def test_singleton_empty_frozenset(self):
f = frozenset()
efs = [frozenset(), frozenset([]), frozenset(()), frozenset(''),
frozenset(), frozenset([]), frozenset(()), frozenset(''),
frozenset(range(0)), frozenset(frozenset()),
frozenset(f), f]
# All of the empty frozensets should have just one id()
self.assertEqual(len(set(map(id, efs))), 1)
def test_constructor_identity(self):
s = self.thetype(range(3))
t = self.thetype(s)
self.assertEqual(id(s), id(t))
def test_hash(self):
self.assertEqual(hash(self.thetype('abcdeb')),
hash(self.thetype('ebecda')))
# make sure that all permutations give the same hash value
n = 100
seq = [randrange(n) for i in range(n)]
results = set()
for i in range(200):
shuffle(seq)
results.add(hash(self.thetype(seq)))
self.assertEqual(len(results), 1)
def test_copy(self):
dup = self.s.copy()
self.assertEqual(id(self.s), id(dup))
def test_frozen_as_dictkey(self):
seq = list(range(10)) + list('abcdefg') + ['apple']
key1 = self.thetype(seq)
key2 = self.thetype(reversed(seq))
self.assertEqual(key1, key2)
self.assertNotEqual(id(key1), id(key2))
d = {}
d[key1] = 42
self.assertEqual(d[key2], 42)
def test_hash_caching(self):
f = self.thetype('abcdcda')
self.assertEqual(hash(f), hash(f))
def test_hash_effectiveness(self):
n = 13
hashvalues = set()
addhashvalue = hashvalues.add
elemmasks = [(i+1, 1<<i) for i in range(n)]
for i in range(2**n):
addhashvalue(hash(frozenset([e for e, m in elemmasks if m&i])))
self.assertEqual(len(hashvalues), 2**n)
def zf_range(n):
# https://en.wikipedia.org/wiki/Set-theoretic_definition_of_natural_numbers
nums = [frozenset()]
for i in range(n-1):
num = frozenset(nums)
nums.append(num)
return nums[:n]
def powerset(s):
for i in range(len(s)+1):
yield from map(frozenset, itertools.combinations(s, i))
for n in range(18):
t = 2 ** n
mask = t - 1
for nums in (range, zf_range):
u = len({h & mask for h in map(hash, powerset(nums(n)))})
self.assertGreater(4*u, t)
class FrozenSetSubclass(frozenset):
pass
class TestFrozenSetSubclass(TestFrozenSet):
thetype = FrozenSetSubclass
basetype = frozenset
def test_constructor_identity(self):
s = self.thetype(range(3))
t = self.thetype(s)
self.assertNotEqual(id(s), id(t))
def test_copy(self):
dup = self.s.copy()
self.assertNotEqual(id(self.s), id(dup))
def test_nested_empty_constructor(self):
s = self.thetype()
t = self.thetype(s)
self.assertEqual(s, t)
def test_singleton_empty_frozenset(self):
Frozenset = self.thetype
f = frozenset()
F = Frozenset()
efs = [Frozenset(), Frozenset([]), Frozenset(()), Frozenset(''),
Frozenset(), Frozenset([]), Frozenset(()), Frozenset(''),
Frozenset(range(0)), Frozenset(Frozenset()),
Frozenset(frozenset()), f, F, Frozenset(f), Frozenset(F)]
# All empty frozenset subclass instances should have different ids
self.assertEqual(len(set(map(id, efs))), len(efs))
# Tests taken from test_sets.py =============================================
empty_set = set()
#==============================================================================
class TestBasicOps:
def test_repr(self):
if self.repr is not None:
self.assertEqual(repr(self.set), self.repr)
def check_repr_against_values(self):
text = repr(self.set)
self.assertTrue(text.startswith('{'))
self.assertTrue(text.endswith('}'))
result = text[1:-1].split(', ')
result.sort()
sorted_repr_values = [repr(value) for value in self.values]
sorted_repr_values.sort()
self.assertEqual(result, sorted_repr_values)
def test_print(self):
try:
fo = open(support.TESTFN, "w")
fo.write(str(self.set))
fo.close()
fo = open(support.TESTFN, "r")
self.assertEqual(fo.read(), repr(self.set))
finally:
fo.close()
support.unlink(support.TESTFN)
def test_length(self):
self.assertEqual(len(self.set), self.length)
def test_self_equality(self):
self.assertEqual(self.set, self.set)
def test_equivalent_equality(self):
self.assertEqual(self.set, self.dup)
def test_copy(self):
self.assertEqual(self.set.copy(), self.dup)
def test_self_union(self):
result = self.set | self.set
self.assertEqual(result, self.dup)
def test_empty_union(self):
result = self.set | empty_set
self.assertEqual(result, self.dup)
def test_union_empty(self):
result = empty_set | self.set
self.assertEqual(result, self.dup)
def test_self_intersection(self):
result = self.set & self.set
self.assertEqual(result, self.dup)
def test_empty_intersection(self):
result = self.set & empty_set
self.assertEqual(result, empty_set)
def test_intersection_empty(self):
result = empty_set & self.set
self.assertEqual(result, empty_set)
def test_self_isdisjoint(self):
result = self.set.isdisjoint(self.set)
self.assertEqual(result, not self.set)
def test_empty_isdisjoint(self):
result = self.set.isdisjoint(empty_set)
self.assertEqual(result, True)
def test_isdisjoint_empty(self):
result = empty_set.isdisjoint(self.set)
self.assertEqual(result, True)
def test_self_symmetric_difference(self):
result = self.set ^ self.set
self.assertEqual(result, empty_set)
def test_empty_symmetric_difference(self):
result = self.set ^ empty_set
self.assertEqual(result, self.set)
def test_self_difference(self):
result = self.set - self.set
self.assertEqual(result, empty_set)
def test_empty_difference(self):
result = self.set - empty_set
self.assertEqual(result, self.dup)
def test_empty_difference_rev(self):
result = empty_set - self.set
self.assertEqual(result, empty_set)
def test_iteration(self):
for v in self.set:
self.assertIn(v, self.values)
setiter = iter(self.set)
self.assertEqual(setiter.__length_hint__(), len(self.set))
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
p = pickle.dumps(self.set, proto)
copy = pickle.loads(p)
self.assertEqual(self.set, copy,
"%s != %s" % (self.set, copy))
#------------------------------------------------------------------------------
class TestBasicOpsEmpty(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "empty set"
self.values = []
self.set = set(self.values)
self.dup = set(self.values)
self.length = 0
self.repr = "set()"
#------------------------------------------------------------------------------
class TestBasicOpsSingleton(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "unit set (number)"
self.values = [3]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 1
self.repr = "{3}"
def test_in(self):
self.assertIn(3, self.set)
def test_not_in(self):
self.assertNotIn(2, self.set)
#------------------------------------------------------------------------------
class TestBasicOpsTuple(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "unit set (tuple)"
self.values = [(0, "zero")]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 1
self.repr = "{(0, 'zero')}"
def test_in(self):
self.assertIn((0, "zero"), self.set)
def test_not_in(self):
self.assertNotIn(9, self.set)
#------------------------------------------------------------------------------
class TestBasicOpsTriple(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "triple set"
self.values = [0, "zero", operator.add]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 3
self.repr = None
#------------------------------------------------------------------------------
class TestBasicOpsString(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "string set"
self.values = ["a", "b", "c"]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 3
def test_repr(self):
self.check_repr_against_values()
#------------------------------------------------------------------------------
class TestBasicOpsBytes(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "bytes set"
self.values = [b"a", b"b", b"c"]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 3
def test_repr(self):
self.check_repr_against_values()
#------------------------------------------------------------------------------
class TestBasicOpsMixedStringBytes(TestBasicOps, unittest.TestCase):
def setUp(self):
self._warning_filters = support.check_warnings()
self._warning_filters.__enter__()
warnings.simplefilter('ignore', BytesWarning)
self.case = "string and bytes set"
self.values = ["a", "b", b"a", b"b"]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 4
def tearDown(self):
self._warning_filters.__exit__(None, None, None)
def test_repr(self):
self.check_repr_against_values()
#==============================================================================
def baditer():
raise TypeError
yield True
def gooditer():
yield True
class TestExceptionPropagation(unittest.TestCase):
"""SF 628246: Set constructor should not trap iterator TypeErrors"""
def test_instanceWithException(self):
self.assertRaises(TypeError, set, baditer())
def test_instancesWithoutException(self):
# All of these iterables should load without exception.
set([1,2,3])
set((1,2,3))
set({'one':1, 'two':2, 'three':3})
set(range(3))
set('abc')
set(gooditer())
def test_changingSizeWhileIterating(self):
s = set([1,2,3])
try:
for i in s:
s.update([4])
except RuntimeError:
pass
else:
self.fail("no exception when changing size during iteration")
#==============================================================================
class TestSetOfSets(unittest.TestCase):
def test_constructor(self):
inner = frozenset([1])
outer = set([inner])
element = outer.pop()
self.assertEqual(type(element), frozenset)
outer.add(inner) # Rebuild set of sets with .add method
outer.remove(inner)
self.assertEqual(outer, set()) # Verify that remove worked
outer.discard(inner) # Absence of KeyError indicates working fine
#==============================================================================
class TestBinaryOps(unittest.TestCase):
def setUp(self):
self.set = set((2, 4, 6))
def test_eq(self): # SF bug 643115
self.assertEqual(self.set, set({2:1,4:3,6:5}))
def test_union_subset(self):
result = self.set | set([2])
self.assertEqual(result, set((2, 4, 6)))
def test_union_superset(self):
result = self.set | set([2, 4, 6, 8])
self.assertEqual(result, set([2, 4, 6, 8]))
def test_union_overlap(self):
result = self.set | set([3, 4, 5])
self.assertEqual(result, set([2, 3, 4, 5, 6]))
def test_union_non_overlap(self):
result = self.set | set([8])
self.assertEqual(result, set([2, 4, 6, 8]))
def test_intersection_subset(self):
result = self.set & set((2, 4))
self.assertEqual(result, set((2, 4)))
def test_intersection_superset(self):
result = self.set & set([2, 4, 6, 8])
self.assertEqual(result, set([2, 4, 6]))
def test_intersection_overlap(self):
result = self.set & set([3, 4, 5])
self.assertEqual(result, set([4]))
def test_intersection_non_overlap(self):
result = self.set & set([8])
self.assertEqual(result, empty_set)
def test_isdisjoint_subset(self):
result = self.set.isdisjoint(set((2, 4)))
self.assertEqual(result, False)
def test_isdisjoint_superset(self):
result = self.set.isdisjoint(set([2, 4, 6, 8]))
self.assertEqual(result, False)
def test_isdisjoint_overlap(self):
result = self.set.isdisjoint(set([3, 4, 5]))
self.assertEqual(result, False)
def test_isdisjoint_non_overlap(self):
result = self.set.isdisjoint(set([8]))
self.assertEqual(result, True)
def test_sym_difference_subset(self):
result = self.set ^ set((2, 4))
self.assertEqual(result, set([6]))
def test_sym_difference_superset(self):
result = self.set ^ set((2, 4, 6, 8))
self.assertEqual(result, set([8]))
def test_sym_difference_overlap(self):
result = self.set ^ set((3, 4, 5))
self.assertEqual(result, set([2, 3, 5, 6]))
def test_sym_difference_non_overlap(self):
result = self.set ^ set([8])
self.assertEqual(result, set([2, 4, 6, 8]))
#==============================================================================
class TestUpdateOps(unittest.TestCase):
def setUp(self):
self.set = set((2, 4, 6))
def test_union_subset(self):
self.set |= set([2])
self.assertEqual(self.set, set((2, 4, 6)))
def test_union_superset(self):
self.set |= set([2, 4, 6, 8])
self.assertEqual(self.set, set([2, 4, 6, 8]))
def test_union_overlap(self):
self.set |= set([3, 4, 5])
self.assertEqual(self.set, set([2, 3, 4, 5, 6]))
def test_union_non_overlap(self):
self.set |= set([8])
self.assertEqual(self.set, set([2, 4, 6, 8]))
def test_union_method_call(self):
self.set.update(set([3, 4, 5]))
self.assertEqual(self.set, set([2, 3, 4, 5, 6]))
def test_intersection_subset(self):
self.set &= set((2, 4))
self.assertEqual(self.set, set((2, 4)))
def test_intersection_superset(self):
self.set &= set([2, 4, 6, 8])
self.assertEqual(self.set, set([2, 4, 6]))
def test_intersection_overlap(self):
self.set &= set([3, 4, 5])
self.assertEqual(self.set, set([4]))
def test_intersection_non_overlap(self):
self.set &= set([8])
self.assertEqual(self.set, empty_set)
def test_intersection_method_call(self):
self.set.intersection_update(set([3, 4, 5]))
self.assertEqual(self.set, set([4]))
def test_sym_difference_subset(self):
self.set ^= set((2, 4))
self.assertEqual(self.set, set([6]))
def test_sym_difference_superset(self):
self.set ^= set((2, 4, 6, 8))
self.assertEqual(self.set, set([8]))
def test_sym_difference_overlap(self):
self.set ^= set((3, 4, 5))
self.assertEqual(self.set, set([2, 3, 5, 6]))
def test_sym_difference_non_overlap(self):
self.set ^= set([8])
self.assertEqual(self.set, set([2, 4, 6, 8]))
def test_sym_difference_method_call(self):
self.set.symmetric_difference_update(set([3, 4, 5]))
self.assertEqual(self.set, set([2, 3, 5, 6]))
def test_difference_subset(self):
self.set -= set((2, 4))
self.assertEqual(self.set, set([6]))
def test_difference_superset(self):
self.set -= set((2, 4, 6, 8))
self.assertEqual(self.set, set([]))
def test_difference_overlap(self):
self.set -= set((3, 4, 5))
self.assertEqual(self.set, set([2, 6]))
def test_difference_non_overlap(self):
self.set -= set([8])
self.assertEqual(self.set, set([2, 4, 6]))
def test_difference_method_call(self):
self.set.difference_update(set([3, 4, 5]))
self.assertEqual(self.set, set([2, 6]))
#==============================================================================
class TestMutate(unittest.TestCase):
def setUp(self):
self.values = ["a", "b", "c"]
self.set = set(self.values)
def test_add_present(self):
self.set.add("c")
self.assertEqual(self.set, set("abc"))
def test_add_absent(self):
self.set.add("d")
self.assertEqual(self.set, set("abcd"))
def test_add_until_full(self):
tmp = set()
expected_len = 0
for v in self.values:
tmp.add(v)
expected_len += 1
self.assertEqual(len(tmp), expected_len)
self.assertEqual(tmp, self.set)
def test_remove_present(self):
self.set.remove("b")
self.assertEqual(self.set, set("ac"))
def test_remove_absent(self):
try:
self.set.remove("d")
self.fail("Removing missing element should have raised LookupError")
except LookupError:
pass
def test_remove_until_empty(self):
expected_len = len(self.set)
for v in self.values:
self.set.remove(v)
expected_len -= 1
self.assertEqual(len(self.set), expected_len)
def test_discard_present(self):
self.set.discard("c")
self.assertEqual(self.set, set("ab"))
def test_discard_absent(self):
self.set.discard("d")
self.assertEqual(self.set, set("abc"))
def test_clear(self):
self.set.clear()
self.assertEqual(len(self.set), 0)
def test_pop(self):
popped = {}
while self.set:
popped[self.set.pop()] = None
self.assertEqual(len(popped), len(self.values))
for v in self.values:
self.assertIn(v, popped)
def test_update_empty_tuple(self):
self.set.update(())
self.assertEqual(self.set, set(self.values))
def test_update_unit_tuple_overlap(self):
self.set.update(("a",))
self.assertEqual(self.set, set(self.values))
def test_update_unit_tuple_non_overlap(self):
self.set.update(("a", "z"))
self.assertEqual(self.set, set(self.values + ["z"]))
#==============================================================================
class TestSubsets:
case2method = {"<=": "issubset",
">=": "issuperset",
}
reverse = {"==": "==",
"!=": "!=",
"<": ">",
">": "<",
"<=": ">=",
">=": "<=",
}
def test_issubset(self):
x = self.left
y = self.right
for case in "!=", "==", "<", "<=", ">", ">=":
expected = case in self.cases
# Test the binary infix spelling.
result = eval("x" + case + "y", locals())
self.assertEqual(result, expected)
# Test the "friendly" method-name spelling, if one exists.
if case in TestSubsets.case2method:
method = getattr(x, TestSubsets.case2method[case])
result = method(y)
self.assertEqual(result, expected)
# Now do the same for the operands reversed.
rcase = TestSubsets.reverse[case]
result = eval("y" + rcase + "x", locals())
self.assertEqual(result, expected)
if rcase in TestSubsets.case2method:
method = getattr(y, TestSubsets.case2method[rcase])
result = method(x)
self.assertEqual(result, expected)
#------------------------------------------------------------------------------
class TestSubsetEqualEmpty(TestSubsets, unittest.TestCase):
left = set()
right = set()
name = "both empty"
cases = "==", "<=", ">="
#------------------------------------------------------------------------------
class TestSubsetEqualNonEmpty(TestSubsets, unittest.TestCase):
left = set([1, 2])
right = set([1, 2])
name = "equal pair"
cases = "==", "<=", ">="
#------------------------------------------------------------------------------
class TestSubsetEmptyNonEmpty(TestSubsets, unittest.TestCase):
left = set()
right = set([1, 2])
name = "one empty, one non-empty"
cases = "!=", "<", "<="
#------------------------------------------------------------------------------
class TestSubsetPartial(TestSubsets, unittest.TestCase):
left = set([1])
right = set([1, 2])
name = "one a non-empty proper subset of other"
cases = "!=", "<", "<="
#------------------------------------------------------------------------------
class TestSubsetNonOverlap(TestSubsets, unittest.TestCase):
left = set([1])
right = set([2])
name = "neither empty, neither contains"
cases = "!="
#==============================================================================
class TestOnlySetsInBinaryOps:
def test_eq_ne(self):
# Unlike the others, this is testing that == and != *are* allowed.
self.assertEqual(self.other == self.set, False)
self.assertEqual(self.set == self.other, False)
self.assertEqual(self.other != self.set, True)
self.assertEqual(self.set != self.other, True)
def test_ge_gt_le_lt(self):
self.assertRaises(TypeError, lambda: self.set < self.other)
self.assertRaises(TypeError, lambda: self.set <= self.other)
self.assertRaises(TypeError, lambda: self.set > self.other)
self.assertRaises(TypeError, lambda: self.set >= self.other)
self.assertRaises(TypeError, lambda: self.other < self.set)
self.assertRaises(TypeError, lambda: self.other <= self.set)
self.assertRaises(TypeError, lambda: self.other > self.set)
self.assertRaises(TypeError, lambda: self.other >= self.set)
def test_update_operator(self):
try:
self.set |= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_update(self):
if self.otherIsIterable:
self.set.update(self.other)
else:
self.assertRaises(TypeError, self.set.update, self.other)
def test_union(self):
self.assertRaises(TypeError, lambda: self.set | self.other)
self.assertRaises(TypeError, lambda: self.other | self.set)
if self.otherIsIterable:
self.set.union(self.other)
else:
self.assertRaises(TypeError, self.set.union, self.other)
def test_intersection_update_operator(self):
try:
self.set &= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_intersection_update(self):
if self.otherIsIterable:
self.set.intersection_update(self.other)
else:
self.assertRaises(TypeError,
self.set.intersection_update,
self.other)
def test_intersection(self):
self.assertRaises(TypeError, lambda: self.set & self.other)
self.assertRaises(TypeError, lambda: self.other & self.set)
if self.otherIsIterable:
self.set.intersection(self.other)
else:
self.assertRaises(TypeError, self.set.intersection, self.other)
def test_sym_difference_update_operator(self):
try:
self.set ^= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_sym_difference_update(self):
if self.otherIsIterable:
self.set.symmetric_difference_update(self.other)
else:
self.assertRaises(TypeError,
self.set.symmetric_difference_update,
self.other)
def test_sym_difference(self):
self.assertRaises(TypeError, lambda: self.set ^ self.other)
self.assertRaises(TypeError, lambda: self.other ^ self.set)
if self.otherIsIterable:
self.set.symmetric_difference(self.other)
else:
self.assertRaises(TypeError, self.set.symmetric_difference, self.other)
def test_difference_update_operator(self):
try:
self.set -= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_difference_update(self):
if self.otherIsIterable:
self.set.difference_update(self.other)
else:
self.assertRaises(TypeError,
self.set.difference_update,
self.other)
def test_difference(self):
self.assertRaises(TypeError, lambda: self.set - self.other)
self.assertRaises(TypeError, lambda: self.other - self.set)
if self.otherIsIterable:
self.set.difference(self.other)
else:
self.assertRaises(TypeError, self.set.difference, self.other)
#------------------------------------------------------------------------------
class TestOnlySetsNumeric(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = 19
self.otherIsIterable = False
#------------------------------------------------------------------------------
class TestOnlySetsDict(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = {1:2, 3:4}
self.otherIsIterable = True
#------------------------------------------------------------------------------
class TestOnlySetsOperator(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = operator.add
self.otherIsIterable = False
#------------------------------------------------------------------------------
class TestOnlySetsTuple(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = (2, 4, 6)
self.otherIsIterable = True
#------------------------------------------------------------------------------
class TestOnlySetsString(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = 'abc'
self.otherIsIterable = True
#------------------------------------------------------------------------------
class TestOnlySetsGenerator(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
def gen():
for i in range(0, 10, 2):
yield i
self.set = set((1, 2, 3))
self.other = gen()
self.otherIsIterable = True
#==============================================================================
class TestCopying:
def test_copy(self):
dup = self.set.copy()
dup_list = sorted(dup, key=repr)
set_list = sorted(self.set, key=repr)
self.assertEqual(len(dup_list), len(set_list))
for i in range(len(dup_list)):
self.assertTrue(dup_list[i] is set_list[i])
def test_deep_copy(self):
dup = copy.deepcopy(self.set)
##print type(dup), repr(dup)
dup_list = sorted(dup, key=repr)
set_list = sorted(self.set, key=repr)
self.assertEqual(len(dup_list), len(set_list))
for i in range(len(dup_list)):
self.assertEqual(dup_list[i], set_list[i])
#------------------------------------------------------------------------------
class TestCopyingEmpty(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set()
#------------------------------------------------------------------------------
class TestCopyingSingleton(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set(["hello"])
#------------------------------------------------------------------------------
class TestCopyingTriple(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set(["zero", 0, None])
#------------------------------------------------------------------------------
class TestCopyingTuple(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set([(1, 2)])
#------------------------------------------------------------------------------
class TestCopyingNested(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set([((1, 2), (3, 4))])
#==============================================================================
class TestIdentities(unittest.TestCase):
def setUp(self):
self.a = set('abracadabra')
self.b = set('alacazam')
def test_binopsVsSubsets(self):
a, b = self.a, self.b
self.assertTrue(a - b < a)
self.assertTrue(b - a < b)
self.assertTrue(a & b < a)
self.assertTrue(a & b < b)
self.assertTrue(a | b > a)
self.assertTrue(a | b > b)
self.assertTrue(a ^ b < a | b)
def test_commutativity(self):
a, b = self.a, self.b
self.assertEqual(a&b, b&a)
self.assertEqual(a|b, b|a)
self.assertEqual(a^b, b^a)
if a != b:
self.assertNotEqual(a-b, b-a)
def test_summations(self):
# check that sums of parts equal the whole
a, b = self.a, self.b
self.assertEqual((a-b)|(a&b)|(b-a), a|b)
self.assertEqual((a&b)|(a^b), a|b)
self.assertEqual(a|(b-a), a|b)
self.assertEqual((a-b)|b, a|b)
self.assertEqual((a-b)|(a&b), a)
self.assertEqual((b-a)|(a&b), b)
self.assertEqual((a-b)|(b-a), a^b)
def test_exclusion(self):
# check that inverse operations show non-overlap
a, b, zero = self.a, self.b, set()
self.assertEqual((a-b)&b, zero)
self.assertEqual((b-a)&a, zero)
self.assertEqual((a&b)&(a^b), zero)
# Tests derived from test_itertools.py =======================================
def R(seqn):
'Regular generator'
for i in seqn:
yield i
class G:
'Sequence using __getitem__'
def __init__(self, seqn):
self.seqn = seqn
def __getitem__(self, i):
return self.seqn[i]
class I:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class Ig:
'Sequence using iterator protocol defined with a generator'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
for val in self.seqn:
yield val
class X:
'Missing __getitem__ and __iter__'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class N:
'Iterator missing __next__()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class E:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
3 // 0
class S:
'Test immediate stop'
def __init__(self, seqn):
pass
def __iter__(self):
return self
def __next__(self):
raise StopIteration
from itertools import chain
def L(seqn):
'Test multiple tiers of iterators'
return chain(map(lambda x:x, R(Ig(G(seqn)))))
class TestVariousIteratorArgs(unittest.TestCase):
def test_constructor(self):
for cons in (set, frozenset):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(sorted(cons(g(s)), key=repr), sorted(g(s), key=repr))
self.assertRaises(TypeError, cons , X(s))
self.assertRaises(TypeError, cons , N(s))
self.assertRaises(ZeroDivisionError, cons , E(s))
def test_inline_methods(self):
s = set('november')
for data in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5), 'december'):
for meth in (s.union, s.intersection, s.difference, s.symmetric_difference, s.isdisjoint):
for g in (G, I, Ig, L, R):
expected = meth(data)
actual = meth(g(data))
if isinstance(expected, bool):
self.assertEqual(actual, expected)
else:
self.assertEqual(sorted(actual, key=repr), sorted(expected, key=repr))
self.assertRaises(TypeError, meth, X(s))
self.assertRaises(TypeError, meth, N(s))
self.assertRaises(ZeroDivisionError, meth, E(s))
def test_inplace_methods(self):
for data in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5), 'december'):
for methname in ('update', 'intersection_update',
'difference_update', 'symmetric_difference_update'):
for g in (G, I, Ig, S, L, R):
s = set('january')
t = s.copy()
getattr(s, methname)(list(g(data)))
getattr(t, methname)(g(data))
self.assertEqual(sorted(s, key=repr), sorted(t, key=repr))
self.assertRaises(TypeError, getattr(set('january'), methname), X(data))
self.assertRaises(TypeError, getattr(set('january'), methname), N(data))
self.assertRaises(ZeroDivisionError, getattr(set('january'), methname), E(data))
class bad_eq:
def __eq__(self, other):
if be_bad:
set2.clear()
raise ZeroDivisionError
return self is other
def __hash__(self):
return 0
class bad_dict_clear:
def __eq__(self, other):
if be_bad:
dict2.clear()
return self is other
def __hash__(self):
return 0
class TestWeirdBugs(unittest.TestCase):
def test_8420_set_merge(self):
# This used to segfault
global be_bad, set2, dict2
be_bad = False
set1 = {bad_eq()}
set2 = {bad_eq() for i in range(75)}
be_bad = True
self.assertRaises(ZeroDivisionError, set1.update, set2)
be_bad = False
set1 = {bad_dict_clear()}
dict2 = {bad_dict_clear(): None}
be_bad = True
set1.symmetric_difference_update(dict2)
def test_iter_and_mutate(self):
# Issue #24581
s = set(range(100))
s.clear()
s.update(range(100))
si = iter(s)
s.clear()
a = list(range(100))
s.update(range(100))
list(si)
def test_merge_and_mutate(self):
class X:
def __hash__(self):
return hash(0)
def __eq__(self, o):
other.clear()
return False
other = set()
other = {X() for i in range(10)}
s = {0}
s.update(other)
# Application tests (based on David Eppstein's graph recipes) ================================
def powerset(U):
"""Generates all subsets of a set or sequence U."""
U = iter(U)
try:
x = frozenset([next(U)])
for S in powerset(U):
yield S
yield S | x
except StopIteration:
yield frozenset()
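# Editor's note: illustrative sketch, not part of the original test suite --
# powerset() above yields all 2**len(U) subsets of U as frozensets.
def _powerset_demo():
    subsets = list(powerset('ab'))
    assert len(subsets) == 4
    assert frozenset() in subsets and frozenset('ab') in subsets
    return subsets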
def cube(n):
"""Graph of n-dimensional hypercube."""
singletons = [frozenset([x]) for x in range(n)]
return dict([(x, frozenset([x^s for s in singletons]))
for x in powerset(range(n))])
def linegraph(G):
"""Graph, the vertices of which are edges of G,
with two vertices being adjacent iff the corresponding
edges share a vertex."""
L = {}
for x in G:
for y in G[x]:
nx = [frozenset([x,z]) for z in G[x] if z != y]
ny = [frozenset([y,z]) for z in G[y] if z != x]
L[frozenset([x,y])] = frozenset(nx+ny)
return L
def faces(G):
    'Return a set of faces in G, where a face is a set of vertices on that face.'
    # currently limited to triangles, squares, and pentagons
f = set()
for v1, edges in G.items():
for v2 in edges:
for v3 in G[v2]:
if v1 == v3:
continue
if v1 in G[v3]:
f.add(frozenset([v1, v2, v3]))
else:
for v4 in G[v3]:
if v4 == v2:
continue
if v1 in G[v4]:
f.add(frozenset([v1, v2, v3, v4]))
else:
for v5 in G[v4]:
if v5 == v3 or v5 == v2:
continue
if v1 in G[v5]:
f.add(frozenset([v1, v2, v3, v4, v5]))
return f
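# Editor's note: hypothetical usage sketch mirroring TestGraphs below -- on the
# 2-dimensional hypercube (a plain 4-cycle), faces() recovers the single square
# face made up of all four vertices.
def _faces_demo():
    square = cube(2)
    found = faces(square)
    assert len(found) == 1
    assert len(next(iter(found))) == 4
    return found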
class TestGraphs(unittest.TestCase):
def test_cube(self):
g = cube(3) # vert --> {v1, v2, v3}
vertices1 = set(g)
self.assertEqual(len(vertices1), 8) # eight vertices
for edge in g.values():
self.assertEqual(len(edge), 3) # each vertex connects to three edges
vertices2 = set(v for edges in g.values() for v in edges)
self.assertEqual(vertices1, vertices2) # edge vertices in original set
cubefaces = faces(g)
self.assertEqual(len(cubefaces), 6) # six faces
for face in cubefaces:
self.assertEqual(len(face), 4) # each face is a square
def test_cuboctahedron(self):
# http://en.wikipedia.org/wiki/Cuboctahedron
# 8 triangular faces and 6 square faces
# 12 identical vertices each connecting a triangle and square
g = cube(3)
        cuboctahedron = linegraph(g) # V --> {V1, V2, V3, V4}
        self.assertEqual(len(cuboctahedron), 12) # twelve vertices
vertices = set(cuboctahedron)
for edges in cuboctahedron.values():
self.assertEqual(len(edges), 4) # each vertex connects to four other vertices
othervertices = set(edge for edges in cuboctahedron.values() for edge in edges)
self.assertEqual(vertices, othervertices) # edge vertices in original set
cubofaces = faces(cuboctahedron)
facesizes = collections.defaultdict(int)
for face in cubofaces:
facesizes[len(face)] += 1
self.assertEqual(facesizes[3], 8) # eight triangular faces
self.assertEqual(facesizes[4], 6) # six square faces
for vertex in cuboctahedron:
edge = vertex # Cuboctahedron vertices are edges in Cube
self.assertEqual(len(edge), 2) # Two cube vertices define an edge
for cubevert in edge:
self.assertIn(cubevert, g)
#==============================================================================
if __name__ == "__main__":
unittest.main()
|
the-stack_0_5517 | import argparse
import errno
import os
import re
import sys
from argparse import RawDescriptionHelpFormatter
from textwrap import dedent
from urllib.parse import urlsplit
from requests.utils import get_netrc_auth
from .argtypes import (
AuthCredentials, KeyValueArgType, PARSED_DEFAULT_FORMAT_OPTIONS,
parse_auth,
parse_format_options,
)
from .constants import (
HTTP_GET, HTTP_POST, OUTPUT_OPTIONS, OUTPUT_OPTIONS_DEFAULT,
OUTPUT_OPTIONS_DEFAULT_OFFLINE, OUTPUT_OPTIONS_DEFAULT_STDOUT_REDIRECTED,
OUT_RESP_BODY, PRETTY_MAP, PRETTY_STDOUT_TTY_ONLY, RequestType,
SEPARATOR_CREDENTIALS,
SEPARATOR_GROUP_ALL_ITEMS, SEPARATOR_GROUP_DATA_ITEMS, URL_SCHEME_RE,
)
from .exceptions import ParseError
from .requestitems import RequestItems
from ..context import Environment
from ..plugins.registry import plugin_manager
from ..utils import ExplicitNullAuth, get_content_type
class HTTPieHelpFormatter(RawDescriptionHelpFormatter):
"""A nicer help formatter.
Help for arguments can be indented and contain new lines.
It will be de-dented and arguments in the help
will be separated by a blank line for better readability.
"""
def __init__(self, max_help_position=6, *args, **kwargs):
# A smaller indent for args help.
kwargs['max_help_position'] = max_help_position
super().__init__(*args, **kwargs)
def _split_lines(self, text, width):
text = dedent(text).strip() + '\n\n'
return text.splitlines()
# TODO: refactor and design type-annotated data structures
# for raw args + parsed args and keep things immutable.
class HTTPieArgumentParser(argparse.ArgumentParser):
"""Adds additional logic to `argparse.ArgumentParser`.
Handles all input (CLI args, file args, stdin), applies defaults,
and performs extra validation.
"""
def __init__(self, *args, formatter_class=HTTPieHelpFormatter, **kwargs):
kwargs['add_help'] = False
super().__init__(*args, formatter_class=formatter_class, **kwargs)
self.env = None
self.args = None
self.has_stdin_data = False
self.has_input_data = False
# noinspection PyMethodOverriding
def parse_args(
self,
env: Environment,
args=None,
namespace=None
) -> argparse.Namespace:
self.env = env
self.args, no_options = super().parse_known_args(args, namespace)
if self.args.debug:
self.args.traceback = True
self.has_stdin_data = (
self.env.stdin
and not self.args.ignore_stdin
and not self.env.stdin_isatty
)
self.has_input_data = self.has_stdin_data or self.args.raw is not None
# Arguments processing and environment setup.
self._apply_no_options(no_options)
self._process_request_type()
self._process_download_options()
self._setup_standard_streams()
self._process_output_options()
self._process_pretty_options()
self._process_format_options()
self._guess_method()
self._parse_items()
self._process_url()
self._process_auth()
if self.args.raw is not None:
self._body_from_input(self.args.raw)
elif self.has_stdin_data:
self._body_from_file(self.env.stdin)
if self.args.compress:
# TODO: allow --compress with --chunked / --multipart
if self.args.chunked:
self.error('cannot combine --compress and --chunked')
if self.args.multipart:
self.error('cannot combine --compress and --multipart')
return self.args
def _process_request_type(self):
request_type = self.args.request_type
self.args.json = request_type is RequestType.JSON
self.args.multipart = request_type is RequestType.MULTIPART
self.args.form = request_type in {
RequestType.FORM,
RequestType.MULTIPART,
}
def _process_url(self):
if self.args.url.startswith('://'):
# Paste URL & add space shortcut: `http ://pie.dev` → `http://pie.dev`
self.args.url = self.args.url[3:]
if not URL_SCHEME_RE.match(self.args.url):
if os.path.basename(self.env.program_name) == 'https':
scheme = 'https://'
else:
scheme = self.args.default_scheme + '://'
# See if we're using curl style shorthand for localhost (:3000/foo)
shorthand = re.match(r'^:(?!:)(\d*)(/?.*)$', self.args.url)
if shorthand:
port = shorthand.group(1)
rest = shorthand.group(2)
self.args.url = scheme + 'localhost'
if port:
self.args.url += ':' + port
self.args.url += rest
else:
self.args.url = scheme + self.args.url
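    # Editor's note -- illustrative expansions of the shorthand handled above
    # (assuming the default scheme is "http" and the program is not "https"):
    #   ":3000/foo"       -> "http://localhost:3000/foo"
    #   ":/foo"           -> "http://localhost/foo"
    #   "pie.dev/get"     -> "http://pie.dev/get"
    #   "://pie.dev/get"  -> "http://pie.dev/get"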
# noinspection PyShadowingBuiltins
def _print_message(self, message, file=None):
# Sneak in our stderr/stdout.
file = {
sys.stdout: self.env.stdout,
sys.stderr: self.env.stderr,
None: self.env.stderr
}.get(file, file)
if not hasattr(file, 'buffer') and isinstance(message, str):
message = message.encode(self.env.stdout_encoding)
super()._print_message(message, file)
def _setup_standard_streams(self):
"""
Modify `env.stdout` and `env.stdout_isatty` based on args, if needed.
"""
self.args.output_file_specified = bool(self.args.output_file)
if self.args.download:
# FIXME: Come up with a cleaner solution.
if not self.args.output_file and not self.env.stdout_isatty:
# Use stdout as the download output file.
self.args.output_file = self.env.stdout
# With `--download`, we write everything that would normally go to
# `stdout` to `stderr` instead. Let's replace the stream so that
# we don't have to use many `if`s throughout the codebase.
# The response body will be treated separately.
self.env.stdout = self.env.stderr
self.env.stdout_isatty = self.env.stderr_isatty
elif self.args.output_file:
# When not `--download`ing, then `--output` simply replaces
# `stdout`. The file is opened for appending, which isn't what
# we want in this case.
self.args.output_file.seek(0)
try:
self.args.output_file.truncate()
except OSError as e:
if e.errno == errno.EINVAL:
# E.g. /dev/null on Linux.
pass
else:
raise
self.env.stdout = self.args.output_file
self.env.stdout_isatty = False
if self.args.quiet:
self.env.stderr = self.env.devnull
if not (self.args.output_file_specified and not self.args.download):
self.env.stdout = self.env.devnull
def _process_auth(self):
# TODO: refactor & simplify this method.
self.args.auth_plugin = None
default_auth_plugin = plugin_manager.get_auth_plugins()[0]
auth_type_set = self.args.auth_type is not None
url = urlsplit(self.args.url)
if self.args.auth is None and not auth_type_set:
if url.username is not None:
# Handle http://username:password@hostname/
username = url.username
password = url.password or ''
self.args.auth = AuthCredentials(
key=username,
value=password,
sep=SEPARATOR_CREDENTIALS,
orig=SEPARATOR_CREDENTIALS.join([username, password])
)
if self.args.auth is not None or auth_type_set:
if not self.args.auth_type:
self.args.auth_type = default_auth_plugin.auth_type
plugin = plugin_manager.get_auth_plugin(self.args.auth_type)()
if (not self.args.ignore_netrc
and self.args.auth is None
and plugin.netrc_parse):
                # Only the host is needed, so it’s OK that the URL is not finalized yet.
netrc_credentials = get_netrc_auth(self.args.url)
if netrc_credentials:
self.args.auth = AuthCredentials(
key=netrc_credentials[0],
value=netrc_credentials[1],
sep=SEPARATOR_CREDENTIALS,
orig=SEPARATOR_CREDENTIALS.join(netrc_credentials)
)
if plugin.auth_require and self.args.auth is None:
self.error('--auth required')
plugin.raw_auth = self.args.auth
self.args.auth_plugin = plugin
already_parsed = isinstance(self.args.auth, AuthCredentials)
if self.args.auth is None or not plugin.auth_parse:
self.args.auth = plugin.get_auth()
else:
if already_parsed:
# from the URL
credentials = self.args.auth
else:
credentials = parse_auth(self.args.auth)
if (not credentials.has_password()
and plugin.prompt_password):
if self.args.ignore_stdin:
# Non-tty stdin read by now
self.error(
'Unable to prompt for passwords because'
' --ignore-stdin is set.'
)
credentials.prompt_password(url.netloc)
self.args.auth = plugin.get_auth(
username=credentials.key,
password=credentials.value,
)
if not self.args.auth and self.args.ignore_netrc:
# Set a no-op auth to force requests to ignore .netrc
# <https://github.com/psf/requests/issues/2773#issuecomment-174312831>
self.args.auth = ExplicitNullAuth()
def _apply_no_options(self, no_options):
"""For every `--no-OPTION` in `no_options`, set `args.OPTION` to
its default value. This allows for un-setting of options, e.g.,
specified in config.
"""
invalid = []
for option in no_options:
if not option.startswith('--no-'):
invalid.append(option)
continue
# --no-option => --option
inverted = '--' + option[5:]
for action in self._actions:
if inverted in action.option_strings:
setattr(self.args, action.dest, action.default)
break
else:
invalid.append(option)
if invalid:
self.error(f'unrecognized arguments: {" ".join(invalid)}')
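    # Editor's note -- hypothetical example of the mechanism above: if a config
    # file enables "--follow" by default, passing "--no-follow" on the command
    # line maps back to the "--follow" action and resets args.follow to the
    # parser default.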
def _body_from_file(self, fd):
"""Read the data from a file-like object.
Bytes are always read.
"""
self._ensure_one_data_source(self.args.data, self.args.files)
self.args.data = getattr(fd, 'buffer', fd)
def _body_from_input(self, data):
"""Read the data from the CLI.
"""
self._ensure_one_data_source(self.has_stdin_data, self.args.data,
self.args.files)
self.args.data = data.encode()
def _ensure_one_data_source(self, *other_sources):
"""There can only be one source of input request data.
"""
if any(other_sources):
self.error('Request body (from stdin, --raw or a file) and request '
'data (key=value) cannot be mixed. Pass '
'--ignore-stdin to let key/value take priority. '
'See https://httpie.io/docs#scripting for details.')
def _guess_method(self):
"""Set `args.method` if not specified to either POST or GET
based on whether the request has data or not.
"""
if self.args.method is None:
# Invoked as `http URL'.
assert not self.args.request_items
if self.has_input_data:
self.args.method = HTTP_POST
else:
self.args.method = HTTP_GET
# FIXME: False positive, e.g., "localhost" matches but is a valid URL.
elif not re.match('^[a-zA-Z]+$', self.args.method):
# Invoked as `http URL item+'. The URL is now in `args.method`
# and the first ITEM is now incorrectly in `args.url`.
try:
# Parse the URL as an ITEM and store it as the first ITEM arg.
self.args.request_items.insert(0, KeyValueArgType(
*SEPARATOR_GROUP_ALL_ITEMS).__call__(self.args.url))
except argparse.ArgumentTypeError as e:
if self.args.traceback:
raise
self.error(e.args[0])
else:
# Set the URL correctly
self.args.url = self.args.method
# Infer the method
has_data = (
self.has_input_data
or any(
item.sep in SEPARATOR_GROUP_DATA_ITEMS
for item in self.args.request_items)
)
self.args.method = HTTP_POST if has_data else HTTP_GET
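    # Editor's note -- hypothetical invocations showing the inference above:
    #   `http pie.dev/get`                  -> GET  (no request data)
    #   `http pie.dev/post name=Jane`       -> POST (a data item is present)
    #   `cat file.json | http pie.dev/post` -> POST (stdin provides the body)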
def _parse_items(self):
"""
Parse `args.request_items` into `args.headers`, `args.data`,
`args.params`, and `args.files`.
"""
try:
request_items = RequestItems.from_args(
request_item_args=self.args.request_items,
request_type=self.args.request_type,
)
except ParseError as e:
if self.args.traceback:
raise
self.error(e.args[0])
else:
self.args.headers = request_items.headers
self.args.data = request_items.data
self.args.files = request_items.files
self.args.params = request_items.params
self.args.multipart_data = request_items.multipart_data
if self.args.files and not self.args.form:
# `http url @/path/to/file`
request_file = None
for key, file in self.args.files.items():
if key != '':
self.error(
'Invalid file fields (perhaps you meant --form?):'
f' {",".join(self.args.files.keys())}')
if request_file is not None:
self.error("Can't read request from multiple files")
request_file = file
fn, fd, ct = request_file
self.args.files = {}
self._body_from_file(fd)
if 'Content-Type' not in self.args.headers:
content_type = get_content_type(fn)
if content_type:
self.args.headers['Content-Type'] = content_type
def _process_output_options(self):
"""Apply defaults to output options, or validate the provided ones.
The default output options are stdout-type-sensitive.
"""
def check_options(value, option):
unknown = set(value) - OUTPUT_OPTIONS
if unknown:
self.error(f'Unknown output options: {option}={",".join(unknown)}')
if self.args.verbose:
self.args.all = True
if self.args.output_options is None:
if self.args.verbose:
self.args.output_options = ''.join(OUTPUT_OPTIONS)
elif self.args.offline:
self.args.output_options = OUTPUT_OPTIONS_DEFAULT_OFFLINE
elif not self.env.stdout_isatty:
self.args.output_options = OUTPUT_OPTIONS_DEFAULT_STDOUT_REDIRECTED
else:
self.args.output_options = OUTPUT_OPTIONS_DEFAULT
if self.args.output_options_history is None:
self.args.output_options_history = self.args.output_options
check_options(self.args.output_options, '--print')
check_options(self.args.output_options_history, '--history-print')
if self.args.download and OUT_RESP_BODY in self.args.output_options:
# Response body is always downloaded with --download and it goes
# through a different routine, so we remove it.
self.args.output_options = str(
set(self.args.output_options) - set(OUT_RESP_BODY))
def _process_pretty_options(self):
if self.args.prettify == PRETTY_STDOUT_TTY_ONLY:
self.args.prettify = PRETTY_MAP[
'all' if self.env.stdout_isatty else 'none']
elif (self.args.prettify and self.env.is_windows
and self.args.output_file):
self.error('Only terminal output can be colorized on Windows.')
else:
# noinspection PyTypeChecker
self.args.prettify = PRETTY_MAP[self.args.prettify]
def _process_download_options(self):
if self.args.offline:
self.args.download = False
self.args.download_resume = False
return
if not self.args.download:
if self.args.download_resume:
self.error('--continue only works with --download')
if self.args.download_resume and not (
self.args.download and self.args.output_file):
self.error('--continue requires --output to be specified')
def _process_format_options(self):
format_options = self.args.format_options or []
parsed_options = PARSED_DEFAULT_FORMAT_OPTIONS
for options_group in format_options:
parsed_options = parse_format_options(options_group, defaults=parsed_options)
self.args.format_options = parsed_options
|
the-stack_0_5520 | from __future__ import unicode_literals
from django import forms
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.template.defaultfilters import filesizeformat
from django.utils.translation import gettext_lazy as _
from .models import Attachment
def validate_max_size(data):
if (
hasattr(settings, "FILE_UPLOAD_MAX_SIZE")
and data.size > settings.FILE_UPLOAD_MAX_SIZE
):
raise forms.ValidationError(
_("File exceeds maximum size of {size}").format(
size=filesizeformat(settings.FILE_UPLOAD_MAX_SIZE)
)
)
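# Editor's note (hypothetical configuration sketch): with, e.g.,
# FILE_UPLOAD_MAX_SIZE = 5 * 1024 * 1024 in the project's settings module,
# any upload larger than 5 MB is rejected by validate_max_size() above with
# the translated "File exceeds maximum size" error.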
class AttachmentForm(forms.ModelForm):
attachment_file = forms.FileField(
label=_("Upload attachment"), validators=[validate_max_size], max_length=32787
)
class Meta:
model = Attachment
fields = ("attachment_file",)
def save(self, request, obj, *args, **kwargs):
self.instance.creator = request.user
self.instance.content_type = ContentType.objects.get_for_model(obj)
self.instance.object_id = obj.pk
super(AttachmentForm, self).save(*args, **kwargs)
|
the-stack_0_5522 | # ex: set sts=4 ts=4 sw=4 noet:
# -*- coding: utf-8 -*-
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""
"""
from unittest.mock import patch
from datalad.config import ConfigManager
from datalad.distribution.dataset import Dataset
from ..credman import (
CredentialManager,
_get_cred_cfg_var,
)
from datalad.support.keyring_ import MemoryKeyring
from datalad.tests.utils import (
assert_in,
assert_not_in,
assert_raises,
eq_,
neq_,
patch_config,
with_tempfile,
)
def test_credmanager():
# we want all tests to bypass the actual system keyring
with patch('datalad.support.keyring_.keyring', MemoryKeyring()):
check_credmanager()
def check_credmanager():
cfg = ConfigManager()
credman = CredentialManager(cfg)
    # doesn't work with thin air
assert_raises(ValueError, credman.get)
eq_(credman.get('donotexiststest'), None)
eq_(credman.get(crazy='empty'), None)
# smoke test for legacy credential retrieval code
eq_(credman.get('donotexiststest', type='user_password'), None)
# does not fiddle with a secret that is readily provided
eq_(credman.get('dummy', secret='mike', _type_hint='token'),
dict(type='token', secret='mike'))
# no instructions what to do, no legacy entry, nothing was changed
# but the secret was written to the keystore
eq_(credman.set('mycred', secret='some'), dict(secret='some'))
    # redo, but with a timestamp
setprops = credman.set('lastusedcred', _lastused=True, secret='some')
assert_in('last-used', setprops)
# now re-set, based on the retrieved info, but update the timestamp
setprops_new = credman.set('lastusedcred', _lastused=True,
**credman.get('lastusedcred'))
# must have updated 'last-used'
neq_(setprops['last-used'], setprops_new['last-used'])
# first property store attempt
eq_(credman.set('changed', secret='some', prop='val'),
dict(secret='some', prop='val'))
# second, no changing the secret, but changing the prop, albeit with
# the same value, change report should be empty
eq_(credman.set('changed', prop='val'), dict())
# change secret, with value pulled from config
try:
cfg.set('datalad.credential.changed.secret', 'envsec',
scope='override')
eq_(credman.set('changed', secret=None), dict(secret='envsec'))
finally:
cfg.unset('datalad.credential.changed.secret', scope='override')
    # remove a non-existing property; the secret is not reported because it is unchanged
eq_(credman.set('mycred', dummy=None), dict(dummy=None))
assert_not_in(_get_cred_cfg_var("mycred", "dummy"), cfg)
# set property
eq_(credman.set('mycred', dummy='good', this='that'),
dict(dummy='good', this='that'))
# ensure set
eq_(credman.get('mycred'), dict(dummy='good', this='that', secret='some'))
# remove individual property
eq_(credman.set('mycred', dummy=None), dict(dummy=None))
# ensure removal
eq_(credman.get('mycred'), dict(this='that', secret='some'))
# test full query and constrained query
q = list(credman.query_())
eq_(len(q), 3)
# now query for one of the creds created above
q = list(credman.query_(prop='val'))
eq_(len(q), 1)
eq_(q[0][0], 'changed')
eq_(q[0][1]['prop'], 'val')
# and now a query with no match
q = list(credman.query_(prop='val', funky='town'))
eq_(len(q), 0)
# remove complete credential
credman.remove('mycred')
eq_(credman.get('mycred'), None)
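def _credman_usage_sketch():
    # Editor's note: minimal, hypothetical usage sketch distilled from the
    # assertions in check_credmanager() above; it bypasses the system keyring
    # the same way test_credmanager() does.
    with patch('datalad.support.keyring_.keyring', MemoryKeyring()):
        credman = CredentialManager(ConfigManager())
        credman.set('democred', secret='s3kr3t', purpose='demo')
        assert credman.get('democred') == dict(secret='s3kr3t', purpose='demo')
        credman.remove('democred')
        assert credman.get('democred') is None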
@with_tempfile
def test_credman_local(path):
ds = Dataset(path).create(result_renderer='disabled')
credman = CredentialManager(ds.config)
# deposit a credential into the dataset's config, and die trying to
# remove it
ds.config.set('datalad.credential.stupid.secret', 'really', scope='branch')
assert_raises(RuntimeError, credman.remove, 'stupid')
# but it manages for the local scope
ds.config.set('datalad.credential.notstupid.secret', 'really', scope='local')
credman.remove('notstupid')
def test_query():
# we want all tests to bypass the actual system keyring
with patch('datalad.support.keyring_.keyring', MemoryKeyring()):
check_query()
def check_query():
cfg = ConfigManager()
credman = CredentialManager(cfg)
# set a bunch of credentials with a common realm AND timestamp
for i in range(3):
credman.set(
f'cred.{i}',
_lastused=True,
secret=f'diff{i}',
realm='http://ex.com/login',
)
# now a credential with the common realm, but without a timestamp
credman.set(
'cred.no.time',
_lastused=False,
secret='notime',
realm='http://ex.com/login',
)
# and the most recent one (with timestamp) is an unrelated one
credman.set('unrelated', _lastused=True, secret='unrelated')
# now we want all credentials that match the realm, sorted by
# last-used timestamp -- most recent first
slist = credman.query(realm='http://ex.com/login', _sortby='last-used')
eq_(['cred.2', 'cred.1', 'cred.0', 'cred.no.time'],
[i[0] for i in slist])
# same now, but least recent first, importantly no timestamp stays last
slist = credman.query(realm='http://ex.com/login', _sortby='last-used',
_reverse=False)
eq_(['cred.0', 'cred.1', 'cred.2', 'cred.no.time'],
[i[0] for i in slist])
|
the-stack_0_5524 | #OBSOLETE - This script has been moved to the Jupyter Notebook: OSPO_Project_Health_Data_Tableau.ipynb
from common_functions import augur_db_connect, get_dates, get_commits_by_repo
from common_functions import repo_api_call, fork_archive
from tableau_functions import sustain_prs_by_repo_tableau, contributor_risk_tableau, response_time_tableau, activity_release_tableau
six_months = 180 # Default to one year of data
year = 365 # Default to one year of data
engine = augur_db_connect()
start_date, end_date = get_dates(year)
six_start_date, six_end_date = get_dates(six_months)
commit_threshold = 60 # 90 but use 1500 for testing
repo_list_commits = get_commits_by_repo(six_start_date, six_end_date, engine)
top = repo_list_commits.loc[repo_list_commits['count'] > commit_threshold]
# Testing - Delete this line later
i = 0
for index, repo in top.iterrows():
repo_id = repo['repo_id']
repo_name = repo['repo_name']
repo_path = repo['repo_path']
org_name = repo_path[11:(len(repo_path)-1)]
print('Processing:', org_name, repo_name, repo_path, repo_id, repo['count'])
    try:
        repo_api = repo_api_call(repo_name, org_name)
    except Exception:
        print('Cannot process API calls for:', org_name, repo_name, repo_path, repo_id)
        continue  # without a usable repo_api the metrics below cannot be collected
is_fork, is_archived = fork_archive(repo_name, org_name, engine)
# Only gather data from repos that aren't forks or archived
if is_fork == False and is_archived == False:
sustain_prs_by_repo_tableau(repo_id, repo_name, org_name, start_date, end_date, engine)
contributor_risk_tableau(repo_id, repo_name, org_name, start_date, end_date, engine)
response_time_tableau(repo_id, repo_name, org_name, start_date, end_date, engine)
activity_release_tableau(repo_name, org_name, start_date, end_date, repo_api)
# Testing - Delete these lines later
if i > 2:
break
else:
i+=1
|
the-stack_0_5525 | examples = [
"""Josephine softens. "Yeah, okay. I probably got a little too worked up there."
A bell chimes in the house.
"Oh, wow. Is it that late? We should be headed to bed if you wanna be up early enough to dig your car out."
"Yeah, I should probably turn in."
"The night's still young. Why don't we stay up a little longer?"
Donald Trump shows up. You realize you've been in simulated White House this whole time.
""",
"Alex softens. Josephine picks up an apple.",
"She walks in beauty, like the night. It snows that night. The rain, the rain. You are free.",
"You decided to meet him on a pub called Le Bon Temps Roule.",
"The Golden Gate Bridge was painted green by Joe Biden."
]
def test_neuralcoref():
"""
"""
import spacy
import neuralcoref
nlp = spacy.load('en')
neuralcoref.add_to_pipe(nlp)
doc = nlp('My sister has a dog. She loves him. Angela lives in Boston. She is quite happy in that city.')
print(f"doc {doc}")
print(f"coref clusters {doc._.coref_clusters}")
for ent in doc.ents:
print(ent._.coref_cluster)
def test_spacy_ner():
"""
PERSON: People, including fictional.
NORP: Nationalities or religious or political groups.
FAC: Buildings, airports, highways, bridges, etc.
ORG: Companies, agencies, institutions, etc.
GPE: Countries, cities, states.
LOC: Non-GPE locations, mountain ranges, bodies of water.
PRODUCT: Objects, vehicles, foods, etc. (Not services.)
EVENT: Named hurricanes, battles, wars, sports events, etc.
WORK_OF_ART: Titles of books, songs, etc.
LAW: Named documents made into laws.
LANGUAGE: Any named language.
DATE: Absolute or relative dates or periods.
TIME: Times smaller than a day.
PERCENT: Percentage, including ”%“.
MONEY: Monetary values, including unit.
QUANTITY: Measurements, as of weight or distance.
ORDINAL: “first”, “second”, etc.
CARDINAL: Numerals that do not fall under another type.
"""
import spacy
nlp = spacy.load('en_core_web_lg')
text = examples[-1]
doc = nlp(text)
print(doc.text)
# for token in doc:
# print(token.text, token.pos_, token.dep_, token.ent_type_)
# print('entities')
# for entity in doc.ents:
# start, end = entity.start, entity.end
# for token in doc[start:end]:
# print(token.text, token.ent_type_)
# for token in doc:
# print(token.text, token.pos_, token.dep_, token.ent_type_)
print('pos_')
pronouns = []
for token in doc:
print(token, token.pos_)
# test_spacy_ner()
def test_bert_huggingface_ner():
from transformers import AutoTokenizer, AutoModelForTokenClassification
from transformers import pipeline
tokenizer = AutoTokenizer.from_pretrained("dslim/bert-base-NER")
model = AutoModelForTokenClassification.from_pretrained("dslim/bert-base-NER")
nlp = pipeline("ner", model=model, tokenizer=tokenizer)
tokens = nlp.tokenizer.tokenize(examples[-1])
print(type(tokens), type(tokens[0]))
print(tokens)
example = examples[-2]
print(example)
ner_results = nlp(example)
print(ner_results)
same_ent_type = lambda x, y: x.split('-')[-1] == y.split('-')[-1]
entities = []
prev_beg = {}
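    # Merge BIO-tagged wordpiece predictions: a "B-" token starts a new entity and
    # subsequent "I-" tokens are appended to it, stripping the "##" continuation marker.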
for i, entity in enumerate(ner_results):
prev_entity = ner_results[i - 1] if i > 0 else {}
print(entity['word'], entity['entity'])
if entity['entity'].startswith('B'):
prev_beg = entity
entities.append(prev_beg)
elif entity['entity'].startswith('I'):
if entity['word'].startswith('##'):
word = entity['word'][2:]
else:
word = ' ' + entity['word']
prev_beg['word'] += word
else:
raise Exception("How?")
print([e for e in entities])
# test_bert_huggingface_ner()
def test_ner_in_practice():
from analysis import ner_pipeline, nlp, ner
ner(examples[-1])
# test_ner_in_practice()
def test_semantic_parse():
import spacy
import textacy
nlp = spacy.load('en_core_web_lg')
# https://stackoverflow.com/questions/56896753/is-there-a-way-to-get-entire-constituents-using-spacy
text = "Anna was mad at Alex."
doc = nlp(text)
# print(doc.text)
# for token in doc:
# print(token.text, token.tag_, token.pos_, token.dep_, token.ent_type_, [a for a in token.subtree])
triples = [svo for svo in textacy.extract.subject_verb_object_triples(doc)]
def t(tokens, lemma=True):
# convert a list of tokens into text
return " ".join([x.lemma_ if lemma else x.text for x in tokens])
for subject, verb, object in triples:
s1 = subject[0]
print(type(s1))
print(s1.text, s1.lemma_)
print(f'{t(verb)}({t(subject, False)}, {t(object, False)})')
test_semantic_parse()
# from stanza.server import CoreNLPClient
# stanza.download('en')
# # nlp = stanza.Pipeline('en')
# #
# # doc =
# #
# # for sentence in doc.sentences:
# # print(sentence.ents)
# # print(sentence.dependencies)
#
#
# text = \
# """It's been a while since you've been here, but you quickly find your way. Even after all these years, the path is still a little too familiar to be completely trustworthy. The door creaks open, and you slowly creep inside. You have a slight feeling of deja-vu, as if you've been here before."""
#
# # with CoreNLPClient(annotators=["tokenize","ssplit","pos","lemma","depparse","natlog","openie"], be_quiet=False) as client:
# with CoreNLPClient(annotators=["openie"], be_quiet=False) as client:
# ann = client.annotate(text)
# # print(ann)
# for sentence in ann.sentence:
# for triple in sentence.openieTriple:
# print(triple) |
the-stack_0_5527 | from zeit.cms.content.interfaces import ICommonMetadata
from zeit.cms.interfaces import CONFIG_CACHE
from zeit.cms.interfaces import ITypeDeclaration
from zeit.cms.repository.interfaces import IAutomaticallyRenameable
import collections
import grokcore.component as grok
import logging
import requests
import transaction
import zeit.cms.celery
import zeit.cms.checkout.interfaces
import zeit.cms.interfaces
import zope.lifecycleevent
log = logging.getLogger(__name__)
@grok.subscribe(
zeit.cms.interfaces.ICMSContent,
zeit.cms.checkout.interfaces.IAfterCheckinEvent)
def notify_after_checkin(context, event):
if event.publishing:
return
# XXX Work around redis/ZODB race condition, see BUG-796.
for hook in HOOKS:
notify_webhook.apply_async((context.uniqueId, hook.url), countdown=5)
@grok.subscribe(zope.lifecycleevent.IObjectAddedEvent)
def notify_after_add(event):
context = event.object
if not zeit.cms.interfaces.ICMSContent.providedBy(context):
return
if zeit.cms.repository.interfaces.IRepository.providedBy(context):
return
if zeit.cms.workingcopy.interfaces.IWorkingcopy.providedBy(
event.newParent):
return
for hook in HOOKS:
notify_webhook.delay(context.uniqueId, hook.url)
@zeit.cms.celery.task(bind=True, queuename='webhook')
def notify_webhook(self, uniqueId, url):
content = zeit.cms.interfaces.ICMSContent(uniqueId, None)
if content is None:
log.warning('Could not resolve %s, ignoring.', uniqueId)
return
hook = HOOKS.factory.find(url)
if hook is None:
log.warning('Hook configuration for %s has vanished, ignoring.', url)
return
try:
hook(content)
except TechnicalError as e:
raise self.retry(countdown=e.countdown)
# Don't even think about trying to write to DAV cache, to avoid conflicts.
transaction.abort()
class Hook(object):
def __init__(self, url):
self.url = url
self.excludes = []
def __call__(self, content):
if self.should_exclude(content):
return
log.debug('Notifying %s about %s', self.url, content)
try:
self.deliver(content)
except requests.exceptions.HTTPError as err:
if getattr(err.response, 'status_code', 500) < 500:
raise
else:
log.warning('Webhook %s returned error, retrying',
self.url, exc_info=True)
raise TechnicalError()
except requests.exceptions.RequestException:
log.warning('Webhook %s returned error, retrying',
self.url, exc_info=True)
raise TechnicalError()
def deliver(self, content):
r = requests.post(self.url, json=[content.uniqueId], timeout=10)
r.raise_for_status()
def add_exclude(self, key, value):
self.excludes.append((key, value))
def should_exclude(self, content):
renameable = getattr(
IAutomaticallyRenameable(content, None), 'renameable', False)
if renameable:
return True
for exclude in self.excludes:
if self._matches(exclude, content):
log.debug('Skipping %s, matched exclude %s', content, exclude)
return True
return False
def _matches(self, exclude, content):
key, value = exclude
func = getattr(self, '_match_%s' % key)
return func(content, value)
def _match_type(self, content, value):
typ = getattr(
ITypeDeclaration(content, None), 'type_identifier', 'unknown')
return typ == value
def _match_product(self, content, value):
if not ICommonMetadata.providedBy(content):
return False
return content.product and content.product.id == value
class HookSource(zeit.cms.content.sources.SimpleXMLSource):
config_url = 'checkin-webhook-config'
default_filename = 'checkin-webhooks.xml'
@CONFIG_CACHE.cache_on_arguments()
def _values(self):
result = collections.OrderedDict()
tree = self._get_tree()
for node in tree.iterchildren('webhook'):
hook = Hook(node.get('url'))
for exclude in node.xpath('exclude/*'):
hook.add_exclude(exclude.tag, exclude.text)
result[hook.url] = hook
return result
def getValues(self):
return self._values().values()
def find(self, url):
return self._values().get(url)
HOOKS = HookSource()
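# Example checkin-webhooks.xml (sketch only; element nesting inferred from the parser
# above, the URL and the exclude values are purely illustrative):
#   <webhooks>
#     <webhook url="https://hooks.example.invalid/checkin">
#       <exclude>
#         <type>image</type>
#         <product>ZEI</product>
#       </exclude>
#     </webhook>
#   </webhooks>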
class TechnicalError(Exception):
def __init__(self, countdown=60):
self.countdown = countdown
|
the-stack_0_5528 | """ Unit tests for visibility operations
"""
import unittest
import astropy.units as u
import numpy
from astropy.coordinates import SkyCoord
from numpy.testing import assert_allclose
from rascil.data_models.memory_data_models import Skycomponent
from rascil.data_models.polarisation import PolarisationFrame
from rascil.processing_components.imaging import dft_skycomponent_visibility
from rascil.processing_components.simulation import create_named_configuration
from rascil.processing_components import create_flagtable_from_blockvisibility, qa_flagtable, \
create_blockvisibility, create_flagtable_from_rows
class TestFlagTableOperations(unittest.TestCase):
def setUp(self):
self.lowcore = create_named_configuration('LOWBD2-CORE')
self.times = (numpy.pi / 43200.0) * numpy.arange(0.0, 300.0, 30.0)
self.frequency = numpy.linspace(1.0e8, 1.1e8, 3)
self.channel_bandwidth = numpy.array([1e7, 1e7, 1e7])
# Define the component and give it some spectral behaviour
f = numpy.array([100.0, 20.0, -10.0, 1.0])
self.flux = numpy.array([f, 0.8 * f, 0.6 * f])
self.polarisation_frame = PolarisationFrame("linear")
# The phase centre is absolute and the component is specified relative (for now).
# This means that the component should end up at the position phasecentre+compredirection
self.phasecentre = SkyCoord(ra=+180.0 * u.deg, dec=-35.0 * u.deg, frame='icrs', equinox='J2000')
self.compabsdirection = SkyCoord(ra=+181.0 * u.deg, dec=-35.0 * u.deg, frame='icrs', equinox='J2000')
pcof = self.phasecentre.skyoffset_frame()
self.compreldirection = self.compabsdirection.transform_to(pcof)
self.comp = Skycomponent(direction=self.compreldirection, frequency=self.frequency, flux=self.flux)
def test_create_flagtable(self):
bvis = create_blockvisibility(self.lowcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre,
polarisation_frame=self.polarisation_frame,
weight=1.0)
ft = create_flagtable_from_blockvisibility(bvis)
print(ft)
assert len(ft.data) == len(bvis.data)
def test_create_flagtable_from_rows(self):
bvis = create_blockvisibility(self.lowcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth,
polarisation_frame=self.polarisation_frame,
phasecentre=self.phasecentre, weight=1.0)
ft = create_flagtable_from_blockvisibility(bvis)
rows = ft.time > 150.0
selected_ft = create_flagtable_from_rows(ft, rows)
assert len(selected_ft.time) == numpy.sum(numpy.array(rows))
if __name__ == '__main__':
unittest.main()
|
the-stack_0_5530 | #-------------------------------------#
#   Train the model on the dataset
#-------------------------------------#
import os
import numpy as np
import time
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
from utils.dataloader import yolo_dataset_collate, YoloDataset
from nets.yolo_training import YOLOLoss,Generator
from nets.yolo4 import YoloBody
from tensorboardX import SummaryWriter
from tqdm import tqdm
#---------------------------------------------------#
#   Get classes and anchor boxes
#---------------------------------------------------#
def get_classes(classes_path):
'''loads the classes'''
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def get_anchors(anchors_path):
'''loads the anchors from a file'''
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
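    # Sketch of the reshape below: the flat list is grouped into 3 (w, h) anchor pairs per
    # detection scale, and the scale axis is reversed (assumes the file lists anchors smallest first).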
return np.array(anchors).reshape([-1,3,2])[::-1,:,:]
def get_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
def fit_ont_epoch(net,yolo_losses,epoch,epoch_size,epoch_size_val,gen,genval,Epoch,cuda,writer):
global train_tensorboard_step, val_tensorboard_step
total_loss = 0
val_loss = 0
net.train()
with tqdm(total=epoch_size,desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:
for iteration, batch in enumerate(gen):
if iteration >= epoch_size:
break
images, targets = batch[0], batch[1]
with torch.no_grad():
if cuda:
images = Variable(torch.from_numpy(images).type(torch.FloatTensor)).cuda()
targets = [Variable(torch.from_numpy(ann).type(torch.FloatTensor)) for ann in targets]
else:
images = Variable(torch.from_numpy(images).type(torch.FloatTensor))
targets = [Variable(torch.from_numpy(ann).type(torch.FloatTensor)) for ann in targets]
#----------------------#
            #   Zero the gradients
#----------------------#
optimizer.zero_grad()
#----------------------#
            #   Forward pass
#----------------------#
outputs = net(images)
losses = []
num_pos_all = 0
#----------------------#
            #   Compute the loss
#----------------------#
for i in range(3):
loss_item, num_pos = yolo_losses[i](outputs[i], targets)
losses.append(loss_item)
num_pos_all += num_pos
loss = sum(losses) / num_pos_all
total_loss += loss.item()
#----------------------#
            #   Backward pass
#----------------------#
loss.backward()
optimizer.step()
            # Write the loss to tensorboard at every step
writer.add_scalar('Train_loss', loss, train_tensorboard_step)
train_tensorboard_step += 1
pbar.set_postfix(**{'total_loss': total_loss / (iteration + 1),
'lr' : get_lr(optimizer)})
pbar.update(1)
            # Write the loss to tensorboard; the commented line below would instead save once per epoch
# writer.add_scalar('Train_loss', total_loss/(iteration+1), epoch)
net.eval()
print('Start Validation')
with tqdm(total=epoch_size_val, desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:
for iteration, batch in enumerate(genval):
if iteration >= epoch_size_val:
break
images_val, targets_val = batch[0], batch[1]
with torch.no_grad():
if cuda:
images_val = Variable(torch.from_numpy(images_val).type(torch.FloatTensor)).cuda()
targets_val = [Variable(torch.from_numpy(ann).type(torch.FloatTensor)) for ann in targets_val]
else:
images_val = Variable(torch.from_numpy(images_val).type(torch.FloatTensor))
targets_val = [Variable(torch.from_numpy(ann).type(torch.FloatTensor)) for ann in targets_val]
optimizer.zero_grad()
outputs = net(images_val)
losses = []
num_pos_all = 0
for i in range(3):
loss_item, num_pos = yolo_losses[i](outputs[i], targets_val)
losses.append(loss_item)
num_pos_all += num_pos
loss = sum(losses) / num_pos_all
val_loss += loss.item()
                # Write the loss to tensorboard; the commented lines below would write at every step
# writer.add_scalar('Val_loss', loss, val_tensorboard_step)
# val_tensorboard_step += 1
pbar.set_postfix(**{'total_loss': val_loss / (iteration + 1)})
pbar.update(1)
    # Write the validation loss to tensorboard once per epoch
writer.add_scalar('Val_loss',val_loss / (epoch_size_val+1), epoch)
print('Finish Validation')
print('Epoch:'+ str(epoch+1) + '/' + str(Epoch))
print('Total Loss: %.4f || Val Loss: %.4f ' % (total_loss/(epoch_size+1),val_loss/(epoch_size_val+1)))
print('Saving state, iter:', str(epoch+1))
torch.save(model.state_dict(), 'logs/Epoch%d-Total_Loss%.4f-Val_Loss%.4f.pth'%((epoch+1),total_loss/(epoch_size+1),val_loss/(epoch_size_val+1)))
if __name__ == "__main__":
#-------------------------------#
    #   Whether to use CUDA
    #   Set to False if no GPU is available
#-------------------------------#
Cuda = False
#-------------------------------#
    #   Whether to use the DataLoader
#-------------------------------#
Use_Data_Loader = True
#------------------------------------------------------#
    #   Whether to normalize the loss, which changes its magnitude
    #   Decides whether the final loss is divided by batch_size or by the number of positive samples
#------------------------------------------------------#
normalize = False
#-------------------------------#
    #   Input shape
    #   Use 416x416 if GPU memory is limited
    #   Use 608x608 if GPU memory is plentiful
#-------------------------------#
input_shape = (416,416)
#----------------------------------------------------#
    #   Paths to the classes and anchors files, very important
    #   Be sure to modify classes_path before training so it matches your own dataset
#----------------------------------------------------#
anchors_path = 'model_data/yolo_anchors.txt'
classes_path = 'model_data/voc_classes.txt'
#----------------------------------------------------#
    #   Load the classes and anchors
#----------------------------------------------------#
class_names = get_classes(classes_path)
anchors = get_anchors(anchors_path)
num_classes = len(class_names)
#------------------------------------------------------#
    #   YOLOv4 training tricks
    #   mosaic: mosaic data augmentation, True or False
    #   In practice mosaic augmentation is not very stable, so it defaults to False
    #   Cosine_scheduler: cosine annealing learning rate, True or False
    #   label_smoothing: label smoothing, usually below 0.01, e.g. 0.01 or 0.005
#------------------------------------------------------#
mosaic = False
Cosine_lr = False
smoooth_label = 0
#------------------------------------------------------#
    #   Create the YOLO model
    #   Be sure to modify classes_path and the corresponding txt file before training
#------------------------------------------------------#
model = YoloBody(len(anchors[0]), num_classes)
#------------------------------------------------------#
    #   See the README for the weights file (downloaded from Baidu Netdisk)
#------------------------------------------------------#
model_path = "model_data/Epoch100-Total_Loss6.4410-Val_Loss8.7225.pth"
print('Loading weights into state dict...')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model_dict = model.state_dict()
pretrained_dict = torch.load(model_path, map_location=device)
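    # Keep only pretrained weights whose tensor shapes match the current model, so layers
    # with a different shape (e.g. a head for a different number of classes) keep their init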
pretrained_dict = {k: v for k, v in pretrained_dict.items() if np.shape(model_dict[k]) == np.shape(v)}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
print('Finished!')
net = model.train()
if Cuda:
net = torch.nn.DataParallel(model)
cudnn.benchmark = True
net = net.cuda()
    # Build the loss functions
yolo_losses = []
for i in range(3):
yolo_losses.append(YOLOLoss(np.reshape(anchors,[-1,2]),num_classes, \
(input_shape[1], input_shape[0]), smoooth_label, Cuda, normalize))
#----------------------------------------------------#
    #   Get image paths and labels
#----------------------------------------------------#
annotation_path = '2007_train.txt'
#----------------------------------------------------------------------#
    #   The train/validation split is done here in train.py
    #   It is normal for 2007_test.txt and 2007_val.txt to be empty; they are not used during training.
    #   With the current split, the validation to training ratio is 1:9
#----------------------------------------------------------------------#
val_split = 0.1
with open(annotation_path) as f:
lines = f.readlines()
np.random.seed(10101)
np.random.shuffle(lines)
np.random.seed(None)
num_val = int(len(lines)*val_split)
num_train = len(lines) - num_val
writer = SummaryWriter(log_dir='logs',flush_secs=60)
if Cuda:
graph_inputs = torch.from_numpy(np.random.rand(1,3,input_shape[0],input_shape[1])).type(torch.FloatTensor).cuda()
else:
graph_inputs = torch.from_numpy(np.random.rand(1,3,input_shape[0],input_shape[1])).type(torch.FloatTensor)
writer.add_graph(model, (graph_inputs,))
#------------------------------------------------------#
    #   The backbone features are generic, so freezing the backbone speeds up training
    #   and also prevents the weights from being damaged early in training.
    #   Init_Epoch is the starting epoch
    #   Freeze_Epoch is the number of epochs trained with the backbone frozen
    #   Epoch is the total number of training epochs
    #   If you hit OOM or run out of GPU memory, reduce Batch_size
#------------------------------------------------------#
train_tensorboard_step = 1
val_tensorboard_step = 1
if True:
lr = 1e-3
Batch_size = 4
Init_Epoch = 0
Freeze_Epoch = 50
#----------------------------------------------------------------------------#
        #   In my tests I found that the optimizer's weight_decay was counterproductive,
        #   so I removed it; feel free to turn it back on, typically weight_decay=5e-4
#----------------------------------------------------------------------------#
optimizer = optim.Adam(net.parameters(),lr)
if Cosine_lr:
lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=5, eta_min=1e-5)
else:
lr_scheduler = optim.lr_scheduler.StepLR(optimizer,step_size=1,gamma=0.92)
if Use_Data_Loader:
train_dataset = YoloDataset(lines[:num_train], (input_shape[0], input_shape[1]), mosaic=mosaic, is_train=True)
val_dataset = YoloDataset(lines[num_train:], (input_shape[0], input_shape[1]), mosaic=False, is_train=False)
gen = DataLoader(train_dataset, shuffle=True, batch_size=Batch_size, num_workers=4, pin_memory=True,
drop_last=True, collate_fn=yolo_dataset_collate)
gen_val = DataLoader(val_dataset, shuffle=True, batch_size=Batch_size, num_workers=4,pin_memory=True,
drop_last=True, collate_fn=yolo_dataset_collate)
else:
gen = Generator(Batch_size, lines[:num_train],
(input_shape[0], input_shape[1])).generate(train=True, mosaic = mosaic)
gen_val = Generator(Batch_size, lines[num_train:],
(input_shape[0], input_shape[1])).generate(train=False, mosaic = False)
epoch_size = max(1, num_train//Batch_size)
epoch_size_val = num_val//Batch_size
#------------------------------------#
        #   Freeze part of the network for training
#------------------------------------#
for param in model.backbone.parameters():
param.requires_grad = False
for epoch in range(Init_Epoch,Freeze_Epoch):
fit_ont_epoch(net,yolo_losses,epoch,epoch_size,epoch_size_val,gen,gen_val,Freeze_Epoch,Cuda,writer)
lr_scheduler.step()
if True:
lr = 1e-4
Batch_size = 2
Freeze_Epoch = 50
Unfreeze_Epoch = 100
#----------------------------------------------------------------------------#
        #   In my tests I found that the optimizer's weight_decay was counterproductive,
        #   so I removed it; feel free to turn it back on, typically weight_decay=5e-4
#----------------------------------------------------------------------------#
optimizer = optim.Adam(net.parameters(),lr)
if Cosine_lr:
lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=5, eta_min=1e-5)
else:
lr_scheduler = optim.lr_scheduler.StepLR(optimizer,step_size=1,gamma=0.92)
if Use_Data_Loader:
train_dataset = YoloDataset(lines[:num_train], (input_shape[0], input_shape[1]), mosaic=mosaic, is_train=True)
val_dataset = YoloDataset(lines[num_train:], (input_shape[0], input_shape[1]), mosaic=False, is_train=False)
gen = DataLoader(train_dataset, shuffle=True, batch_size=Batch_size, num_workers=4, pin_memory=True,
drop_last=True, collate_fn=yolo_dataset_collate)
gen_val = DataLoader(val_dataset, shuffle=True, batch_size=Batch_size, num_workers=4,pin_memory=True,
drop_last=True, collate_fn=yolo_dataset_collate)
else:
gen = Generator(Batch_size, lines[:num_train],
(input_shape[0], input_shape[1])).generate(train=True, mosaic = mosaic)
gen_val = Generator(Batch_size, lines[num_train:],
(input_shape[0], input_shape[1])).generate(train=False, mosaic = False)
epoch_size = max(1, num_train//Batch_size)
epoch_size_val = num_val//Batch_size
#------------------------------------#
        #   Train after unfreezing the backbone
#------------------------------------#
for param in model.backbone.parameters():
param.requires_grad = True
for epoch in range(Freeze_Epoch,Unfreeze_Epoch):
fit_ont_epoch(net,yolo_losses,epoch,epoch_size,epoch_size_val,gen,gen_val,Unfreeze_Epoch,Cuda,writer)
lr_scheduler.step()
|
the-stack_0_5532 | import boto3
import os
import json
import datetime
from time import gmtime, strftime
from boto3.session import Session
region = boto3.session.Session().region_name
sagemaker = boto3.client('sagemaker')
code_pipeline = boto3.client('codepipeline')
def lambda_handler(event, context):
try:
print(event)
train_start = strftime("%Y-%m-%d-%H-%M-%S", gmtime())
train_start_calc = datetime.datetime.now()
codepipeline_job = event['CodePipeline.job']['id']
print('[INFO]CODEPIPELINE_JOB:', codepipeline_job)
print('[INFO]TRAIN_START:', train_start)
userParamText = event['CodePipeline.job']['data']['actionConfiguration']['configuration']['UserParameters']
user_param = json.loads(userParamText)
job_name = 'mlops-bia-xgboost-' + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
print('[INFO]TRAINING_JOB_NAME:', job_name)
event['job_name'] = job_name
event['stage'] = 'Training'
event['status'] = 'InProgress'
event['message'] = 'training job "{} started."'.format(job_name)
create_training_job(user_param, job_name)
write_job_info_s3(event)
put_job_success(event)
except Exception as e:
print(e)
print('[ERROR] Unable to create training job.')
event['message'] = str(e)
put_job_failure(event)
return event
def create_training_job(user_param, job_name):
try:
print("[INFO]CODEPIPELINE_USER_PARAMETERS:", user_param)
# Environment variable containing S3 bucket for storing the model artifact
model_artifact_bucket = os.environ['ModelArtifactBucket']
print("[INFO]MODEL_ARTIFACT_BUCKET:", model_artifact_bucket)
# Environment variable containing S3 bucket containing training data
data_bucket = os.environ['S3DataBucket']
print("[INFO]TRAINING_DATA_BUCKET:", data_bucket)
# Role to pass to SageMaker training job that has access to training data in S3, etc
SageMakerRole = os.environ['SageMakerExecutionRole']
#Get ECR information for BIA
algo_version = user_param['Algorithm']
ecr_path = os.environ['AlgoECR']
container_path = ecr_path + '/' + algo_version
print('[INFO]Container Path', container_path)
train_instance_type = user_param['traincompute']
train_volume_size = user_param['traininstancevolumesize']
train_instance_count = user_param['traininstancecount']
maxdepth_in = user_param['MaxDepth']
eta_in = user_param['eta']
gamma_in = user_param['gamma']
min_child_weight_in = user_param['MinChildWeight']
subsample_in = user_param['SubSample']
silent_in = user_param['Silent']
objective_in = user_param['Objective']
num_round_in = user_param['NumRound']
print('[INFO]TRAIN_INSTANCE_TYPE:', train_instance_type)
print('[INFO]TRAIN_VOLUME_SIZE:', train_volume_size)
print('[INFO]TRAIN_INSTANCE_COUNT:', train_instance_count)
create_training_params = \
{
"AlgorithmSpecification": {
"TrainingImage": container_path,
"TrainingInputMode": "File"
},
"RoleArn": SageMakerRole,
"OutputDataConfig": {
"S3OutputPath": "s3://{}/{}/output".format(model_artifact_bucket, job_name)
},
"ResourceConfig": {
"InstanceCount": train_instance_count,
"InstanceType": train_instance_type,
"VolumeSizeInGB": train_volume_size
},
"TrainingJobName": job_name,
"HyperParameters": {
"max_depth": maxdepth_in,
"eta": eta_in,
"gamma": gamma_in,
"min_child_weight": min_child_weight_in,
"objective": objective_in,
"num_round": num_round_in
},
"StoppingCondition": {
"MaxRuntimeInSeconds": 3600
},
"InputDataConfig": [
{
"ChannelName": "train",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": "s3://{}/train".format(data_bucket),
"S3DataDistributionType": "FullyReplicated"
}
},
"ContentType": "csv",
"CompressionType": "None"
}
            ],
        }
response = sagemaker.create_training_job(**create_training_params)
except Exception as e:
print(str(e))
raise(e)
def write_job_info_s3(event):
print(event)
objectKey = event['CodePipeline.job']['data']['outputArtifacts'][0]['location']['s3Location']['objectKey']
bucketname = event['CodePipeline.job']['data']['outputArtifacts'][0]['location']['s3Location']['bucketName']
artifactCredentials = event['CodePipeline.job']['data']['artifactCredentials']
artifactName = event['CodePipeline.job']['data']['outputArtifacts'][0]['name']
    # KMS key ID used for S3 server-side encryption (SSE-KMS)
S3SSEKey = os.environ['SSEKMSKeyIdIn']
json_data = json.dumps(event)
print(json_data)
session = Session(aws_access_key_id=artifactCredentials['accessKeyId'],
aws_secret_access_key=artifactCredentials['secretAccessKey'],
aws_session_token=artifactCredentials['sessionToken'])
s3 = session.resource("s3")
object = s3.Object(bucketname, objectKey)
print(object)
object.put(Body=json_data, ServerSideEncryption='aws:kms', SSEKMSKeyId=S3SSEKey)
print('[SUCCESS]Job Information Written to S3')
def put_job_success(event):
print('[SUCCESS]Training Job started - kicking off next stage in pipeline...')
print(event['message'])
code_pipeline.put_job_success_result(jobId=event['CodePipeline.job']['id'])
def put_job_failure(event):
print('[FAILURE]Putting job failure')
print(event['message'])
code_pipeline.put_job_failure_result(jobId=event['CodePipeline.job']['id'], failureDetails={'message': event['message'], 'type': 'JobFailed'})
return event
|
the-stack_0_5533 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@version: 1.0
@author: Evan
@time: 2019/12/4 15:16
"""
import tornado.web
import tornado.ioloop
class CookieHandler(tornado.web.RequestHandler):
def get(self):
"""
cookie 在 Response Headers
Set-Cookie: hello="2|1:0|10:1575445821|5:hello|8:RXZhbg==|c6eb04740d9320d33053b28cb0ea8a799f17b26950869fe22309671ccec57513"; expires=Fri, 03 Jan 2020 07:50:21 GMT; Path=/
:return:
"""
# self.set_cookie('username', 'admin', expires_days=3)
self.set_secure_cookie('hello', 'Evan')
class GetCookieHandler(tornado.web.RequestHandler):
def get(self):
"""
        The cookie is sent back in the Request Headers:
Cookie: hello="2|1:0|10:1575445307|5:hello|8:RXZhbg==|b062aa734378e7a3177e8626d66acee4b52b4dc4df1293c20eb926d25824607e"
"""
# username = self.get_cookie('username')
username = self.get_secure_cookie('hello')
self.write(username)
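# cookie_secret is required so set_secure_cookie()/get_secure_cookie() can sign and verify
# cookie values; use a long random secret in production instead of this placeholder.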
settings = {
'cookie_secret': 'asd123fgh'
}
app = tornado.web.Application([
(r'^/$', CookieHandler),
(r'^/getCookie/$', GetCookieHandler)
], **settings)
app.listen(8000)
tornado.ioloop.IOLoop.instance().start()
|
the-stack_0_5537 | from typing import Callable, Mapping
import pandas as pd
from starfish.core.intensity_table.intensity_table import IntensityTable
from starfish.core.types import (
Axes,
Features,
SpotAttributes,
SpotFindingResults,
TraceBuildingStrategies
)
from .util import _build_intensity_table, _match_spots, _merge_spots_by_round
def build_spot_traces_exact_match(spot_results: SpotFindingResults, **kwargs) -> IntensityTable:
"""
Combines spots found in matching x/y positions across rounds and channels of
an ImageStack into traces represented as an IntensityTable.
Parameters
-----------
spot_results: SpotFindingResults
Spots found across rounds/channels of an ImageStack
"""
    # create IntensityTable with the same x/y/z info across all r/ch
spot_attributes = list(spot_results.values())[0].spot_attrs
intensity_table = IntensityTable.zeros(
spot_attributes=spot_attributes,
round_labels=spot_results.round_labels,
ch_labels=spot_results.ch_labels,
)
for r, c in spot_results.keys():
value = spot_results[{Axes.ROUND: r, Axes.CH: c}].spot_attrs.data[Features.INTENSITY]
# if no exact match set value to 0
value = 0 if value.empty else value
intensity_table.loc[dict(c=c, r=r)] = value
return intensity_table
def build_traces_sequential(spot_results: SpotFindingResults, **kwargs) -> IntensityTable:
"""
Build spot traces without merging across channels and imaging rounds. Used for sequential
methods like smFIsh.
Parameters
----------
spot_results: SpotFindingResults
Spots found across rounds/channels of an ImageStack
Returns
-------
IntensityTable :
concatenated input SpotAttributes, converted to an IntensityTable object
"""
all_spots = pd.concat([sa.spot_attrs.data for sa in spot_results.values()],
ignore_index=True, sort=True)
# reassign spot_ids to index number so they are unique
all_spots['spot_id'] = all_spots.index
intensity_table = IntensityTable.zeros(
spot_attributes=SpotAttributes(all_spots),
ch_labels=spot_results.ch_labels,
round_labels=spot_results.round_labels,
)
i = 0
for (r, c), spot_attrs in spot_results.items():
for _, row in spot_attrs.spot_attrs.data.iterrows():
selector = dict(features=i, c=c, r=r)
intensity_table.loc[selector] = row[Features.INTENSITY]
i += 1
return intensity_table
def build_traces_nearest_neighbors(spot_results: SpotFindingResults, anchor_round: int=0,
search_radius: int=3):
"""
Combine spots found across round and channels of an ImageStack using a nearest neighbors
strategy
Parameters
-----------
spot_results : SpotFindingResults
Spots found across rounds/channels of an ImageStack
anchor_round : int
The imaging round against which other rounds will be checked for spots in the same
approximate pixel location.
search_radius : int
Number of pixels over which to search for spots in other rounds and channels.
"""
per_round_spot_results = _merge_spots_by_round(spot_results)
distances, indices = _match_spots(
per_round_spot_results,
anchor_round=anchor_round
)
intensity_table = _build_intensity_table(
per_round_spot_results, distances, indices,
rounds=spot_results.round_labels,
channels=spot_results.ch_labels,
search_radius=search_radius,
anchor_round=anchor_round
)
return intensity_table
TRACE_BUILDERS: Mapping[TraceBuildingStrategies, Callable] = {
TraceBuildingStrategies.EXACT_MATCH: build_spot_traces_exact_match,
TraceBuildingStrategies.NEAREST_NEIGHBOR: build_traces_nearest_neighbors,
TraceBuildingStrategies.SEQUENTIAL: build_traces_sequential,
}
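# Usage sketch (assumes `spot_results` is a populated SpotFindingResults):
#   builder = TRACE_BUILDERS[TraceBuildingStrategies.NEAREST_NEIGHBOR]
#   intensity_table = builder(spot_results, anchor_round=0, search_radius=3)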
|
the-stack_0_5538 | """Useful utilities for interacting with Evergreen."""
from datetime import datetime, date
from typing import Any, Iterable, Optional
from dateutil.parser import parse
EVG_DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"
EVG_SHORT_DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
EVG_DATE_FORMAT = "%Y-%m-%d"
EVG_DATE_INPUT_FORMAT = '"%Y-%m-%dT%H:%M:%S.000Z"'
def parse_evergreen_datetime(evg_date: Optional[Any]) -> Optional[datetime]:
"""
Convert an evergreen datetime string into a datetime object.
:param evg_date: String to convert to a datetime.
:return: datetime version of date.
"""
if not evg_date:
return None
if type(evg_date) in [int, float]:
return datetime.fromtimestamp(evg_date)
return parse(evg_date)
def parse_evergreen_short_datetime(evg_date: Optional[str]) -> Optional[datetime]:
"""
Convert an evergreen datetime string into a datetime object.
:param evg_date: String to convert to a datetime.
:return: datetime version of date.
"""
if not evg_date:
return None
return datetime.strptime(evg_date, EVG_SHORT_DATETIME_FORMAT)
def format_evergreen_datetime(when: datetime) -> str:
"""
Convert a datetime object into an evergreen consumable string.
:param when: datetime to convert.
:return: string evergreen can understand.
"""
return when.strftime(EVG_DATE_INPUT_FORMAT)
def evergreen_input_to_output(input_date: str) -> str:
"""
Convert a date from evergreen to a date to send back to evergreen.
:param input_date: date to convert.
:return: date to send to evergreen.
"""
intermediate = parse_evergreen_datetime(input_date)
if intermediate:
return format_evergreen_datetime(intermediate)
return input_date
def parse_evergreen_date(evg_date: Optional[str]) -> Optional[date]:
"""
Convert an evergreen date string into a date object.
:param evg_date: String to convert to a date.
:return: date version of date.
"""
if not evg_date:
return None
return datetime.strptime(evg_date, EVG_DATE_FORMAT).date()
def iterate_by_time_window(
iterator: Iterable, before: datetime, after: datetime, time_attr: str
) -> Iterable:
"""
Iterate over a window of time.
For a given iterator, generate the items that are within the specified time window.
Note: Since most evergreen iterators start with the most recent items and then look backwards
    in time, `before` and `after` refer to the start and end of when items will be seen (i.e. `before`
    should be later in time than `after`, since we will see the most recent items first).
:param iterator: Iterator to window.
:param before: Return items earlier than this timestamp.
:param after: Return items later than this timestamp.
:param time_attr: Attribute of items in the iterator containing timestamp to check.
:return: Iterator for items in the given time window.
"""
for item in iterator:
item_time = getattr(item, time_attr)
if item_time > before:
continue
if item_time < after:
break
yield item
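# Usage sketch (assumes `versions` is an iterator whose items expose a `create_time` datetime):
#   now = datetime.utcnow()
#   last_week = iterate_by_time_window(versions, before=now, after=now - timedelta(days=7),
#                                      time_attr="create_time")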
|
the-stack_0_5539 | import argparse
from datetime import datetime, timedelta
import praw
from prettytable import PrettyTable
from psaw import PushshiftAPI
import config_terminal as cfg
from helper_funcs import check_positive
# -------------------------------------------------------------------------------------------------------------------
def due_diligence(l_args, s_ticker):
parser = argparse.ArgumentParser(prog='red',
description="""Print top stock's due diligence from other users. [Source: Reddit] """)
parser.add_argument('-l', "--limit", action="store", dest="n_limit", type=check_positive, default=5,
help='limit of posts to retrieve.')
parser.add_argument('-d', "--days", action="store", dest="n_days", type=check_positive, default=3,
help="number of prior days to look for.")
parser.add_argument('-a', "--all", action="store_true", dest="b_all", default=False,
help="""search through all flairs (apart from Yolo and Meme), otherwise we focus on
specific flairs: DD, technical analysis, Catalyst, News, Advice, Chart """)
(ns_parser, l_unknown_args) = parser.parse_known_args(l_args)
if l_unknown_args:
print(f"The following args couldn't be interpreted: {l_unknown_args}\n")
return
praw_api = praw.Reddit(client_id=cfg.API_REDDIT_CLIENT_ID,
client_secret=cfg.API_REDDIT_CLIENT_SECRET,
username=cfg.API_REDDIT_USERNAME,
user_agent=cfg.API_REDDIT_USER_AGENT,
password=cfg.API_REDDIT_PASSWORD)
psaw_api = PushshiftAPI()
n_ts_after = int((datetime.today() - timedelta(days=ns_parser.n_days)).timestamp())
l_flair_text = ['DD', 'technical analysis', 'Catalyst', 'News', 'Advice', 'Chart']
l_sub_reddits = ['pennystocks', 'RobinHoodPennyStocks', 'Daytrading', 'StockMarket', 'stocks', 'investing', 'wallstreetbets']
submissions = psaw_api.search_submissions(after=int(n_ts_after),
subreddit=l_sub_reddits,
q=s_ticker,
filter=['id'])
d_submission = {}
n_flair_posts_found = 0
while True:
submission = next(submissions, None)
if submission:
# Get more information about post using PRAW api
submission = praw_api.submission(id=submission.id)
# Ensure that the post hasn't been removed in the meanwhile
if not submission.removed_by_category:
                # Either filter out only the Yolo and Meme flairs, or focus on specific flairs, based on the b_all flag
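                # The 2-tuple below is indexed by ns_parser.b_all: index 0 keeps only the flairs in
                # l_flair_text, index 1 (--all) keeps every flair except 'Yolo' and 'Meme'.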
if (submission.link_flair_text in l_flair_text,
submission.link_flair_text not in ['Yolo', 'Meme'])[ns_parser.b_all]:
# Refactor data
s_datetime = datetime.utcfromtimestamp(submission.created_utc).strftime("%d/%m/%Y %H:%M:%S")
s_link = f"https://www.reddit.com{submission.permalink}"
s_all_awards = ""
for award in submission.all_awardings:
s_all_awards += f"{award['count']} {award['name']}\n"
s_all_awards = s_all_awards[:-2]
# Create dictionary with data to construct dataframe allows to save data
d_submission[submission.id] = {
'created_utc': s_datetime,
'subreddit': submission.subreddit,
'link_flair_text': submission.link_flair_text,
'title':submission.title,
'score': submission.score,
'link': s_link,
'num_comments': submission.num_comments,
'upvote_ratio': submission.upvote_ratio,
'awards': s_all_awards
}
# Print post data collected so far
print(f"{s_datetime} - {submission.title}")
print(f"{s_link}")
t_post = PrettyTable(['Subreddit', 'Flair', 'Score', '# Comments', 'Upvote %', "Awards"])
t_post.add_row([submission.subreddit, submission.link_flair_text, submission.score,
submission.num_comments,f"{round(100*submission.upvote_ratio)}%", s_all_awards])
print(t_post)
print("\n")
# If needed, submission.comments could give us the top comments
# Increment count of valid posts found
n_flair_posts_found += 1
# Check if number of wanted posts found has been reached
if n_flair_posts_found > ns_parser.n_limit-1:
break
# Check if search_submissions didn't get anymore posts
else:
break
print(f"{('No more posts with specified requirements found.', '')[n_flair_posts_found > ns_parser.n_limit-1]}")
# Create df with found data. Useful for saving all info in excel file.
#df_submissions = pd.DataFrame.from_dict(d_submission, orient='index', columns=list(d_submission[next(iter(d_submission.keys()))].keys()))
#df_submissions.sort_values(by=['created_utc'], inplace=True, ascending=True)
|