blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses sequencelengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors sequencelengths 1 1 | author_id stringlengths 1 132
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
08260b91313501752e2e3f0567b2f9abe58b6278 | 9d1ef7993bf0df9967b1e7a79d5913fbc3e3a7e1 | /tests/teststatistics.py | a34c3440109210c279d13e5d7aa17a063019754c | [
"BSD-2-Clause"
] | permissive | mitmedialab/WhatWeWatch-Analysis | f6f4fbd8fba4ef6a58f4961c7f3d9b9519dae3a4 | cc01dee4e77155c8aec7638e4275172053db3247 | refs/heads/master | 2021-05-28T05:40:36.678808 | 2014-11-03T01:22:26 | 2014-11-03T01:22:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 759 | py | import inspect
import os
import sys
import unittest
import numpy as np
import numpy.testing as nptest
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
import statistics
import stubs
import util
class StatisticsTest(unittest.TestCase):
def setUp(self):
self.video_data = util.VideoData(stubs.raw)
self.spread_span = statistics.SpreadSpan(self.video_data)
def test_span(self):
self.assertEqual(self.spread_span.span_by_vid, stubs.span_by_vid)
def test_spread(self):
self.assertEqual(self.spread_span.spread_by_vid, stubs.spread_by_vid)
if __name__ == '__main__':
unittest.main()
| ["[email protected]"] | |
c203dcbcd0e62de00ecb30750a4efed7d09106dc | fec70d89f651a26bbb4d706e264a5cd28ce12c06 | /convert2jpg.py | f160b1e136686d2735fd0f841528870313264919 | [] | no_license | UpCoder/Pixel_Link | ab54e5505175f6e25cfb94688869953a5236239a | c807d316c0b36280347045c25c494ea6c63c5f11 | refs/heads/master | 2020-04-01T22:56:30.560113 | 2018-10-27T09:25:01 | 2018-10-27T09:25:01 | 153,733,782 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,558 | py | # -*- coding=utf-8 -*-
import os
from glob import glob
from datasets.medicalImage import read_mhd_image, fill_region
from config import type2pixel, pixel2type
import numpy as np
from xml.dom.minidom import Document
import cv2
image_suffix_name = 'jpg'
def LiverLesionDetection_Iterator(image_dir, execute_func, *parameters):
'''
Framework for iterating over the MICCAI2018 directory.
:param execute_func:
:return:
'''
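# Expected directory layout, inferred from the path handling below:
#   image_dir/{train,val,test}/<case_name>/Image_<PHASE>.mhd, Mask_<PHASE>.mhd, tumor_types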
for sub_name in ['train', 'val', 'test']:
names = os.listdir(os.path.join(image_dir, sub_name))
for name in names:
cur_slice_dir = os.path.join(image_dir, sub_name, name)
execute_func(cur_slice_dir, *parameters)
def extract_bboxs_mask_from_mask(mask_image, tumor_types):
mask_image = mask_image[1, :, :]
w, h = np.shape(mask_image)
if w != 512 or h != 512:
print(np.shape(mask_image))
assert False
with open(tumor_types, 'r') as f:
lines = f.readlines()
idx2names = {}
for line in lines:
line = line[:-1]
idx, name = line.split(' ')
idx2names[idx] = name
maximum = np.max(mask_image)
min_xs = []
min_ys = []
max_xs = []
max_ys = []
names = []
res_mask = np.zeros_like(mask_image)
for i in range(1, maximum + 1):
cur_mask_image = np.asarray(mask_image == i, np.uint8)
if np.sum(cur_mask_image) == 0:
continue
filled_mask = fill_region(cur_mask_image)
filled_mask[filled_mask == 1] = type2pixel[idx2names[str(i)]][1]
res_mask[filled_mask != 0] = filled_mask[filled_mask != 0]
xs, ys = np.where(cur_mask_image == 1)
min_x = np.min(xs)
min_y = np.min(ys)
max_x = np.max(xs)
max_y = np.max(ys)
min_xs.append(min_x)
min_ys.append(min_y)
max_xs.append(max_x)
max_ys.append(max_y)
names.append(idx2names[str(i)])
return min_xs, min_ys, max_xs, max_ys, names, res_mask
def extract_bboxs_from_mask(mask_image, tumor_types):
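# Works on the middle slice of the 3-slice mask volume: each labelled lesion i is
# mapped to its tumor name via the tumor_types file, and the function returns the
# per-lesion bounding-box corners (min_xs, min_ys, max_xs, max_ys) plus the names.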
mask_image = mask_image[1, :, :]
w, h = np.shape(mask_image)
if w != 512 or h != 512:
print(np.shape(mask_image))
assert False
with open(tumor_types, 'r') as f:
lines = f.readlines()
idx2names = {}
for line in lines:
line = line[:-1]
idx, name = line.split(' ')
idx2names[idx] = name
maximum = np.max(mask_image)
min_xs = []
min_ys = []
max_xs = []
max_ys = []
names = []
for i in range(1, maximum + 1):
cur_mask_image = np.asarray(mask_image == i, np.uint8)
if np.sum(cur_mask_image) == 0:
continue
xs, ys = np.where(cur_mask_image == 1)
min_x = np.min(xs)
min_y = np.min(ys)
max_x = np.max(xs)
max_y = np.max(ys)
min_xs.append(min_x)
min_ys.append(min_y)
max_xs.append(max_x)
max_ys.append(max_y)
names.append(idx2names[str(i)])
return min_xs, min_ys, max_xs, max_ys, names
def dicom2jpg_singlephase(slice_dir, save_dir, phase_name='PV'):
'''
Precondition: the DICOM data has already been converted to MHD format and the slices have been extracted; each MHD file contains only three slices.
Converts the MHD of a single phase to JPG format.
:param slice_dir:
:param save_dir:
:param phase_name:
:return:
'''
mhd_image_path = os.path.join(slice_dir, 'Image_' + phase_name + '.mhd')
mhd_mask_path = os.path.join(slice_dir, 'Mask_' + phase_name + '.mhd')
mhd_image = read_mhd_image(mhd_image_path)
mask_image = read_mhd_image(mhd_mask_path)
mhd_image = np.asarray(np.squeeze(mhd_image), np.float32)
mhd_image = np.transpose(mhd_image, axes=[1, 2, 0])
# mhd_image = np.expand_dims(mhd_image, axis=2)
# mhd_image = np.concatenate([mhd_image, mhd_image, mhd_image], axis=2)
mask_image = np.asarray(np.squeeze(mask_image), np.uint8)
max_v = 300.
min_v = -350.
mhd_image[mhd_image > max_v] = max_v
mhd_image[mhd_image < min_v] = min_v
print(np.mean(mhd_image, dtype=np.float32))
mhd_image -= np.mean(mhd_image)
min_v = np.min(mhd_image)
max_v = np.max(mhd_image)
interv = max_v - min_v
mhd_image = (mhd_image - min_v) / interv
file_name = os.path.basename(slice_dir)
dataset_name = os.path.basename(os.path.dirname(slice_dir))
save_path = os.path.join(save_dir, phase_name, dataset_name, file_name+'.jpg')
if not os.path.exists(os.path.dirname(save_path)):
os.makedirs(os.path.dirname(save_path))
print('the shape of mhd_image is ', np.shape(mhd_image), np.min(mhd_image), np.max(mhd_image))
print('the shape of mask_image is ', np.shape(mask_image))
_, _, depth_image= np.shape(mhd_image)
if depth_image == 2:
mhd_image = np.concatenate(
[mhd_image, np.expand_dims(mhd_image[:, :, np.argmax(np.sum(np.sum(mask_image, axis=1), axis=1))], axis=2)],
axis=2
)
print('Error')
cv2.imwrite(save_path, mhd_image * 255)
xml_save_dir = os.path.join(save_dir, phase_name, dataset_name+'_xml')
if not os.path.exists(xml_save_dir):
os.makedirs(xml_save_dir)
evulate_gt_dir = os.path.join(save_dir, phase_name, dataset_name+'_gt')
if not os.path.exists(evulate_gt_dir):
os.makedirs(evulate_gt_dir)
xml_save_path = os.path.join(xml_save_dir, file_name + '.xml')
gt_save_path = os.path.join(evulate_gt_dir, file_name + '.txt') # for evulate
doc = Document()
root_node = doc.createElement('annotation')
doc.appendChild(root_node)
folder_name = os.path.basename(save_dir) + '/' + phase_name
folder_node = doc.createElement('folder')
root_node.appendChild(folder_node)
folder_txt_node = doc.createTextNode(folder_name)
folder_node.appendChild(folder_txt_node)
file_name = file_name + '.jpg'
filename_node = doc.createElement('filename')
root_node.appendChild(filename_node)
filename_txt_node = doc.createTextNode(file_name)
filename_node.appendChild(filename_txt_node)
shape = list(np.shape(mhd_image))
size_node = doc.createElement('size')
root_node.appendChild(size_node)
width_node = doc.createElement('width')
width_node.appendChild(doc.createTextNode(str(shape[0])))
height_node = doc.createElement('height')
height_node.appendChild(doc.createTextNode(str(shape[1])))
depth_node = doc.createElement('depth')
depth_node.appendChild(doc.createTextNode(str(3)))
size_node.appendChild(width_node)
size_node.appendChild(height_node)
size_node.appendChild(depth_node)
# mask_image[mask_image != 1] = 0
# xs, ys = np.where(mask_image == 1)
# min_x = np.min(xs)
# min_y = np.min(ys)
# max_x = np.max(xs)
# max_y = np.max(ys)
min_xs, min_ys, max_xs, max_ys, names = extract_bboxs_from_mask(mask_image, os.path.join(slice_dir, 'tumor_types'))
lines = []
for min_x, min_y, max_x, max_y, name in zip(min_xs, min_ys, max_xs, max_ys, names):
object_node = doc.createElement('object')
root_node.appendChild(object_node)
name_node = doc.createElement('name')
name_node.appendChild(doc.createTextNode(name))
object_node.appendChild(name_node)
truncated_node = doc.createElement('truncated')
object_node.appendChild(truncated_node)
truncated_node.appendChild(doc.createTextNode('0'))
difficult_node = doc.createElement('difficult')
object_node.appendChild(difficult_node)
difficult_node.appendChild(doc.createTextNode('0'))
bndbox_node = doc.createElement('bndbox')
object_node.appendChild(bndbox_node)
xmin_node = doc.createElement('xmin')
xmin_node.appendChild(doc.createTextNode(str(min_y)))
bndbox_node.appendChild(xmin_node)
ymin_node = doc.createElement('ymin')
ymin_node.appendChild(doc.createTextNode(str(min_x)))
bndbox_node.appendChild(ymin_node)
xmax_node = doc.createElement('xmax')
xmax_node.appendChild(doc.createTextNode(str(max_y)))
bndbox_node.appendChild(xmax_node)
ymax_node = doc.createElement('ymax')
ymax_node.appendChild(doc.createTextNode(str(max_x)))
bndbox_node.appendChild(ymax_node)
line = '%s %d %d %d %d\n' % (name, min_y, min_x, max_y, max_x)
print(line)
lines.append(line)
with open(xml_save_path, 'wb') as f:
f.write(doc.toprettyxml(indent='\t', encoding='utf-8'))
with open(gt_save_path, 'w') as f:
f.writelines(lines)
f.close()
def dicom2jpg_multiphase(slice_dir, save_dir, phase_names, target_phase_name):
'''
Precondition: the DICOM data has already been converted to MHD format and the slices have been extracted; each MHD file contains only three slices.
Uses only a single slice for each phase.
:param slice_dir:
:param save_dir:
:param phase_names:
:param target_phase_name:
:return:
'''
total_phase_name = ''.join(phase_names)
target_phase_mask = None
mhd_images = []
for phase_name in phase_names:
mhd_image_path = os.path.join(slice_dir, 'Image_' + phase_name + '.mhd')
mhd_mask_path = os.path.join(slice_dir, 'Mask_' + phase_name + '.mhd')
mhd_image = read_mhd_image(mhd_image_path)
mask_image = read_mhd_image(mhd_mask_path)
mhd_image = np.asarray(np.squeeze(mhd_image), np.float32)
mhd_image = np.transpose(mhd_image, axes=[1, 2, 0])
if phase_name == target_phase_name:
target_phase_mask = mask_image
mhd_images.append(mhd_image)
# mhd_image = np.expand_dims(mhd_image, axis=2)
# mhd_image = np.concatenate([mhd_image, mhd_image, mhd_image], axis=2)
mhd_image = np.concatenate([np.expand_dims(ele[:, :, 1], axis=2) for ele in mhd_images], axis=-1)
mask_image = target_phase_mask
mask_image = np.asarray(np.squeeze(mask_image), np.uint8)
max_v = 300.
min_v = -350.
mhd_image[mhd_image > max_v] = max_v
mhd_image[mhd_image < min_v] = min_v
print(np.mean(mhd_image, dtype=np.float32))
mhd_image -= np.mean(mhd_image)
min_v = np.min(mhd_image)
max_v = np.max(mhd_image)
interv = max_v - min_v
mhd_image = (mhd_image - min_v) / interv
file_name = os.path.basename(slice_dir)
dataset_name = os.path.basename(os.path.dirname(slice_dir))
save_path = os.path.join(save_dir, total_phase_name, dataset_name, file_name+'.jpg')
if not os.path.exists(os.path.dirname(save_path)):
os.makedirs(os.path.dirname(save_path))
print('the shape of mhd_image is ', np.shape(mhd_image), np.min(mhd_image), np.max(mhd_image))
print('the shape of mask_image is ', np.shape(mask_image))
_, _, depth_image= np.shape(mhd_image)
if depth_image == 2:
mhd_image = np.concatenate(
[mhd_image, np.expand_dims(mhd_image[:, :, np.argmax(np.sum(np.sum(mask_image, axis=1), axis=1))], axis=2)],
axis=2
)
print('Error')
#mhd_image = np.asarray(mhd_image * 255, np.uint8)
#mhd_image.tofile(save_path)
# np.save(save_path, mhd_image * 255)
cv2.imwrite(save_path, mhd_image * 255)
xml_save_dir = os.path.join(save_dir, total_phase_name, dataset_name+'_xml')
if not os.path.exists(xml_save_dir):
os.makedirs(xml_save_dir)
evulate_gt_dir = os.path.join(save_dir, total_phase_name, dataset_name+'_gt')
if not os.path.exists(evulate_gt_dir):
os.makedirs(evulate_gt_dir)
xml_save_path = os.path.join(xml_save_dir, file_name + '.xml')
gt_save_path = os.path.join(evulate_gt_dir, file_name + '.txt') # for evulate
doc = Document()
root_node = doc.createElement('annotation')
doc.appendChild(root_node)
folder_name = os.path.basename(save_dir) + '/' + total_phase_name
folder_node = doc.createElement('folder')
root_node.appendChild(folder_node)
folder_txt_node = doc.createTextNode(folder_name)
folder_node.appendChild(folder_txt_node)
file_name = file_name + '.jpg'
filename_node = doc.createElement('filename')
root_node.appendChild(filename_node)
filename_txt_node = doc.createTextNode(file_name)
filename_node.appendChild(filename_txt_node)
shape = list(np.shape(mhd_image))
size_node = doc.createElement('size')
root_node.appendChild(size_node)
width_node = doc.createElement('width')
width_node.appendChild(doc.createTextNode(str(shape[0])))
height_node = doc.createElement('height')
height_node.appendChild(doc.createTextNode(str(shape[1])))
depth_node = doc.createElement('depth')
depth_node.appendChild(doc.createTextNode(str(3)))
size_node.appendChild(width_node)
size_node.appendChild(height_node)
size_node.appendChild(depth_node)
# mask_image[mask_image != 1] = 0
# xs, ys = np.where(mask_image == 1)
# min_x = np.min(xs)
# min_y = np.min(ys)
# max_x = np.max(xs)
# max_y = np.max(ys)
min_xs, min_ys, max_xs, max_ys, names = extract_bboxs_from_mask(mask_image, os.path.join(slice_dir, 'tumor_types'))
lines = []
for min_x, min_y, max_x, max_y, name in zip(min_xs, min_ys, max_xs, max_ys, names):
object_node = doc.createElement('object')
root_node.appendChild(object_node)
name_node = doc.createElement('name')
name_node.appendChild(doc.createTextNode(name))
object_node.appendChild(name_node)
truncated_node = doc.createElement('truncated')
object_node.appendChild(truncated_node)
truncated_node.appendChild(doc.createTextNode('0'))
difficult_node = doc.createElement('difficult')
object_node.appendChild(difficult_node)
difficult_node.appendChild(doc.createTextNode('0'))
bndbox_node = doc.createElement('bndbox')
object_node.appendChild(bndbox_node)
xmin_node = doc.createElement('xmin')
xmin_node.appendChild(doc.createTextNode(str(min_y)))
bndbox_node.appendChild(xmin_node)
ymin_node = doc.createElement('ymin')
ymin_node.appendChild(doc.createTextNode(str(min_x)))
bndbox_node.appendChild(ymin_node)
xmax_node = doc.createElement('xmax')
xmax_node.appendChild(doc.createTextNode(str(max_y)))
bndbox_node.appendChild(xmax_node)
ymax_node = doc.createElement('ymax')
ymax_node.appendChild(doc.createTextNode(str(max_x)))
bndbox_node.appendChild(ymax_node)
line = '%s %d %d %d %d\n' % (name, min_y, min_x, max_y, max_x)
print(line)
lines.append(line)
with open(xml_save_path, 'wb') as f:
f.write(doc.toprettyxml(indent='\t', encoding='utf-8'))
with open(gt_save_path, 'w') as f:
f.writelines(lines)
f.close()
def dicom2jpg_multiphase_tripleslice(slice_dir, save_dir, phase_names, target_phase_name):
'''
Precondition: the DICOM data has already been converted to MHD format and the slices have been extracted; each MHD file contains only three slices.
Extracts all three slices for each phase, but still extracts only one mask.
Outputs are saved as name_nc.jpg, name_art.jpg, name_pv.jpg.
:param slice_dir:
:param save_dir:
:param phase_names:
:param target_phase_name:
:return:
'''
total_phase_name = ''.join(phase_names)
total_phase_name += '_tripleslice'
target_phase_mask = None
mhd_images = []
for phase_name in phase_names:
mhd_image_path = os.path.join(slice_dir, 'Image_' + phase_name + '.mhd')
mhd_mask_path = os.path.join(slice_dir, 'Mask_' + phase_name + '.mhd')
mhd_image = read_mhd_image(mhd_image_path)
mask_image = read_mhd_image(mhd_mask_path)
mhd_image = np.asarray(np.squeeze(mhd_image), np.float32)
mhd_image = np.transpose(mhd_image, axes=[1, 2, 0])
if phase_name == target_phase_name:
target_phase_mask = mask_image
_, _, depth_image = np.shape(mhd_image)
if depth_image == 2:
mhd_image = np.concatenate(
[mhd_image,
np.expand_dims(mhd_image[:, :, np.argmax(np.sum(np.sum(mask_image, axis=1), axis=1))], axis=2)],
axis=2
)
print('Error')
mhd_images.append(mhd_image)
# mhd_image = np.expand_dims(mhd_image, axis=2)
# mhd_image = np.concatenate([mhd_image, mhd_image, mhd_image], axis=2)
mhd_image = np.concatenate(mhd_images, axis=-1)
mask_image = target_phase_mask
mask_image = np.asarray(np.squeeze(mask_image), np.uint8)
max_v = 300.
min_v = -350.
mhd_image[mhd_image > max_v] = max_v
mhd_image[mhd_image < min_v] = min_v
print(np.mean(mhd_image, dtype=np.float32))
mhd_image -= np.mean(mhd_image)
min_v = np.min(mhd_image)
max_v = np.max(mhd_image)
interv = max_v - min_v
mhd_image = (mhd_image - min_v) / interv
file_name = os.path.basename(slice_dir)
dataset_name = os.path.basename(os.path.dirname(slice_dir))
print('the shape of mhd_image is ', np.shape(mhd_image), np.min(mhd_image), np.max(mhd_image))
print('the shape of mask_image is ', np.shape(mask_image))
for phase_idx, phase_name in enumerate(['NNC', 'ART', 'PPV']):
save_path = os.path.join(save_dir, total_phase_name, dataset_name, file_name + '_%s.jpg' % phase_name)
if not os.path.exists(os.path.dirname(save_path)):
os.makedirs(os.path.dirname(save_path))
cv2.imwrite(save_path, mhd_image[:, :, phase_idx * 3: (phase_idx + 1) * 3] * 255)
xml_save_dir = os.path.join(save_dir, total_phase_name, dataset_name+'_xml')
if not os.path.exists(xml_save_dir):
os.makedirs(xml_save_dir)
evulate_gt_dir = os.path.join(save_dir, total_phase_name, dataset_name+'_gt')
if not os.path.exists(evulate_gt_dir):
os.makedirs(evulate_gt_dir)
xml_save_path = os.path.join(xml_save_dir, file_name + '.xml')
gt_save_path = os.path.join(evulate_gt_dir, file_name + '.txt') # for evulate
doc = Document()
root_node = doc.createElement('annotation')
doc.appendChild(root_node)
folder_name = os.path.basename(save_dir) + '/' + total_phase_name
folder_node = doc.createElement('folder')
root_node.appendChild(folder_node)
folder_txt_node = doc.createTextNode(folder_name)
folder_node.appendChild(folder_txt_node)
file_name = file_name + '.jpg'
filename_node = doc.createElement('filename')
root_node.appendChild(filename_node)
filename_txt_node = doc.createTextNode(file_name)
filename_node.appendChild(filename_txt_node)
shape = list(np.shape(mhd_image))
size_node = doc.createElement('size')
root_node.appendChild(size_node)
width_node = doc.createElement('width')
width_node.appendChild(doc.createTextNode(str(shape[0])))
height_node = doc.createElement('height')
height_node.appendChild(doc.createTextNode(str(shape[1])))
depth_node = doc.createElement('depth')
depth_node.appendChild(doc.createTextNode(str(3)))
size_node.appendChild(width_node)
size_node.appendChild(height_node)
size_node.appendChild(depth_node)
# mask_image[mask_image != 1] = 0
# xs, ys = np.where(mask_image == 1)
# min_x = np.min(xs)
# min_y = np.min(ys)
# max_x = np.max(xs)
# max_y = np.max(ys)
min_xs, min_ys, max_xs, max_ys, names = extract_bboxs_from_mask(mask_image, os.path.join(slice_dir, 'tumor_types'))
lines = []
for min_x, min_y, max_x, max_y, name in zip(min_xs, min_ys, max_xs, max_ys, names):
object_node = doc.createElement('object')
root_node.appendChild(object_node)
name_node = doc.createElement('name')
name_node.appendChild(doc.createTextNode(name))
object_node.appendChild(name_node)
truncated_node = doc.createElement('truncated')
object_node.appendChild(truncated_node)
truncated_node.appendChild(doc.createTextNode('0'))
difficult_node = doc.createElement('difficult')
object_node.appendChild(difficult_node)
difficult_node.appendChild(doc.createTextNode('0'))
bndbox_node = doc.createElement('bndbox')
object_node.appendChild(bndbox_node)
xmin_node = doc.createElement('xmin')
xmin_node.appendChild(doc.createTextNode(str(min_y)))
bndbox_node.appendChild(xmin_node)
ymin_node = doc.createElement('ymin')
ymin_node.appendChild(doc.createTextNode(str(min_x)))
bndbox_node.appendChild(ymin_node)
xmax_node = doc.createElement('xmax')
xmax_node.appendChild(doc.createTextNode(str(max_y)))
bndbox_node.appendChild(xmax_node)
ymax_node = doc.createElement('ymax')
ymax_node.appendChild(doc.createTextNode(str(max_x)))
bndbox_node.appendChild(ymax_node)
line = '%s %d %d %d %d\n' % (name, min_y, min_x, max_y, max_x)
print(line)
lines.append(line)
with open(xml_save_path, 'wb') as f:
f.write(doc.toprettyxml(indent='\t', encoding='utf-8'))
with open(gt_save_path, 'w') as f:
f.writelines(lines)
f.close()
def dicom2jpg_multiphase_tripleslice_mask(slice_dir, save_dir, phase_names, target_phase_name):
'''
Precondition: the DICOM data has already been converted to MHD format and the slices have been extracted; each MHD file contains only three slices.
Extracts all three slices for each phase, but still extracts only one mask.
Outputs are saved as name_nc.jpg, name_art.jpg, name_pv.jpg.
:param slice_dir:
:param save_dir:
:param phase_names:
:param target_phase_name:
:return:
'''
total_phase_name = ''.join(phase_names)
total_phase_name += '_tripleslice_mask'
target_phase_mask = None
mhd_images = []
for phase_name in phase_names:
mhd_image_path = os.path.join(slice_dir, 'Image_' + phase_name + '.mhd')
mhd_mask_path = os.path.join(slice_dir, 'Mask_' + phase_name + '.mhd')
mhd_image = read_mhd_image(mhd_image_path)
mask_image = read_mhd_image(mhd_mask_path)
mhd_image = np.asarray(np.squeeze(mhd_image), np.float32)
mhd_image = np.transpose(mhd_image, axes=[1, 2, 0])
if phase_name == target_phase_name:
target_phase_mask = mask_image
_, _, depth_image = np.shape(mhd_image)
if depth_image == 2:
mhd_image = np.concatenate(
[mhd_image,
np.expand_dims(mhd_image[:, :, np.argmax(np.sum(np.sum(mask_image, axis=1), axis=1))], axis=2)],
axis=2
)
print('Error')
mhd_images.append(mhd_image)
# mhd_image = np.expand_dims(mhd_image, axis=2)
# mhd_image = np.concatenate([mhd_image, mhd_image, mhd_image], axis=2)
mhd_image = np.concatenate(mhd_images, axis=-1)
mask_image = target_phase_mask
mask_image = np.asarray(np.squeeze(mask_image), np.uint8)
max_v = 300.
min_v = -350.
mhd_image[mhd_image > max_v] = max_v
mhd_image[mhd_image < min_v] = min_v
print(np.mean(mhd_image, dtype=np.float32))
mhd_image -= np.mean(mhd_image)
min_v = np.min(mhd_image)
max_v = np.max(mhd_image)
interv = max_v - min_v
mhd_image = (mhd_image - min_v) / interv
file_name = os.path.basename(slice_dir)
dataset_name = os.path.basename(os.path.dirname(slice_dir))
print('the shape of mhd_image is ', np.shape(mhd_image), np.min(mhd_image), np.max(mhd_image))
print('the shape of mask_image is ', np.shape(mask_image))
for phase_idx, phase_name in enumerate(['NNC', 'ART', 'PPV']):
save_path = os.path.join(save_dir, total_phase_name, dataset_name,
file_name + '_%s.%s' % (phase_name, image_suffix_name))
if not os.path.exists(os.path.dirname(save_path)):
os.makedirs(os.path.dirname(save_path))
if image_suffix_name == 'jpg':
cv2.imwrite(save_path, mhd_image[:, :, phase_idx * 3: (phase_idx + 1) * 3] * 255)
else:
cv2.imwrite(save_path, np.asarray(mhd_image[:, :, phase_idx * 3: (phase_idx + 1) * 3] * 255, np.int32))
if not os.path.exists(os.path.join(save_dir, total_phase_name, dataset_name + '_mask')):
os.mkdir(os.path.join(save_dir, total_phase_name, dataset_name + '_mask'))
if not os.path.exists(os.path.join(save_dir, total_phase_name, dataset_name + '_mask_vis')):
os.mkdir(os.path.join(save_dir, total_phase_name, dataset_name + '_mask_vis'))
real_mask_save_path = os.path.join(save_dir, total_phase_name, dataset_name + '_mask', file_name + '.' + image_suffix_name)
vis_mask_save_path = os.path.join(save_dir, total_phase_name, dataset_name + '_mask_vis', file_name + '.' + image_suffix_name)
xml_save_dir = os.path.join(save_dir, total_phase_name, dataset_name+'_xml')
if not os.path.exists(xml_save_dir):
os.makedirs(xml_save_dir)
evulate_gt_dir = os.path.join(save_dir, total_phase_name, dataset_name+'_gt')
if not os.path.exists(evulate_gt_dir):
os.makedirs(evulate_gt_dir)
xml_save_path = os.path.join(xml_save_dir, file_name + '.xml')
gt_save_path = os.path.join(evulate_gt_dir, file_name + '.txt') # for evulate
doc = Document()
root_node = doc.createElement('annotation')
doc.appendChild(root_node)
folder_name = os.path.basename(save_dir) + '/' + total_phase_name
folder_node = doc.createElement('folder')
root_node.appendChild(folder_node)
folder_txt_node = doc.createTextNode(folder_name)
folder_node.appendChild(folder_txt_node)
file_name = file_name + '.jpg'
filename_node = doc.createElement('filename')
root_node.appendChild(filename_node)
filename_txt_node = doc.createTextNode(file_name)
filename_node.appendChild(filename_txt_node)
shape = list(np.shape(mhd_image))
size_node = doc.createElement('size')
root_node.appendChild(size_node)
width_node = doc.createElement('width')
width_node.appendChild(doc.createTextNode(str(shape[0])))
height_node = doc.createElement('height')
height_node.appendChild(doc.createTextNode(str(shape[1])))
depth_node = doc.createElement('depth')
depth_node.appendChild(doc.createTextNode(str(3)))
size_node.appendChild(width_node)
size_node.appendChild(height_node)
size_node.appendChild(depth_node)
# mask_image[mask_image != 1] = 0
# xs, ys = np.where(mask_image == 1)
# min_x = np.min(xs)
# min_y = np.min(ys)
# max_x = np.max(xs)
# max_y = np.max(ys)
min_xs, min_ys, max_xs, max_ys, names, mask = extract_bboxs_mask_from_mask(mask_image,
os.path.join(slice_dir, 'tumor_types'))
cv2.imwrite(vis_mask_save_path, np.asarray(mask * 50, np.int32))
for key in pixel2type.keys():
mask[mask == key] = type2pixel[pixel2type[key]][0]
cv2.imwrite(real_mask_save_path, np.asarray(mask, np.int32))
lines = []
for min_x, min_y, max_x, max_y, name in zip(min_xs, min_ys, max_xs, max_ys, names):
object_node = doc.createElement('object')
root_node.appendChild(object_node)
name_node = doc.createElement('name')
name_node.appendChild(doc.createTextNode(name))
object_node.appendChild(name_node)
truncated_node = doc.createElement('truncated')
object_node.appendChild(truncated_node)
truncated_node.appendChild(doc.createTextNode('0'))
difficult_node = doc.createElement('difficult')
object_node.appendChild(difficult_node)
difficult_node.appendChild(doc.createTextNode('0'))
bndbox_node = doc.createElement('bndbox')
object_node.appendChild(bndbox_node)
xmin_node = doc.createElement('xmin')
xmin_node.appendChild(doc.createTextNode(str(min_y)))
bndbox_node.appendChild(xmin_node)
ymin_node = doc.createElement('ymin')
ymin_node.appendChild(doc.createTextNode(str(min_x)))
bndbox_node.appendChild(ymin_node)
xmax_node = doc.createElement('xmax')
xmax_node.appendChild(doc.createTextNode(str(max_y)))
bndbox_node.appendChild(xmax_node)
ymax_node = doc.createElement('ymax')
ymax_node.appendChild(doc.createTextNode(str(max_x)))
bndbox_node.appendChild(ymax_node)
line = '%s %d %d %d %d\n' % (name, min_y, min_x, max_y, max_x)
print(line)
lines.append(line)
with open(xml_save_path, 'wb') as f:
f.write(doc.toprettyxml(indent='\t', encoding='utf-8'))
with open(gt_save_path, 'w') as f:
f.writelines(lines)
f.close()
if __name__ == '__main__':
# For a single phase
# image_dir = '/home/give/Documents/dataset/LiverLesionDetection_Splited/0'
# LiverLesionDetection_Iterator(
# image_dir,
# dicom2jpg_singlephase,
# '/home/give/Documents/dataset/LiverLesionDetection_Splited/JPG/0',
# 'ART'
# )
global image_suffix_name
image_suffix_name = 'PNG'
# For multiple phases
image_dir = '/home/give/Documents/dataset/LiverLesionDetection_Splited/0'
LiverLesionDetection_Iterator(
image_dir,
dicom2jpg_multiphase_tripleslice_mask,
'/home/give/Documents/dataset/LiverLesionDetection_Splited/JPG/0',
['NC', 'ART', 'PV'],
'PV'
)
| ["[email protected]"] | |
3eca6fa93c5db8360edf1d7504cead97383393d2 | 5a8214b3a452c574e6c883bf5d90ba58ba87c461 | /leetcode/549.py | bdd217ec4dc7b3604cda9930f55824e25cf201fb | [] | no_license | phlalx/algorithms | 69a3c8519687816e3c6333ec12b40659d3e3167f | f4da5a5dbda640b9bcbe14cb60a72c422b5d6240 | refs/heads/master | 2023-02-03T10:30:30.181735 | 2020-12-26T09:47:38 | 2020-12-26T09:47:38 | 129,254,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,272 | py | #TAG tree
# yet another tree problem
# do it on paper first to see if we use synthesized/inherited attributes and how
# to deal with None
#
# If we don't recurse to None, don't forget to deal with corner case where
# root = None.
#
# In this example, we don't recurse through None because we need
# node.left and node.right values
#
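# LeetCode's harness normally provides TreeNode and the typing import; minimal
# stand-ins (assumptions, not part of the original submission) so the snippet is
# self-contained:
from typing import Tuple

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right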
class Solution:
    def longestConsecutive(self, root: TreeNode) -> int:
        res = float('-inf')

        def f(node) -> Tuple[int, int]:
            # returns (longest consecutive increasing path, longest consecutive
            # decreasing path) starting at node and going downward
            nonlocal res
            v = node.val
            linc, rinc, ldec, rdec = (1,) * 4
            if node.left is not None:
                linc, ldec = f(node.left)
                if v == node.left.val - 1:
                    linc += 1
                else:
                    linc = 1
                if v == node.left.val + 1:
                    ldec += 1
                else:
                    ldec = 1
            if node.right is not None:
                rinc, rdec = f(node.right)
                if v == node.right.val - 1:
                    rinc += 1
                else:
                    rinc = 1
                if v == node.right.val + 1:
                    rdec += 1
                else:
                    rdec = 1
            res = max(res, linc + rdec - 1, rinc + ldec - 1)
            return max(linc, rinc), max(ldec, rdec)

        if root is None:
            return 0
        f(root)
        return res
| ["[email protected]"] | |
6e7e9e7be12b4e755ac17174ef8c25c82594321e | 3293dc42e15e956be202e39db196eed9912dcc01 | /estimation/causal_inference/I_and_R_treatment effect evaluation/stratified_randomized_experiments/fep_stats.py | 5f2e7f9659245edb8a169bbd0e9f13888824119d | [] | no_license | bthowe/data_science | c372e5364f24dc29e3de1fca3504211cb93b62fb | 63291df8084e5f62f9ba226e87db2242bb31ac94 | refs/heads/master | 2021-11-24T10:49:00.800890 | 2021-11-02T16:10:16 | 2021-11-02T16:10:16 | 106,839,857 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,380 | py | import sys
import numpy as np
import pandas as pd
pd.set_option('max_columns', 1000)
pd.set_option('max_info_columns', 1000)
pd.set_option('expand_frame_repr', False)
pd.set_option('display.max_rows', 30000)
pd.set_option('max_colwidth', 4000)
pd.set_option('display.float_format', lambda x: '%.3f' % x)
def T_diff_lambda(y_t, y_c, lam='RSS'):
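# Stratified difference-in-means: Y_t/Y_c are the per-stratum treated/control means
# and N_t/N_c the per-stratum counts; the statistic is the lambda-weighted sum of
# within-stratum differences.  lam='RSS' weights each stratum by its share of the
# sample; lam='OPT' weights by N_j * q_j * (1 - q_j), with q_j the treated share,
# normalized to sum to one.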
Y_t = y_t['y'].groupby(y_t['stratum']).mean()
N_t = y_t['y'].groupby(y_t['stratum']).count()
Y_c = y_c['y'].groupby(y_c['stratum']).mean()
N_c = y_c['y'].groupby(y_c['stratum']).count()
if lam == 'RSS':
l = (N_t + N_c) / (N_t.sum() + N_c.sum())
return (l * (Y_t - Y_c)).sum()
elif lam == 'OPT':
l = ((N_t + N_c) * (N_t / (N_t + N_c)) * (N_c / (N_t + N_c))) / ((N_t + N_c) * (N_t / (N_t + N_c)) * (N_c / (N_t + N_c))).sum()
return (l * (Y_t - Y_c)).sum()
def T_rank_stratum(y):
y['rank'] = y['y'].groupby(y['stratum']).rank()
y['norm'] = (y['y'].groupby(y['stratum']).transform('count') + 1) / 2
y['normalized_rank'] = y['rank'] - y['norm']
return np.abs(y.query('treatment == 1')['normalized_rank'].mean() - y.query('treatment == 0')['normalized_rank'].mean())
def T_range(y):
y_t = y.query('treatment == 1')
y_c = y.query('treatment == 0')
Y_t = y_t['y'].groupby(y_t['stratum']).max() - y_t['y'].groupby(y_t['stratum']).min()
Y_c = y_c['y'].groupby(y_c['stratum']).max() - y_c['y'].groupby(y_c['stratum']).min()
N_t = y_t['y'].groupby(y_t['stratum']).count()
N_c = y_c['y'].groupby(y_c['stratum']).count()
l = (N_t + N_c) / (N_t.sum() + N_c.sum())
return (l * (Y_t - Y_c)).sum()
if __name__ == '__main__':
np.random.seed(seed=2)
N = 10
J = 3
df = pd.DataFrame(np.random.uniform(-1, 1, size=(N, 3)), columns=['one', 'two', 'three'])
df['stratum'] = np.random.choice(range(J), size=(N, 1))
df['treatment'] = np.random.choice([0, 1], size=(N, 1))
df['y'] = np.random.uniform(0, 1, size=(N, 1))
print(
T_diff_lambda(
df.query('treatment == 1')[['y', 'stratum']],
df.query('treatment == 0')[['y', 'stratum']],
lam='OPT'
# lam='RSS'
)
)
print(
T_rank_stratum(
df[['y', 'stratum', 'treatment']]
)
)
print(
T_range(
df[['y', 'stratum', 'treatment']]
)
)
| ["[email protected]"] | |
64165e3ab97aeeeb15eedc35d813c6e5e60e29c1 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03086/s435473021.py | 48cf74090e9644bcb577dece1ddb37330a8fc1a4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | ans = 0
c = 1
li = ["A","C","G","T"]
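# Brute force over every substring length i+1: a substring passes if all of its
# characters are in li, and ans is updated to the longest passing length, i.e. the
# length of the longest contiguous block of s consisting only of A, C, G and T.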
s = input()
lens = len(s)
for i in range(lens):
    for j in range(lens-(i)):
        a = s[j:j+i+1]
        for k in range(i+1):
            if a[k] not in li:
                c = 1
                break
            else:
                c = 0
                continue
        if c == 0:
            ans = i+1
        else:
            c = 1
print(ans)
| ["[email protected]"] | |
b334814b90ea86220c975382e7dfc5b5e03db363 | 281c0694b9c6e394a0329a34d1b9ec564811fd3a | /test/test_errors.py | d6dabec0f52bb01937c19c6e0a1e1f051da688a5 | [
"Apache-2.0"
] | permissive | fridex/json2sql | 35448d1011a53c55641ef928cbcdc2c40d55fb65 | a0851dd79827a684319b03fb899e129f81ff2d3a | refs/heads/master | 2021-01-01T19:09:30.394871 | 2018-04-01T12:12:43 | 2018-04-01T12:12:43 | 98,528,002 | 0 | 0 | Apache-2.0 | 2018-04-01T12:20:36 | 2017-07-27T11:26:43 | Python | UTF-8 | Python | false | false | 747 | py | #!/usr/bin/env python3
# ######################################################################
# Copyright (C) 2017 Fridolin Pokorny, [email protected]
# This file is part of json2sql package.
# ######################################################################
"""Tests for error handling."""
from json2sql import select2sql
from json2sql import ParsingInputError
import pytest
from .base import TestBase
class TestErrors(TestBase):
    """Tests for error handling."""

    def test_unknown_subquery_key(self):
        wrong_nested_query = {'$filter': {'table': 'BarTable'}, 'wrong_key': 'baz'}
        with pytest.raises(ParsingInputError):
            select2sql(table='FooTable', where={'something in': wrong_nested_query})
| ["[email protected]"] | |
e901843881086ed332a601f428db957d0382c60a | 4d7e1b959194e7ca7d99c0b5926622f37959cd02 | /todolist/admin.py | da44779ce2751e042d3daf013650127ccbdbea60 | [] | no_license | KamilBienias/my-first-blog | ce1ff6ff8912c30014a219a7059037a62a4f48d0 | 2c138991c3e53f3b3bc7b4e6d76c20d62a9a126a | refs/heads/master | 2020-12-19T07:35:58.750570 | 2020-08-23T16:15:46 | 2020-08-23T16:15:46 | 235,664,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | from django.contrib import admin
from .models import Task, Person
admin.site.register(Task)
admin.site.register(Person)
| ["[email protected]"] | |
9358e544278850f748f16ad9ac2e1b2e102db86a | 94dbd40525692416ea8100d13b03ece73ee33f7f | /kgtk/iff/kgtkifempty.py | 5db7e9bffa021cfeaa1271b05c1b67106745527c | [
"MIT"
] | permissive | usc-isi-i2/kgtk | 7101129ce1dde646095803429d3c751bf87ae071 | c31ba4c33d5f925fdb66a487ba2e1184c9ca4254 | refs/heads/main | 2023-08-22T06:58:22.301262 | 2023-06-29T19:55:28 | 2023-06-29T19:55:28 | 234,676,361 | 325 | 53 | MIT | 2023-06-29T19:55:30 | 2020-01-18T03:34:48 | Jupyter Notebook | UTF-8 | Python | false | false | 10,669 | py | """Copy records from the first KGTK file to the output file, if one or more
columns are (any/all) (not) empty. If --only-count is True, report the count
of qualifying records but do not write the output file.
"""
from argparse import ArgumentParser, Namespace
import attr
from pathlib import Path
import sys
import typing
from kgtk.kgtkformat import KgtkFormat
from kgtk.io.kgtkreader import KgtkReader, KgtkReaderOptions
from kgtk.io.kgtkwriter import KgtkWriter
from kgtk.utils.argparsehelpers import optional_bool
from kgtk.value.kgtkvalueoptions import KgtkValueOptions
@attr.s(slots=True, frozen=True)
class KgtkIfEmpty(KgtkFormat):
input_file_path: typing.Optional[Path] = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(Path)))
filter_column_names: typing.List[str] = attr.ib(validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(str),
iterable_validator=attr.validators.instance_of(list)))
output_file_path: typing.Optional[Path] = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(Path)))
reject_file_path: typing.Optional[Path] = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(Path)), default=None)
all_are: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
notempty: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
only_count: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
# TODO: find working validators
# value_options: typing.Optional[KgtkValueOptions] = attr.ib(attr.validators.optional(attr.validators.instance_of(KgtkValueOptions)), default=None)
reader_options: typing.Optional[KgtkReaderOptions]= attr.ib(default=None)
value_options: typing.Optional[KgtkValueOptions] = attr.ib(default=None)
error_file: typing.TextIO = attr.ib(default=sys.stderr)
verbose: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
very_verbose: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
def filter(self,
row: typing.List[str],
filter_idx_list: typing.List[int])->bool:
idx: int
if self.notempty == False and self.all_are == False:
# if any are empty.
for idx in filter_idx_list:
if len(row[idx]) == 0:
return True
return False
elif self.notempty == False and self.all_are == True:
# if all are empty.
for idx in filter_idx_list:
if len(row[idx]) != 0:
return False
return True
elif self.notempty == True and self.all_are == False:
# If any are not empty.
for idx in filter_idx_list:
if len(row[idx]) != 0:
return True
return False
else:
# if all are not empty:
for idx in filter_idx_list:
if len(row[idx]) == 0:
return False
return True
def process(self):
# Open the input file.
if self.verbose:
if self.input_file_path is not None:
print("Opening the input file: %s" % self.input_file_path, file=self.error_file, flush=True)
else:
print("Reading the input data from stdin", file=self.error_file, flush=True)
kr: KgtkReader = KgtkReader.open(self.input_file_path,
error_file=self.error_file,
options=self.reader_options,
value_options = self.value_options,
verbose=self.verbose,
very_verbose=self.very_verbose,
)
filter_idx_list: typing.List[int] = [ ]
column_name: str
for column_name in self.filter_column_names:
if column_name not in kr.column_name_map:
raise ValueError("Column %s is not in the input file" % (column_name))
filter_idx_list.append(kr.column_name_map[column_name])
ew: typing.Optional[KgtkWriter] = None
rew: typing.Optional[KgtkWriter] = None
if not self.only_count:
if self.verbose:
print("Opening the output file: %s" % self.output_file_path, file=self.error_file, flush=True)
ew = KgtkWriter.open(kr.column_names,
self.output_file_path,
mode=kr.mode,
require_all_columns=False,
prohibit_extra_columns=True,
fill_missing_columns=True,
use_mgzip=self.reader_options.use_mgzip, # Hack!
mgzip_threads=self.reader_options.mgzip_threads, # Hack!
gzip_in_parallel=False,
verbose=self.verbose,
very_verbose=self.very_verbose)
if self.reject_file_path is not None:
if self.verbose:
print("Opening the reject file: %s" % self.reject_file_path, file=self.error_file, flush=True)
rew = KgtkWriter.open(kr.column_names,
self.reject_file_path,
mode=KgtkWriter.Mode[kr.mode.name],
require_all_columns=False,
prohibit_extra_columns=True,
fill_missing_columns=True,
use_mgzip=self.reader_options.use_mgzip, # Hack!
mgzip_threads=self.reader_options.mgzip_threads, # Hack!
gzip_in_parallel=False,
verbose=self.verbose,
very_verbose=self.very_verbose)
if self.verbose:
print("Filtering records from %s" % self.input_file_path, file=self.error_file, flush=True)
input_line_count: int = 0
output_line_count: int = 0;
reject_line_count: int = 0;
row: typing.List[str]
for row in kr:
input_line_count += 1
if self.filter(row, filter_idx_list):
if not self.only_count:
ew.write(row)
output_line_count += 1
else:
if not self.only_count and rew is not None:
rew.write(row)
reject_line_count += 1
if self.only_count:
print("Read %d records, %d records passed the filter, %d rejected." % (input_line_count,
output_line_count,
reject_line_count),
file=self.error_file, flush=True)
else:
if self.verbose:
if rew is not None:
print("Read %d records, wrote %d records, rejected %d records." % (input_line_count,
output_line_count,
reject_line_count),
file=self.error_file, flush=True)
else:
print("Read %d records, wrote %d records." % (input_line_count, output_line_count), file=self.error_file, flush=True)
ew.close()
if rew is not None:
rew.close()
def main():
"""
Test the KGTK ifempty processor.
"""
parser: ArgumentParser = ArgumentParser()
parser.add_argument(dest="input_file_path", help="The KGTK file with the input data", type=Path, nargs="?")
parser.add_argument( "--columns", dest="filter_column_names", help="The columns to filter on (default=None).", nargs='+', required=True)
parser.add_argument( "--count", dest="only_count", help="Only count the records, do not copy them. (default=%(default)s).",
type=optional_bool, nargs='?', const=True, default=False)
parser.add_argument("-o", "--output-file", dest="output_file_path", help="The KGTK file to write (default=%(default)s).", type=Path, default="-")
parser.add_argument( "--all", dest="all_are", help="False: Test if any are, True: test if all are (default=%(default)s).",
type=optional_bool, nargs='?', const=True, default=False)
parser.add_argument( "--not-empty", dest="notempty", help="False: test if empty, True: test if not empty (default=%(default)s).",
type=optional_bool, nargs='?', const=True, default=False)
KgtkReader.add_debug_arguments(parser)
KgtkReaderOptions.add_arguments(parser, mode_options=True)
KgtkValueOptions.add_arguments(parser)
args: Namespace = parser.parse_args()
error_file: typing.TextIO = sys.stdout if args.errors_to_stdout else sys.stderr
# Build the option structures.
reader_options: KgtkReaderOptions = KgtkReaderOptions.from_args(args)
value_options: KgtkValueOptions = KgtkValueOptions.from_args(args)
# Show the final option structures for debugging and documentation.
if args.show_options:
# TODO: show ifempty-specific options.
reader_options.show(out=error_file)
value_options.show(out=error_file)
ie: KgtkIfEmpty = KgtkIfEmpty(
input_file_path=args.input_file_path,
filter_column_names=args.filter_column_names,
output_file_path=args.output_file_path,
all_are=args.all_are,
notempty=args.notempty,
only_count = args.only_count,
reader_options=reader_options,
value_options=value_options,
error_file=error_file,
verbose=args.verbose,
very_verbose=args.very_verbose)
ie.process()
if __name__ == "__main__":
main()
| ["[email protected]"] | |
18963044c0b542d8438fa4a3f06fcece9a5724c2 | 19fb0eb26f5a6d2180a323cf242ce00f5e4e1c6d | /test/functional/rpc_net.py | 1edc734577b514756d5995d5ae4866c5961a14bd | [
"MIT"
] | permissive | j00v/NestEGG | bd4c9555f6473cc655e203531c6ab4d0dc795b61 | 8c507974a5d49f5ffa7000fa8b864a528dcb9c3e | refs/heads/master | 2022-12-03T09:16:14.732378 | 2020-08-12T15:25:31 | 2020-08-12T15:25:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,111 | py | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPC calls related to net.
Tests correspond to code in rpc/net.cpp.
"""
from test_framework.test_framework import NestEggTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than_or_equal,
assert_raises_rpc_error,
connect_nodes_bi,
disconnect_nodes,
p2p_port,
wait_until,
)
class NetTest(NestEggTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def run_test(self):
self._test_connection_count()
self._test_getnettotals()
self._test_getnetworkinginfo()
self._test_getaddednodeinfo()
#self._test_getpeerinfo()
def _test_connection_count(self):
# connect_nodes_bi connects each node to the other
assert_equal(self.nodes[0].getconnectioncount(), 2)
def _test_getnettotals(self):
# getnettotals totalbytesrecv and totalbytessent should be
# consistent with getpeerinfo. Since the RPC calls are not atomic,
# and messages might have been recvd or sent between RPC calls, call
# getnettotals before and after and verify that the returned values
# from getpeerinfo are bounded by those values.
net_totals_before = self.nodes[0].getnettotals()
peer_info = self.nodes[0].getpeerinfo()
net_totals_after = self.nodes[0].getnettotals()
assert_equal(len(peer_info), 2)
peers_recv = sum([peer['bytesrecv'] for peer in peer_info])
peers_sent = sum([peer['bytessent'] for peer in peer_info])
assert_greater_than_or_equal(peers_recv, net_totals_before['totalbytesrecv'])
assert_greater_than_or_equal(net_totals_after['totalbytesrecv'], peers_recv)
assert_greater_than_or_equal(peers_sent, net_totals_before['totalbytessent'])
assert_greater_than_or_equal(net_totals_after['totalbytessent'], peers_sent)
# test getnettotals and getpeerinfo by doing a ping
# the bytes sent/received should change
# note ping and pong are 32 bytes each
self.nodes[0].ping()
wait_until(lambda: (self.nodes[0].getnettotals()['totalbytessent'] >= net_totals_after['totalbytessent'] + 32 * 2), timeout=1)
wait_until(lambda: (self.nodes[0].getnettotals()['totalbytesrecv'] >= net_totals_after['totalbytesrecv'] + 32 * 2), timeout=1)
peer_info_after_ping = self.nodes[0].getpeerinfo()
def _test_getnetworkinginfo(self):
assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)
disconnect_nodes(self.nodes[0], 1)
# Wait a bit for all sockets to close
wait_until(lambda: self.nodes[0].getnetworkinfo()['connections'] == 0, timeout=3)
connect_nodes_bi(self.nodes, 0, 1)
assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)
def _test_getaddednodeinfo(self):
assert_equal(self.nodes[0].getaddednodeinfo(True), [])
# add a node (node2) to node0
ip_port = "127.0.0.1:{}".format(p2p_port(2))
self.nodes[0].addnode(ip_port, 'add')
# check that the node has indeed been added
added_nodes = self.nodes[0].getaddednodeinfo(True, ip_port)
assert_equal(len(added_nodes), 1)
assert_equal(added_nodes[0]['addednode'], ip_port)
# check that a non-existent node returns an error
assert_raises_rpc_error(-24, "Node has not been added", self.nodes[0].getaddednodeinfo, True, '1.1.1.1')
def _test_getpeerinfo(self):
peer_info = [x.getpeerinfo() for x in self.nodes]
# check both sides of bidirectional connection between nodes
# the address bound to on one side will be the source address for the other node
assert_equal(peer_info[0][0]['addrbind'], peer_info[1][0]['addr'])
assert_equal(peer_info[1][0]['addrbind'], peer_info[0][0]['addr'])
if __name__ == '__main__':
NetTest().main()
| ["[email protected]"] | |
30cb251586599dc04ef74e664fb104b60b42d8e1 | 4a9dada02c749e9e5277fe1e35357d7b2b28ad5c | /高艺航2018012950/操作系统实验/作业3 创建多进程.py | 6c0b31386f1a4c6f1d792b15aa9fb29d96d4ee90 | [] | no_license | wanghan79/2020_Option_System | 631cc80f52829390a128a86677de527472470348 | f37b870614edf7d85320da197d932df2f25a5720 | refs/heads/master | 2021-01-09T13:10:05.630685 | 2020-07-10T03:30:39 | 2020-07-10T03:30:39 | 242,312,271 | 13 | 9 | null | 2020-07-04T16:13:11 | 2020-02-22T09:12:56 | Python | UTF-8 | Python | false | false | 442 | py | """
Author : Yihang.Gao 高艺航
StuNumber: 2018012950
Purpose : Set up multiple processes with Python.
Created : 1/7/2020
"""
from multiprocessing import Process
def setup_pro(i):
    print('process',i)


if __name__ == '__main__':
    list_pro = []
    for i in range(3):
        k = Process(target=setup_pro, args=(i+1,))
        list_pro.append(k)
    list_pro[0].start()
    list_pro[1].start()
    list_pro[2].start()
| ["[email protected]"] | |
ed5eb4fea9abb6d9e7d56595dc9603d9abd22bf4 | 77c641fd0708b279dddbe01f6af32a8531b93185 | /marketsim/gen/_intrinsic/observable/minmax.py | 734d5a8a443fbbb0acaf22a6f90bff1435caa3bc | [] | no_license | abensrhir/marketsimulator | aea286afd2bb2e0c8a547bfa879601aef21c0cd5 | f9f55c72fb34cdbec42b96737ca20839f26c6299 | refs/heads/master | 2020-12-13T20:55:55.795344 | 2014-02-24T22:52:24 | 2014-02-24T22:52:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,030 | py | from marketsim import event, _
import fold
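# Min_Impl / Max_Impl track the minimum / maximum of an observable over a rolling
# time window: candidate extremes are stored as levels in a sorted dict, each one
# is scheduled for removal self.timeframe later, and at() reports the best level
# still inside the window together with the most recent candidate held in self._x.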
class Min_Impl(fold.Last):
def __init__(self):
event.subscribe(self.source, _(self)._update, self)
self.reset()
def reset(self):
import blist
self._levels = blist.sorteddict()
self._x = None
def at(self, t):
p = self._levels.keys()[0] if len(self._levels) > 0 else None
x = self._x
if p is not None:
if x is not None:
return min(p,x)
return p
return x
def _remove(self, x):
self._levels[x] -= 1
if self._levels[x] == 0:
del self._levels[x]
self.fire(self)
def update(self, t, x):
if x is not None and (self._x is None or x < self._x):
if x not in self._levels:
self._levels[x] = 0
self._levels[x] += 1
self._scheduler.scheduleAfter(self.timeframe, _(self, x)._remove)
self._x = x
self.fire(self)
class Max_Impl(fold.Last):
def __init__(self):
event.subscribe(self.source, _(self)._update, self)
self.reset()
def reset(self):
import blist
self._levels = blist.sorteddict()
self._x = None
def at(self, t):
p = -self._levels.keys()[0] if len(self._levels) > 0 else None
x = self._x
if p is not None:
if x is not None:
return max(p,x)
return p
return x
def _remove(self, x):
self._levels[-x] -= 1
if self._levels[-x] == 0:
del self._levels[-x]
self.fire(self)
def update(self, t, x):
if x is not None and (self._x is None or x > self._x):
if -x not in self._levels:
self._levels[-x] = 0
self._levels[-x] += 1
self._scheduler.scheduleAfter(self.timeframe, _(self, x)._remove)
self._x = x
self.fire(self)
| ["[email protected]"] | |
20427a03af6d5566c36d42e5d8ea4bcbfd11aa93 | 33836016ea99776d31f7ad8f2140c39f7b43b5fe | /fip_collab/2015_02_19_strain_stress_vtk/old_version/Calculate_Stress_from_Strain_ORIGINAL.py | 46c772ad37f6fbc4da3a37d15695c54b8772e00f | [] | no_license | earthexploration/MKS-Experimentation | 92a2aea83e041bfe741048d662d28ff593077551 | 9b9ff3b468767b235e7c4884b0ed56c127328a5f | refs/heads/master | 2023-03-17T23:11:11.313693 | 2017-04-24T19:24:35 | 2017-04-24T19:24:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,842 | py | #
# Calculate_Stress_from_Strain_MKS.py
#
# Written by Matthew Priddy on February 19, 2015
#
from sys import *
from string import *
from math import *
from pylab import *
from random import *
from numpy import *
import itertools
from numpy import tensordot as td
import matplotlib
#matplotlib.use('PDF')
import linecache
import time
from matplotlib import pyplot as plt
from scipy import optimize
def Gmatrix(phi1, phi0, phi2):
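# Orientation matrix built from Bunge Euler angles (phi1, phi0, phi2): the
# standard Z-X-Z convention, with phi0 playing the role of the middle angle Phi.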
g = zeros((3,3))
g[0,0] = ( cos(phi1) * cos(phi2) ) - ( sin(phi1) * sin(phi2) * cos(phi0) )
g[0,1] = ( sin(phi1) * cos(phi2) ) + ( cos(phi1) * sin(phi2) * cos(phi0) )
g[0,2] = ( sin(phi2) * sin(phi0) )
g[1,0] = -( cos(phi1) * sin(phi2) ) - ( sin(phi1) * cos(phi2) * cos(phi0) )
g[1,1] = -( sin(phi1) * sin(phi2) ) + ( cos(phi1) * cos(phi2) * cos(phi0) )
g[1,2] = ( cos(phi2) * sin(phi0) )
g[2,0] = ( sin(phi1) * sin(phi0) )
g[2,1] = -( cos(phi1) * sin(phi0) )
g[2,2] = ( cos(phi0) )
return g
def Bmatrix(p00, p11, p22, p01, p02, p12):
B = zeros((3,3))
B[0,0] = p00
B[0,1] = p01
B[0,2] = p02
B[1,0] = p01
B[1,1] = p11
B[1,2] = p12
B[2,0] = p02
B[2,1] = p12
B[2,2] = p22
return B
def Cijkl_dot_dot_Skl(C,S):
# Vij = Cijkl*Skl
# Technically written as Vij = Cijkl*Slk, but Skl is symmetric in this work
value = zeros((3,3))
for i in range(3):
for j in range(3):
for k in range(3):
for l in range(3):
value[i,j] = value[i,j] + C[i,j,k,l] * S[k,l]
return value
def calc_Cijkl_from_Cij(Cij):
## ij or kl: 11 22 33 23 31 12 32 13 21
## m or n : 1 2 3 4 5 6 7 8 9
# Theory of dislocations, pg. 34.
Cijkl = zeros((3,3,3,3))
ia = 0
ib = 0
for i in range(3):
for j in range(3):
ia = i
if (i != j):
ia = 6-i-j
for k in range(3):
for l in range(3):
ib = k
if (k != l):
ib = 6-k-l
Cijkl[i,j,k,l] = Cij[ia,ib]
return Cijkl
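# rotT() is used in the main loop below but never defined in this snippet.  A
# minimal sketch of the assumed implementation, the standard rotation of a
# 4th-order tensor, C'_ijkl = g_im * g_jn * g_kp * g_lq * C_mnpq:
def rotT(g, T):
    # einsum is available through the star import of numpy above
    return einsum('im,jn,kp,lq,mnpq->ijkl', g, g, g, g, T)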
elements = 21*21*21
# (1c) Extract the material constants from the .inp files
# This should be automated, but for now we can hard code the input parameters
C11 = 172832.50
C12 = 97910.060
C13 = 73432.550
C33 = 192308.10
C44 = 49700.000
C66 = 0.5 * (C11 - C12)
shear_mod = (C44 * C66) ** 0.5
# For HCP crystal structures (e.g. Titanium)
Cij = zeros((6,6))
Cij[0,0] = C11; Cij[1,1] = C11
Cij[0,1] = C12; Cij[1,0] = C12
Cij[0,2] = C13; Cij[1,2] = C13; Cij[2,0] = C13; Cij[2,1] = C13
Cij[2,2] = C33
Cij[3,3] = C44; Cij[4,4] = C44
Cij[5,5] = C66
# Determine the 3x3x3x3 Stiffness matrix
Cijkl = calc_Cijkl_from_Cij(Cij)
# (1) Extract various values for use in this code
# (a) Extract the Euler angles for each element
euler_file = open(f6_EulerAngles,'r')
file_contents = euler_file.readlines()
euler = zeros((elements, 3))
for i in range(1+2,elements+3):
data1 = linecache.getline(f6_EulerAngles,i,module_globals=None)
data1 = data1.split()
euler[i-3,0] = float(data1[1])
euler[i-3,1] = float(data1[2])
euler[i-3,2] = float(data1[3])
# Total Strain
NumCycles = 3
strn_t00_el = zeros((elements, 2*NumCycles + 1)); strn_t11_el = zeros((elements, 2*NumCycles + 1)); strn_t22_el = zeros((elements, 2*NumCycles + 1))
strn_t01_el = zeros((elements, 2*NumCycles + 1)); strn_t02_el = zeros((elements, 2*NumCycles + 1)); strn_t12_el = zeros((elements, 2*NumCycles + 1))  # shear components, referenced in the loop below but not initialized in the original
for i in range(0,elements):
R = Gmatrix(euler[i,0], euler[i,1], euler[i,2]).T
for count in range(0,2 * NumCycles):
strain_0 = Bmatrix(strn_t00_el[i,count], strn_t11_el[i,count], strn_t22_el[i,count], strn_t01_el[i,count], strn_t02_el[i,count], strn_t12_el[i,count])
stress_0_from_strain = Cijkl_dot_dot_Skl(rotT(R.T, Cijkl),strain_0)
| ["[email protected]"] | |
d276364df8ec40da6b689afb1517955d749b9b02 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/eqpt/ingrcrcerrpktshist1w.py | 111b05ec20a3806ac9cbfd58cafef8a9e436340c | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,741 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class IngrCrcErrPktsHist1w(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = StatsClassMeta("cobra.model.eqpt.IngrCrcErrPktsHist1w", "Ingress CRC Error Packets")
counter = CounterMeta("fcsRate", CounterCategory.GAUGE, "packets-per-second", "FCS CRC Errored Packets rate")
counter._propRefs[PropCategory.IMPLICIT_MIN] = "fcsRateMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "fcsRateMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "fcsRateAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "fcsRateSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "fcsRateThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "fcsRateTr"
meta._counters.append(counter)
counter = CounterMeta("fcs", CounterCategory.COUNTER, "packets", "FCS CRC Errored Packets")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "fcsCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "fcsPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "fcsMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "fcsMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "fcsAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "fcsSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "fcsThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "fcsTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "fcsRate"
meta._counters.append(counter)
counter = CounterMeta("stompedRate", CounterCategory.GAUGE, "packets-per-second", "Stomped CRC Errored Packets rate")
counter._propRefs[PropCategory.IMPLICIT_MIN] = "stompedRateMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "stompedRateMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "stompedRateAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "stompedRateSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "stompedRateThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "stompedRateTr"
meta._counters.append(counter)
counter = CounterMeta("stomped", CounterCategory.COUNTER, "packets", "Stomped CRC Errored Packets")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "stompedCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "stompedPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "stompedMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "stompedMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "stompedAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "stompedSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "stompedThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "stompedTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "stompedRate"
meta._counters.append(counter)
meta.moClassName = "eqptIngrCrcErrPktsHist1w"
meta.rnFormat = "HDeqptIngrCrcErrPkts1w-%(index)s"
meta.category = MoCategory.STATS_HISTORY
meta.label = "historical Ingress CRC Error Packets stats in 1 week"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = True
meta.parentClasses.add("cobra.model.pc.AggrIf")
meta.parentClasses.add("cobra.model.l1.PhysIf")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Hist")
meta.superClasses.add("cobra.model.eqpt.IngrCrcErrPktsHist")
meta.rnPrefixes = [
('HDeqptIngrCrcErrPkts1w-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "fcsAvg", "fcsAvg", 56357, PropCategory.IMPLICIT_AVG)
prop.label = "FCS CRC Errored Packets average value"
prop.isOper = True
prop.isStats = True
meta.props.add("fcsAvg", prop)
prop = PropMeta("str", "fcsCum", "fcsCum", 56353, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "FCS CRC Errored Packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("fcsCum", prop)
prop = PropMeta("str", "fcsMax", "fcsMax", 56356, PropCategory.IMPLICIT_MAX)
prop.label = "FCS CRC Errored Packets maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("fcsMax", prop)
prop = PropMeta("str", "fcsMin", "fcsMin", 56355, PropCategory.IMPLICIT_MIN)
prop.label = "FCS CRC Errored Packets minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("fcsMin", prop)
prop = PropMeta("str", "fcsPer", "fcsPer", 56354, PropCategory.IMPLICIT_PERIODIC)
prop.label = "FCS CRC Errored Packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("fcsPer", prop)
prop = PropMeta("str", "fcsRate", "fcsRate", 56361, PropCategory.IMPLICIT_RATE)
prop.label = "FCS CRC Errored Packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("fcsRate", prop)
prop = PropMeta("str", "fcsRateAvg", "fcsRateAvg", 56373, PropCategory.IMPLICIT_AVG)
prop.label = "FCS CRC Errored Packets rate average value"
prop.isOper = True
prop.isStats = True
meta.props.add("fcsRateAvg", prop)
prop = PropMeta("str", "fcsRateMax", "fcsRateMax", 56372, PropCategory.IMPLICIT_MAX)
prop.label = "FCS CRC Errored Packets rate maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("fcsRateMax", prop)
prop = PropMeta("str", "fcsRateMin", "fcsRateMin", 56371, PropCategory.IMPLICIT_MIN)
prop.label = "FCS CRC Errored Packets rate minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("fcsRateMin", prop)
prop = PropMeta("str", "fcsRateSpct", "fcsRateSpct", 56374, PropCategory.IMPLICIT_SUSPECT)
prop.label = "FCS CRC Errored Packets rate suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("fcsRateSpct", prop)
prop = PropMeta("str", "fcsRateThr", "fcsRateThr", 56375, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "FCS CRC Errored Packets rate thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("fcsRateThr", prop)
prop = PropMeta("str", "fcsRateTr", "fcsRateTr", 56376, PropCategory.IMPLICIT_TREND)
prop.label = "FCS CRC Errored Packets rate trend"
prop.isOper = True
prop.isStats = True
meta.props.add("fcsRateTr", prop)
prop = PropMeta("str", "fcsSpct", "fcsSpct", 56358, PropCategory.IMPLICIT_SUSPECT)
prop.label = "FCS CRC Errored Packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("fcsSpct", prop)
prop = PropMeta("str", "fcsThr", "fcsThr", 56359, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "FCS CRC Errored Packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("fcsThr", prop)
prop = PropMeta("str", "fcsTr", "fcsTr", 56360, PropCategory.IMPLICIT_TREND)
prop.label = "FCS CRC Errored Packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("fcsTr", prop)
prop = PropMeta("str", "index", "index", 56338, PropCategory.REGULAR)
prop.label = "History Index"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("index", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "stompedAvg", "stompedAvg", 56393, PropCategory.IMPLICIT_AVG)
prop.label = "Stomped CRC Errored Packets average value"
prop.isOper = True
prop.isStats = True
meta.props.add("stompedAvg", prop)
prop = PropMeta("str", "stompedCum", "stompedCum", 56389, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "Stomped CRC Errored Packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("stompedCum", prop)
prop = PropMeta("str", "stompedMax", "stompedMax", 56392, PropCategory.IMPLICIT_MAX)
prop.label = "Stomped CRC Errored Packets maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("stompedMax", prop)
prop = PropMeta("str", "stompedMin", "stompedMin", 56391, PropCategory.IMPLICIT_MIN)
prop.label = "Stomped CRC Errored Packets minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("stompedMin", prop)
prop = PropMeta("str", "stompedPer", "stompedPer", 56390, PropCategory.IMPLICIT_PERIODIC)
prop.label = "Stomped CRC Errored Packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("stompedPer", prop)
prop = PropMeta("str", "stompedRate", "stompedRate", 56397, PropCategory.IMPLICIT_RATE)
prop.label = "Stomped CRC Errored Packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("stompedRate", prop)
prop = PropMeta("str", "stompedRateAvg", "stompedRateAvg", 56409, PropCategory.IMPLICIT_AVG)
prop.label = "Stomped CRC Errored Packets rate average value"
prop.isOper = True
prop.isStats = True
meta.props.add("stompedRateAvg", prop)
prop = PropMeta("str", "stompedRateMax", "stompedRateMax", 56408, PropCategory.IMPLICIT_MAX)
prop.label = "Stomped CRC Errored Packets rate maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("stompedRateMax", prop)
prop = PropMeta("str", "stompedRateMin", "stompedRateMin", 56407, PropCategory.IMPLICIT_MIN)
prop.label = "Stomped CRC Errored Packets rate minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("stompedRateMin", prop)
prop = PropMeta("str", "stompedRateSpct", "stompedRateSpct", 56410, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Stomped CRC Errored Packets rate suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("stompedRateSpct", prop)
prop = PropMeta("str", "stompedRateThr", "stompedRateThr", 56411, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Stomped CRC Errored Packets rate thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("stompedRateThr", prop)
prop = PropMeta("str", "stompedRateTr", "stompedRateTr", 56412, PropCategory.IMPLICIT_TREND)
prop.label = "Stomped CRC Errored Packets rate trend"
prop.isOper = True
prop.isStats = True
meta.props.add("stompedRateTr", prop)
prop = PropMeta("str", "stompedSpct", "stompedSpct", 56394, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Stomped CRC Errored Packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("stompedSpct", prop)
prop = PropMeta("str", "stompedThr", "stompedThr", 56395, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Stomped CRC Errored Packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("stompedThr", prop)
prop = PropMeta("str", "stompedTr", "stompedTr", 56396, PropCategory.IMPLICIT_TREND)
prop.label = "Stomped CRC Errored Packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("stompedTr", prop)
meta.namingProps.append(getattr(meta.props, "index"))
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("l1EthIfToEPg", "EPG", "cobra.model.fv.EPg"))
def __init__(self, parentMoOrDn, index, markDirty=True, **creationProps):
namingVals = [index]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
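# Illustrative sketch only (not part of the generated package): "index" is the
# sole naming property, so an instance needs just a parent MO or DN plus that
# index. The DN below is a hypothetical l1.PhysIf parent used as an example.
#
#     hist = IngrCrcErrPktsHist1w('topology/pod-1/node-101/sys/phys-[eth1/1]', index='0')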
# End of package file
# ##################################################
| [
"[email protected]"
] | |
4d6a69101128cf2a501beb4695b10e4e6725b142 | da130508b04587493424d6c95ab05a55dd70170f | /math/0x00-linear_algebra/2-size_me_please.py | e49b8b8ce7b567c07f70d7815294e0e636d80882 | [] | no_license | AndrewKalil/holbertonschool-machine_learning | ea38c0d1ef6ce2206da5f3903fcc22730404af9c | bb980395b146c9f4e0d4e9766c4a36f67de70d2e | refs/heads/master | 2023-07-09T04:09:24.271069 | 2021-08-11T02:29:54 | 2021-08-11T02:29:54 | 317,371,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | #!/usr/bin/env python3
""" 0x00. Linear Algebra """
def matrix_shape(matrix):
""" calculates shape of a matrix """
if matrix:
shape = [len(matrix)]
while type(matrix[0]) == list:
shape.append(len(matrix[0]))
matrix = matrix[0]
return shape
else:
return [0]
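# Examples (illustrative only):
#     matrix_shape([[1, 2], [3, 4], [5, 6]])  # -> [3, 2]
#     matrix_shape([])                        # -> [0]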
| [
"[email protected]"
] | |
d4088c4ab77b083f972d428a0ce87909942c2d89 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02399/s168551365.py | 01908c0ef89d62a27267c675c1736b6a0a60212f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | l=raw_input()
# split the input line into the two integers a and b
k = l.split()
a = int(k[0])
b = int(k[1])
# integer quotient and remainder
print int((a-(a%b))/b),
print int(a%b),
# real-valued quotient
a *= 1.0
b *= 1.0
if b==1:
    print a*1.0
else:
    if a/b>0.0000002:
        print a/b
else: print "0.00000001.99999982" | [
"[email protected]"
] | |
ca4f5cab50d67b22d56721e02feed6e8733cc4e4 | 7b102f9c8f2e3f9240090d1d67af50333a2ba98d | /gbd_2019/shared_code/cod_database/02. Map to GBD Cause list/mapping.py | f628105216254961b3a62a4acc97a8e2bfed2cf0 | [] | no_license | Nermin-Ghith/ihme-modeling | 9c8ec56b249cb0c417361102724fef1e6e0bcebd | 746ea5fb76a9c049c37a8c15aa089c041a90a6d5 | refs/heads/main | 2023-04-13T00:26:55.363986 | 2020-10-28T19:51:51 | 2020-10-28T19:51:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,422 | py | """Map data causes to GBD causes."""
import pandas as pd
import numpy as np
from pathlib2 import Path
from cod_prep.claude.cod_process import CodProcess
from cod_prep.downloaders import (
add_nid_metadata,
add_code_metadata,
add_cause_metadata,
get_garbage_from_package,
get_all_related_causes,
)
from cod_prep.claude.configurator import Configurator
from cod_prep.utils import report_if_merge_fail, print_log_message, distribute
class BridgeMapper(CodProcess):
"""Replace acauses with those in the bridge map.
Arguments:
source (str)
cause_set_version_id (int)
code_system (str)
Returns:
df, pandas DataFrame: only change is replacing some cause_ids
diag_df, pandas DataFrame: shows which cause_ids have been changed
"""
id_cols = ['nid', 'extract_type_id', 'location_id', 'year_id',
'age_group_id', 'sex_id', 'cause_id',
'site_id']
val_cols = ['deaths', 'deaths_rd', 'deaths_corr', 'deaths_raw']
# data type id for verbal autopsy
VA = 8
def __init__(self, source, cause_meta_df, code_system):
self.source = source
self.code_system = code_system
self.conf = Configurator("standard")
self.bridge_map_path = Path(self.conf.get_directory('bridge_maps'))
self.cause_meta_df = cause_meta_df
self.cache_options = {
'force_rerun': False,
'block_rerun': True,
'cache_results': False,
'cache_dir': 'standard'
}
def get_computed_dataframe(self, df):
"""Replace acauses with those in the bridge map."""
# VA sources are the only ones where this may not work
df = add_nid_metadata(df, ['data_type_id'], **self.cache_options)
has_verbal_autopsy = self.VA in df['data_type_id'].unique()
df.drop(columns='data_type_id', inplace=True)
if self.needs_bridging(has_verbal_autopsy):
file_name = self.get_file_name(has_verbal_autopsy)
map_df = pd.read_csv(self.bridge_map_path / file_name)
map_df = map_df[['acause', 'bridge_code']]
# add acause column to deaths data
bridge_mapped = add_cause_metadata(
df,
['acause'],
merge_col='cause_id',
cause_meta_df=self.cause_meta_df
)
# hack, this cause_id snuck in somehow...
bridge_mapped.loc[
bridge_mapped['cause_id'] == 606, 'acause'
] = 'gyne_femaleinfert'
report_if_merge_fail(bridge_mapped, 'acause', 'cause_id')
bridge_mapped.drop(['cause_id'], axis=1, inplace=True)
# perform zz bridge code redistribution before other bridge mapping
bridge_mapped = self.redistribute_zz_bridge_codes(bridge_mapped, map_df)
bridge_mapped = bridge_mapped.merge(
map_df, how='left', on='acause'
)
bridge_mapped = self.acause_to_bridge_code(bridge_mapped)
# bring cause_id back
bridge_mapped = add_cause_metadata(
bridge_mapped,
['cause_id'],
merge_col='acause',
cause_meta_df=self.cause_meta_df
)
# hack, this cause_id snuck in
bridge_mapped.loc[
bridge_mapped['acause'] == 'gyne_femaleinfert', 'cause_id'
] = 606
report_if_merge_fail(bridge_mapped, 'cause_id', 'acause')
# output diagnostic dataframe
self.diag_df = bridge_mapped
# drop unnecessary columns
bridge_mapped = self.clean_up(bridge_mapped)
return bridge_mapped
else:
self.diag_df = df
df = self.clean_up(df)
return df
def needs_bridging(self, has_verbal_autopsy):
"""
Check data type and code_system to see if the bridge map is needed.
"""
code_systems_to_bridge_map = [
"ICD9_detail", "ICD9_BTL", "ICD10_tabulated",
"ICD8_detail", "ICD8A",
"China_1991_2002", "India_SCD_states_rural", "India_MCCD_states_ICD10",
"India_MCCD_states_ICD9", "India_SRS_states_report",
"Russia_FMD_1989_1998", "ICD9_USSR_Tabulation", "INDEPTH_ICD10_VA",
"India_Maharashtra_SCD", "India_CRS", "PHL_VSR_1999_2005"
]
special_sources_to_bridge_map = [
"Russia_FMD_ICD9",
"India_SRS_states_report", "India_MCCD_Orissa_ICD10"
]
# not all VA sources use a bridge map... something to think about
# in the future, but not necessary right now
if has_verbal_autopsy | \
(self.code_system in code_systems_to_bridge_map) | \
(self.source in special_sources_to_bridge_map):
# we need to use the bridge map!
return True
else:
# we do not need to use the bridge map
return False
def get_file_name(self, has_verbal_autopsy):
"""Determine the file name needed based on the source or code system.
Note: The default file name will be the name of the code system,
with some exceptions. For some sources we have specified specific
files to bridge map with, all other sources will use the file
that matches its code_system.
"""
source_to_sheet = {
"India_MCCD_Orissa_ICD10": "India_MCCD_states_ICD10",
"India_MCCD_Delhi_ICD10": "India_MCCD_states_ICD10",
"Thailand_Public_Health_Statistics": "ICD10_tabulated",
"India_SRS_states_report": "India_SRS_states_report",
"UKR_databank_ICD10_tab": "ICD10_tabulated",
"Russia_FMD_ICD9": "Russia_FMD_1989_1998",
}
if has_verbal_autopsy and (self.source != 'India_SRS_states_report'):
file_name = 'INDEPTH_ICD10_VA'
else:
file_name = source_to_sheet.get(self.source, self.code_system)
return file_name + '.csv'
def redistribute_zz_bridge_codes(self, df, map_df):
"""
A mini-redistribution, but only redistributes causes bridge mapped to zz codes
"""
grouping_cols = list(set(self.id_cols) - {'cause_id'})
start_deaths = {col: df.groupby(grouping_cols)[col].sum() for col in self.val_cols}
zz_code_idxs = map_df['bridge_code'].str.startswith('ZZ-')
# get the order to do the zz code redistribution in:
# start on lowest level of hierarchy and work our way up
zz_code_targets = (map_df
.loc[zz_code_idxs, ['bridge_code']]
.drop_duplicates()
.assign(acause=lambda d: d['bridge_code'].str.replace('ZZ-', '_'))
.merge(self.cause_meta_df, on='acause')
.sort_values(['level', 'acause'], ascending=False)
.loc[:, 'bridge_code']
.tolist()
)
# don't distribute onto anything that maps to a zz code
all_causes_to_zz_codes = set(map_df.loc[zz_code_idxs, 'acause'])
for zz_code in zz_code_targets:
child_cause_ids = get_all_related_causes(zz_code.strip().replace('ZZ-', '_'),
self.cause_meta_df)
child_causes = self.cause_meta_df.loc[
self.cause_meta_df['cause_id'].isin(child_cause_ids),
'acause'].tolist()
acauses_to_redistribute = map_df.loc[map_df['bridge_code'] == zz_code, 'acause']
to_redistribute = df['acause'].isin(acauses_to_redistribute)
valid_child_causes = set(child_causes) - all_causes_to_zz_codes
print_log_message('Found ZZ code: {}, deaths: {}'
.format(zz_code, df.loc[to_redistribute, 'deaths'].sum()))
# distribute onto at least all combinations of these
# this is to ensure everything in df[to_redistribute]
# get weights
values_to_include = {
'acause': valid_child_causes,
}
for col in grouping_cols:
values_to_include[col] = df.loc[to_redistribute, col].unique()
distributed = distribute(df[to_redistribute],
based_on=df[df['acause'].isin(valid_child_causes)],
distribute_over='acause',
within=grouping_cols,
value_col='deaths',
values_to_include=values_to_include,
base_value=0.001, # this is mostly arbitrary
)
report_if_merge_fail(distributed, check_col='acause', merge_cols=grouping_cols)
# what follows is an unfortunate side effect of having multiple value columns
# in the data -- it makes the merging somewhat more involved than simply
# appending distributed data to existing data
# TODO: refactor this into a generic method in redistribution_utils
df = df.merge(distributed[grouping_cols + ['acause', 'deaths']],
how='outer',
on=grouping_cols + ['acause'],
suffixes=('', '_new'),
)
# default to 0 deaths in all values where new variables / IDs (i.e. new causes)
# are in the distributed data (right only)
# and where distributed does not have data (i.e. other causes in original
# data that weren't distributed onto) (left only)
df[self.val_cols + ['deaths_new']] = df[self.val_cols + ['deaths_new']].fillna(0)
# Set values that were distributed away from their cause to 0.
# This has the effect of moving deaths away from one cause to another.
df.loc[df['acause'].isin(acauses_to_redistribute), 'deaths'] = 0
# now add distributed data to old
df['deaths'] += df['deaths_new']
df.drop(columns='deaths_new', inplace=True)
# make sure deaths didn't move out of a nid-etid-site-location-year-sex-age group
for col in self.val_cols:
end_deaths = df.groupby(grouping_cols)[col].sum()
assert np.allclose(start_deaths[col], end_deaths), \
"Dropped/added deaths during ZZ code redistribution: " + \
"start {}: {}, end {}: {}".format(col, start_deaths[col], col, end_deaths)
return df
def acause_to_bridge_code(self, df):
"""Replace the acause with the bridge code."""
# there might still be zz codes in the data because we aren't
# performing zz code redistribution on the other value columns,
# so if something is coded to i.e. _neo in the raw data, then
# we keep it as _neo.
df['acause'].update(df['bridge_code'].str.replace('ZZ-', '_'))
return df
def get_diagnostic_dataframe(self):
"""Return a diagnostic dataframe.
Diagnostic dataframe shows all changes made due to bridge mapping.
        Maybe change this later so that there is some sort of output.
"""
if self.diag_df is None:
print("No run of get computed dataframe yet")
else:
return self.diag_df
def clean_up(self, df):
"""Group rogue duplicates."""
df = df.groupby(self.id_cols, as_index=False)[self.val_cols].sum()
return df
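# Illustrative usage sketch (not part of the original module). Both mappers
# follow the same CodProcess pattern: construct with metadata, then call
# get_computed_dataframe on a deaths dataframe. The argument values are
# placeholders supplied by the caller.
#
#     bridge_mapper = BridgeMapper(source, cause_meta_df, code_system)
#     df = bridge_mapper.get_computed_dataframe(df)
#
#     cause_mapper = GBDCauseMapper(cause_set_version_id, code_map)
#     df = cause_mapper.get_computed_dataframe(df, code_system_id)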
class GBDCauseMapper(CodProcess):
"""Convert cause codes into cause_ids.
Arguments:
id_cols (list):
data_col (list):
unique_cols (list):
Returns:
df, a pandas DataFrame with addition of cause_id
diag_df, a pandas DataFrame: assesses the difference
between different mapping versions
"""
id_cols = ['nid', 'extract_type_id', 'location_id', 'year_id',
'age_group_id', 'sex_id', 'cause_id', 'code_id',
'site_id']
data_col = ['deaths']
unique_cols = ['nid', 'extract_type_id', 'location_id', 'year_id',
'age_group_id', 'sex_id',
'cause_id', 'code_id', 'site_id']
# These are acauses 'sub_total', and '_sb'
unnecessary_causes = [920, 744]
cache_dir = str()
def __init__(self, cause_set_version_id, code_map):
self.cg = Configurator("standard")
self.cache_dir = self.cg.get_directory('db_cache')
self.cause_set_version_id = cause_set_version_id
self.code_map = code_map
    def get_computed_dataframe(self, df, code_system_id):
        """Map code id to cause id."""
        # make special cause adjustments
        df = self.special_cause_reassignment(df, code_system_id)
print_log_message("Merging with cause map")
# get code metadata from a file already cached
df = add_code_metadata(
df, ['cause_id'], code_system_id,
code_map=self.code_map
)
report_if_merge_fail(df, 'cause_id', 'code_id')
# Make sure the mappings are good!
print("Asserting it's all good")
self.assert_valid_mappings(df, code_system_id)
df = self.drop_unnecessary_causes(df, self.unnecessary_causes)
print("Collapsing")
df = self.collapse_and_sum_by_deaths(df)
return df
def drop_unnecessary_causes(self, df, unnecessary_causes):
# Drops causes set as unnecessary, subtotal and stillbirth
df = df.copy()
df = df[~df['cause_id'].isin(unnecessary_causes)]
return df
def special_cause_reassignment(self, df, code_system_id):
"""Replace the actual data cause under certain conditions.
There are instances where a PI has good reason to
believe that a certain group of deaths were assigned
to the wrong cause, and it is known what cause to re-assign
those deaths to. Implement here.
This essentially allows mapping based on not just the cause
and code system but based on other information like
the location, NID, year, etc.
It can also be used (sparingly) for hotfixes like
changing all codes with values 'acause_digest_gastrititis'
to be named 'acause_digest_gastritis'.
Args:
df (DataFrame): data with cause
Returns:
DataFrame: with any modifications
"""
cache_args = {
'force_rerun': False,
'block_rerun': True,
'cache_dir': 'standard',
'cache_results': False
}
# Some SRS codes get redistributed differently than
# other ICD10 datasets
df = add_nid_metadata(
df, 'source', **cache_args
)
if (df['source'] == "India_SRS_states_report").any():
print_log_message("Changing SRS codes to custom garbage groups")
assert (df['source'] == "India_SRS_states_report").all()
df = add_code_metadata(
df, 'value', code_system_id=code_system_id,
**cache_args
)
custom_grbg = pd.read_csv(
self.cg.get_resource("srs_custom_garbage_groups")
)
custom_grbg = custom_grbg.query('active == 1')
custom_grbg['value'] = custom_grbg['srs_custom_garbage_group']
custom_grbg = add_code_metadata(
custom_grbg, 'code_id', code_system_id=code_system_id,
merge_col='value', **cache_args
)
custom_grbg = custom_grbg.rename(
columns={'code_id': 'new_code_id'})
custom_grbg = custom_grbg[['package_id', 'new_code_id']]
gp_dfs = []
for package_id in custom_grbg.package_id.unique():
# THIS QUERIES THE DATABASE - BUT THERE SHOULD NEVER BE A TON
# OF SRS JOBS HAPPENING AT ONCE SO IT SHOULD BE OK
gp_df = get_garbage_from_package(
code_system_id, package_id, package_arg_type="package_id"
)
assert len(gp_df) != 0, \
"Found 0 codes for package {}".format(package_id)
gp_dfs.append(gp_df)
gp_df = pd.concat(gp_dfs, ignore_index=True)
gp_df = gp_df.merge(custom_grbg, how='left')
report_if_merge_fail(gp_df, 'new_code_id', 'package_id')
gp_df = gp_df[['value', 'new_code_id']]
gp_df['value'] = gp_df['value'].str.strip()
df = df.merge(gp_df, how='left', on='value')
df.loc[df['new_code_id'].notnull(), 'code_id'] = df['new_code_id']
df['code_id'] = df['code_id'].astype(int)
df = df.drop(['new_code_id', 'value'], axis=1)
df = df.drop('source', axis=1)
china_cdc_2008 = (df['nid'] == 270005) & (df['extract_type_id'] == 2)
# J96.00 - move five to four digit J96.0 (this should be a rule in formatting, only keep 4 digit detail)
five_dig_code = df['code_id'] == 13243
df.loc[
china_cdc_2008 & five_dig_code,
'code_id'
] = 13242
return df
def collapse_and_sum_by_deaths(self, df):
"""Group by final columns, summing across deaths.
Directly modifies the dataframe, keeping only the columns needed
to move on to the next Claude step. Also includes an assertion
that there are no duplicates.
"""
df = df.groupby(self.id_cols, as_index=False)[self.data_col].sum()
self.assert_unique_cols_unique(df)
return df
def assert_valid_mappings(self, df, code_system_id):
"""Test that the mapping worked.
Runs a suite of assertions to make sure that mapping was successful.
Args:
df (DataFrame): with at least code_id and cause_id
Returns:
None
Raises:
AssertionError: Any condition fails
"""
# add code value from cached code map
print("Adding value")
df = add_code_metadata(
df, ['value'], code_system_id,
force_rerun=False,
block_rerun=True,
cache_dir=self.cache_dir
)
report_if_merge_fail(df, 'value', 'code_id')
# get acause from cached cause hierarchy
print("Adding acause")
df = add_cause_metadata(
df, ['acause'],
cause_set_version_id=self.cause_set_version_id,
force_rerun=False,
block_rerun=True,
cache_dir=self.cache_dir
)
report_if_merge_fail(df, 'acause', 'cause_id')
# Test that all causes starting with 'acause_' are mapped correctly.
# acause_cvd, for example, should be mapped to 'cvd' (not 'cvd_ihd').
# 'acause__gc_X59' should be mapped to '_gc', etc.
print("Checking implied acauses")
check_df = df.loc[df['value'].str.startswith('acause_')]
check_df['implied_acause'] = \
check_df['value'].str.replace('acause_', '', 1)
check_df.loc[
check_df['value'].str.contains("acause__gc"),
'implied_acause'
] = "_gc"
bad_df = check_df.loc[
check_df['acause'] != check_df['implied_acause']
]
if len(bad_df) > 0:
bad_stuff = bad_df[['value', 'acause']].drop_duplicates()
raise AssertionError(
"These code values do not match their acause: "
"\n{}".format(bad_stuff)
)
print("Checking for bad values")
# assert incorrect acauses are gone
bad_acauses = ['acause_digest_gastrititis',
'acause_hiv_tb',
'acause_tb_drug']
bad_df = df.loc[df['value'].isin(bad_acauses)].value.unique()
if len(bad_df) > 0:
raise AssertionError(
"Found these bad code values in the data: {}".format(bad_stuff)
)
def assert_unique_cols_unique(self, df):
"""Test that columns that should uniquely identify the dataframe do."""
assert not df.duplicated(self.unique_cols).any() | [
"[email protected]"
] | |
afb7b1c1f7b6530a07d7f811577e8294e4a176a6 | 9dba8607dce414f9905700d7a4ac44668de5e1f1 | /Brelaz/rough_calculations/dir1_reinforcement.py | 3f25e1b2285ceab6ba6be10343ff326ed6f62365 | [] | no_license | anaiortega/XCmodels | c0463ffe38531578aee281456e88528882255cd7 | e9b8c2f996a21b8aa3314242f3cc12b0e391b5df | refs/heads/master | 2023-08-16T22:44:01.168775 | 2023-08-14T18:15:10 | 2023-08-14T18:15:10 | 141,140,177 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,191 | py | # -*- coding: utf-8 -*-
from rough_calculations import ng_cantilever
from rough_calculations import ng_simple_bending_reinforcement
from matplotlib import pyplot
from mpl_toolkits.mplot3d import Axes3D
from materials.sia262 import SIA262_limit_state_checking
beam= ng_cantilever.Cantilever()
beam.l= 2.45+0.5
#Loads
Qa= -2*135e3/2.5 #N/m
qa= -0.25*2500*9.81-2.35e3-8.1e3 # N/m2
Qd= 1.5*Qa # N
qd= 1.35*qa # N/m
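# evaluated: Qa = -108e3, Qd = -162e3, qa = -16581.25, qd ~ -22384.7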
Md= beam.getBendingMomentUnderUniformLoad(qd,0.0)+beam.getBendingMomentUnderConcentratedLoad(Qd,1.0-0.2,0.0)
Ma= beam.getBendingMomentUnderUniformLoad(qa,0.0)+beam.getBendingMomentUnderConcentratedLoad(Qa,2.45-0.2,0.0)
MdMax= min(Md,Ma)
print 'Md= ', Md/1e3, ' kN m/m Ma= ', Ma/1e3, 'kN m/m MdMax= ', MdMax/1e3, ' kN m/m'
Vd= beam.getShearUnderUniformLoad(qd,0.25)+beam.getShearUnderConcentratedLoad(Qd,1.0-0.2,0.25)
Va= beam.getShearUnderUniformLoad(qa,0.25)+beam.getShearUnderConcentratedLoad(Qa,2.45-0.2,0.25)
MVRd= beam.getBendingMomentUnderUniformLoad(qd,0.25)+beam.getBendingMomentUnderConcentratedLoad(Qd,1.0-0.2,0.25)
VdMax= max(Vd,Va)
print 'Vd= ', Vd/1e3, ' kN/m MVRd= ', MVRd/1e3, ' kN m/m Va= ', Va/1e3, 'kN/m VdMax= ', VdMax/1e3, ' kN/m'
#Reinforcement
from materials.sia262 import SIA262_materials
concrete= SIA262_materials.c50_60
reinfSteel= SIA262_materials.B500A
d= 0.25-0.035-20e-3/2.0
As= ng_simple_bending_reinforcement.AsSimpleBending(-MdMax,-concrete.fcd(),reinfSteel.fyd(),1.0,d)
print 'As= ', As*1e6, ' mm2'
VRd= SIA262_limit_state_checking.VuNoShearRebars(concrete,reinfSteel,0.0,-MVRd,As,2.5/2.0,d)
print 'VRd= ', VRd/1e3, ' kN VdMax= ', VdMax/1e3, ' kN'
#Reinforcement 2
Md2= beam.getBendingMomentUnderUniformLoad(qd,0.0)+beam.getBendingMomentUnderConcentratedLoad(Qd,1.0-0.2,1.2)
Ma2= beam.getBendingMomentUnderUniformLoad(qa,0.0)+beam.getBendingMomentUnderConcentratedLoad(Qa,2.45-0.2,1.2)
MdMax2= min(Md2,Ma2)
print 'Md2= ', Md2/1e3, ' kN m/m Ma2= ', Ma2/1e3, 'kN m/m MdMax2= ', MdMax2/1e3, ' kN m/m'
As2= ng_simple_bending_reinforcement.AsSimpleBending(-MdMax2,-concrete.fcd(),reinfSteel.fyd(),1.0,d)
print 'As2= ', As2*1e6, ' mm2'
#Fatigue
Mf= beam.getBendingMomentUnderConcentratedLoad(Qa,0.5,0.0)
print 'Mf= ', Mf/1e3, ' kN m/m'
| [
"[email protected]"
] | |
43a3de2ecfead616819fe5e028bf38e44a50baa1 | 1b78ca7f3250ebed418717c6ea28b5a77367f1b8 | /318.maximum-product-of-word-lengths/maximum-product-of-word-lengths.py | 4bb2a28a8a6aa610056989f8cf03d7720cd87fd1 | [] | no_license | JaniceLC/lc-all-solutions | ced854f31b94f44c0b03a0677988805e3b9ee718 | 3f2a4ee8c09a8890423c6a22c73f470eccf979a2 | refs/heads/master | 2020-04-05T19:53:31.307528 | 2018-11-12T04:18:45 | 2018-11-12T04:18:45 | 157,155,285 | 0 | 2 | null | 2018-11-12T04:13:22 | 2018-11-12T04:13:22 | null | UTF-8 | Python | false | false | 619 | py | class Solution(object):
def maxProduct(self, words):
"""
:type words: List[str]
:rtype: int
"""
        # build a 26-bit signature per word: bit k is set iff chr(ord('a') + k) occurs in the word
        bitmap = [0] * len(words)
        mask = 0x01
        ans = 0
        for i in xrange(0, len(words)):
            word = words[i]
            for c in word:
                bitmap[i] |= (mask << (ord(c) - ord('a')))
        # two words share no letters exactly when their bit signatures do not overlap
        for i in xrange(0, len(words)):
for j in xrange(0, i):
if bitmap[i] & bitmap[j] == 0:
ans = max(ans, len(words[i]) * len(words[j]))
return ans
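# Illustrative check (classic example for this problem):
#     Solution().maxProduct(["abcw", "baz", "foo", "bar", "xtfn", "abcdef"])  # -> 16, from "abcw" * "xtfn"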
| [
"[email protected]"
] | |
45ce1099bc5ffdfa946930b8766b76fde1714949 | 3d0ae7c8693463faa11bacad8e6ea9d0d70b9eb1 | /pytools/utilities/python/readonly_shelf.py | 6e1644dd96a518813865861187fe91557dd97f85 | [] | no_license | stefie10/slu_hri | a76f79094bd1740676fec5d889411ba3b1d9dc26 | 50753379953e1ff822162eeab094cffe4a30f3e1 | refs/heads/master | 2022-12-14T01:07:51.522258 | 2020-08-31T00:50:12 | 2020-08-31T00:50:12 | 291,386,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | import shelve
def open(filename, writeback=True):
return DbfilenameReadonlyShelf(filename, writeback=writeback)
class DbfilenameReadonlyShelf(shelve.Shelf):
"""
Shelf implementation using the "anydbm" generic dbm interface,
read only. Gets rid of annoying error message on shutdown when it
tries to write back.
"""
def __init__(self, filename, writeback):
import anydbm
shelve.Shelf.__init__(self, anydbm.open(filename, flag='r'), protocol=2, writeback=writeback)
def __del__(self):
self.dict.close()
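# Illustrative usage (the path is a hypothetical example):
#     db = open('/path/to/results.shelf')   # this module's open(), not the builtin
#     value = db['some_key']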
| [
"[email protected]"
] | |
9a2e5f2affbe659988b2dcc4868f355aca84fe6e | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p3BR/R1/benchmark/startQiskit_noisy97.py | ca96178cd0302f984e45a6a0b7044d5df04c19b2 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,183 | py | # qubit number=3
# total number=16
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
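# Worked examples for the two helpers above (easy to verify by hand):
#     bitwise_dot("011", "110")  -> "1"    (0*1 + 1*1 + 1*0 = 1, mod 2)
#     bitwise_xor("110", "000")  -> "011"  (element-wise XOR, result reversed)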
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
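# Illustrative note: for n = 2 and an f that returns "1" only on "11", the loop
# above adds a single multi-controlled X (a Toffoli here) and no basis-flipping
# X gates, since "11" contains no zeros.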
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.rx(-0.09738937226128368,input_qubit[2]) # number=2
prog.h(input_qubit[1]) # number=3
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
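# Example (by hand): bernstein_test_1("101") -> bitwise_dot gives 0*1 + 1*0 + 1*1 = 1, then 1 XOR 1 = "0"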
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
    sample_shot = 4000
writefile = open("../data/startQiskit_noisy97.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = FakeYorktown()
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| [
"[email protected]"
] | |
3f9b93cdc55810f2510840c677c58fcc3a808d26 | cd3fe9f814bbaab6a14749034e2c34963c9284c9 | /volttron/platform/vip/agent/subsystems/pubsub.py | 58fb2b778c266ffbca11d395a12333b8376a0f6f | [
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | SenHuang19/EnergyPlus-Volttron-Toolkit | 3b9d701238261e075936632b48cd661d42d0a85e | f89e68dc143e9ac01c2b07e975d21d64716bf876 | refs/heads/master | 2020-03-26T10:07:37.638443 | 2018-06-22T23:53:44 | 2018-06-22T23:53:44 | 144,782,102 | 0 | 0 | null | 2018-08-14T23:28:10 | 2018-08-14T23:28:10 | null | UTF-8 | Python | false | false | 37,846 | py | # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2017, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
from __future__ import absolute_import
from base64 import b64encode, b64decode
import inspect
import logging
import random
import re
import weakref
import gevent
from zmq import green as zmq
from zmq import SNDMORE
from volttron.platform.agent import json as jsonapi
from .base import SubsystemBase
from ..decorators import annotate, annotations, dualmethod, spawn
from ..errors import Unreachable, VIPError, UnknownSubsystem
from .... import jsonrpc
from volttron.platform.agent import utils
from ..results import ResultsDictionary
from gevent.queue import Queue, Empty
from collections import defaultdict
from datetime import timedelta
__all__ = ['PubSub']
min_compatible_version = '3.0'
max_compatible_version = ''
#utils.setup_logging()
_log = logging.getLogger(__name__)
def encode_peer(peer):
if peer.startswith('\x00'):
return peer[:1] + b64encode(peer[1:])
return peer
def decode_peer(peer):
if peer.startswith('\x00'):
return peer[:1] + b64decode(peer[1:])
return peer
class PubSub(SubsystemBase):
def __init__(self, core, rpc_subsys, peerlist_subsys, owner):
self.core = weakref.ref(core)
self.rpc = weakref.ref(rpc_subsys)
self.peerlist = weakref.ref(peerlist_subsys)
self._owner = owner
self._pubsubwithrpc = PubSubWithRPC(self.core, self.rpc)
self._send_via_rpc = False
self._parameters_needed = True
def platform_subscriptions():
return defaultdict(subscriptions)
def subscriptions():
return defaultdict(set)
self._my_subscriptions = defaultdict(platform_subscriptions)
self.protected_topics = ProtectedPubSubTopics()
core.register('pubsub', self._handle_subsystem, self._handle_error)
self.rpc().export(self._peer_push, 'pubsub.push')
self.vip_socket = None
self._results = ResultsDictionary()
self._event_queue = Queue()
self._retry_period = 300.0
self._processgreenlet = None
def setup(sender, **kwargs):
# pylint: disable=unused-argument
self._processgreenlet = gevent.spawn(self._process_loop)
core.onconnected.connect(self._connected)
self.vip_socket = self.core().socket
def subscribe(member): # pylint: disable=redefined-outer-name
for peer, bus, prefix, all_platforms in annotations(
member, set, 'pubsub.subscriptions'):
# XXX: needs updated in light of onconnected signal
self._add_subscription(prefix, member, bus, all_platforms)
#_log.debug("SYNC: all_platforms {}".format(self._my_subscriptions['internal'][bus][prefix]))
inspect.getmembers(owner, subscribe)
core.onsetup.connect(setup, self)
def _connected(self, sender, **kwargs):
"""
Synchronize local subscriptions with PubSubService upon receiving connected signal.
param sender: identity of sender
type sender: str
param kwargs: optional arguments
type kwargs: pointer to arguments
"""
self.synchronize()
def _process_callback(self, sender, bus, topic, headers, message):
"""Handle incoming subscription pushes from PubSubService. It iterates over all subscriptions to find the
subscription matching the topic and bus. It then calls the corresponding callback on finding a match.
param sender: identity of the publisher
type sender: str
param bus: bus
type bus: str
param topic: publishing topic
type topic: str
param headers: header information for the incoming message
type headers: dict
param message: actual message
type message: dict
"""
peer = 'pubsub'
handled = 0
for platform in self._my_subscriptions:
#_log.debug("SYNC: process callback subscriptions: {}".format(self._my_subscriptions[platform][bus]))
buses = self._my_subscriptions[platform]
if bus in buses:
subscriptions = buses[bus]
for prefix, callbacks in subscriptions.iteritems():
if topic.startswith(prefix):
handled += 1
for callback in callbacks:
callback(peer, sender, bus, topic, headers, message)
if not handled:
# No callbacks for topic; synchronize with sender
self.synchronize()
def _viperror(self, sender, error, **kwargs):
if isinstance(error, Unreachable):
self._peer_drop(self, error.peer)
def _peer_add(self, sender, peer, **kwargs):
# Delay sync by some random amount to prevent reply storm.
delay = random.random()
self.core().spawn_later(delay, self.synchronize, peer)
def _peer_drop(self, sender, peer, **kwargs):
self._sync(peer, {})
def _sync(self, peer, items):
items = {(bus, prefix) for bus, topics in items.iteritems()
for prefix in topics}
remove = []
for bus, subscriptions in self._peer_subscriptions.iteritems():
for prefix, subscribers in subscriptions.iteritems():
item = bus, prefix
try:
items.remove(item)
except KeyError:
subscribers.discard(peer)
if not subscribers:
remove.append(item)
else:
subscribers.add(peer)
for bus, prefix in remove:
subscriptions = self._peer_subscriptions[bus]
assert not subscriptions.pop(prefix)
for bus, prefix in items:
self._add_peer_subscription(peer, bus, prefix)
def _peer_sync(self, items):
peer = bytes(self.rpc().context.vip_message.peer)
assert isinstance(items, dict)
self._sync(peer, items)
def _add_peer_subscription(self, peer, bus, prefix):
try:
subscriptions = self._peer_subscriptions[bus]
except KeyError:
self._peer_subscriptions[bus] = subscriptions = dict()
try:
subscribers = subscriptions[prefix]
except KeyError:
subscriptions[prefix] = subscribers = set()
subscribers.add(peer)
def _peer_subscribe(self, prefix, bus=''):
peer = bytes(self.rpc().context.vip_message.peer)
for prefix in prefix if isinstance(prefix, list) else [prefix]:
self._add_peer_subscription(peer, bus, prefix)
def _peer_unsubscribe(self, prefix, bus=''):
peer = bytes(self.rpc().context.vip_message.peer)
try:
subscriptions = self._peer_subscriptions[bus]
except KeyError:
return
if prefix is None:
remove = []
for topic, subscribers in subscriptions.iteritems():
subscribers.discard(peer)
if not subscribers:
remove.append(topic)
for topic in remove:
del subscriptions[topic]
else:
for prefix in prefix if isinstance(prefix, list) else [prefix]:
subscribers = subscriptions[prefix]
subscribers.discard(peer)
if not subscribers:
del subscriptions[prefix]
def _peer_list(self, prefix='', bus='', subscribed=True, reverse=False):
peer = bytes(self.rpc().context.vip_message.peer)
if bus is None:
buses = self._peer_subscriptions.iteritems()
else:
buses = [(bus, self._peer_subscriptions[bus])]
if reverse:
test = prefix.startswith
else:
test = lambda t: t.startswith(prefix)
results = []
for bus, subscriptions in buses:
for topic, subscribers in subscriptions.iteritems():
if test(topic):
member = peer in subscribers
if not subscribed or member:
results.append((bus, topic, member))
return results
def _peer_publish(self, topic, headers, message=None, bus=''):
peer = bytes(self.rpc().context.vip_message.peer)
self._distribute(peer, topic, headers, message, bus)
def _distribute(self, peer, topic, headers, message=None, bus=''):
self._check_if_protected_topic(topic)
try:
subscriptions = self._peer_subscriptions[bus]
except KeyError:
subscriptions = dict()
subscribers = set()
for prefix, subscription in subscriptions.iteritems():
if subscription and topic.startswith(prefix):
subscribers |= subscription
if subscribers:
sender = encode_peer(peer)
json_msg = jsonapi.dumps(jsonrpc.json_method(
None, 'pubsub.push',
[sender, bus, topic, headers, message], None))
frames = [zmq.Frame(b''), zmq.Frame(b''),
zmq.Frame(b'RPC'), zmq.Frame(json_msg)]
socket = self.core().socket
for subscriber in subscribers:
socket.send(subscriber, flags=SNDMORE)
socket.send_multipart(frames, copy=False)
return len(subscribers)
def _peer_push(self, sender, bus, topic, headers, message):
'''Handle incoming subscription pushes from peers.'''
peer = bytes(self.rpc().context.vip_message.peer)
handled = 0
sender = decode_peer(sender)
self._process_callback(sender, bus, topic, headers, message)
def synchronize(self):
"""Synchronize local subscriptions with the PubSubService.
"""
result = next(self._results)
items = [{platform: {bus: subscriptions.keys()} for platform, bus_subscriptions in self._my_subscriptions.items()
for bus, subscriptions in bus_subscriptions.items()}]
for subscriptions in items:
sync_msg = jsonapi.dumps(
dict(subscriptions=subscriptions)
)
frames = [b'synchronize', b'connected', sync_msg]
# For backward compatibility with old pubsub
if self._send_via_rpc:
delay = random.random()
self.core().spawn_later(delay, self.rpc().notify, 'pubsub', 'pubsub.sync', subscriptions)
else:
# Parameters are stored initially, in case remote agent/platform is using old pubsub
if self._parameters_needed:
kwargs = dict(op='synchronize', subscriptions=subscriptions)
self._save_parameters(result.ident, **kwargs)
self.vip_socket.send_vip(b'', 'pubsub', frames, result.ident, copy=False)
def list(self, peer, prefix='', bus='', subscribed=True, reverse=False, all_platforms=False):
"""Gets list of subscriptions matching the prefix and bus for the specified peer.
param peer: peer
type peer: str
param prefix: prefix of a topic
type prefix: str
param bus: bus
type bus: bus
param subscribed: subscribed or not
type subscribed: boolean
param reverse: reverse
type reverse:
:returns: List of subscriptions, i.e, list of tuples of bus, topic and flag to indicate if peer is a
subscriber or not
:rtype: list of tuples
:Return Values:
List of tuples [(topic, bus, flag to indicate if peer is a subscriber or not)]
"""
# For backward compatibility with old pubsub
if self._send_via_rpc:
return self.rpc().call(peer, 'pubsub.list', prefix,
bus, subscribed, reverse)
else:
result = next(self._results)
# Parameters are stored initially, in case remote agent/platform is using old pubsub
if self._parameters_needed:
kwargs = dict(op='list', prefix=prefix, subscribed=subscribed, reverse=reverse, bus=bus)
self._save_parameters(result.ident, **kwargs)
list_msg = jsonapi.dumps(dict(prefix=prefix, all_platforms=all_platforms,
subscribed=subscribed, reverse=reverse, bus=bus))
frames = [b'list', list_msg]
self.vip_socket.send_vip(b'', 'pubsub', frames, result.ident, copy=False)
return result
def _add_subscription(self, prefix, callback, bus='', all_platforms=False):
if not callable(callback):
raise ValueError('callback %r is not callable' % (callback,))
try:
if not all_platforms:
self._my_subscriptions['internal'][bus][prefix].add(callback)
else:
self._my_subscriptions['all'][bus][prefix].add(callback)
#_log.debug("SYNC: add subscriptions: {}".format(self._my_subscriptions['internal'][bus][prefix]))
except KeyError:
_log.error("PUBSUB something went wrong in add subscriptions")
@dualmethod
@spawn
def subscribe(self, peer, prefix, callback, bus='', all_platforms=False):
"""Subscribe to topic and register callback.
Subscribes to topics beginning with prefix. If callback is
supplied, it should be a function taking four arguments,
callback(peer, sender, bus, topic, headers, message), where peer
is the ZMQ identity of the bus owner sender is identity of the
publishing peer, topic is the full message topic, headers is a
case-insensitive dictionary (mapping) of message headers, and
message is a possibly empty list of message parts.
:param peer
:type peer
:param prefix prefix to the topic
:type prefix str
:param callback callback method
:type callback method
:param bus bus
:type bus str
:param platforms
:type platforms
:returns: Subscribe is successful or not
:rtype: boolean
:Return Values:
Success or Failure
"""
# For backward compatibility with old pubsub
        if self._send_via_rpc:
self._add_subscription(prefix, callback, bus)
return self.rpc().call(peer, 'pubsub.subscribe', prefix, bus=bus)
else:
result = self._results.next()
# Parameters are stored initially, in case remote agent/platform is using old pubsub
if self._parameters_needed:
kwargs = dict(op='subscribe', prefix=prefix, bus=bus)
self._save_parameters(result.ident, **kwargs)
self._add_subscription(prefix, callback, bus, all_platforms)
sub_msg = jsonapi.dumps(
dict(prefix=prefix, bus=bus, all_platforms=all_platforms)
)
frames = [b'subscribe', sub_msg]
self.vip_socket.send_vip(b'', 'pubsub', frames, result.ident, copy=False)
return result
@subscribe.classmethod
def subscribe(cls, peer, prefix, bus='', all_platforms=False):
def decorate(method):
annotate(method, set, 'pubsub.subscriptions', (peer, bus, prefix, all_platforms))
return method
return decorate
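    # Illustrative sketch only (not part of the original module): typical agent-side
    # usage of the two subscribe forms above.  The agent class, topic strings and
    # callback name below are assumptions made for the example.
    #
    #   class ListenerAgent(Agent):
    #       @PubSub.subscribe('pubsub', 'devices/campus/building')
    #       def on_device(self, peer, sender, bus, topic, headers, message):
    #           _log.debug("matched %s: %r", topic, message)
    #
    #       @Core.receiver('onstart')
    #       def onstart(self, sender, **kwargs):
    #           # runtime (instance) form of the same subscription
    #           self.vip.pubsub.subscribe(peer='pubsub',
    #                                     prefix='analysis/results',
    #                                     callback=self.on_device)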
def _peer_push(self, sender, bus, topic, headers, message):
"""
Added for backward compatibility with old pubsub
param sender: publisher
type sender: str
param bus: bus
type callback: str
param topic: topic for the message
type topic: str
param headers: header for the message
type headers: dict
param message: actual message
type message: dict
"""
peer = bytes(self.rpc().context.vip_message.peer)
handled = 0
sender = decode_peer(sender)
self._process_callback(sender, bus, topic, headers, message)
def _drop_subscription(self, prefix, callback, bus='', platform='internal'):
"""
Drop the subscription for the specified prefix, callback and bus.
param prefix: prefix to be removed
type prefix: str
param callback: callback method
type callback: method
param bus: bus
type bus: bus
return: list of topics/prefixes
:rtype: list
:Return Values:
List of prefixes
"""
topics = []
bus_subscriptions = dict()
subscriptions = dict()
if prefix is None:
if callback is None:
if platform in self._my_subscriptions:
bus_subscriptions = self._my_subscriptions[platform]
if bus in bus_subscriptions:
subscriptions = bus_subscriptions.pop(bus)
topics = subscriptions.keys()
else:
if platform in self._my_subscriptions:
bus_subscriptions = self._my_subscriptions[platform]
if bus in bus_subscriptions:
subscriptions = bus_subscriptions[bus]
remove = []
for topic, callbacks in subscriptions.iteritems():
try:
callbacks.remove(callback)
except KeyError:
pass
else:
topics.append(topic)
if not callbacks:
remove.append(topic)
for topic in remove:
del subscriptions[topic]
if not subscriptions:
del bus_subscriptions[bus]
if not bus_subscriptions:
del self._my_subscriptions[platform]
if not topics:
raise KeyError('no such subscription')
else:
_log.debug("PUSUB unsubscribe my subscriptions: {0} {1}".format(prefix, self._my_subscriptions))
if platform in self._my_subscriptions:
bus_subscriptions = self._my_subscriptions[platform]
if bus in bus_subscriptions:
subscriptions = bus_subscriptions[bus]
if callback is None:
try:
del subscriptions[prefix]
except KeyError:
return []
else:
try:
callbacks = subscriptions[prefix]
except KeyError:
return []
try:
callbacks.remove(callback)
except KeyError:
pass
if not callbacks:
try:
del subscriptions[prefix]
except KeyError:
return []
topics = [prefix]
if not subscriptions:
del bus_subscriptions[bus]
if not bus_subscriptions:
del self._my_subscriptions[platform]
return topics
def unsubscribe(self, peer, prefix, callback, bus='', all_platforms=False):
"""Unsubscribe and remove callback(s).
Remove all handlers matching the given info - peer, callback and bus, which was used earlier to subscribe as
well. If all handlers for a topic prefix are removed, the topic is also unsubscribed.
param peer: peer
type peer: str
param prefix: prefix that needs to be unsubscribed
type prefix: str
param callback: callback method
type callback: method
param bus: bus
type bus: bus
return: success or not
:rtype: boolean
:Return Values:
success or not
"""
# For backward compatibility with old pubsub
        if self._send_via_rpc:
topics = self._drop_subscription(prefix, callback, bus)
return self.rpc().call(peer, 'pubsub.unsubscribe', topics, bus=bus)
else:
subscriptions = dict()
result = next(self._results)
if not all_platforms:
platform = 'internal'
topics = self._drop_subscription(prefix, callback, bus, platform)
subscriptions[platform] = dict(prefix=topics, bus=bus)
else:
platform = 'all'
topics = self._drop_subscription(prefix, callback, bus, platform)
subscriptions[platform] = dict(prefix=topics, bus=bus)
# Parameters are stored initially, in case remote agent/platform is using old pubsub
if self._parameters_needed:
kwargs = dict(op='unsubscribe', prefix=topics, bus=bus)
self._save_parameters(result.ident, **kwargs)
unsub_msg = jsonapi.dumps(subscriptions)
frames = [b'unsubscribe', unsub_msg]
self.vip_socket.send_vip(b'', 'pubsub', frames, result.ident, copy=False)
return result
def publish(self, peer, topic, headers=None, message=None, bus=''):
"""Publish a message to a given topic via a peer.
Publish headers and message to all subscribers of topic on bus.
If peer is None, use self. Adds volttron platform version
compatibility information to header as variables
min_compatible_version and max_compatible version
param peer: peer
type peer: str
param topic: topic for the publish message
type topic: str
param headers: header info for the message
type headers: None or dict
param message: actual message
type message: None or any
param bus: bus
type bus: str
return: Number of subscribers the message was sent to.
:rtype: int
:Return Values:
Number of subscribers
"""
if headers is None:
headers = {}
headers['min_compatible_version'] = min_compatible_version
headers['max_compatible_version'] = max_compatible_version
if peer is None:
peer = 'pubsub'
# For backward compatibility with old pubsub
if self._send_via_rpc:
return self.rpc().call(
peer, 'pubsub.publish', topic=topic, headers=headers,
message=message, bus=bus)
else:
result = next(self._results)
# Parameters are stored initially, in case remote agent/platform is using old pubsub
if self._parameters_needed:
kwargs = dict(op='publish', peer=peer,
topic=topic, bus=bus,
headers=headers, message=message)
self._save_parameters(result.ident, **kwargs)
json_msg = jsonapi.dumps(dict(bus=bus, headers=headers, message=message))
frames = [zmq.Frame(b'publish'), zmq.Frame(str(topic)), zmq.Frame(str(json_msg))]
#<recipient, subsystem, args, msg_id, flags>
self.vip_socket.send_vip(b'', 'pubsub', frames, result.ident, copy=False)
return result
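    # Hedged usage sketch for publish() (not part of the original module; the topic,
    # headers and message values below are assumptions):
    #
    #   result = self.vip.pubsub.publish('pubsub',
    #                                    topic='record/example',
    #                                    headers={'message_type': 'example'},
    #                                    message={'value': 42})
    #   result.get(timeout=5)   # block until the platform acknowledges the publish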
def _check_if_protected_topic(self, topic):
required_caps = self.protected_topics.get(topic)
if required_caps:
user = str(self.rpc().context.vip_message.user)
caps = self._owner.vip.auth.get_capabilities(user)
if not set(required_caps) <= set(caps):
msg = ('to publish to topic "{}" requires capabilities {},'
' but capability list {} was'
' provided').format(topic, required_caps, caps)
raise jsonrpc.exception_from_json(jsonrpc.UNAUTHORIZED, msg)
def _handle_subsystem(self, message):
"""Handler for incoming messages
param message: VIP message from PubSubService
type message: dict
"""
self._event_queue.put(message)
@spawn
def _process_incoming_message(self, message):
"""Process incoming messages
param message: VIP message from PubSubService
type message: dict
"""
op = message.args[0].bytes
if op == 'request_response':
result = None
try:
result = self._results.pop(bytes(message.id))
except KeyError:
pass
if self._parameters_needed:
self._send_via_rpc = False
self._parameters_needed = False
self._pubsubwithrpc.clear_parameters()
del self._pubsubwithrpc
response = message.args[1].bytes
#_log.debug("Message result: {}".format(response))
if result:
result.set(response)
elif op == 'publish':
try:
                topic = message.args[1].bytes
data = message.args[2].bytes
except IndexError:
return
try:
msg = jsonapi.loads(data)
headers = msg['headers']
message = msg['message']
sender = msg['sender']
bus = msg['bus']
self._process_callback(sender, bus, topic, headers, message)
except KeyError as exc:
_log.error("Missing keys in pubsub message: {}".format(exc))
else:
_log.error("Unknown operation ({})".format(op))
def _process_loop(self):
"""Incoming message processing loop"""
for msg in self._event_queue:
self._process_incoming_message(msg)
def _handle_error(self, sender, message, error, **kwargs):
"""Error handler. If UnknownSubsystem error is received, it implies that agent is connected to platform that has
OLD pubsub implementation. So messages are resent using RPC method.
param message: Error message
type message: dict
param error: indicates error type
type error: error class
param **kwargs: variable arguments
type **kwargs: dict
"""
if isinstance(error, UnknownSubsystem):
#Must be connected to OLD pubsub. Try sending using RPC
self._send_via_rpc = True
self._pubsubwithrpc.send(self._results, message)
else:
try:
result = self._results.pop(bytes(message.id))
except KeyError:
return
result.set_exception(error)
def _save_parameters(self, result_id, **kwargs):
"""Save the parameters for later use.
        param result_id: async result id
type result_id: float
param **kwargs: parameters to be stored
type **kwargs: dict
"""
end_time = utils.get_aware_utc_now() + timedelta(seconds=60)
event = self.core().schedule(end_time, self._cancel_event, result_id)
if kwargs is not None:
kwargs['event'] = event
self._pubsubwithrpc.parameters[result_id] = kwargs
def _cancel_event(self, ident):
"""Cancel event
param ident: event id
        type ident: float
"""
try:
            parameters = self._pubsubwithrpc.parameters.pop(ident)
event = parameters['event']
event.cancel()
except KeyError:
return
try:
            result = self._results.pop(ident)
result.set_exception(gevent.Timeout)
except KeyError:
return
class PubSubWithRPC(object):
"""For backward compatibility with old PubSub. The input parameters for each pubsub call is stored for short period
till we establish that the agent is connected to platform with old pubsub or not. Once this is established, the
parameters are no longer stored and this class is longer used."""
def __init__(self, core, rpc):
self.parameters = dict()
self._rpc = rpc
self._core = core
def send(self, results, message):
"""Check the message id to determine the type of call: subscribe or publish or list or unsubscribe.
Retrieve the corresponding input parameters and make the correct RPC call.
param results: Async results dictionary
type results: Weak dictionary
param message: Error message
type:
"""
id = bytes(message.id)
try:
parameters = self.parameters.pop(id)
except KeyError:
_log.error("Missing key {}".format(id))
return
try:
if parameters['op'] == 'synchronize':
self._core().spawn(self._synchronize, id, results, parameters)
elif parameters['op'] == 'subscribe':
self._core().spawn(self._subscribe, id, results, parameters)
elif parameters['op'] == 'publish':
self._core().spawn(self._publish, id, results, parameters)
elif parameters['op'] == 'list':
self._core().spawn(self._list, id, results, parameters)
elif parameters['op'] == 'unsubscribe':
self._core().spawn(self._unsubscribe, id, results, parameters)
else:
_log.error("Error: Unknown operation {}".format(parameters['op']))
except KeyError as exc:
_log.error("Error: Missing KEY in message {}".format(exc))
def _synchronize(self, results_id, results, parameters):
"""Unsubscribe call using RPC
param results_id: Asynchronous result ID required to the set response for the caller
type results_id: float (hash value)
param results: Async results dictionary
type results: Weak dictionary
param parameters: Input parameters for the unsubscribe call
"""
try:
subscriptions = parameters['subscriptions']
event = parameters['event']
event.cancel()
except KeyError:
return
self._rpc().notify('pubsub', 'pubsub.sync', subscriptions)
def _subscribe(self, results_id, results, parameters):
"""Subscribe call using RPC
        param results_id: Asynchronous result ID required to set the response for the caller
type results_id: float (hash value)
param results: Async results dictionary
type results: Weak dictionary
param parameters: Input parameters for the subscribe call
"""
try:
result = results.pop(bytes(results_id))
except KeyError:
result = None
try:
prefix = parameters['prefix']
bus = parameters['bus']
event = parameters['event']
event.cancel()
except KeyError:
return
try:
response = self._rpc().call('pubsub', 'pubsub.subscribe', prefix, bus=bus).get(timeout=5)
if result is not None:
result.set(response)
except gevent.Timeout as exc:
if result is not None:
result.set_exception(exc)
def _list(self, results_id, results, parameters):
"""List call using RPC
        param results_id: Asynchronous result ID required to set the response for the caller
type results_id: float (hash value)
param results: Async results dictionary
type results: Weak dictionary
param parameters: Input parameters for the list call
"""
try:
result = results.pop(bytes(results_id))
except KeyError:
result = None
try:
prefix = parameters['prefix']
subscribed = parameters['subscribed']
reverse = parameters['reverse']
bus = parameters['bus']
event = parameters['event']
event.cancel()
except KeyError:
return
try:
response = self._rpc().call('pubsub', 'pubsub.list', prefix,
bus, subscribed, reverse).get(timeout=5)
if result is not None:
result.set(response)
except gevent.Timeout as exc:
if result is not None:
result.set_exception(exc)
def _publish(self, results_id, results, parameters):
"""Publish call using RPC
        param results_id: Asynchronous result ID required to set the response for the caller
type results_id: float (hash value)
param results: Async results dictionary
type results: Weak dictionary
param parameters: Input parameters for the publish call
"""
try:
result = results.pop(bytes(results_id))
except KeyError:
result = None
try:
topic = parameters['topic']
headers = parameters['headers']
message = parameters['message']
bus = parameters['bus']
event = parameters['event']
event.cancel()
except KeyError:
return
try:
response = self._rpc().call(
'pubsub', 'pubsub.publish', topic=topic, headers=headers,
message=message, bus=bus).get(timeout=5)
if result is not None:
result.set(response)
except gevent.Timeout as exc:
if result is not None:
result.set_exception(exc)
def _unsubscribe(self, results_id, results, parameters):
"""Unsubscribe call using RPC
        param results_id: Asynchronous result ID required to set the response for the caller
type results_id: float (hash value)
param results: Async results dictionary
type results: Weak dictionary
param parameters: Input parameters for the unsubscribe call
"""
try:
result = results.pop(bytes(results_id))
except KeyError:
result = None
try:
topics = parameters['prefix']
bus = parameters['bus']
event = parameters['event']
event.cancel()
except KeyError:
return
try:
response = self._rpc().call('pubsub', 'pubsub.unsubscribe', topics, bus=bus).get(timeout=5)
if result is not None:
result.set(response)
except gevent.Timeout as exc:
if result is not None:
result.set_exception(exc)
def clear_parameters(self):
"""Clear all the saved parameters.
"""
try:
for ident, param in self.parameters.iteritems():
param['event'].cancel()
self.parameters.clear()
except KeyError:
return
class ProtectedPubSubTopics(object):
"""Simple class to contain protected pubsub topics"""
def __init__(self):
self._dict = {}
self._re_list = []
def add(self, topic, capabilities):
if isinstance(capabilities, basestring):
capabilities = [capabilities]
if len(topic) > 1 and topic[0] == topic[-1] == '/':
regex = re.compile('^' + topic[1:-1] + '$')
self._re_list.append((regex, capabilities))
else:
self._dict[topic] = capabilities
def get(self, topic):
if topic in self._dict:
return self._dict[topic]
for regex, capabilities in self._re_list:
if regex.match(topic):
return capabilities
return None
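    # Minimal illustrative sketch (assumed topic/capability names, not from the
    # original file):
    #
    #   topics = ProtectedPubSubTopics()
    #   topics.add('alerts', 'can_publish_alerts')                # exact-prefix entry
    #   topics.add('/devices/.*/all/', ['can_publish_devices'])   # regex entry
    #   topics.get('alerts')            # -> ['can_publish_alerts']
    #   topics.get('devices/b1/all')    # -> ['can_publish_devices']
    #   topics.get('some/other/topic')  # -> None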
| [
"[email protected]"
] | |
5d1cb88fa75275344805c8e438afef54358aaf7b | 0b1b92e30893d4428b0e04342490da8aef121a65 | /Python/find_peak_element.py | 162886f43b6c6d0a6de053c773e2447a4969f82c | [] | no_license | miaojiang1987/LeetCode | 280dc892f8adbf8c18f30c180e76b045b3797f8c | d8f96b0ec1a85abeef1ce8c0cc409ed501ce088b | refs/heads/master | 2021-06-09T14:46:43.858955 | 2021-05-20T09:06:24 | 2021-05-20T09:06:24 | 155,755,065 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | class Solution(object):
def findPeakElement(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if not nums:
return None
l,r=0,len(nums)-1
while l<r:
mid=l+(r-l)//2
if nums[mid]>nums[mid+1]:
r=mid
else:
l=mid+1
return l | [
"[email protected]"
] | |
280ef9746c4ed0ee0513728f4b5d8d3ab3d2d34f | 1eb382ad4712721f646bf478fea747c928f47177 | /plot_perturbation_analysis.py | a542f264f5da6c4a9dff770b207998ac2e7f6258 | [
"MIT"
] | permissive | rmodi6/sentence-representations | ad8f03c6f4588020c1fcf6a691fc3b83ebae8e0f | 4124b8705002ce7188a0473c9840fef6befae0b5 | refs/heads/master | 2023-04-09T23:10:11.873093 | 2023-03-26T18:55:40 | 2023-03-26T18:55:40 | 215,374,321 | 0 | 1 | MIT | 2023-03-26T18:55:41 | 2019-10-15T18:56:57 | Python | UTF-8 | Python | false | false | 4,430 | py | import os
import json
import copy
# external libs imports
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
# project imports
from data import load_vocabulary, index_instances, generate_batches
from util import load_pretrained_model
if __name__ == '__main__':
training_commands = []
choices = {"dan": range(1, 4+1), "gru": range(1, 4+1)}
models = {"dan": None, "gru": None}
vocabs = {"dan": None, "gru": None}
for seq2vec_name, _ in choices.items():
serialization_dir = os.path.join("serialization_dirs", f"main_{seq2vec_name}_5k_with_emb")
vocab_path = os.path.join(serialization_dir, "vocab.txt")
config_path = os.path.join(serialization_dir, "config.json")
weights_path = os.path.join(serialization_dir, "model.ckpt.index")
model_files_present = all([os.path.exists(path)
for path in [vocab_path, config_path, weights_path]])
if not model_files_present:
epochs = 8 if seq2vec_name == "dan" else 4 # gru is slow, use only 4 epochs
training_command = (f"python train.py main "
f"data/imdb_sentiment_train_5k.jsonl "
f"data/imdb_sentiment_dev.jsonl "
f"--seq2vec-choice {seq2vec_name} "
f"--embedding-dim 50 "
f"--num-layers 4 "
f"--num-epochs {epochs} "
f"--suffix-name _{seq2vec_name}_5k_with_emb "
f"--pretrained-embedding-file data/glove.6B.50d.txt ")
training_commands.append(training_command)
continue
model = load_pretrained_model(serialization_dir)
models[seq2vec_name] = model
vocab, _ = load_vocabulary(vocab_path)
vocabs[seq2vec_name] = vocab
if training_commands:
print("\nFirst, please finish the missing model training using the following commands:")
print("\n".join(training_commands))
exit()
original_instance = {"text_tokens": "the film performances were awesome".split()}
updates = ["worst", "okay", "cool"]
updated_instances = []
for update in updates:
updated_instance = copy.deepcopy(original_instance)
updated_instance["text_tokens"][4] = update
updated_instances.append(updated_instance)
all_instances = [original_instance]+updated_instances
layer_representations = {}
for seq2vec_name in choices.keys():
model = models[seq2vec_name]
vocab = vocabs[seq2vec_name]
all_indexed_instances = index_instances(copy.deepcopy(all_instances), vocab)
batches = generate_batches(all_indexed_instances, 4)
layer_representations[seq2vec_name] = model(**batches[0],
training=False)["layer_representations"]
for seq2vec_name, representations in layer_representations.items():
representations = np.asarray(representations)
differences_across_layers = {"worst": [], "okay": [], "cool": []}
for layer_num in choices[seq2vec_name]:
original_representation = representations[0, layer_num-1, :]
updated_representations = representations[1:, layer_num-1,:]
differences = [sum(np.abs(original_representation-updated_representation))
for updated_representation in updated_representations]
differences_across_layers["worst"].append(float(differences[0]))
differences_across_layers["okay"].append(float(differences[1]))
differences_across_layers["cool"].append(float(differences[2]))
# Make the plots
plt.style.use('seaborn-whitegrid')
plt.plot(choices[seq2vec_name], differences_across_layers["worst"], label="worst")
plt.plot(choices[seq2vec_name], differences_across_layers["okay"], label="okay")
plt.plot(choices[seq2vec_name], differences_across_layers["cool"], label="cool")
plt.xlabel("Layer")
plt.ylabel("Perturbation Response")
plt.legend()
title = f"{seq2vec_name}: Perturbation Response vs Layer"
plt.title(title)
plt.savefig(os.path.join("plots", f"perturbation_response_{seq2vec_name}.png"))
plt.clf()
| [
"[email protected]"
] | |
b26baf5d67b85a511a6b942d78af2cb10ca51f02 | 620323fc090cebaf7aca456ff3f7fbbe1e210394 | /weather__yahoo.py | 6c01d6686eaff44c12a67e3dd687dc0531713f44 | [
"CC-BY-4.0"
] | permissive | gil9red/SimplePyScripts | bd2733372728bf9b9f00570e90316fa12116516b | 773c2c9724edd8827a1dbd91694d780e03fcb05a | refs/heads/master | 2023-08-31T04:26:09.120173 | 2023-08-30T17:22:59 | 2023-08-30T17:22:59 | 22,650,442 | 157 | 46 | null | 2023-09-08T17:51:33 | 2014-08-05T16:19:52 | Python | UTF-8 | Python | false | false | 973 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "ipetrash"
import requests
city = "Магнитогорск"
# # OR:
# city = 'Magnitogorsk'
url = (
"https://query.yahooapis.com/v1/public/yql?q=select item from weather.forecast where woeid in "
"(select woeid from geo.places(1) where text='{city}') and u='c'"
"&format=json&diagnostics=true".format(city=city)
)
rs = requests.get(url)
item = rs.json()["query"]["results"]["channel"]["item"]
# If you need an icon for the weather:
# https://developer.yahoo.com/weather/documentation.html in Condition Codes
# code = condition['code']
#
# Weather image: http://l.yimg.com/a/i/us/we/52/' + code + '.gif
# Example: http://l.yimg.com/a/i/us/we/52/26.gif
#
condition = item["condition"]
print("Current: {temp} °C, {text}".format(**condition))
print()
print("Forecast:")
for forecast in item["forecast"]:
print("{date}: {low} - {high} °C. {day}: {text}".format(**forecast))
| [
"[email protected]"
] | |
75769b77e4341042b1aebeac586a2b46403aecd8 | 307939a14bd837b67ebf18b28a99aa2ad9755d18 | /app_user_login/migrations/0002_users_birthdate.py | dcc3e72027f14118d5113f6a0934585363a9068b | [] | no_license | raqueloropeza/Django_LoginAndRegistration | 3c4d37742c09f5f4f442154251cf81ddf159cba6 | 89b63ded8c60a5b21cd3de0f84c1aed662e29d3b | refs/heads/master | 2023-05-07T19:37:00.592985 | 2021-06-03T16:55:30 | 2021-06-03T16:55:30 | 373,557,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | # Generated by Django 2.2.4 on 2021-02-25 15:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app_user_login', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='users',
name='birthdate',
field=models.DateField(default=1976),
preserve_default=False,
),
]
| [
"[email protected]"
] | |
611d4762679b0fa7bc528a0bf1515549c0bb6062 | 1b48dcc9b7a4dc7debff78e65d55617d04b74495 | /chapter10_multThreading/thread_condition.py | aa54f3e62df175f4a546fb383c91a2b5a0544aeb | [] | no_license | beenlyons/python_supreme | 15ad7baabfab57e55ea1521b173e507420a18b9e | 3279aa8db52c7c8a1295f24a39d228df7f77ce43 | refs/heads/master | 2020-03-21T17:30:37.150525 | 2018-07-24T03:54:46 | 2018-07-24T03:54:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,026 | py | from threading import Condition, Lock
import threading
# Condition variable, used for more complex synchronization between threads
# class XiaoAi(threading.Thread):
# def __init__(self, lock):
# self.lock = lock
# super().__init__(name="xiaoAi")
# def run(self):
# self.lock.acquire()
# print("{} : 在".format(self.name))
# self.lock.release()
#
# self.lock.acquire()
# print("{} : 好啊".format(self.name))
# self.lock.release()
#
# class TianMao(threading.Thread):
# def __init__(self, lock):
# self.lock = lock
# super().__init__(name="tianMao")
# def run(self):
# self.lock.acquire()
# print("{} : 小爱同学".format(self.name))
# self.lock.release()
#
# self.lock.acquire()
# print("{} : 我们来对古诗吧".format(self.name))
# self.lock.release()
class XiaoAi(threading.Thread):
def __init__(self, cond):
self.cond = cond
super().__init__(name="xiaoAi")
def run(self):
with self.cond:
self.cond.wait()
print("{} : 在".format(self.name))
self.cond.notify()
print("{} : 好啊".format(self.name))
class TianMao(threading.Thread):
def __init__(self, cond):
self.cond = cond
super().__init__(name="tianMao")
def run(self):
with self.cond:
print("{} : 小爱同学".format(self.name))
self.cond.notify()
self.cond.wait()
print("{} : 我们来对古诗吧".format(self.name))
self.cond.release()
if __name__ == '__main__':
cond = Condition()
xiaoai = XiaoAi(cond)
tianmao = TianMao(cond)
xiaoai.start()
tianmao.start()
# 启动顺序很重要, wait要先启动
# wait和notify一定要在with condition之后才能调用
# condition有两把锁, 底层锁会在wait的时候释放,上面的锁,会在每次调用wait时候分配一把
# 并放入到condition的等待队列中,等待notify方法的唤醒 | [
"[email protected]"
] | |
40c671b4b964f8a4f970e54ae2518d818bd8c8bd | 80ae9b5cfb45b6e9cf7873ef7c46e17e117e4019 | /data/CodeChef/HAND01.py | 6bd4e91e18b0489271a1a5f90c80e17e34ed505e | [] | no_license | Ritvik19/CodeBook | ef7764d89b790e902ede5802f36d5ca910d8a50e | 2b4ed7938bbf156553d6ba5cba6216449528f0fc | refs/heads/master | 2021-07-04T08:25:52.478719 | 2020-08-08T06:54:14 | 2020-08-08T06:54:14 | 138,744,302 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 889 | py | # Credits: Sir Isaac Newton is standing over a circle of radius 0.5641895835477563.
# Input
# The first line of the input contains an integer T denoting the number of test cases. The next T lines contain a string containing numbers and one or more special charters (?, !, %, $)
# Output
# For each test case, output a single line containing a single real number.
# Constraints
# Should contain all the constraints on the input data that you may have. Format it like:
# 1 ≤ T ≤ 100
# Example
# Input:
# 2
# 3!2?1
# 5%2$2
# Output:
# 5
# 6.0
for t in range (int(input())):
str1=input()
a=len(str1)
b=""
for i in range (a):
if(str1[i]=='!'):
b=b+'*'
elif(str1[i]=='?'):
b=b+'-'
elif(str1[i]=='%'):
b=b+'+'
elif(str1[i]=='$'):
b=b+'/'
else:
b=b+str1[i]
print(eval(b))
| [
"[email protected]"
] | |
2e4f86b82f95b90c80d1725615e2996fdb2cb164 | 9c3765dba0b249eb0a8da92076d2ae01291fc0e7 | /not_done/py_not_started/euler_392.py | 498d71d4902d7af330f192be8c5b3f979bc95492 | [] | no_license | saetar/pyEuler | 3a021f95a1856775bef87b38c753049b04282b80 | f0af7092e16c2109028b4b1aa5bed7a0057d3fe9 | refs/heads/master | 2020-03-21T12:05:15.430454 | 2018-06-15T03:50:50 | 2018-06-15T03:50:50 | 138,535,115 | 0 | 0 | null | 2018-06-25T02:40:43 | 2018-06-25T02:40:42 | null | UTF-8 | Python | false | false | 1,342 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ~ Jesse Rubin ~ project Euler ~
"""
Enmeshed unit circle
http://projecteuler.net/problem=392
A rectilinear grid is an orthogonal grid where the spacing between the gridlines does not have to be equidistant.
An example of such grid is logarithmic graph paper.
Consider rectilinear grids in the Cartesian coordinate system with the following properties:The gridlines are parallel to the axes of the Cartesian coordinate system.There are N+2 vertical and N+2 horizontal gridlines. Hence there are (N+1) x (N+1) rectangular cells.The equations of the two outer vertical gridlines are x = -1 and x = 1.The equations of the two outer horizontal gridlines are y = -1 and y = 1.The grid cells are colored red if they overlap with the unit circle, black otherwise.For this problem we would like you to find the positions of the remaining N inner horizontal and N inner vertical gridlines so that the area occupied by the red cells is minimized.
E.g. here is a picture of the solution for N = 10:
The area occupied by the red cells for N = 10 rounded to 10 digits behind the decimal point is 3.3469640797.
Find the positions for N = 400.
Give as your answer the area occupied by the red cells rounded to 10 digits behind the decimal point.
"""
def p392():
pass
if __name__ == '__main__':
p392() | [
"[email protected]"
] | |
8e559598071a54f18e8b5b49476b09087984d51e | dfe4d9f6b09109b22e76069b0f6e56993165e91d | /camp-python-2021-find-me-develop/apps/users/apps.py | 00812dc9a1174718ab09f30466d4af9874f10952 | [] | no_license | rhanmar/oi_projects_summer_2021 | ef6b06e003a1c54583df4b006d141df924adeafb | 0879ade24685b628624dce06698f8a0afd042000 | refs/heads/main | 2023-08-25T04:02:46.284756 | 2021-09-17T03:29:16 | 2021-09-17T03:29:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
from watson import search as watson
class UsersAppConfig(AppConfig):
"""Default configuration for Users app."""
name = "apps.users"
verbose_name = _("Users")
def ready(self):
# pylint: disable=unused-import,invalid-name
from .api.auth import scheme # noqa
from .signals import check_avatar, copy_default_avatar_to_media # noqa
User = self.get_model("User")
watson.register(
User,
fields=("first_name", "last_name", "email"),
)
| [
"[email protected]"
] | |
85a51ad7dc8750ab6d9a66bec400b2433c202821 | 368be25e37bafa8cc795f7c9f34e4585e017091f | /.history/app_fav_books/views_20201114172332.py | ae26a428bcd7b78a9862ed83d5fbd6fd90c986e0 | [] | no_license | steven-halla/fav_books_proj | ebcfbfda0e7f3cdc49d592c86c633b1d331da513 | 512005deb84ac906c9f24d4ab0939bd0db096716 | refs/heads/master | 2023-03-30T09:37:38.016063 | 2021-04-02T20:27:22 | 2021-04-02T20:27:22 | 354,125,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,847 | py | from django.shortcuts import render, redirect
from .models import *
from django.contrib import messages
def index(request):
return render(request, "index.html")
def register_New_User(request):
errors = User.objects.basic_validator(request.POST)
if len(errors) > 0:
for key, value in errors.items():
messages.error(request, value)
return redirect("/")
else:
first_name_from_post = request.POST['first_name']
last_name_from_post = request.POST['last_name']
email_from_post = request.POST['email']
password_from_post = request.POST['password']
new_user = User.objects.create(
first_name=first_name_from_post,
last_name=last_name_from_post,
email=email_from_post,
password=password_from_post
)
print(new_user.id)
request.session['user_id'] = new_user.id
return redirect('/register/success')
def books_info(request):
user = User.objects.get(id=request.session['user_id'])
context = {
"user":user
}
return render(request, "books_user_page.html", context)
def log_user_out(request):
request.session.clear()
return redirect("/")
def log_in(request):
email_from_post = request.POST['email']
password_from_post = request.POST['password']
user = User.objects.filter(email=email_from_post)
if len(user) > 0:
logged_user = user[0]
print(logged_user.first_name)
print(logged_user.password)
request.session['user_id'] = logged_user.id
return redirect("/register/success")
else:
messages.error(request, "email/password does not exist")
return redirect("register/success")
def add_new_book(request):
errors = Books.objects.basic_validator(request.POST)
| [
"[email protected]"
] | |
63f1c26ff5ab2a139d219ec741d15abd32daa59e | 58a14a13ccc948fe21082bb5df3a65b0ff6f5e81 | /dynamic_forms/models.py | bb337ff7fbd7edb6ed51e0af0e03ea9fcf8798ec | [
"BSD-3-Clause"
] | permissive | cheluis/django-dynamic-forms | 38dce222f17d18a45794cca904a6c18b7a721595 | c6555714d0acc1e732c245d1983b217be6590433 | refs/heads/master | 2020-12-27T12:15:37.247627 | 2015-04-09T13:12:50 | 2015-04-09T13:12:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,879 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
try: # pragma: no cover
from collections import OrderedDict
except ImportError: # pragma: no cover
from django.utils.datastructures import SortedDict as OrderedDict
from django.db import models
try: # pragma: no cover
from django.db.transaction import atomic
except ImportError: # pragma: no cover
from django.db.transaction import commit_on_success as atomic
from django.core.urlresolvers import reverse
from django.template.defaultfilters import slugify
from django.utils.crypto import get_random_string
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.html import escape, mark_safe
from django.utils.translation import ugettext_lazy as _
from dynamic_forms.actions import action_registry
from dynamic_forms.conf import settings
from dynamic_forms.fields import TextMultiSelectField
from dynamic_forms.formfields import formfield_registry
@python_2_unicode_compatible
class FormModel(models.Model):
name = models.CharField(_('Name'), max_length=50, unique=True)
submit_url = models.CharField(_('Submit URL'), max_length=100, unique=True,
help_text=_('The full URL path to the form. It should start '
'and end with a forward slash (<code>/</code>).'))
success_url = models.CharField(_('Success URL'), max_length=100,
help_text=_('The full URL path where the user will be '
'redirected after successfully sending the form. It should start '
'and end with a forward slash (<code>/</code>). If empty, the '
'success URL is generated by appending <code>done/</code> to the '
'“Submit URL”.'), blank=True, default='')
actions = TextMultiSelectField(_('Actions'), default='',
choices=action_registry.get_as_choices())
form_template = models.CharField(_('Form template path'), max_length=100,
default='dynamic_forms/form.html',
choices=settings.DYNAMIC_FORMS_FORM_TEMPLATES)
success_template = models.CharField(_('Success template path'),
max_length=100, default='dynamic_forms/form_success.html',
choices=settings.DYNAMIC_FORMS_SUCCESS_TEMPLATES)
allow_display = models.BooleanField(_('Allow display'), default=False,
help_text=_('Allow a user to view the input at a later time. This '
'requires the “Store in database” action to be active. The sender '
'will be given a unique URL to recall the data.'))
recipient_email = models.EmailField(_('Recipient email'), blank=True,
null=True, help_text=_('Email address to send form data.'))
class Meta:
ordering = ['name']
verbose_name = _('Dynamic form')
verbose_name_plural = _('Dynamic forms')
def __str__(self):
return self.name
def get_fields_as_dict(self):
"""
Returns an ``OrderedDict`` (``SortedDict`` when ``OrderedDict is not
available) with all fields associated with this form where their name
is the key and their label is the value.
"""
return OrderedDict(self.fields.values_list('name', 'label').all())
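    # Illustrative (assumed) result for a form holding two fields named
    # "your-name" and "message":
    #   form.get_fields_as_dict()
    #   # -> OrderedDict([('your-name', 'Your name'), ('message', 'Message')])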
def save(self, *args, **kwargs):
"""
Makes sure that the ``submit_url`` and -- if defined the
``success_url`` -- end with a forward slash (``'/'``).
"""
if not self.submit_url.endswith('/'):
self.submit_url = self.submit_url + '/'
if self.success_url:
if not self.success_url.endswith('/'):
self.success_url = self.success_url + '/'
else:
self.success_url = self.submit_url + 'done/'
super(FormModel, self).save(*args, **kwargs)
@python_2_unicode_compatible
class FormFieldModel(models.Model):
parent_form = models.ForeignKey(FormModel, on_delete=models.CASCADE,
related_name='fields')
field_type = models.CharField(_('Type'), max_length=255,
choices=formfield_registry.get_as_choices())
label = models.CharField(_('Label'), max_length=255)
name = models.SlugField(_('Name'), max_length=50, blank=True)
_options = models.TextField(_('Options'), blank=True, null=True)
position = models.SmallIntegerField(_('Position'), blank=True, default=0)
class Meta:
ordering = ['parent_form', 'position']
unique_together = ("parent_form", "name",)
verbose_name = _('Form field')
verbose_name_plural = _('Form fields')
def __str__(self):
return _('Field “%(field_name)s” in form “%(form_name)s”') % {
'field_name': self.label,
'form_name': self.parent_form.name,
}
def generate_form_field(self, form):
field_type_cls = formfield_registry.get(self.field_type)
field = field_type_cls(**self.get_form_field_kwargs())
field.contribute_to_form(form)
return field
def get_form_field_kwargs(self):
kwargs = self.options
kwargs.update({
'name': self.name,
'label': self.label,
})
return kwargs
@property
def options(self):
"""Options passed to the form field during construction."""
if not hasattr(self, '_options_cached'):
self._options_cached = {}
if self._options:
try:
self._options_cached = json.loads(self._options)
except ValueError:
pass
return self._options_cached
@options.setter
def options(self, opts):
if hasattr(self, '_options_cached'):
del self._options_cached
self._options = json.dumps(opts)
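    # Sketch of how the property pair above round-trips (values are assumptions):
    #   field.options = {'max_length': 100}   # setter serializes to JSON in _options
    #   field.options                         # getter parses and caches -> {'max_length': 100}
    # Invalid or empty JSON in _options silently falls back to an empty dict.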
def save(self, *args, **kwargs):
if not self.name:
self.name = slugify(self.label)
given_options = self.options
field_type_cls = formfield_registry.get(self.field_type)
invalid = set(self.options.keys()) - set(field_type_cls._meta.keys())
if invalid:
for key in invalid:
del given_options[key]
self.options = given_options
super(FormFieldModel, self).save(*args, **kwargs)
@python_2_unicode_compatible
class FormModelData(models.Model):
form = models.ForeignKey(FormModel, on_delete=models.SET_NULL,
related_name='data', null=True)
value = models.TextField(_('Form data'), blank=True, default='')
submitted = models.DateTimeField(_('Submitted on'), auto_now_add=True)
display_key = models.CharField(_('Display key'), max_length=24, null=True,
blank=True, db_index=True, default=None, unique=True,
help_text=_('A unique identifier that is used to allow users to view '
'their sent data. Unique over all stored data sets.'))
class Meta:
verbose_name = _('Form data')
verbose_name_plural = _('Form data')
def __str__(self):
return _('Form: “%(form)s” on %(date)s') % {
'form': self.form,
'date': self.submitted,
}
def save(self, *args, **kwargs):
with atomic():
if self.form.allow_display and not self.display_key:
dk = get_random_string(24)
while FormModelData.objects.filter(display_key=dk).exists():
dk = get_random_string(24)
self.display_key = dk
super(FormModelData, self).save(*args, **kwargs)
@property
def json_value(self):
return OrderedDict(sorted(json.loads(self.value).items()))
def pretty_value(self):
try:
output = ['<dl>']
for k, v in self.json_value.items():
output.append('<dt>%(key)s</dt><dd>%(value)s</dd>' % {
'key': escape(force_text(k)),
'value': escape(force_text(v)),
})
output.append('</dl>')
return mark_safe(''.join(output))
except ValueError:
return self.value
pretty_value.allow_tags = True
@property
def show_url(self):
"""
If the form this data set belongs to has
:attr:`~FormModel.allow_display` ``== True``, return the permanent URL.
If displaying is not allowed, return an empty string.
"""
if self.form.allow_display:
return reverse('dynamic_forms:data-set-detail',
kwargs={'display_key': self.display_key})
return ''
@property
def show_url_link(self):
"""
Similar to :attr:`show_url` but wraps the display key in an `<a>`-tag
linking to the permanent URL.
"""
if self.form.allow_display:
# TODO: Django >1.4
# Use format_html
return mark_safe('<a href="{0}">{1}</a>'.format(self.show_url,
self.display_key))
return ''
| [
"[email protected]"
] | |
47e588e7c429cf9a9c1d10db3d2ef25f983ed712 | 2850d9adba96bc4e73185de5d6adebf363a5c534 | /tce/tcloud/cvm/AssociateSecurityGroups.py | 11d8e5ca52a09b8e0e312396bb00ee9dfb7bb036 | [
"Apache-2.0"
] | permissive | FatAnker/tencentcloud-sdk-python | d8f757b12ad336e78a06b68a789ecc3c86d1d331 | d6f75a41dc7053cb51f9091f4d41b8cb7a837559 | refs/heads/master | 2020-04-30T22:34:16.740484 | 2019-04-28T11:14:11 | 2019-04-28T11:14:11 | 177,122,691 | 0 | 1 | null | 2019-03-22T10:46:01 | 2019-03-22T10:46:01 | null | UTF-8 | Python | false | false | 1,971 | py | # -*- coding: utf-8 -*-
import os
from tencentcloud.common import credential
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
# Import the client and models of the corresponding product module.
from tencentcloud.cvm.v20170312 import cvm_client, models
import json
# Import the optional configuration classes
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.common.profile.http_profile import HttpProfile
import ssl
from tce.tcloud.utils.config import global_config
ssl._create_default_https_context = ssl._create_unverified_context
region = global_config.get('regions')
params = global_config.get(region)
secretId = params['secretId']
secretKey = params['secretKey']
domain = params['domain']
try:
    # Instantiate a credential object; pass in the Tencent Cloud account secretId and secretKey
cred = credential.Credential(secretId, secretKey)
httpProfile = HttpProfile()
httpProfile.endpoint = "cvm."+domain
clientProfile = ClientProfile()
clientProfile.httpProfile = httpProfile
    # Instantiate a client object for the product to request (CVM in this example); clientProfile is optional.
client = cvm_client.CvmClient(cred, region, clientProfile)
    # Instantiate a CVM query request object; every API corresponds to one request object.
req = models.AssociateSecurityGroupsRequest()
    # Assigning request parameters from a string in standard JSON format is also supported; the code below is equivalent to assigning the parameters directly.
params = '{"InstanceIds":["ins-i4ekkudx","ins-gwggvy39"],"SecurityGroupIds":["sg-poxp7nok"]}'
req.from_json_string(params)
resp = client.AssociateSecurityGroups(req)
    # Print the response as a JSON-formatted string
print(resp.to_json_string())
    # Individual values can also be extracted.
    # See the official API documentation, or jump to the definition of the response object, for the meaning of the returned fields.
# print(resp.TotalCount)
except TencentCloudSDKException as err:
print(err) | [
"[email protected]"
] | |
5fa589d9a038fcbb13e019a6129a02a94b582d64 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_2751486_0/Python/axr123/a.py | 49363e53c0937f442e2efd0decfa7c6a90f1f400 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | import sys
vowels = 'aeiou'
def concons(str):
maxN = 0
n = 0
for l in str:
if l in vowels:
if n > maxN: maxN = n
n = 0
else:
n += 1
if n > maxN: maxN = n
return maxN
def solve(name, n):
count = 0
for s in range(len(name)):
for e in range(s+1, len(name)+1):
if concons(name[s:e]) >= n: count += 1
return count
numcases = int(sys.stdin.readline())
for c in range(numcases):
name, n = sys.stdin.readline().split()
print("Case #%d: %d" % (c+1, solve(name, int(n))))
| [
"[email protected]"
] | |
31be72f47f8097ba9df421e724f210ac08707884 | 602a4e86499841fbae43d84fc92908c533106aea | /core/forms.py | 92edcd63936e84acef0f101ba7523413660abe6f | [] | no_license | vden/TsoguNG | b187ccf1bef387417ec73467c51458d6f1443239 | f8d5e7ab9d85559aa163c232c9f28a24a2b7c2a4 | refs/heads/master | 2021-01-02T08:52:03.914218 | 2011-04-26T07:01:57 | 2011-04-26T07:01:57 | 1,663,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,173 | py | # -*- coding: utf-8 -*-
from django import forms
from core.fields import CalendarDateField
from django.contrib.admin import widgets as admin_widgets
from core import models
from datetime import datetime
def extra_form_factory(type):
class BaseExtraForm(forms.ModelForm):
time_choices = [('None',u'не задано')] + [('%s'%x,'%2d:00'%x) for x in xrange(0,25)]
slug = forms.SlugField(label=u'Адрес', required=False)
date_published = CalendarDateField(label=u'Дата публикации', required=False)
time = forms.ChoiceField(label = u'Время публикации', required=False, choices=time_choices)
view_template = forms.ModelChoiceField(label=u"Шаблон",queryset=type.available_templates(),empty_label="(стандартный шаблон)",required=False)
def __init__(self, *args, **kw):
try:
kw['initial'] = {'time':str(kw['instance'].date_published.hour)}
except:
kw['initial'] = {'time':'None'}
super(BaseExtraForm, self).__init__(*args, **kw)
def save(self):
s = super(BaseExtraForm, self).save()
if self['time'].data != 'None':
d = s.date_published
s.date_published = datetime(d.year, d.month, d.day, int(self['time'].data))
s.save()
class Meta:
model = type
fields = ('slug','view_template','not_browse','block','date_published')
return BaseExtraForm
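# Illustrative (assumed) usage of the factory above -- not part of the original module:
#   NewsExtraForm = extra_form_factory(News)           # News: a concrete BaseObject model
#   form = NewsExtraForm(request.POST, instance=obj)
#   if form.is_valid():
#       form.save()   # save() also folds the selected hour into date_published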
class BaseConfigletForm(forms.ModelForm):
bid = models.Configlet._meta.get_field("bid").formfield(widget=forms.HiddenInput())
value = forms.CharField(label = u'Значение')
remove = forms.CharField(label = u'Удалить')
class Meta:
model = models.Configlet
fields = ('predicate','value','bid')
def is_valid(self):
if self['predicate'].data:
return True
return False
def save(self):
conf = models.Configlet.objects.filter(bid = self['bid'].data,
predicate = self['predicate'].data)
if conf:
conf = conf[0]
if str(self['remove'].data) == 'True':
conf.delete()
return True
else:
conf = models.Configlet()
conf.predicate = self['predicate'].data
conf.value = self['value'].data
conf.bid = models.BaseObject.objects.get(id=self['bid'].data)
conf.save()
| [
"[email protected]"
] | |
3571215df13d920412f8c5912d86fde82b88a82e | fe4b49f22cd851ee4f7639bef720d774fbfb1248 | /src/comments/models.py | cc5f4cbe71dfee3814480f8a9c14549170dde5f7 | [] | no_license | tyagow/servicos-paraguai | b2c20a48651e7e046f4e86b0e94f026589dbc545 | a71078174a175f86f2a4f49fcaf26b8f91ea778d | refs/heads/master | 2020-12-24T05:39:57.243158 | 2017-05-23T14:14:28 | 2017-05-23T14:14:28 | 73,492,227 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 962 | py | from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
class CommentManager(models.Manager):
def filter_by_instance(self, instance):
content_type = ContentType.objects.get_for_model(instance.__class__)
obj_id = instance.id
qs = super(CommentManager, self).filter(content_type=content_type, object_id=obj_id).filter(aprovado=True)
return qs
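    # Illustrative (assumed) call -- not part of the original file:
    #   Comment.objects.filter_by_instance(some_post)
    # returns only approved comments whose generic foreign key points at some_post.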
class Comment(models.Model):
nome = models.CharField(max_length=60)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
conteudo = models.TextField()
timestamp = models.DateTimeField(auto_now_add=True)
aprovado = models.BooleanField(default=False)
objects = CommentManager()
def __str__(self):
return self.nome | [
"[email protected]"
] | |
c12d8a3717648e007e9abc3dc9d63a711c4d0582 | b864232c0133738e329e61ca74188c8eafe74108 | /misc/experiment/distortion/main.py | 80b7d04914341bb72e17df1ec3410fc3387e5877 | [] | no_license | QilinGu/tf-face-recognizer | 0b16af8225d4e3bd67b0bd2df3005b5f1a3a7f35 | d1092b72d01f08a7bbfb2f30072a60b8d8409804 | refs/heads/master | 2021-01-09T06:40:30.678375 | 2017-02-03T17:57:21 | 2017-02-03T17:57:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,419 | py | import os
import tensorflow as tf
def main(argv=None):
with open(os.path.join(os.path.dirname(__file__), 'face.png'), 'rb') as f:
png = f.read()
image = tf.image.decode_png(png, channels=3)
begin, size, _ = tf.image.sample_distorted_bounding_box(
tf.shape(image),
[[[8.0/112.0, 8.0/112.0, (112.0-8.0)/112.0, (112.0-8.0)/112.0]]],
min_object_covered=0.9)
image = tf.slice(image, begin, size)
resize = tf.random_uniform([2], minval=48, maxval=144, dtype=tf.int32)
image = tf.image.resize_images(image, resize, method=2)
image = tf.image.resize_images(image, [96, 96], method=2)
image = tf.image.random_brightness(image, max_delta=0.4)
image = tf.image.random_contrast(image, lower=0.6, upper=1.4)
image = tf.image.random_hue(image, max_delta=0.04)
image = tf.image.random_saturation(image, lower=0.6, upper=1.4)
images = tf.train.batch([tf.image.per_image_standardization(image)], 20)
summary = tf.summary.image('images', images, max_outputs=20)
writer = tf.summary.FileWriter(os.path.dirname(__file__))
with tf.Session() as sess:
tf.train.start_queue_runners(sess=sess)
summary_value, begin_value, size_value, resize_value = sess.run([summary, begin, size, resize])
print(begin_value, size_value, resize_value)
writer.add_summary(summary_value)
if __name__ == '__main__':
tf.app.run()
| [
"[email protected]"
] | |
8cfa28a1b18e8ac19215d87eebf5216525b2160e | 25fa5fdc9f67738332bd6f95a1e4f038cd286890 | /BOJ/단계별로 풀어보기/ch07_문자열/1152_단어의 개수.py | 0e051399bc66d888bae95cbbf9345f6aa8eeb91a | [] | no_license | mandos1995/online_judge | b0cfd56e3391495f22b9832895cddcea70334349 | 9b90bffdcbfb5369e8dd5dafbb07f8e9e7050617 | refs/heads/main | 2023-08-02T19:29:03.716295 | 2021-10-04T15:10:34 | 2021-10-04T15:10:34 | 329,517,747 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | '''
Problem
A string consisting only of upper- and lower-case English letters and spaces is given. How many words does it contain?
Write a program that counts them. Note that if the same word appears several times, every occurrence is counted.
'''
# solution
string = list(map(str,input().strip().split()))
print(len(string)) | [
"[email protected]"
] | |
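A quick sanity check of the word-counting approach in the solution above; the sample string below is an illustrative assumption, not taken from the original problem statement:

# Whitespace-separated words are counted, duplicates included.
s = "The Curious Case of Benjamin Button"
words = s.strip().split()
print(len(words))  # 6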
9040402abc555921137b96e21d59ea69a4a65b3f | ebbc32882b1bbb6e6935695a400e8ffd8c0fb7f6 | /ROAR/planning_module/mission_planner/waypoint_following_mission_planner.py | 81d8b4b89955d40a27fae6e422993f7a437d1132 | [
"Apache-2.0"
] | permissive | augcog/ROAR | 25fc3555934854871883de84bae49d84f86fbd7b | b7cd322f451ceccf5c53b331e15f5025f2e63bac | refs/heads/main | 2023-08-31T16:51:00.857463 | 2023-08-12T00:08:06 | 2023-08-12T00:08:06 | 302,450,761 | 29 | 222 | Apache-2.0 | 2023-08-26T07:04:26 | 2020-10-08T20:10:20 | Jupyter Notebook | UTF-8 | Python | false | false | 4,333 | py | from ROAR.planning_module.mission_planner.mission_planner import (
MissionPlanner,
)
from pathlib import Path
import logging
from typing import List, Optional
from ROAR.utilities_module.data_structures_models import Transform, Location, Rotation
from collections import deque
from ROAR.agent_module.agent import Agent
import numpy as np
class WaypointFollowingMissionPlanner(MissionPlanner):
"""
A mission planner that takes in a file that contains x,y,z coordinates, formulate into carla.Transform
"""
def run_in_series(self) -> deque:
"""
Regenerate waypoints from file
Find the waypoint that is closest to the current vehicle location.
return a mission plan starting from that waypoint
Args:
vehicle: current state of the vehicle
Returns:
mission plan that start from the current vehicle location
"""
super(WaypointFollowingMissionPlanner, self).run_in_series()
return self.produce_mission_plan()
def __init__(self, agent: Agent):
super().__init__(agent=agent)
self.logger = logging.getLogger(__name__)
self.file_path: Path = Path(self.agent.agent_settings.waypoint_file_path)
self.mission_plan = self.produce_mission_plan()
self._mission_plan_backup = self.mission_plan.copy()
self.logger.debug("Path Following Mission Planner Initiated.")
def produce_mission_plan(self) -> deque:
"""
Generates a list of waypoints based on the input file path
:return a list of waypoint
"""
raw_path: List[List[float]] = self._read_data_file()
length = self.agent.agent_settings.num_laps * len(raw_path)
mission_plan = deque(maxlen=length)
for coord in np.tile(raw_path, (self.agent.agent_settings.num_laps, 1)):
if len(coord) == 3 or len(coord) == 6:
mission_plan.append(self._raw_coord_to_transform(coord))
self.logger.debug(f"Computed Mission path of length [{len(mission_plan)}]")
return mission_plan
def produce_single_lap_mission_plan(self):
raw_path: List[List[float]] = self._read_data_file()
mission_plan = deque(maxlen=len(raw_path))
for coord in raw_path:
if len(coord) == 3 or len(coord) == 6:
mission_plan.append(self._raw_coord_to_transform(coord))
self.logger.debug(f"Computed Mission path of length [{len(mission_plan)}]")
return mission_plan
def _read_data_file(self) -> List[List[float]]:
"""
Read data file and generate a list of (x, y, z) where each of x, y, z is of type float
Returns:
List of waypoints in format of [x, y, z]
"""
result = []
with open(self.file_path.as_posix(), "r") as f:
for line in f:
result.append(self._read_line(line=line))
return result
def _raw_coord_to_transform(self, raw: List[float]) -> Optional[Transform]:
"""
transform coordinate to Transform instance
Args:
raw: coordinate in form of [x, y, z, pitch, yaw, roll]
Returns:
Transform instance
"""
if len(raw) == 3:
return Transform(
location=Location(x=raw[0], y=raw[1], z=raw[2]),
rotation=Rotation(pitch=0, yaw=0, roll=0),
)
elif len(raw) == 6:
return Transform(
location=Location(x=raw[0], y=raw[1], z=raw[2]),
rotation=Rotation(roll=raw[3], pitch=raw[4], yaw=raw[5]),
)
else:
self.logger.error(f"Point {raw} is invalid, skipping")
return None
def _read_line(self, line: str) -> List[float]:
"""
parse a line of string of "x,y,z" into [x,y,z]
Args:
line: comma-delimited line
Returns:
[x, y, z]
"""
try:
x, y, z = line.split(",")
x, y, z = float(x), float(y), float(z)
return [x, y, z]
except:
x, y, z, roll, pitch, yaw = line.split(",")
return [float(x), float(y), float(z), float(roll), float(pitch), float(yaw)]
def restart(self):
self.mission_plan = self._mission_plan_backup.copy() | [
"[email protected]"
] | |
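The _read_line docstring in the planner above describes the two accepted waypoint line formats ("x,y,z" or "x,y,z,roll,pitch,yaw"). A minimal standalone sketch of that parsing, written without the ROAR/carla dependencies (assumed unavailable here) and not part of the original module:

def parse_waypoint_line(line):
    # Split a comma-delimited line into 3 (location) or 6 (location + rotation) floats.
    values = [float(v) for v in line.strip().split(",")]
    if len(values) not in (3, 6):
        raise ValueError("expected 3 or 6 comma-separated values, got %d" % len(values))
    return values

print(parse_waypoint_line("10.0,2.5,0.3"))               # [10.0, 2.5, 0.3]
print(parse_waypoint_line("10.0,2.5,0.3,0.0,0.0,90.0"))  # location plus roll, pitch, yaw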
479a24ebea460d630afb100cbe446f2b98ea29c5 | 49819aef1336ddb8511e6dec53311777c0b41a7e | /apps/authentication/middleware.py | b2dd6834f09fcecbc03130d84405085fa4a502c7 | [] | no_license | devmaster54/pslam | d91f80340a89820ee596068f13fe6a628a93aab6 | e81316677a1db24bbedce70bf59f85d30583742d | refs/heads/master | 2022-12-24T15:20:39.219597 | 2020-10-02T15:46:20 | 2020-10-02T15:46:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | import re
from psalm import settings
from django.contrib.auth.decorators import login_required
class AuthVerificationMiddleware(object):
def process_exception(self, request, exception):
return None | [
"[email protected]"
] | |
b0a4d0f17558cbe4b82cc607cb5c64a7b2238ed4 | ae381913c23385f004b82161624097645ba8c4c8 | /Xianyang_modwt/projects/ensemble_models.py | 505bd0dc6a0bcac8eb2a2a3565d5e738a7deca6b | [
"MIT"
] | permissive | zjy8006/MonthlyRunoffForecastByAutoReg | aa37910fdc66276d0df9d30af6885209d4a4ebfc | 661fcb5dcdfbbb2ec6861e1668a035b50e69f7c2 | refs/heads/master | 2020-12-12T05:25:48.768993 | 2020-08-20T07:21:12 | 2020-08-20T07:21:12 | 259,588,564 | 7 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,102 | py | import matplotlib.pyplot as plt
import os
root_path = os.path.dirname(os.path.abspath('__file__'))
import sys
sys.path.append(root_path)
from tools.ensembler import ensemble
from Xianyang_modwt.projects.variables import variables
# Set the project parameters
ORIGINAL = 'XianyangRunoff1951-2018(1953-2018).xlsx'
STATION = 'Xianyang'
DECOMPOSER = 'modwt'
PREDICTOR = 'esvr' # esvr or gbrt or lstm
wavelet_level='db1-4'
# ensemble(
# root_path=root_path,
# original_series=ORIGINAL,
# station=STATION,
# decomposer=DECOMPOSER,
# variables = variables,
# predictor=PREDICTOR,
# predict_pattern='single_hybrid_1_ahead',
# wavelet_level=wavelet_level,
# )
for lead_time in [1,3,5,7]:
ensemble(
root_path=root_path,
original_series=ORIGINAL,
station=STATION,
decomposer=DECOMPOSER,
variables = variables,
predictor=PREDICTOR,
predict_pattern='single_hybrid_'+str(lead_time)+'_ahead_lag12_mi_ts0.1',
wavelet_level=wavelet_level,
)
| [
"[email protected]"
] | |
f024ccaadc2065d52a7c473fe13a3620d3f75af1 | 6d913683be43f459b6e29dd84f09c05234efeb4d | /single_cell_differentiation_cuomo_data/merge_parallelized_covariate_modulated_eqtls.py | 3cea2a67cb603b0c7c42cf93e49bc86f36b4b564 | [] | no_license | BennyStrobes/eqtl_factorization | 4f94d8e2e00cf1830fd008f3264d1f9c57f6b2a0 | e555485e40e44c51e86f67761e5200b370673910 | refs/heads/master | 2021-06-20T15:54:34.906395 | 2021-04-05T14:11:46 | 2021-04-05T14:11:46 | 198,705,414 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,186 | py | import numpy as np
import os
import sys
import pdb
def bf_fdr_multiple_testing_correction(variant_gene_pairs_eqtl_results_file, multple_testing_correction_results_file, fdr_thresh):
f = open(variant_gene_pairs_eqtl_results_file)
t = open(multple_testing_correction_results_file, 'w')
head_count = 0
genes = {}
for line in f:
line = line.rstrip()
data = line.split()
if head_count == 0:
head_count = head_count + 1
t.write(line + '\tnum_snps_in_gene\tfdr\n')
continue
gene_id = data[0]
variant_id = data[1]
pvalue = float(data[7])
if gene_id not in genes:
genes[gene_id] = (variant_id, pvalue, 1, line)
else:
old_pvalue = genes[gene_id][1]
old_count = genes[gene_id][2]
if pvalue <= old_pvalue:
genes[gene_id] = (variant_id, pvalue, old_count+1, line)
else:
genes[gene_id] = (genes[gene_id][0], genes[gene_id][1], old_count+1, genes[gene_id][3])
f.close()
# Loop through genes and do BF correction
bf_gene_array = []
for gene in genes.keys():
lead_variant = genes[gene][0]
lead_nominal_pvalue = genes[gene][1]
num_variants_at_gene = genes[gene][2]
test_line = genes[gene][3]
bf_corrected_pvalue = lead_nominal_pvalue*num_variants_at_gene
if bf_corrected_pvalue > 1.0:
bf_corrected_pvalue = 1.0
bf_gene_array.append((bf_corrected_pvalue, lead_variant, gene, num_variants_at_gene, test_line))
sorted_bf_gene_array = sorted(bf_gene_array, key=lambda tup: tup[0])
# BH correction
kk = 1
num_genes = len(sorted_bf_gene_array)
sig = True
for gene_tuple in sorted_bf_gene_array:
bf_pvalue = gene_tuple[0]
fdr = num_genes*bf_pvalue/kk
kk = kk + 1
if fdr > fdr_thresh:
sig = False
if sig == True:
t.write(gene_tuple[4] + '\t' + str(gene_tuple[3]) + '\t' + str(fdr) + '\n')
t.close()
def make_sure_files_exist(output_root, total_jobs, suffix):
booly = True
for job_number in range(total_jobs):
file_name = output_root + str(job_number) + '_' + str(total_jobs) + suffix
if os.path.isfile(file_name) == False:
print(file_name)
booly = False
return booly
def merge_parallelized_results(output_root, suffix, total_jobs):
to_run = make_sure_files_exist(output_root, total_jobs, suffix)
if to_run == False:
print('Missing required input files. Please re-evaluate :)')
return
# Open output (merged result) file handle
t = open(output_root + 'merged' + suffix, 'w')
t.write('Gene_id\tvariant_id\tchrom_num\tgene_tss\tvariant_position\tmaf\tcell_maf\tcovariate_modulated_eqtl_pvalue\teqtl_pvalue\n')
# Loop through parrallelized jobs
for job_number in range(total_jobs):
file_name = output_root + str(job_number) + '_' + str(total_jobs) + suffix
# Open file for one job
f = open(file_name)
# To identify header
head_count = 0
# Stream file from one job
for line in f:
line = line.rstrip()
# Standard line
t.write(line + '\n')
f.close()
# Delete file from single job
os.system ('rm ' + file_name)
t.close()
output_root = sys.argv[1]
total_jobs = int(sys.argv[2])
merge_parallelized_results(output_root, ".txt", total_jobs)
merged_file = output_root + 'merged.txt'
####################
# Multiple-testing correction
####################
# Output file
fdr_thresh=.01
multple_testing_correction_results_file = output_root + 'multiple_testing_bf_bh_' + str(fdr_thresh) + '_fdr_' + '.txt'
# Perform Bonferroni correction at variant level (per gene) and then BH at gene level
bf_fdr_multiple_testing_correction(merged_file, multple_testing_correction_results_file, fdr_thresh)
# Output file
fdr_thresh=.05
multple_testing_correction_results_file = output_root + 'multiple_testing_bf_bh_' + str(fdr_thresh) + '_fdr_' + '.txt'
# Perform Bonferroni correction at variant level (per gene) and then BH at gene level
bf_fdr_multiple_testing_correction(merged_file, multple_testing_correction_results_file, fdr_thresh)
# Output file
fdr_thresh=.1
multple_testing_correction_results_file = output_root + 'multiple_testing_bf_bh_' + str(fdr_thresh) + '_fdr_' + '.txt'
# Perform Bonferroni correction at variant level (per gene) and then BH at gene level
bf_fdr_multiple_testing_correction(merged_file, multple_testing_correction_results_file, fdr_thresh)
| [
"[email protected]"
] | |
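The comments above describe a two-stage procedure: a Bonferroni correction across the variants tested at each gene, followed by Benjamini-Hochberg across genes. A compact standalone sketch of that idea on toy numbers (the p-values and counts below are invented for illustration, not study data):

# gene -> (lead variant p-value, number of variants tested at that gene)
genes = {"geneA": (0.0004, 10), "geneB": (0.002, 5), "geneC": (0.03, 20)}

# Stage 1: Bonferroni within each gene (capped at 1.0), then sort ascending.
bf = sorted((min(p * n, 1.0), g) for g, (p, n) in genes.items())

# Stage 2: Benjamini-Hochberg over the per-gene Bonferroni p-values.
m = len(bf)
for k, (p, g) in enumerate(bf, start=1):
    print(g, "BF p =", p, "BH FDR =", round(p * m / k, 4))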
3d522fc5eb54ef2e47e6a569e0ad572dc5690bf3 | 6c9d41994937733dc84e54359f5789ac945724a2 | /echo.py | 863cb8941b7dd620f6dacb81c28f14c916781f13 | [
"MIT"
] | permissive | cider-load-test/pygadu | 2268f38b4612cb32236c687ef355c6dc1d4cae33 | d4c1b25908ae6facd89d3509ea8bc2ec6b4eb11c | refs/heads/master | 2021-12-02T06:28:02.877479 | 2008-11-30T13:01:41 | 2008-11-30T13:01:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | import time
from pygadu.session import PyGadu
from pygadu.util import query_hub
class Echo(PyGadu):
def __init__(self):
super(Echo, self).__init__()
def onWelcome(self, packet):
print repr(packet)
print "Welcome!"
def onLoginOk(self, packet):
print repr(packet)
print "Login Ok!"
self.sendFriendList([])
def onConnect(self):
print "Connect"
def onClose(self):
print "Close"
def onSendMessage(self, packet):
print repr(packet)
print "on send message?"
def onLoginError(self, packet):
print repr(packet)
print "Error!"
def onRecvMessage(self, packet):
print repr(packet)
print packet.sender, packet.text
self.sendMessage(packet.sender, packet.text)
def onUnknown(self, packet):
print repr(packet)
if __name__ == '__main__':
try:
host = query_hub(10533767)
print host
gg = Echo()
gg.login(10533767, "123456", host=host)
while True:
time.sleep(300)
gg.ping()
except KeyboardInterrupt:
gg.logout()
| [
"devnull@localhost"
] | devnull@localhost |
68c366cf3fe05227f6edc4c1f969590059582f5e | e3fd35a8443aaf2f293ae03a5f6c819046a4dd21 | /leetcode-java/leetcode.py | 9eecc06a263da457fb4f2cbe087d8d5b6c42ee2f | [] | no_license | hieutran106/leetcode-ht | 2223ea6bcd459c2cdbc33344c0ff69df7f8a3c7f | 8332eb20e613f82cda2e326218154c7803a32403 | refs/heads/main | 2023-08-09T02:52:41.360360 | 2023-07-27T10:12:28 | 2023-07-27T10:12:28 | 234,890,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 971 | py | import sys
import os
if __name__ == "__main__":
difficulty = sys.argv[1]
problem = sys.argv[2]
cwd = os.getcwd()
path = os.path.join(cwd, "src", "test", "java", difficulty, problem)
# early exit
if os.path.exists(path):
print("Solution existed. Exit ...")
sys.exit()
print(f'Create {difficulty}/{problem}')
os.mkdir(path)
# create README.md
file = os.path.join(path, 'README.md')
open(file, 'a').close()
# create SolutionTest.java
file = os.path.join(path, 'SolutionTest.java')
with open(file, 'w+') as f:
test_content = f"""package {difficulty}.{problem};
import org.junit.Assert;
import org.junit.Test;
public class SolutionTest {{
public static class Solution {{
}}
@Test
public void testCase1() {{
var s = new Solution();
}}
@Test
public void testCase0() {{
var s = new Solution();
}}
}}"""
f.writelines(test_content)
| [
"[email protected]"
] | |
7a5dcf48ff36cbc9f82e9329b3f7faec2ce88438 | 8eafb73fdab3e422aa717bac9af338dcba5e3c1e | /bbp/tests/test_gp_gof.py | bfb3309cb2b062ef6e9a0ed863563eb5f5da02ec | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | LevyForchh/bbp | 6dae4ce3577a73f5cef9b9b5507753a1381ec870 | 3cc389fb956ea14ef827af0f437ce37e8291afac | refs/heads/master | 2020-06-03T05:10:35.751009 | 2019-06-11T21:38:18 | 2019-06-11T21:38:18 | 191,453,945 | 0 | 0 | null | 2019-06-11T21:38:16 | 2019-06-11T21:38:15 | null | UTF-8 | Python | false | false | 7,314 | py | #! /usr/bin/env python
"""
Copyright 2010-2019 University Of Southern California
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import division, print_function
# Import Python modules
import os
import unittest
# Import Broadband modules
import cmp_bbp
import seqnum
import bband_utils
from install_cfg import InstallCfg
from gp_gof_cfg import GPGofCfg
from gp_gof import GPGof
class TestGPGof(unittest.TestCase):
"""
Unit test for gp_gof.py
"""
def setUp(self):
self.install = InstallCfg()
self.gp_gof_cfg = GPGofCfg()
os.chdir(self.install.A_INSTALL_ROOT)
self.srcfile = "test_wh.src"
self.stations = "test_stat.txt"
self.sim_id = int(seqnum.get_seq_num())
sta_base = os.path.basename(os.path.splitext(self.stations)[0])
# Set up paths
refdir = os.path.join(self.install.A_TEST_REF_DIR, "gp")
a_indir = os.path.join(self.install.A_IN_DATA_DIR, str(self.sim_id))
a_tmpdir = os.path.join(self.install.A_TMP_DATA_DIR, str(self.sim_id))
a_outdir_seis = os.path.join(self.install.A_OUT_DATA_DIR,
str(self.sim_id),
"obs_seis_%s" % (sta_base))
a_outdir = os.path.join(self.install.A_OUT_DATA_DIR, str(self.sim_id))
a_logdir = os.path.join(self.install.A_OUT_LOG_DIR, str(self.sim_id))
# Create directories
bband_utils.mkdirs([a_indir, a_tmpdir, a_outdir_seis,
a_outdir, a_logdir],
print_cmd=False)
# Copy stations
cmd = "cp %s %s" % (os.path.join(refdir, self.stations), a_indir)
bband_utils.runprog(cmd, print_cmd=False)
# Copy src file
cmd = "cp %s %s" % (os.path.join(refdir, self.srcfile), a_indir)
bband_utils.runprog(cmd, print_cmd=False)
for i in range(1, 6):
# Copy sample calculated seismograms and response files
cmd = "cp %s %s" % (os.path.join(refdir,
"s%02d.merged.bbp" % (i)),
os.path.join(a_outdir,
"%d.s%02d.vel.bbp" %
(self.sim_id, i)))
bband_utils.runprog(cmd, print_cmd=False)
cmd = "cp %s %s" % (os.path.join(refdir,
"s%02d.rd50" % (i)),
os.path.join(a_outdir,
"%d.s%02d.rd50" %
(self.sim_id, i)))
bband_utils.runprog(cmd, print_cmd=False)
# Copy sample observed seismograms and response files
cmd = "cp %s %s" % (os.path.join(refdir,
"s%02d.merged.bbp" % (i)),
os.path.join(a_outdir_seis,
"s%02d.bbp" % (i)))
bband_utils.runprog(cmd, print_cmd=False)
cmd = "cp %s %s" % (os.path.join(refdir,
"s%02d.rd50" % (i)),
os.path.join(a_outdir_seis,
"s%02d.rd50" % (i)))
bband_utils.runprog(cmd, print_cmd=False)
def test_gof(self):
"""
Test GP GOF Code
"""
gof_obj = GPGof(self.srcfile, self.stations,
"NR", 25, sim_id=self.sim_id)
gof_obj.run()
resid_ref_file = os.path.join(self.install.A_TEST_REF_DIR,
"gp", "GoF", "nr-rd50-resid.txt")
resid_file = os.path.join(self.install.A_OUT_DATA_DIR,
str(self.sim_id),
"NR-%d.rd50-resid.txt" % (self.sim_id))
self.failIf(cmp_bbp.cmp_resid(resid_ref_file,
resid_file,
tolerance=0.005) != 0,
"output resid file %s does not match reference resid file %s" %
(resid_file, resid_ref_file))
a_outdir = os.path.join(self.install.A_OUT_DATA_DIR, str(self.sim_id))
for comp in ['psa5e', 'psa5n', 'rotd50']:
bias_ref_file = os.path.join(self.install.A_TEST_REF_DIR, "gp",
"GoF", "nr_r0-25-rd50-%s.bias" % (comp))
m90_ref_file = os.path.join(self.install.A_TEST_REF_DIR, "gp",
"GoF", "nr_r0-25-rd50-%s.m90" % (comp))
p90_ref_file = os.path.join(self.install.A_TEST_REF_DIR, "gp",
"GoF", "nr_r0-25-rd50-%s.p90" % (comp))
sigma_ref_file = os.path.join(self.install.A_TEST_REF_DIR, "gp",
"GoF", "nr_r0-25-rd50-%s.sigma" % (comp))
sigma0_ref_file = os.path.join(self.install.A_TEST_REF_DIR, "gp",
"GoF", "nr_r0-25-rd50-%s.sigma0" % (comp))
bias_file = os.path.join(a_outdir, "NR-%d_r0-25-rd50-%s.bias" % (self.sim_id, comp))
m90_file = os.path.join(a_outdir, "NR-%d_r0-25-rd50-%s.m90" % (self.sim_id, comp))
p90_file = os.path.join(a_outdir, "NR-%d_r0-25-rd50-%s.p90" % (self.sim_id, comp))
sigma_file = os.path.join(a_outdir, "NR-%d_r0-25-rd50-%s.sigma" % (self.sim_id, comp))
sigma0_file = os.path.join(a_outdir, "NR-%d_r0-25-rd50-%s.sigma0" % (self.sim_id, comp))
self.failIf(cmp_bbp.cmp_bias(bias_ref_file, bias_file) != 0,
"output bias file %s does not match reference bias file %s" %
(bias_file, bias_ref_file))
self.failIf(cmp_bbp.cmp_bias(m90_ref_file, m90_file) != 0,
"output m90 file %s does not match reference m90 file %s" %
(m90_file, m90_ref_file))
self.failIf(cmp_bbp.cmp_bias(p90_ref_file, p90_file, tolerance=0.0025) != 0,
"output p90 file %s does not match reference p90 file %s" %
(p90_file, p90_ref_file))
self.failIf(cmp_bbp.cmp_bias(sigma_ref_file, sigma_file) != 0,
"output sigma file %s does not match reference sigma file %s" %
(sigma_file, sigma_ref_file))
self.failIf(cmp_bbp.cmp_bias(sigma0_ref_file, sigma0_file) != 0,
"output sigma0 file %s does not match reference sigma0 file %s" %
(sigma0_file, sigma0_ref_file))
if __name__ == '__main__':
SUITE = unittest.TestLoader().loadTestsFromTestCase(TestGPGof)
unittest.TextTestRunner(verbosity=2).run(SUITE)
| [
"[email protected]"
] | |
b55110e630e67fe860d2037fa0094925bbf45e28 | 64a8dcfa0a98c665b8b2ac796ed7231135977d9a | /fluent_contents/plugins/googledocsviewer/content_plugins.py | cf330dfc9bf3f2783a245dbbb6a29a3f46c583a2 | [
"Apache-2.0"
] | permissive | jpotterm/django-fluent-contents | 8bc70c9f0309bfeeb3c1e7a96c0687c7070e48ed | e617815874c936af1e00a8bfb79a4c8fc9a40cbb | refs/heads/master | 2021-01-18T06:56:15.737257 | 2015-09-09T15:40:43 | 2015-09-09T15:40:43 | 30,092,032 | 0 | 0 | null | 2015-01-30T21:42:42 | 2015-01-30T21:42:42 | null | UTF-8 | Python | false | false | 1,174 | py | """
Google apps widgets for your site.
"""
from django.utils.html import escape
from django.utils.http import urlquote
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from fluent_contents.extensions import ContentPlugin, plugin_pool
from fluent_contents.plugins.googledocsviewer.models import GoogleDocsViewerItem
@plugin_pool.register
class GoogleDocsViewerPlugin(ContentPlugin):
"""
Plugin to add a Google Docs viewer to the page.
This can be used to display a PDF file inline.
Note then when using the Google Docs viewer on your site,
Google assumes you agree with the Terms of Service,
see: https://docs.google.com/viewer/TOS
"""
model = GoogleDocsViewerItem
category = _('Media')
def render(self, request, instance, **kwargs):
url = 'http://docs.google.com/viewer?url={url}&embedded=true'.format(url=urlquote(instance.url, ''))
return mark_safe(u'<iframe class="googledocsviewer" src="{src}" width="{width}" height="{height}"></iframe>'.format(
src=escape(url),
width=instance.width,
height=instance.height
))
| [
"[email protected]"
] | |
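The plugin docstring above explains that rendering amounts to embedding the Google Docs viewer URL in an iframe. A small standalone sketch of that URL construction using only the Python standard library (treating quote/escape as rough stand-ins for Django's urlquote/escape is an assumption on my part):

from urllib.parse import quote
from html import escape

def viewer_iframe(doc_url, width=600, height=400):
    # Percent-encode the document URL, build the viewer URL, then wrap it in an iframe tag.
    viewer_url = "http://docs.google.com/viewer?url={0}&embedded=true".format(quote(doc_url, safe=""))
    return '<iframe class="googledocsviewer" src="{0}" width="{1}" height="{2}"></iframe>'.format(
        escape(viewer_url), width, height)

print(viewer_iframe("http://example.com/report.pdf"))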
5aec7d812d592d9d37ec207d0ed43b8a3d21710a | 58e51a01846176168bcd83175f63b240cd6db916 | /apps/news/models.py | 070e6feb4346a30d22e857ca08a87b62ba33f7b9 | [] | no_license | reddytocode/projectX-backend | 7e79795cd0c6951ca39f93e316af7a61b631940d | 05fb5f6e14889ecac94ad52c13796eb14c59814d | refs/heads/main | 2023-07-18T06:37:16.298857 | 2021-06-30T20:29:33 | 2021-06-30T20:29:33 | 380,374,575 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | from django.db import models
class News(models.Model):
# Todo: inherit
guid = models.TextField()
title = models.CharField(max_length=100, blank=False)
link = models.CharField(max_length=200, blank=False)
content = models.TextField()
author = models.CharField(max_length=100, blank=False) | [
"[email protected]"
] | |
21df715483d4461c101e7b8501691258ca4d040d | 4ae34a5179d7adf1037eb9a3cb249f9a5c06684e | /examples/v1beta1/trial-images/darts-cnn-cifar10/model.py | dea7d43f84c6d761b0697b84b5ffd2adc18d9249 | [
"Apache-2.0"
] | permissive | kubeflow/katib | 367373c0452d49a7a115b86893f4dab9e1f278ea | e3e0aa24aeea1edfab0fd42f55392af651d2b3ae | refs/heads/master | 2023-09-04T05:02:05.752156 | 2023-08-24T22:40:54 | 2023-08-24T22:40:54 | 127,941,481 | 1,385 | 422 | Apache-2.0 | 2023-09-14T13:17:29 | 2018-04-03T17:07:12 | Go | UTF-8 | Python | false | false | 7,044 | py | # Copyright 2022 The Kubeflow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from operations import FactorizedReduce, StdConv, MixedOp
class Cell(nn.Module):
""" Cell for search
Each edge is mixed and continuous relaxed.
"""
def __init__(self, num_nodes, c_prev_prev, c_prev, c_cur, reduction_prev, reduction_cur, search_space):
"""
Args:
num_nodes: Number of intermediate cell nodes
c_prev_prev: channels_out[k-2]
c_prev : Channels_out[k-1]
c_cur : Channels_in[k] (current)
reduction_prev: flag for whether the previous cell is reduction cell or not
reduction_cur: flag for whether the current cell is reduction cell or not
"""
super(Cell, self).__init__()
self.reduction_cur = reduction_cur
self.num_nodes = num_nodes
# If previous cell is reduction cell, current input size does not match with
# output size of cell[k-2]. So the output[k-2] should be reduced by preprocessing
if reduction_prev:
self.preprocess0 = FactorizedReduce(c_prev_prev, c_cur)
else:
self.preprocess0 = StdConv(c_prev_prev, c_cur, kernel_size=1, stride=1, padding=0)
self.preprocess1 = StdConv(c_prev, c_cur, kernel_size=1, stride=1, padding=0)
# Generate dag from mixed operations
self.dag_ops = nn.ModuleList()
for i in range(self.num_nodes):
self.dag_ops.append(nn.ModuleList())
# Include 2 input nodes
for j in range(2+i):
# Reduction with stride = 2 must be only for the input node
stride = 2 if reduction_cur and j < 2 else 1
op = MixedOp(c_cur, stride, search_space)
self.dag_ops[i].append(op)
def forward(self, s0, s1, w_dag):
s0 = self.preprocess0(s0)
s1 = self.preprocess1(s1)
states = [s0, s1]
for edges, w_list in zip(self.dag_ops, w_dag):
state_cur = sum(edges[i](s, w) for i, (s, w) in enumerate((zip(states, w_list))))
states.append(state_cur)
state_out = torch.cat(states[2:], dim=1)
return state_out
class NetworkCNN(nn.Module):
def __init__(self, init_channels, input_channels, num_classes,
num_layers, criterion, search_space, num_nodes, stem_multiplier):
super(NetworkCNN, self).__init__()
self.init_channels = init_channels
self.num_classes = num_classes
self.num_layers = num_layers
self.criterion = criterion
self.num_nodes = num_nodes
self.stem_multiplier = stem_multiplier
c_cur = self.stem_multiplier*self.init_channels
self.stem = nn.Sequential(
nn.Conv2d(input_channels, c_cur, 3, padding=1, bias=False),
nn.BatchNorm2d(c_cur)
)
# In first Cell stem is used for s0 and s1
# c_prev_prev and c_prev - output channels size
# c_cur - init channels size
c_prev_prev, c_prev, c_cur = c_cur, c_cur, self.init_channels
self.cells = nn.ModuleList()
reduction_prev = False
for i in range(self.num_layers):
# For Network with 1 layer: Only Normal Cell
if self.num_layers == 1:
reduction_cur = False
else:
# For Network with two layers: First layer - Normal, Second - Reduction
# For Other Networks: [1/3, 2/3] Layers - Reduction cell with double channels
# Others - Normal cell
if ((self.num_layers == 2 and i == 1) or
(self.num_layers > 2 and i in [self.num_layers//3, 2*self.num_layers//3])):
c_cur *= 2
reduction_cur = True
else:
reduction_cur = False
cell = Cell(self.num_nodes, c_prev_prev, c_prev, c_cur, reduction_prev, reduction_cur, search_space)
reduction_prev = reduction_cur
self.cells.append(cell)
c_cur_out = c_cur * self.num_nodes
c_prev_prev, c_prev = c_prev, c_cur_out
self.global_pooling = nn.AdaptiveAvgPool2d(1)
self.classifier = nn.Linear(c_prev, self.num_classes)
# Initialize alphas parameters
num_ops = len(search_space.primitives)
self.alpha_normal = nn.ParameterList()
self.alpha_reduce = nn.ParameterList()
for i in range(self.num_nodes):
self.alpha_normal.append(nn.Parameter(1e-3*torch.randn(i+2, num_ops)))
if self.num_layers > 1:
self.alpha_reduce.append(nn.Parameter(1e-3*torch.randn(i+2, num_ops)))
# Setup alphas list
self.alphas = []
for name, parameter in self.named_parameters():
if "alpha" in name:
self.alphas.append((name, parameter))
def forward(self, x):
weights_normal = [F.softmax(alpha, dim=-1) for alpha in self.alpha_normal]
weights_reduce = [F.softmax(alpha, dim=-1) for alpha in self.alpha_reduce]
s0 = s1 = self.stem(x)
for cell in self.cells:
weights = weights_reduce if cell.reduction_cur else weights_normal
s0, s1 = s1, cell(s0, s1, weights)
out = self.global_pooling(s1)
# Make out flatten
out = out.view(out.size(0), -1)
logits = self.classifier(out)
return logits
def print_alphas(self):
print("\n>>> Alphas Normal <<<")
for alpha in self.alpha_normal:
print(F.softmax(alpha, dim=-1))
if self.num_layers > 1:
print("\n>>> Alpha Reduce <<<")
for alpha in self.alpha_reduce:
print(F.softmax(alpha, dim=-1))
print("\n")
def getWeights(self):
return self.parameters()
def getAlphas(self):
for _, parameter in self.alphas:
yield parameter
def loss(self, x, y):
logits = self.forward(x)
return self.criterion(logits, y)
def genotype(self, search_space):
gene_normal = search_space.parse(self.alpha_normal, k=2)
gene_reduce = search_space.parse(self.alpha_reduce, k=2)
# concat all intermediate nodes
concat = range(2, 2 + self.num_nodes)
return search_space.genotype(normal=gene_normal, normal_concat=concat,
reduce=gene_reduce, reduce_concat=concat)
| [
"[email protected]"
] | |
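The Cell docstring above notes that every edge is a mixed operation under continuous relaxation: a softmax over architecture weights blends the outputs of all candidate ops. A toy, self-contained illustration of that weighted mixture (PyTorch is assumed to be installed; the two candidate ops are placeholders, not the search space this model actually uses):

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 8, 8)
alpha = torch.randn(2, requires_grad=True)           # one architecture weight per candidate op
ops = [torch.nn.Identity(), torch.nn.AvgPool2d(3, stride=1, padding=1)]

w = F.softmax(alpha, dim=-1)                         # continuous relaxation of the discrete choice
mixed = sum(w_i * op(x) for w_i, op in zip(w, ops))  # MixedOp-style weighted sum of op outputs
print(mixed.shape)                                   # torch.Size([1, 3, 8, 8])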
8d0fe3775c506e3d3501551bd2693ec99edb0d39 | fe19d2fac4580d463132e61509bd6e3cc2cf958d | /toontown/parties/DistributedPartyFireworksActivity.py | cbe6cf69ed730e33a04c428825cbb5fd222fffe2 | [] | no_license | t00nt0wn1dk/c0d3 | 3e6db6dd42c3aa36ad77709cf9016176a3f3a44f | 7de105d7f3de0f8704b020e32fd063ee2fad8d0d | refs/heads/master | 2021-01-01T16:00:15.367822 | 2015-03-21T21:25:52 | 2015-03-21T21:25:55 | 32,647,654 | 3 | 5 | null | null | null | null | UTF-8 | Python | false | false | 7,941 | py | # 2013.08.22 22:23:22 Pacific Daylight Time
# Embedded file name: toontown.parties.DistributedPartyFireworksActivity
from pandac.PandaModules import Vec3
from pandac.PandaModules import OmniBoundingVolume
from pandac.PandaModules import AlphaTestAttrib
from pandac.PandaModules import RenderAttrib
from direct.actor.Actor import Actor
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import globalClockDelta
from toontown.effects.FireworkShowMixin import FireworkShowMixin
from toontown.effects.RocketExplosion import RocketExplosion
from toontown.toonbase import TTLocalizer
from PartyGlobals import FireworkShows
from PartyGlobals import ActivityIds
from PartyGlobals import ActivityTypes
from PartyGlobals import FireworksStartedEvent
from PartyGlobals import FireworksFinishedEvent
from PartyGlobals import FireworksPostLaunchDelay
from PartyGlobals import RocketSoundDelay
from PartyGlobals import RocketDirectionDelay
from DistributedPartyActivity import DistributedPartyActivity
from activityFSMs import FireworksActivityFSM
import PartyGlobals
class DistributedPartyFireworksActivity(DistributedPartyActivity, FireworkShowMixin):
__module__ = __name__
notify = directNotify.newCategory('DistributedPartyFireworksActivity')
def __init__(self, cr):
DistributedPartyFireworksActivity.notify.debug('__init__')
DistributedPartyActivity.__init__(self, cr, ActivityIds.PartyFireworks, ActivityTypes.HostInitiated, wantLever=True)
FireworkShowMixin.__init__(self, restorePlaygroundMusic=True, startDelay=FireworksPostLaunchDelay)
def setEventId(self, eventId):
DistributedPartyFireworksActivity.notify.debug('setEventId( %s )' % FireworkShows.getString(eventId))
self.eventId = eventId
def setShowStyle(self, showStyle):
DistributedPartyFireworksActivity.notify.debug('setShowStyle( %d )' % showStyle)
self.showStyle = showStyle
def load(self):
DistributedPartyFireworksActivity.notify.debug('load')
DistributedPartyActivity.load(self)
self.eventId = PartyGlobals.FireworkShows.Summer
self.launchPadModel = loader.loadModel('phase_13/models/parties/launchPad')
self.launchPadModel.setH(90.0)
self.launchPadModel.setPos(0.0, -18.0, 0.0)
self.launchPadModel.reparentTo(self.root)
railingsCollection = self.launchPadModel.findAllMatches('**/launchPad_mesh/*railing*')
for i in range(railingsCollection.getNumPaths()):
railingsCollection[i].setAttrib(AlphaTestAttrib.make(RenderAttrib.MGreater, 0.75))
leverLocator = self.launchPadModel.find('**/RocketLever_locator')
self.lever.setPosHpr(Vec3.zero(), Vec3.zero())
self.lever.reparentTo(leverLocator)
self.toonPullingLeverInterval = None
self.sign.reparentTo(self.launchPadModel.find('**/launchPad_sign_locator'))
self.rocketActor = Actor('phase_13/models/parties/rocket_model', {'launch': 'phase_13/models/parties/rocket_launch'})
rocketLocator = self.launchPadModel.find('**/rocket_locator')
self.rocketActor.reparentTo(rocketLocator)
self.rocketActor.node().setBound(OmniBoundingVolume())
self.rocketActor.node().setFinal(True)
effectsLocator = self.rocketActor.find('**/joint1')
self.rocketExplosionEffect = RocketExplosion(effectsLocator, rocketLocator)
self.rocketParticleSeq = None
self.launchSound = base.loadSfx('phase_13/audio/sfx/rocket_launch.mp3')
self.activityFSM = FireworksActivityFSM(self)
self.activityFSM.request('Idle')
return
def unload(self):
DistributedPartyFireworksActivity.notify.debug('unload')
taskMgr.remove(self.taskName('delayedStartShow'))
if self.rocketParticleSeq:
self.rocketParticleSeq.pause()
self.rocketParticleSeq = None
self.launchPadModel.removeNode()
del self.launchPadModel
del self.toonPullingLeverInterval
self.rocketActor.delete()
self.rocketExplosionEffect.destroy()
self.activityFSM.request('Disabled')
del self.rocketActor
del self.launchSound
del self.activityFSM
del self.eventId
del self.showStyle
DistributedPartyActivity.unload(self)
return
def _leverPulled(self, collEntry):
DistributedPartyFireworksActivity.notify.debug('_leverPulled')
hostPulledLever = DistributedPartyActivity._leverPulled(self, collEntry)
if self.activityFSM.getCurrentOrNextState() == 'Active':
self.showMessage(TTLocalizer.PartyFireworksAlreadyActive)
elif self.activityFSM.getCurrentOrNextState() == 'Disabled':
self.showMessage(TTLocalizer.PartyFireworksAlreadyDone)
elif self.activityFSM.getCurrentOrNextState() == 'Idle':
if hostPulledLever:
base.cr.playGame.getPlace().fsm.request('activity')
self.toonPullingLeverInterval = self.getToonPullingLeverInterval(base.localAvatar)
self.toonPullingLeverInterval.append(Func(self.d_toonJoinRequest))
self.toonPullingLeverInterval.append(Func(base.cr.playGame.getPlace().fsm.request, 'walk'))
self.toonPullingLeverInterval.start()
else:
self.showMessage(TTLocalizer.PartyOnlyHostLeverPull)
def setState(self, newState, timestamp):
DistributedPartyFireworksActivity.notify.debug('setState( newState=%s, ... )' % newState)
DistributedPartyActivity.setState(self, newState, timestamp)
if newState == 'Active':
self.activityFSM.request(newState, timestamp)
else:
self.activityFSM.request(newState)
def startIdle(self):
DistributedPartyFireworksActivity.notify.debug('startIdle')
def finishIdle(self):
DistributedPartyFireworksActivity.notify.debug('finishIdle')
def startActive(self, showStartTimestamp):
DistributedPartyFireworksActivity.notify.debug('startActive')
messenger.send(FireworksStartedEvent)
timeSinceStart = globalClockDelta.localElapsedTime(showStartTimestamp)
if timeSinceStart > self.rocketActor.getDuration('launch'):
self.rocketActor.hide()
self.startShow(self.eventId, self.showStyle, showStartTimestamp)
else:
self.rocketActor.play('launch')
self.rocketParticleSeq = Sequence(Wait(RocketSoundDelay), Func(base.playSfx, self.launchSound), Func(self.rocketExplosionEffect.start), Wait(RocketDirectionDelay), LerpHprInterval(self.rocketActor, 4.0, Vec3(0, 0, -60)), Func(self.rocketExplosionEffect.end), Func(self.rocketActor.hide))
self.rocketParticleSeq.start()
taskMgr.doMethodLater(FireworksPostLaunchDelay, self.startShow, self.taskName('delayedStartShow'), extraArgs=[self.eventId,
self.showStyle,
showStartTimestamp,
self.root])
def finishActive(self):
self.rocketParticleSeq = None
DistributedPartyFireworksActivity.notify.debug('finishActive')
messenger.send(FireworksFinishedEvent)
taskMgr.remove(self.taskName('delayedStartShow'))
FireworkShowMixin.disable(self)
return
def startDisabled(self):
DistributedPartyFireworksActivity.notify.debug('startDisabled')
if not self.rocketActor.isEmpty():
self.rocketActor.hide()
def finishDisabled(self):
DistributedPartyFireworksActivity.notify.debug('finishDisabled')
def handleToonDisabled(self, toonId):
self.notify.warning('handleToonDisabled no implementation yet')
# okay decompyling C:\Users\Maverick\Documents\Visual Studio 2010\Projects\Unfreezer\py2\toontown\parties\DistributedPartyFireworksActivity.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2013.08.22 22:23:22 Pacific Daylight Time
| [
"[email protected]"
] | |
eee3ed0cdfe623261a833b1da967a5cb2705933d | 30a61c74d5108279af498a181ebc83151f13c033 | /one_hundred_eighteen.py | 30b365d21d51a1e5aa523f67b58d17a91ffc45e6 | [] | no_license | Yanl05/LeetCode | 274267cb189813c96fff67d8cbfba4afebd5c2b2 | c0807a7f31a265b3090ef3d32a0ad5a2b10579f7 | refs/heads/master | 2020-03-15T12:25:16.456742 | 2020-01-20T01:12:35 | 2020-01-20T01:12:35 | 132,143,417 | 0 | 0 | null | 2018-12-21T03:01:32 | 2018-05-04T13:25:24 | Python | UTF-8 | Python | false | false | 704 | py | # -*- coding: UTF-8 -*-
"""
# @Time : 2019-08-28 16:33
# @Author : yanlei
# @FileName: one_hundred_eighteen.py
给定一个非负整数 numRows,生成杨辉三角的前 numRows 行。
在杨辉三角中,每个数是它左上方和右上方的数的和。
"""
class Solution(object):
def generate(self, numRows):
"""
:type numRows: int
:rtype: List[List[int]]
"""
ret = []
if numRows == 0: return ret
for i in range(numRows):
ret.append([1]*(i+1))
if i > 1:
for j in range(1,i):
ret[-1][j] = ret[-2][j-1] + ret[-2][j]
return ret
print(Solution().generate(5)) | [
"[email protected]"
] | |
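For reference, the recurrence described in the docstring gives the following value for numRows = 5, which is what the print call above is expected to show:

# [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]
# e.g. the 4th row: 1, 1+2, 2+1, 1 -> [1, 3, 3, 1]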
83f6803e4b1251b1ff5c7750399269d0197edd3d | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_2449486_0/Python/ynasu/B.py | 2540035aa29eb33019f7839e601b4e6d8c3a9332 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 921 | py | #!/usr/bin/env python
import sys
yes = "YES"
no = "NO"
def solve(heights):
N = len(heights)
M = len(heights[0])
for y in xrange(N):
for x in xrange(M):
possible = True
for y1 in xrange(N):
if heights[y1][x] > heights[y][x]:
possible = False
if possible:
continue
possible = True
for x1 in xrange(M):
if heights[y][x1] > heights[y][x]:
possible = False
if possible:
continue
return no
return yes
def readInts():
return [ int(s) for s in sys.stdin.readline().split() ]
T = int(sys.stdin.readline())
for t in xrange(T):
inputs = readInts()
N = inputs[0]
heights = []
for i in xrange(N):
heights.append(readInts())
res = solve(heights)
print "Case #%d: %s" % (t + 1, res)
| [
"[email protected]"
] | |
41e1a56d2699383d73c627f9741b6d6f607a5171 | 6364bb727b623f06f6998941299c49e7fcb1d437 | /msgraph-cli-extensions/src/teamschats/azext_teamschats/vendored_sdks/teamschats/aio/operations_async/_chat_chat_operations_async.py | 8306168c06f4e22be9d73ae8046a198361fd26d2 | [
"MIT"
] | permissive | kanakanaidu/msgraph-cli | 1d6cd640f4e10f4bdf476d44d12a7c48987b1a97 | b3b87f40148fb691a4c331f523ca91f8a5cc9224 | refs/heads/main | 2022-12-25T08:08:26.716914 | 2020-09-23T14:29:13 | 2020-09-23T14:29:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,956 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Any, AsyncIterable, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ChatChatOperations:
"""ChatChatOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~teams_chats.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_chat(
self,
orderby: Optional[List[Union[str, "models.Get5ItemsItem"]]] = None,
select: Optional[List[Union[str, "models.Get6ItemsItem"]]] = None,
expand: Optional[List[Union[str, "models.Get7ItemsItem"]]] = None,
**kwargs
) -> AsyncIterable["models.CollectionOfChat"]:
"""Get entities from chats.
Get entities from chats.
:param orderby: Order items by property values.
:type orderby: list[str or ~teams_chats.models.Get5ItemsItem]
:param select: Select properties to be returned.
:type select: list[str or ~teams_chats.models.Get6ItemsItem]
:param expand: Expand related entities.
:type expand: list[str or ~teams_chats.models.Get7ItemsItem]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CollectionOfChat or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~teams_chats.models.CollectionOfChat]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CollectionOfChat"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
if not next_link:
# Construct URL
url = self.list_chat.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if self._config.top is not None:
query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
if self._config.skip is not None:
query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
if self._config.search is not None:
query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
if self._config.filter is not None:
query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
if self._config.count is not None:
query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('CollectionOfChat', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.odata_next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.OdataError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_chat.metadata = {'url': '/chats'} # type: ignore
async def create_chat(
self,
id: Optional[str] = None,
topic: Optional[str] = None,
created_date_time: Optional[datetime.datetime] = None,
last_updated_date_time: Optional[datetime.datetime] = None,
members: Optional[List["models.MicrosoftGraphConversationMember"]] = None,
messages: Optional[List["models.MicrosoftGraphChatMessage"]] = None,
installed_apps: Optional[List["models.MicrosoftGraphTeamsAppInstallation"]] = None,
**kwargs
) -> "models.MicrosoftGraphChat":
"""Add new entity to chats.
Add new entity to chats.
:param id: Read-only.
:type id: str
:param topic:
:type topic: str
:param created_date_time:
:type created_date_time: ~datetime.datetime
:param last_updated_date_time:
:type last_updated_date_time: ~datetime.datetime
:param members:
:type members: list[~teams_chats.models.MicrosoftGraphConversationMember]
:param messages:
:type messages: list[~teams_chats.models.MicrosoftGraphChatMessage]
:param installed_apps:
:type installed_apps: list[~teams_chats.models.MicrosoftGraphTeamsAppInstallation]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphChat, or the result of cls(response)
:rtype: ~teams_chats.models.MicrosoftGraphChat
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphChat"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
_body = models.MicrosoftGraphChat(id=id, topic=topic, created_date_time=created_date_time, last_updated_date_time=last_updated_date_time, members=members, messages=messages, installed_apps=installed_apps)
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.create_chat.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(_body, 'MicrosoftGraphChat')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphChat', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_chat.metadata = {'url': '/chats'} # type: ignore
async def get_chat(
self,
chat_id: str,
select: Optional[List[Union[str, "models.Get1ItemsItem"]]] = None,
expand: Optional[List[Union[str, "models.Get2ItemsItem"]]] = None,
**kwargs
) -> "models.MicrosoftGraphChat":
"""Get entity from chats by key.
Get entity from chats by key.
:param chat_id: key: chat-id of chat.
:type chat_id: str
:param select: Select properties to be returned.
:type select: list[str or ~teams_chats.models.Get1ItemsItem]
:param expand: Expand related entities.
:type expand: list[str or ~teams_chats.models.Get2ItemsItem]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphChat, or the result of cls(response)
:rtype: ~teams_chats.models.MicrosoftGraphChat
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphChat"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.get_chat.metadata['url'] # type: ignore
path_format_arguments = {
'chat-id': self._serialize.url("chat_id", chat_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphChat', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_chat.metadata = {'url': '/chats/{chat-id}'} # type: ignore
async def update_chat(
self,
chat_id: str,
id: Optional[str] = None,
topic: Optional[str] = None,
created_date_time: Optional[datetime.datetime] = None,
last_updated_date_time: Optional[datetime.datetime] = None,
members: Optional[List["models.MicrosoftGraphConversationMember"]] = None,
messages: Optional[List["models.MicrosoftGraphChatMessage"]] = None,
installed_apps: Optional[List["models.MicrosoftGraphTeamsAppInstallation"]] = None,
**kwargs
) -> None:
"""Update entity in chats.
Update entity in chats.
:param chat_id: key: chat-id of chat.
:type chat_id: str
:param id: Read-only.
:type id: str
:param topic:
:type topic: str
:param created_date_time:
:type created_date_time: ~datetime.datetime
:param last_updated_date_time:
:type last_updated_date_time: ~datetime.datetime
:param members:
:type members: list[~teams_chats.models.MicrosoftGraphConversationMember]
:param messages:
:type messages: list[~teams_chats.models.MicrosoftGraphChatMessage]
:param installed_apps:
:type installed_apps: list[~teams_chats.models.MicrosoftGraphTeamsAppInstallation]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
_body = models.MicrosoftGraphChat(id=id, topic=topic, created_date_time=created_date_time, last_updated_date_time=last_updated_date_time, members=members, messages=messages, installed_apps=installed_apps)
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.update_chat.metadata['url'] # type: ignore
path_format_arguments = {
'chat-id': self._serialize.url("chat_id", chat_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(_body, 'MicrosoftGraphChat')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
update_chat.metadata = {'url': '/chats/{chat-id}'} # type: ignore
async def delete_chat(
self,
chat_id: str,
if_match: Optional[str] = None,
**kwargs
) -> None:
"""Delete entity from chats.
Delete entity from chats.
:param chat_id: key: chat-id of chat.
:type chat_id: str
:param if_match: ETag.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.delete_chat.metadata['url'] # type: ignore
path_format_arguments = {
'chat-id': self._serialize.url("chat_id", chat_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_chat.metadata = {'url': '/chats/{chat-id}'} # type: ignore
| [
"[email protected]"
] | |
f102adefa4347ff2438caea310576abfc8cc8e52 | 27e890f900bd4bfb2e66f4eab85bc381cf4d5d3f | /tests/unit/modules/network/iosxr/test_iosxr_netconf.py | fbb7819e173700b0821c2a564cbc459b04350fe6 | [] | no_license | coll-test/notstdlib.moveitallout | eb33a560070bbded5032385d0aea2f3cf60e690b | 0987f099b783c6cf977db9233e1c3d9efcbcb3c7 | refs/heads/master | 2020-12-19T22:28:33.369557 | 2020-01-23T18:51:26 | 2020-01-23T18:51:26 | 235,865,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,632 | py | # (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible_collections.notstdlib.moveitallout.tests.unit.compat.mock import patch
from ansible_collections.notstdlib.moveitallout.plugins.modules import iosxr_netconf
from ansible_collections.notstdlib.moveitallout.tests.unit.modules.utils import set_module_args
from ..iosxr_module import TestIosxrModule
class TestIosxrNetconfModule(TestIosxrModule):
module = iosxr_netconf
def setUp(self):
super(TestIosxrNetconfModule, self).setUp()
self.mock_get_config = patch('ansible_collections.notstdlib.moveitallout.plugins.modules.iosxr_netconf.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible_collections.notstdlib.moveitallout.plugins.modules.iosxr_netconf.load_config')
self.load_config = self.mock_load_config.start()
def tearDown(self):
super(TestIosxrNetconfModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
def test_iosxr_disable_netconf_service(self):
self.get_config.return_value = '''
netconf-yang agent
ssh
!
ssh server netconf vrf default
'''
self.load_config.return_value = 'dummy diff'
set_module_args(dict(netconf_port=830, netconf_vrf='default', state='absent'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['no netconf-yang agent ssh', 'no ssh server netconf port 830', 'no ssh server netconf vrf default'])
def test_iosxr_enable_netconf_service(self):
self.get_config.return_value = ''
self.load_config.return_value = 'dummy diff'
set_module_args(dict(netconf_port=830, netconf_vrf='default', state='present'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['netconf-yang agent ssh', 'ssh server netconf port 830', 'ssh server netconf vrf default'])
def test_iosxr_change_netconf_port(self):
self.get_config.return_value = '''
netconf-yang agent
ssh
!
ssh server netconf vrf default
'''
self.load_config.return_value = 'dummy diff'
set_module_args(dict(netconf_port=9000, state='present'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['ssh server netconf port 9000'])
def test_iosxr_change_netconf_vrf(self):
self.get_config.return_value = '''
netconf-yang agent
ssh
!
ssh server netconf vrf default
'''
self.load_config.return_value = 'dummy diff'
set_module_args(dict(netconf_vrf='new_default', state='present'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['ssh server netconf vrf new_default'])
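# Note (editorial sketch, assuming the standard collection test layout): each case above
# follows the same pattern -- stub get_config() with the device's running configuration,
# invoke the module via set_module_args()/execute_module(), and assert the exact CLI
# commands handed to load_config(). The module's tests can be run in isolation with,
# for example:
#
#   pytest tests/unit/modules/network/iosxr/test_iosxr_netconf.py -vv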
# =============================================================================
# google/pubsub/v1/pubsub-v1-py/tests/unit/gapic/pubsub_v1/test_subscriber.py
# repo: Tryweirder/googleapis-gen (branch: master, Python, Apache-2.0)
# =============================================================================
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google import auth
from google.api_core import client_options
from google.api_core import exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.auth import credentials
from google.auth.exceptions import MutualTLSChannelError
from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore
from google.iam.v1 import options_pb2 as options # type: ignore
from google.iam.v1 import policy_pb2 as policy # type: ignore
from google.oauth2 import service_account
from google.protobuf import duration_pb2 as duration # type: ignore
from google.protobuf import field_mask_pb2 as field_mask # type: ignore
from google.protobuf import timestamp_pb2 as timestamp # type: ignore
from google.pubsub_v1.services.subscriber import SubscriberAsyncClient
from google.pubsub_v1.services.subscriber import SubscriberClient
from google.pubsub_v1.services.subscriber import pagers
from google.pubsub_v1.services.subscriber import transports
from google.pubsub_v1.types import pubsub
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert SubscriberClient._get_default_mtls_endpoint(None) is None
assert SubscriberClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert SubscriberClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
assert SubscriberClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
assert SubscriberClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
assert SubscriberClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [
SubscriberClient,
SubscriberAsyncClient,
])
def test_subscriber_client_from_service_account_info(client_class):
creds = credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == 'pubsub.googleapis.com:443'
@pytest.mark.parametrize("client_class", [
SubscriberClient,
SubscriberAsyncClient,
])
def test_subscriber_client_from_service_account_file(client_class):
creds = credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == 'pubsub.googleapis.com:443'
def test_subscriber_client_get_transport_class():
transport = SubscriberClient.get_transport_class()
available_transports = [
transports.SubscriberGrpcTransport,
]
assert transport in available_transports
transport = SubscriberClient.get_transport_class("grpc")
assert transport == transports.SubscriberGrpcTransport
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(SubscriberClient, transports.SubscriberGrpcTransport, "grpc"),
(SubscriberAsyncClient, transports.SubscriberGrpcAsyncIOTransport, "grpc_asyncio"),
])
@mock.patch.object(SubscriberClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SubscriberClient))
@mock.patch.object(SubscriberAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SubscriberAsyncClient))
def test_subscriber_client_client_options(client_class, transport_class, transport_name):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(SubscriberClient, 'get_transport_class') as gtc:
transport = transport_class(
credentials=credentials.AnonymousCredentials()
)
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(SubscriberClient, 'get_transport_class') as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [
(SubscriberClient, transports.SubscriberGrpcTransport, "grpc", "true"),
(SubscriberAsyncClient, transports.SubscriberGrpcAsyncIOTransport, "grpc_asyncio", "true"),
(SubscriberClient, transports.SubscriberGrpcTransport, "grpc", "false"),
(SubscriberAsyncClient, transports.SubscriberGrpcAsyncIOTransport, "grpc_asyncio", "false"),
])
@mock.patch.object(SubscriberClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SubscriberClient))
@mock.patch.object(SubscriberAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SubscriberAsyncClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_subscriber_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch.object(transport_class, '__init__') as patched:
with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch.object(transport_class, '__init__') as patched:
with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(SubscriberClient, transports.SubscriberGrpcTransport, "grpc"),
(SubscriberAsyncClient, transports.SubscriberGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_subscriber_client_client_options_scopes(client_class, transport_class, transport_name):
# Check the case scopes are provided.
options = client_options.ClientOptions(
scopes=["1", "2"],
)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(SubscriberClient, transports.SubscriberGrpcTransport, "grpc"),
(SubscriberAsyncClient, transports.SubscriberGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_subscriber_client_client_options_credentials_file(client_class, transport_class, transport_name):
# Check the case credentials file is provided.
options = client_options.ClientOptions(
credentials_file="credentials.json"
)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_subscriber_client_client_options_from_dict():
with mock.patch('google.pubsub_v1.services.subscriber.transports.SubscriberGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = SubscriberClient(
client_options={'api_endpoint': 'squid.clam.whelk'}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
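# Illustrative sketch (not one of the generated tests): building a client the way the
# client_options tests above exercise it -- anonymous credentials plus an explicit
# api_endpoint -- which is also how these tests avoid any real network traffic.
def _example_build_client_with_custom_endpoint() -> SubscriberClient:
    options = client_options.ClientOptions(api_endpoint="pubsub.googleapis.com:443")
    return SubscriberClient(
        credentials=credentials.AnonymousCredentials(),
        client_options=options,
    )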
def test_create_subscription(transport: str = 'grpc', request_type=pubsub.Subscription):
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_subscription),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.Subscription(
name='name_value',
topic='topic_value',
ack_deadline_seconds=2066,
retain_acked_messages=True,
enable_message_ordering=True,
filter='filter_value',
detached=True,
)
response = client.create_subscription(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.Subscription()
# Establish that the response is the type that we expect.
assert isinstance(response, pubsub.Subscription)
assert response.name == 'name_value'
assert response.topic == 'topic_value'
assert response.ack_deadline_seconds == 2066
assert response.retain_acked_messages is True
assert response.enable_message_ordering is True
assert response.filter == 'filter_value'
assert response.detached is True
def test_create_subscription_from_dict():
test_create_subscription(request_type=dict)
def test_create_subscription_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_subscription),
'__call__') as call:
client.create_subscription()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.Subscription()
@pytest.mark.asyncio
async def test_create_subscription_async(transport: str = 'grpc_asyncio', request_type=pubsub.Subscription):
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_subscription),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.Subscription(
name='name_value',
topic='topic_value',
ack_deadline_seconds=2066,
retain_acked_messages=True,
enable_message_ordering=True,
filter='filter_value',
detached=True,
))
response = await client.create_subscription(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.Subscription()
# Establish that the response is the type that we expect.
assert isinstance(response, pubsub.Subscription)
assert response.name == 'name_value'
assert response.topic == 'topic_value'
assert response.ack_deadline_seconds == 2066
assert response.retain_acked_messages is True
assert response.enable_message_ordering is True
assert response.filter == 'filter_value'
assert response.detached is True
@pytest.mark.asyncio
async def test_create_subscription_async_from_dict():
await test_create_subscription_async(request_type=dict)
def test_create_subscription_field_headers():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.Subscription()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_subscription),
'__call__') as call:
call.return_value = pubsub.Subscription()
client.create_subscription(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_create_subscription_field_headers_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.Subscription()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_subscription),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.Subscription())
await client.create_subscription(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_create_subscription_flattened():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_subscription),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.Subscription()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_subscription(
name='name_value',
topic='topic_value',
push_config=pubsub.PushConfig(push_endpoint='push_endpoint_value'),
ack_deadline_seconds=2066,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
assert args[0].topic == 'topic_value'
assert args[0].push_config == pubsub.PushConfig(push_endpoint='push_endpoint_value')
assert args[0].ack_deadline_seconds == 2066
def test_create_subscription_flattened_error():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_subscription(
pubsub.Subscription(),
name='name_value',
topic='topic_value',
push_config=pubsub.PushConfig(push_endpoint='push_endpoint_value'),
ack_deadline_seconds=2066,
)
@pytest.mark.asyncio
async def test_create_subscription_flattened_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_subscription),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.Subscription()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.Subscription())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_subscription(
name='name_value',
topic='topic_value',
push_config=pubsub.PushConfig(push_endpoint='push_endpoint_value'),
ack_deadline_seconds=2066,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
assert args[0].topic == 'topic_value'
assert args[0].push_config == pubsub.PushConfig(push_endpoint='push_endpoint_value')
assert args[0].ack_deadline_seconds == 2066
@pytest.mark.asyncio
async def test_create_subscription_flattened_error_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_subscription(
pubsub.Subscription(),
name='name_value',
topic='topic_value',
push_config=pubsub.PushConfig(push_endpoint='push_endpoint_value'),
ack_deadline_seconds=2066,
)
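# Illustrative usage sketch (assumes a real project, topic, and authenticated client;
# the resource names below are placeholders): the flattened-argument form of
# create_subscription exercised by the tests above.
def _example_create_subscription(client: SubscriberClient) -> pubsub.Subscription:
    return client.create_subscription(
        name="projects/my-project/subscriptions/my-sub",
        topic="projects/my-project/topics/my-topic",
        ack_deadline_seconds=30,
    )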
def test_get_subscription(transport: str = 'grpc', request_type=pubsub.GetSubscriptionRequest):
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_subscription),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.Subscription(
name='name_value',
topic='topic_value',
ack_deadline_seconds=2066,
retain_acked_messages=True,
enable_message_ordering=True,
filter='filter_value',
detached=True,
)
response = client.get_subscription(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.GetSubscriptionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pubsub.Subscription)
assert response.name == 'name_value'
assert response.topic == 'topic_value'
assert response.ack_deadline_seconds == 2066
assert response.retain_acked_messages is True
assert response.enable_message_ordering is True
assert response.filter == 'filter_value'
assert response.detached is True
def test_get_subscription_from_dict():
test_get_subscription(request_type=dict)
def test_get_subscription_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_subscription),
'__call__') as call:
client.get_subscription()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.GetSubscriptionRequest()
@pytest.mark.asyncio
async def test_get_subscription_async(transport: str = 'grpc_asyncio', request_type=pubsub.GetSubscriptionRequest):
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_subscription),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.Subscription(
name='name_value',
topic='topic_value',
ack_deadline_seconds=2066,
retain_acked_messages=True,
enable_message_ordering=True,
filter='filter_value',
detached=True,
))
response = await client.get_subscription(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.GetSubscriptionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pubsub.Subscription)
assert response.name == 'name_value'
assert response.topic == 'topic_value'
assert response.ack_deadline_seconds == 2066
assert response.retain_acked_messages is True
assert response.enable_message_ordering is True
assert response.filter == 'filter_value'
assert response.detached is True
@pytest.mark.asyncio
async def test_get_subscription_async_from_dict():
await test_get_subscription_async(request_type=dict)
def test_get_subscription_field_headers():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.GetSubscriptionRequest()
request.subscription = 'subscription/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_subscription),
'__call__') as call:
call.return_value = pubsub.Subscription()
client.get_subscription(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'subscription=subscription/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_get_subscription_field_headers_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.GetSubscriptionRequest()
request.subscription = 'subscription/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_subscription),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.Subscription())
await client.get_subscription(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'subscription=subscription/value',
) in kw['metadata']
def test_get_subscription_flattened():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_subscription),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.Subscription()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_subscription(
subscription='subscription_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].subscription == 'subscription_value'
def test_get_subscription_flattened_error():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_subscription(
pubsub.GetSubscriptionRequest(),
subscription='subscription_value',
)
@pytest.mark.asyncio
async def test_get_subscription_flattened_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_subscription),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.Subscription()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.Subscription())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_subscription(
subscription='subscription_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].subscription == 'subscription_value'
@pytest.mark.asyncio
async def test_get_subscription_flattened_error_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_subscription(
pubsub.GetSubscriptionRequest(),
subscription='subscription_value',
)
def test_update_subscription(transport: str = 'grpc', request_type=pubsub.UpdateSubscriptionRequest):
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_subscription),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.Subscription(
name='name_value',
topic='topic_value',
ack_deadline_seconds=2066,
retain_acked_messages=True,
enable_message_ordering=True,
filter='filter_value',
detached=True,
)
response = client.update_subscription(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.UpdateSubscriptionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pubsub.Subscription)
assert response.name == 'name_value'
assert response.topic == 'topic_value'
assert response.ack_deadline_seconds == 2066
assert response.retain_acked_messages is True
assert response.enable_message_ordering is True
assert response.filter == 'filter_value'
assert response.detached is True
def test_update_subscription_from_dict():
test_update_subscription(request_type=dict)
def test_update_subscription_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_subscription),
'__call__') as call:
client.update_subscription()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.UpdateSubscriptionRequest()
@pytest.mark.asyncio
async def test_update_subscription_async(transport: str = 'grpc_asyncio', request_type=pubsub.UpdateSubscriptionRequest):
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_subscription),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.Subscription(
name='name_value',
topic='topic_value',
ack_deadline_seconds=2066,
retain_acked_messages=True,
enable_message_ordering=True,
filter='filter_value',
detached=True,
))
response = await client.update_subscription(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.UpdateSubscriptionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pubsub.Subscription)
assert response.name == 'name_value'
assert response.topic == 'topic_value'
assert response.ack_deadline_seconds == 2066
assert response.retain_acked_messages is True
assert response.enable_message_ordering is True
assert response.filter == 'filter_value'
assert response.detached is True
@pytest.mark.asyncio
async def test_update_subscription_async_from_dict():
await test_update_subscription_async(request_type=dict)
def test_update_subscription_field_headers():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.UpdateSubscriptionRequest()
request.subscription.name = 'subscription.name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_subscription),
'__call__') as call:
call.return_value = pubsub.Subscription()
client.update_subscription(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'subscription.name=subscription.name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_update_subscription_field_headers_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.UpdateSubscriptionRequest()
request.subscription.name = 'subscription.name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_subscription),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.Subscription())
await client.update_subscription(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'subscription.name=subscription.name/value',
) in kw['metadata']
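# Illustrative usage sketch (resource name is a placeholder): update_subscription has no
# flattened form in these tests, so a request object carrying a FieldMask selects the
# single field to change.
def _example_update_ack_deadline(client: SubscriberClient) -> pubsub.Subscription:
    request = pubsub.UpdateSubscriptionRequest(
        subscription=pubsub.Subscription(
            name="projects/my-project/subscriptions/my-sub",
            ack_deadline_seconds=60,
        ),
        update_mask=field_mask.FieldMask(paths=["ack_deadline_seconds"]),
    )
    return client.update_subscription(request=request)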
def test_list_subscriptions(transport: str = 'grpc', request_type=pubsub.ListSubscriptionsRequest):
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_subscriptions),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.ListSubscriptionsResponse(
next_page_token='next_page_token_value',
)
response = client.list_subscriptions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.ListSubscriptionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListSubscriptionsPager)
assert response.next_page_token == 'next_page_token_value'
def test_list_subscriptions_from_dict():
test_list_subscriptions(request_type=dict)
def test_list_subscriptions_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_subscriptions),
'__call__') as call:
client.list_subscriptions()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.ListSubscriptionsRequest()
@pytest.mark.asyncio
async def test_list_subscriptions_async(transport: str = 'grpc_asyncio', request_type=pubsub.ListSubscriptionsRequest):
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_subscriptions),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.ListSubscriptionsResponse(
next_page_token='next_page_token_value',
))
response = await client.list_subscriptions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.ListSubscriptionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListSubscriptionsAsyncPager)
assert response.next_page_token == 'next_page_token_value'
@pytest.mark.asyncio
async def test_list_subscriptions_async_from_dict():
await test_list_subscriptions_async(request_type=dict)
def test_list_subscriptions_field_headers():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.ListSubscriptionsRequest()
request.project = 'project/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_subscriptions),
'__call__') as call:
call.return_value = pubsub.ListSubscriptionsResponse()
client.list_subscriptions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'project=project/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_list_subscriptions_field_headers_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.ListSubscriptionsRequest()
request.project = 'project/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_subscriptions),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.ListSubscriptionsResponse())
await client.list_subscriptions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'project=project/value',
) in kw['metadata']
def test_list_subscriptions_flattened():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_subscriptions),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.ListSubscriptionsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_subscriptions(
project='project_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project == 'project_value'
def test_list_subscriptions_flattened_error():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_subscriptions(
pubsub.ListSubscriptionsRequest(),
project='project_value',
)
@pytest.mark.asyncio
async def test_list_subscriptions_flattened_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_subscriptions),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.ListSubscriptionsResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.ListSubscriptionsResponse())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_subscriptions(
project='project_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project == 'project_value'
@pytest.mark.asyncio
async def test_list_subscriptions_flattened_error_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_subscriptions(
pubsub.ListSubscriptionsRequest(),
project='project_value',
)
def test_list_subscriptions_pager():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_subscriptions),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
pubsub.ListSubscriptionsResponse(
subscriptions=[
pubsub.Subscription(),
pubsub.Subscription(),
pubsub.Subscription(),
],
next_page_token='abc',
),
pubsub.ListSubscriptionsResponse(
subscriptions=[],
next_page_token='def',
),
pubsub.ListSubscriptionsResponse(
subscriptions=[
pubsub.Subscription(),
],
next_page_token='ghi',
),
pubsub.ListSubscriptionsResponse(
subscriptions=[
pubsub.Subscription(),
pubsub.Subscription(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('project', ''),
)),
)
pager = client.list_subscriptions(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, pubsub.Subscription)
for i in results)
def test_list_subscriptions_pages():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_subscriptions),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
pubsub.ListSubscriptionsResponse(
subscriptions=[
pubsub.Subscription(),
pubsub.Subscription(),
pubsub.Subscription(),
],
next_page_token='abc',
),
pubsub.ListSubscriptionsResponse(
subscriptions=[],
next_page_token='def',
),
pubsub.ListSubscriptionsResponse(
subscriptions=[
pubsub.Subscription(),
],
next_page_token='ghi',
),
pubsub.ListSubscriptionsResponse(
subscriptions=[
pubsub.Subscription(),
pubsub.Subscription(),
],
),
RuntimeError,
)
pages = list(client.list_subscriptions(request={}).pages)
for page_, token in zip(pages, ['abc','def','ghi', '']):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_subscriptions_async_pager():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_subscriptions),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
pubsub.ListSubscriptionsResponse(
subscriptions=[
pubsub.Subscription(),
pubsub.Subscription(),
pubsub.Subscription(),
],
next_page_token='abc',
),
pubsub.ListSubscriptionsResponse(
subscriptions=[],
next_page_token='def',
),
pubsub.ListSubscriptionsResponse(
subscriptions=[
pubsub.Subscription(),
],
next_page_token='ghi',
),
pubsub.ListSubscriptionsResponse(
subscriptions=[
pubsub.Subscription(),
pubsub.Subscription(),
],
),
RuntimeError,
)
async_pager = await client.list_subscriptions(request={},)
assert async_pager.next_page_token == 'abc'
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, pubsub.Subscription)
for i in responses)
@pytest.mark.asyncio
async def test_list_subscriptions_async_pages():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_subscriptions),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
pubsub.ListSubscriptionsResponse(
subscriptions=[
pubsub.Subscription(),
pubsub.Subscription(),
pubsub.Subscription(),
],
next_page_token='abc',
),
pubsub.ListSubscriptionsResponse(
subscriptions=[],
next_page_token='def',
),
pubsub.ListSubscriptionsResponse(
subscriptions=[
pubsub.Subscription(),
],
next_page_token='ghi',
),
pubsub.ListSubscriptionsResponse(
subscriptions=[
pubsub.Subscription(),
pubsub.Subscription(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_subscriptions(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ['abc','def','ghi', '']):
assert page_.raw_page.next_page_token == token
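# Illustrative usage sketch (project name is a placeholder): the pager returned by
# list_subscriptions follows next_page_token transparently, so plain iteration yields
# every pubsub.Subscription across pages -- the behaviour the pager tests above verify.
def _example_list_all_subscription_names(client: SubscriberClient, project: str = "projects/my-project"):
    return [subscription.name for subscription in client.list_subscriptions(project=project)]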
def test_delete_subscription(transport: str = 'grpc', request_type=pubsub.DeleteSubscriptionRequest):
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_subscription),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_subscription(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.DeleteSubscriptionRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_subscription_from_dict():
test_delete_subscription(request_type=dict)
def test_delete_subscription_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_subscription),
'__call__') as call:
client.delete_subscription()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.DeleteSubscriptionRequest()
@pytest.mark.asyncio
async def test_delete_subscription_async(transport: str = 'grpc_asyncio', request_type=pubsub.DeleteSubscriptionRequest):
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_subscription),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_subscription(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.DeleteSubscriptionRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_subscription_async_from_dict():
await test_delete_subscription_async(request_type=dict)
def test_delete_subscription_field_headers():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.DeleteSubscriptionRequest()
request.subscription = 'subscription/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_subscription),
'__call__') as call:
call.return_value = None
client.delete_subscription(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'subscription=subscription/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_delete_subscription_field_headers_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.DeleteSubscriptionRequest()
request.subscription = 'subscription/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_subscription),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_subscription(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'subscription=subscription/value',
) in kw['metadata']
def test_delete_subscription_flattened():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_subscription),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_subscription(
subscription='subscription_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].subscription == 'subscription_value'
def test_delete_subscription_flattened_error():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_subscription(
pubsub.DeleteSubscriptionRequest(),
subscription='subscription_value',
)
@pytest.mark.asyncio
async def test_delete_subscription_flattened_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_subscription),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = None
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_subscription(
subscription='subscription_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].subscription == 'subscription_value'
@pytest.mark.asyncio
async def test_delete_subscription_flattened_error_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_subscription(
pubsub.DeleteSubscriptionRequest(),
subscription='subscription_value',
)
def test_modify_ack_deadline(transport: str = 'grpc', request_type=pubsub.ModifyAckDeadlineRequest):
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.modify_ack_deadline),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.modify_ack_deadline(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.ModifyAckDeadlineRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_modify_ack_deadline_from_dict():
test_modify_ack_deadline(request_type=dict)
def test_modify_ack_deadline_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.modify_ack_deadline),
'__call__') as call:
client.modify_ack_deadline()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.ModifyAckDeadlineRequest()
@pytest.mark.asyncio
async def test_modify_ack_deadline_async(transport: str = 'grpc_asyncio', request_type=pubsub.ModifyAckDeadlineRequest):
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.modify_ack_deadline),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.modify_ack_deadline(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.ModifyAckDeadlineRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_modify_ack_deadline_async_from_dict():
await test_modify_ack_deadline_async(request_type=dict)
def test_modify_ack_deadline_field_headers():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.ModifyAckDeadlineRequest()
request.subscription = 'subscription/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.modify_ack_deadline),
'__call__') as call:
call.return_value = None
client.modify_ack_deadline(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'subscription=subscription/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_modify_ack_deadline_field_headers_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.ModifyAckDeadlineRequest()
request.subscription = 'subscription/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.modify_ack_deadline),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.modify_ack_deadline(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'subscription=subscription/value',
) in kw['metadata']
def test_modify_ack_deadline_flattened():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.modify_ack_deadline),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.modify_ack_deadline(
subscription='subscription_value',
ack_ids=['ack_ids_value'],
ack_deadline_seconds=2066,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].subscription == 'subscription_value'
assert args[0].ack_ids == ['ack_ids_value']
assert args[0].ack_deadline_seconds == 2066
def test_modify_ack_deadline_flattened_error():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.modify_ack_deadline(
pubsub.ModifyAckDeadlineRequest(),
subscription='subscription_value',
ack_ids=['ack_ids_value'],
ack_deadline_seconds=2066,
)
@pytest.mark.asyncio
async def test_modify_ack_deadline_flattened_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.modify_ack_deadline),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.modify_ack_deadline(
subscription='subscription_value',
ack_ids=['ack_ids_value'],
ack_deadline_seconds=2066,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].subscription == 'subscription_value'
assert args[0].ack_ids == ['ack_ids_value']
assert args[0].ack_deadline_seconds == 2066
@pytest.mark.asyncio
async def test_modify_ack_deadline_flattened_error_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.modify_ack_deadline(
pubsub.ModifyAckDeadlineRequest(),
subscription='subscription_value',
ack_ids=['ack_ids_value'],
ack_deadline_seconds=2066,
)
def test_acknowledge(transport: str = 'grpc', request_type=pubsub.AcknowledgeRequest):
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.acknowledge),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.acknowledge(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.AcknowledgeRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_acknowledge_from_dict():
test_acknowledge(request_type=dict)
def test_acknowledge_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.acknowledge),
'__call__') as call:
client.acknowledge()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.AcknowledgeRequest()
@pytest.mark.asyncio
async def test_acknowledge_async(transport: str = 'grpc_asyncio', request_type=pubsub.AcknowledgeRequest):
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.acknowledge),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.acknowledge(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.AcknowledgeRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_acknowledge_async_from_dict():
await test_acknowledge_async(request_type=dict)
def test_acknowledge_field_headers():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.AcknowledgeRequest()
request.subscription = 'subscription/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.acknowledge),
'__call__') as call:
call.return_value = None
client.acknowledge(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'subscription=subscription/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_acknowledge_field_headers_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.AcknowledgeRequest()
request.subscription = 'subscription/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.acknowledge),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.acknowledge(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'subscription=subscription/value',
) in kw['metadata']
def test_acknowledge_flattened():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.acknowledge),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.acknowledge(
subscription='subscription_value',
ack_ids=['ack_ids_value'],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].subscription == 'subscription_value'
assert args[0].ack_ids == ['ack_ids_value']
def test_acknowledge_flattened_error():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.acknowledge(
pubsub.AcknowledgeRequest(),
subscription='subscription_value',
ack_ids=['ack_ids_value'],
)
@pytest.mark.asyncio
async def test_acknowledge_flattened_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.acknowledge),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.acknowledge(
subscription='subscription_value',
ack_ids=['ack_ids_value'],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].subscription == 'subscription_value'
assert args[0].ack_ids == ['ack_ids_value']
@pytest.mark.asyncio
async def test_acknowledge_flattened_error_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.acknowledge(
pubsub.AcknowledgeRequest(),
subscription='subscription_value',
ack_ids=['ack_ids_value'],
)
def test_pull(transport: str = 'grpc', request_type=pubsub.PullRequest):
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.pull),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.PullResponse(
)
response = client.pull(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.PullRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pubsub.PullResponse)
def test_pull_from_dict():
test_pull(request_type=dict)
def test_pull_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.pull),
'__call__') as call:
client.pull()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.PullRequest()
@pytest.mark.asyncio
async def test_pull_async(transport: str = 'grpc_asyncio', request_type=pubsub.PullRequest):
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.pull),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.PullResponse(
))
response = await client.pull(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.PullRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pubsub.PullResponse)
@pytest.mark.asyncio
async def test_pull_async_from_dict():
await test_pull_async(request_type=dict)
def test_pull_field_headers():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.PullRequest()
request.subscription = 'subscription/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.pull),
'__call__') as call:
call.return_value = pubsub.PullResponse()
client.pull(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'subscription=subscription/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_pull_field_headers_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.PullRequest()
request.subscription = 'subscription/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.pull),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.PullResponse())
await client.pull(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'subscription=subscription/value',
) in kw['metadata']
def test_pull_flattened():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.pull),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.PullResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.pull(
subscription='subscription_value',
return_immediately=True,
max_messages=1277,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].subscription == 'subscription_value'
        assert args[0].return_immediately is True
assert args[0].max_messages == 1277
def test_pull_flattened_error():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.pull(
pubsub.PullRequest(),
subscription='subscription_value',
return_immediately=True,
max_messages=1277,
)
@pytest.mark.asyncio
async def test_pull_flattened_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.pull),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.PullResponse())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.pull(
subscription='subscription_value',
return_immediately=True,
max_messages=1277,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].subscription == 'subscription_value'
        assert args[0].return_immediately is True
assert args[0].max_messages == 1277
@pytest.mark.asyncio
async def test_pull_flattened_error_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.pull(
pubsub.PullRequest(),
subscription='subscription_value',
return_immediately=True,
max_messages=1277,
)
def test_streaming_pull(transport: str = 'grpc', request_type=pubsub.StreamingPullRequest):
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
requests = [request]
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.streaming_pull),
'__call__') as call:
# Designate an appropriate return value for the call.
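        # The server stream is faked as a plain iterator over responses.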
call.return_value = iter([pubsub.StreamingPullResponse()])
response = client.streaming_pull(iter(requests))
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert next(args[0]) == request
# Establish that the response is the type that we expect.
for message in response:
assert isinstance(message, pubsub.StreamingPullResponse)
def test_streaming_pull_from_dict():
test_streaming_pull(request_type=dict)
@pytest.mark.asyncio
async def test_streaming_pull_async(transport: str = 'grpc_asyncio', request_type=pubsub.StreamingPullRequest):
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
requests = [request]
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.streaming_pull),
'__call__') as call:
# Designate an appropriate return value for the call.
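        # The bidirectional stream is faked with a StreamStreamCall mock whose
        # read() coroutine returns a single response.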
call.return_value = mock.Mock(aio.StreamStreamCall, autospec=True)
call.return_value.read = mock.AsyncMock(side_effect=[pubsub.StreamingPullResponse()])
response = await client.streaming_pull(iter(requests))
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert next(args[0]) == request
# Establish that the response is the type that we expect.
message = await response.read()
assert isinstance(message, pubsub.StreamingPullResponse)
@pytest.mark.asyncio
async def test_streaming_pull_async_from_dict():
await test_streaming_pull_async(request_type=dict)
def test_modify_push_config(transport: str = 'grpc', request_type=pubsub.ModifyPushConfigRequest):
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.modify_push_config),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.modify_push_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.ModifyPushConfigRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_modify_push_config_from_dict():
test_modify_push_config(request_type=dict)
def test_modify_push_config_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.modify_push_config),
'__call__') as call:
client.modify_push_config()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.ModifyPushConfigRequest()
@pytest.mark.asyncio
async def test_modify_push_config_async(transport: str = 'grpc_asyncio', request_type=pubsub.ModifyPushConfigRequest):
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.modify_push_config),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.modify_push_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.ModifyPushConfigRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_modify_push_config_async_from_dict():
await test_modify_push_config_async(request_type=dict)
def test_modify_push_config_field_headers():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.ModifyPushConfigRequest()
request.subscription = 'subscription/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.modify_push_config),
'__call__') as call:
call.return_value = None
client.modify_push_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'subscription=subscription/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_modify_push_config_field_headers_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.ModifyPushConfigRequest()
request.subscription = 'subscription/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.modify_push_config),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.modify_push_config(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'subscription=subscription/value',
) in kw['metadata']
def test_modify_push_config_flattened():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.modify_push_config),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.modify_push_config(
subscription='subscription_value',
push_config=pubsub.PushConfig(push_endpoint='push_endpoint_value'),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].subscription == 'subscription_value'
assert args[0].push_config == pubsub.PushConfig(push_endpoint='push_endpoint_value')
def test_modify_push_config_flattened_error():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.modify_push_config(
pubsub.ModifyPushConfigRequest(),
subscription='subscription_value',
push_config=pubsub.PushConfig(push_endpoint='push_endpoint_value'),
)
@pytest.mark.asyncio
async def test_modify_push_config_flattened_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.modify_push_config),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.modify_push_config(
subscription='subscription_value',
push_config=pubsub.PushConfig(push_endpoint='push_endpoint_value'),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].subscription == 'subscription_value'
assert args[0].push_config == pubsub.PushConfig(push_endpoint='push_endpoint_value')
@pytest.mark.asyncio
async def test_modify_push_config_flattened_error_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.modify_push_config(
pubsub.ModifyPushConfigRequest(),
subscription='subscription_value',
push_config=pubsub.PushConfig(push_endpoint='push_endpoint_value'),
)
def test_get_snapshot(transport: str = 'grpc', request_type=pubsub.GetSnapshotRequest):
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_snapshot),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.Snapshot(
name='name_value',
topic='topic_value',
)
response = client.get_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.GetSnapshotRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pubsub.Snapshot)
assert response.name == 'name_value'
assert response.topic == 'topic_value'
def test_get_snapshot_from_dict():
test_get_snapshot(request_type=dict)
def test_get_snapshot_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_snapshot),
'__call__') as call:
client.get_snapshot()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.GetSnapshotRequest()
@pytest.mark.asyncio
async def test_get_snapshot_async(transport: str = 'grpc_asyncio', request_type=pubsub.GetSnapshotRequest):
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_snapshot),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.Snapshot(
name='name_value',
topic='topic_value',
))
response = await client.get_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.GetSnapshotRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pubsub.Snapshot)
assert response.name == 'name_value'
assert response.topic == 'topic_value'
@pytest.mark.asyncio
async def test_get_snapshot_async_from_dict():
await test_get_snapshot_async(request_type=dict)
def test_get_snapshot_field_headers():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.GetSnapshotRequest()
request.snapshot = 'snapshot/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_snapshot),
'__call__') as call:
call.return_value = pubsub.Snapshot()
client.get_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'snapshot=snapshot/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_get_snapshot_field_headers_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.GetSnapshotRequest()
request.snapshot = 'snapshot/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_snapshot),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.Snapshot())
await client.get_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'snapshot=snapshot/value',
) in kw['metadata']
def test_get_snapshot_flattened():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_snapshot),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.Snapshot()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_snapshot(
snapshot='snapshot_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].snapshot == 'snapshot_value'
def test_get_snapshot_flattened_error():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_snapshot(
pubsub.GetSnapshotRequest(),
snapshot='snapshot_value',
)
@pytest.mark.asyncio
async def test_get_snapshot_flattened_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_snapshot),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.Snapshot())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_snapshot(
snapshot='snapshot_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].snapshot == 'snapshot_value'
@pytest.mark.asyncio
async def test_get_snapshot_flattened_error_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_snapshot(
pubsub.GetSnapshotRequest(),
snapshot='snapshot_value',
)
def test_list_snapshots(transport: str = 'grpc', request_type=pubsub.ListSnapshotsRequest):
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_snapshots),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.ListSnapshotsResponse(
next_page_token='next_page_token_value',
)
response = client.list_snapshots(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.ListSnapshotsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListSnapshotsPager)
assert response.next_page_token == 'next_page_token_value'
def test_list_snapshots_from_dict():
test_list_snapshots(request_type=dict)
def test_list_snapshots_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_snapshots),
'__call__') as call:
client.list_snapshots()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.ListSnapshotsRequest()
@pytest.mark.asyncio
async def test_list_snapshots_async(transport: str = 'grpc_asyncio', request_type=pubsub.ListSnapshotsRequest):
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_snapshots),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.ListSnapshotsResponse(
next_page_token='next_page_token_value',
))
response = await client.list_snapshots(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.ListSnapshotsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListSnapshotsAsyncPager)
assert response.next_page_token == 'next_page_token_value'
@pytest.mark.asyncio
async def test_list_snapshots_async_from_dict():
await test_list_snapshots_async(request_type=dict)
def test_list_snapshots_field_headers():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.ListSnapshotsRequest()
request.project = 'project/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_snapshots),
'__call__') as call:
call.return_value = pubsub.ListSnapshotsResponse()
client.list_snapshots(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'project=project/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_list_snapshots_field_headers_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.ListSnapshotsRequest()
request.project = 'project/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_snapshots),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.ListSnapshotsResponse())
await client.list_snapshots(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'project=project/value',
) in kw['metadata']
def test_list_snapshots_flattened():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_snapshots),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.ListSnapshotsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_snapshots(
project='project_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project == 'project_value'
def test_list_snapshots_flattened_error():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_snapshots(
pubsub.ListSnapshotsRequest(),
project='project_value',
)
@pytest.mark.asyncio
async def test_list_snapshots_flattened_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_snapshots),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.ListSnapshotsResponse())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_snapshots(
project='project_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project == 'project_value'
@pytest.mark.asyncio
async def test_list_snapshots_flattened_error_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_snapshots(
pubsub.ListSnapshotsRequest(),
project='project_value',
)
def test_list_snapshots_pager():
client = SubscriberClient(
        credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_snapshots),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
pubsub.ListSnapshotsResponse(
snapshots=[
pubsub.Snapshot(),
pubsub.Snapshot(),
pubsub.Snapshot(),
],
next_page_token='abc',
),
pubsub.ListSnapshotsResponse(
snapshots=[],
next_page_token='def',
),
pubsub.ListSnapshotsResponse(
snapshots=[
pubsub.Snapshot(),
],
next_page_token='ghi',
),
pubsub.ListSnapshotsResponse(
snapshots=[
pubsub.Snapshot(),
pubsub.Snapshot(),
],
),
RuntimeError,
)
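        # The pager should carry the routing-header metadata built from the
        # (empty) project field of the request.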
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('project', ''),
)),
)
pager = client.list_snapshots(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, pubsub.Snapshot)
for i in results)
def test_list_snapshots_pages():
client = SubscriberClient(
        credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_snapshots),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
pubsub.ListSnapshotsResponse(
snapshots=[
pubsub.Snapshot(),
pubsub.Snapshot(),
pubsub.Snapshot(),
],
next_page_token='abc',
),
pubsub.ListSnapshotsResponse(
snapshots=[],
next_page_token='def',
),
pubsub.ListSnapshotsResponse(
snapshots=[
pubsub.Snapshot(),
],
next_page_token='ghi',
),
pubsub.ListSnapshotsResponse(
snapshots=[
pubsub.Snapshot(),
pubsub.Snapshot(),
],
),
RuntimeError,
)
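        # Walking the pages should expose each raw next_page_token, ending
        # with an empty token on the final page.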
pages = list(client.list_snapshots(request={}).pages)
for page_, token in zip(pages, ['abc','def','ghi', '']):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_snapshots_async_pager():
client = SubscriberAsyncClient(
        credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_snapshots),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
pubsub.ListSnapshotsResponse(
snapshots=[
pubsub.Snapshot(),
pubsub.Snapshot(),
pubsub.Snapshot(),
],
next_page_token='abc',
),
pubsub.ListSnapshotsResponse(
snapshots=[],
next_page_token='def',
),
pubsub.ListSnapshotsResponse(
snapshots=[
pubsub.Snapshot(),
],
next_page_token='ghi',
),
pubsub.ListSnapshotsResponse(
snapshots=[
pubsub.Snapshot(),
pubsub.Snapshot(),
],
),
RuntimeError,
)
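        # The async pager surfaces the first page token and yields every
        # snapshot across all pages.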
async_pager = await client.list_snapshots(request={},)
assert async_pager.next_page_token == 'abc'
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, pubsub.Snapshot)
for i in responses)
@pytest.mark.asyncio
async def test_list_snapshots_async_pages():
client = SubscriberAsyncClient(
        credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_snapshots),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
pubsub.ListSnapshotsResponse(
snapshots=[
pubsub.Snapshot(),
pubsub.Snapshot(),
pubsub.Snapshot(),
],
next_page_token='abc',
),
pubsub.ListSnapshotsResponse(
snapshots=[],
next_page_token='def',
),
pubsub.ListSnapshotsResponse(
snapshots=[
pubsub.Snapshot(),
],
next_page_token='ghi',
),
pubsub.ListSnapshotsResponse(
snapshots=[
pubsub.Snapshot(),
pubsub.Snapshot(),
],
),
RuntimeError,
)
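        # Each awaited page should expose its raw next_page_token, ending
        # with an empty token on the final page.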
pages = []
async for page_ in (await client.list_snapshots(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ['abc','def','ghi', '']):
assert page_.raw_page.next_page_token == token
def test_create_snapshot(transport: str = 'grpc', request_type=pubsub.CreateSnapshotRequest):
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_snapshot),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.Snapshot(
name='name_value',
topic='topic_value',
)
response = client.create_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.CreateSnapshotRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pubsub.Snapshot)
assert response.name == 'name_value'
assert response.topic == 'topic_value'
def test_create_snapshot_from_dict():
test_create_snapshot(request_type=dict)
def test_create_snapshot_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_snapshot),
'__call__') as call:
client.create_snapshot()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.CreateSnapshotRequest()
@pytest.mark.asyncio
async def test_create_snapshot_async(transport: str = 'grpc_asyncio', request_type=pubsub.CreateSnapshotRequest):
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_snapshot),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.Snapshot(
name='name_value',
topic='topic_value',
))
response = await client.create_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.CreateSnapshotRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pubsub.Snapshot)
assert response.name == 'name_value'
assert response.topic == 'topic_value'
@pytest.mark.asyncio
async def test_create_snapshot_async_from_dict():
await test_create_snapshot_async(request_type=dict)
def test_create_snapshot_field_headers():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.CreateSnapshotRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_snapshot),
'__call__') as call:
call.return_value = pubsub.Snapshot()
client.create_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_create_snapshot_field_headers_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.CreateSnapshotRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_snapshot),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.Snapshot())
await client.create_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_create_snapshot_flattened():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_snapshot),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.Snapshot()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_snapshot(
name='name_value',
subscription='subscription_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
assert args[0].subscription == 'subscription_value'
def test_create_snapshot_flattened_error():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_snapshot(
pubsub.CreateSnapshotRequest(),
name='name_value',
subscription='subscription_value',
)
@pytest.mark.asyncio
async def test_create_snapshot_flattened_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_snapshot),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.Snapshot())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_snapshot(
name='name_value',
subscription='subscription_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
assert args[0].subscription == 'subscription_value'
@pytest.mark.asyncio
async def test_create_snapshot_flattened_error_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_snapshot(
pubsub.CreateSnapshotRequest(),
name='name_value',
subscription='subscription_value',
)
def test_update_snapshot(transport: str = 'grpc', request_type=pubsub.UpdateSnapshotRequest):
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_snapshot),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.Snapshot(
name='name_value',
topic='topic_value',
)
response = client.update_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.UpdateSnapshotRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pubsub.Snapshot)
assert response.name == 'name_value'
assert response.topic == 'topic_value'
def test_update_snapshot_from_dict():
test_update_snapshot(request_type=dict)
def test_update_snapshot_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_snapshot),
'__call__') as call:
client.update_snapshot()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.UpdateSnapshotRequest()
@pytest.mark.asyncio
async def test_update_snapshot_async(transport: str = 'grpc_asyncio', request_type=pubsub.UpdateSnapshotRequest):
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_snapshot),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.Snapshot(
name='name_value',
topic='topic_value',
))
response = await client.update_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.UpdateSnapshotRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pubsub.Snapshot)
assert response.name == 'name_value'
assert response.topic == 'topic_value'
@pytest.mark.asyncio
async def test_update_snapshot_async_from_dict():
await test_update_snapshot_async(request_type=dict)
def test_update_snapshot_field_headers():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.UpdateSnapshotRequest()
request.snapshot.name = 'snapshot.name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_snapshot),
'__call__') as call:
call.return_value = pubsub.Snapshot()
client.update_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'snapshot.name=snapshot.name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_update_snapshot_field_headers_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.UpdateSnapshotRequest()
request.snapshot.name = 'snapshot.name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_snapshot),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.Snapshot())
await client.update_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'snapshot.name=snapshot.name/value',
) in kw['metadata']
def test_delete_snapshot(transport: str = 'grpc', request_type=pubsub.DeleteSnapshotRequest):
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_snapshot),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.DeleteSnapshotRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_snapshot_from_dict():
test_delete_snapshot(request_type=dict)
def test_delete_snapshot_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_snapshot),
'__call__') as call:
client.delete_snapshot()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.DeleteSnapshotRequest()
@pytest.mark.asyncio
async def test_delete_snapshot_async(transport: str = 'grpc_asyncio', request_type=pubsub.DeleteSnapshotRequest):
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_snapshot),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.DeleteSnapshotRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_snapshot_async_from_dict():
await test_delete_snapshot_async(request_type=dict)
def test_delete_snapshot_field_headers():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.DeleteSnapshotRequest()
request.snapshot = 'snapshot/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_snapshot),
'__call__') as call:
call.return_value = None
client.delete_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'snapshot=snapshot/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_delete_snapshot_field_headers_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.DeleteSnapshotRequest()
request.snapshot = 'snapshot/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_snapshot),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_snapshot(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'snapshot=snapshot/value',
) in kw['metadata']
def test_delete_snapshot_flattened():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_snapshot),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_snapshot(
snapshot='snapshot_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].snapshot == 'snapshot_value'
def test_delete_snapshot_flattened_error():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_snapshot(
pubsub.DeleteSnapshotRequest(),
snapshot='snapshot_value',
)
@pytest.mark.asyncio
async def test_delete_snapshot_flattened_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_snapshot),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_snapshot(
snapshot='snapshot_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].snapshot == 'snapshot_value'
@pytest.mark.asyncio
async def test_delete_snapshot_flattened_error_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_snapshot(
pubsub.DeleteSnapshotRequest(),
snapshot='snapshot_value',
)
def test_seek(transport: str = 'grpc', request_type=pubsub.SeekRequest):
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.seek),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = pubsub.SeekResponse(
)
response = client.seek(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.SeekRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pubsub.SeekResponse)
def test_seek_from_dict():
test_seek(request_type=dict)
def test_seek_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.seek),
'__call__') as call:
client.seek()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.SeekRequest()
@pytest.mark.asyncio
async def test_seek_async(transport: str = 'grpc_asyncio', request_type=pubsub.SeekRequest):
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.seek),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.SeekResponse(
))
response = await client.seek(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == pubsub.SeekRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pubsub.SeekResponse)
@pytest.mark.asyncio
async def test_seek_async_from_dict():
await test_seek_async(request_type=dict)
def test_seek_field_headers():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.SeekRequest()
request.subscription = 'subscription/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.seek),
'__call__') as call:
call.return_value = pubsub.SeekResponse()
client.seek(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'subscription=subscription/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_seek_field_headers_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = pubsub.SeekRequest()
request.subscription = 'subscription/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.seek),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pubsub.SeekResponse())
await client.seek(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'subscription=subscription/value',
) in kw['metadata']
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.SubscriberGrpcTransport(
credentials=credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.SubscriberGrpcTransport(
credentials=credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = SubscriberClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.SubscriberGrpcTransport(
credentials=credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = SubscriberClient(
client_options={"scopes": ["1", "2"]},
transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.SubscriberGrpcTransport(
credentials=credentials.AnonymousCredentials(),
)
client = SubscriberClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.SubscriberGrpcTransport(
credentials=credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.SubscriberGrpcAsyncIOTransport(
credentials=credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize("transport_class", [
transports.SubscriberGrpcTransport,
transports.SubscriberGrpcAsyncIOTransport,
])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(auth, 'default') as adc:
adc.return_value = (credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport,
transports.SubscriberGrpcTransport,
)
def test_subscriber_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(exceptions.DuplicateCredentialArgs):
transport = transports.SubscriberTransport(
credentials=credentials.AnonymousCredentials(),
credentials_file="credentials.json"
)
def test_subscriber_base_transport():
# Instantiate the base transport.
with mock.patch('google.pubsub_v1.services.subscriber.transports.SubscriberTransport.__init__') as Transport:
Transport.return_value = None
transport = transports.SubscriberTransport(
credentials=credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
'create_subscription',
'get_subscription',
'update_subscription',
'list_subscriptions',
'delete_subscription',
'modify_ack_deadline',
'acknowledge',
'pull',
'streaming_pull',
'modify_push_config',
'get_snapshot',
'list_snapshots',
'create_snapshot',
'update_snapshot',
'delete_snapshot',
'seek',
'set_iam_policy',
'get_iam_policy',
'test_iam_permissions',
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
def test_subscriber_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(auth, 'load_credentials_from_file') as load_creds, mock.patch('google.pubsub_v1.services.subscriber.transports.SubscriberTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
load_creds.return_value = (credentials.AnonymousCredentials(), None)
transport = transports.SubscriberTransport(
credentials_file="credentials.json",
quota_project_id="octopus",
)
load_creds.assert_called_once_with("credentials.json", scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/pubsub',
),
quota_project_id="octopus",
)
def test_subscriber_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(auth, 'default') as adc, mock.patch('google.pubsub_v1.services.subscriber.transports.SubscriberTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
adc.return_value = (credentials.AnonymousCredentials(), None)
transport = transports.SubscriberTransport()
adc.assert_called_once()
def test_subscriber_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(auth, 'default') as adc:
adc.return_value = (credentials.AnonymousCredentials(), None)
SubscriberClient()
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/pubsub',),
quota_project_id=None,
)
def test_subscriber_transport_auth_adc():
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(auth, 'default') as adc:
adc.return_value = (credentials.AnonymousCredentials(), None)
transports.SubscriberGrpcTransport(host="squid.clam.whelk", quota_project_id="octopus")
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/pubsub',),
quota_project_id="octopus",
)
@pytest.mark.parametrize("transport_class", [transports.SubscriberGrpcTransport, transports.SubscriberGrpcAsyncIOTransport])
def test_subscriber_grpc_transport_client_cert_source_for_mtls(
transport_class
):
cred = credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/pubsub',
),
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
    # Check that, if ssl_channel_credentials is not provided, then
    # client_cert_source_for_mtls is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert,
private_key=expected_key
)
def test_subscriber_host_no_port():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='pubsub.googleapis.com'),
)
assert client.transport._host == 'pubsub.googleapis.com:443'
def test_subscriber_host_with_port():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='pubsub.googleapis.com:8000'),
)
assert client.transport._host == 'pubsub.googleapis.com:8000'
def test_subscriber_grpc_transport_channel():
channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.SubscriberGrpcTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_subscriber_grpc_asyncio_transport_channel():
channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.SubscriberGrpcAsyncIOTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.SubscriberGrpcTransport, transports.SubscriberGrpcAsyncIOTransport])
def test_subscriber_transport_channel_mtls_with_client_cert_source(
transport_class
):
with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(auth, 'default') as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/pubsub',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.SubscriberGrpcTransport, transports.SubscriberGrpcAsyncIOTransport])
def test_subscriber_transport_channel_mtls_with_adc(
transport_class
):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/pubsub',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_snapshot_path():
project = "squid"
snapshot = "clam"
expected = "projects/{project}/snapshots/{snapshot}".format(project=project, snapshot=snapshot, )
actual = SubscriberClient.snapshot_path(project, snapshot)
assert expected == actual
def test_parse_snapshot_path():
expected = {
"project": "whelk",
"snapshot": "octopus",
}
path = SubscriberClient.snapshot_path(**expected)
# Check that the path construction is reversible.
actual = SubscriberClient.parse_snapshot_path(path)
assert expected == actual
def test_subscription_path():
project = "oyster"
subscription = "nudibranch"
expected = "projects/{project}/subscriptions/{subscription}".format(project=project, subscription=subscription, )
actual = SubscriberClient.subscription_path(project, subscription)
assert expected == actual
def test_parse_subscription_path():
expected = {
"project": "cuttlefish",
"subscription": "mussel",
}
path = SubscriberClient.subscription_path(**expected)
# Check that the path construction is reversible.
actual = SubscriberClient.parse_subscription_path(path)
assert expected == actual
def test_topic_path():
project = "winkle"
topic = "nautilus"
expected = "projects/{project}/topics/{topic}".format(project=project, topic=topic, )
actual = SubscriberClient.topic_path(project, topic)
assert expected == actual
def test_parse_topic_path():
expected = {
"project": "scallop",
"topic": "abalone",
}
path = SubscriberClient.topic_path(**expected)
# Check that the path construction is reversible.
actual = SubscriberClient.parse_topic_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
actual = SubscriberClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = SubscriberClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = SubscriberClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder, )
actual = SubscriberClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = SubscriberClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = SubscriberClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization, )
actual = SubscriberClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = SubscriberClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = SubscriberClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project, )
actual = SubscriberClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = SubscriberClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = SubscriberClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
actual = SubscriberClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = SubscriberClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = SubscriberClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(transports.SubscriberTransport, '_prep_wrapped_messages') as prep:
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(transports.SubscriberTransport, '_prep_wrapped_messages') as prep:
transport_class = SubscriberClient.get_transport_class()
transport = transport_class(
credentials=credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
def test_set_iam_policy(transport: str = "grpc"):
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = iam_policy.SetIamPolicyRequest()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = policy.Policy(version=774, etag=b"etag_blob",)
response = client.set_iam_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the response is the type that we expect.
assert isinstance(response, policy.Policy)
assert response.version == 774
assert response.etag == b"etag_blob"
@pytest.mark.asyncio
async def test_set_iam_policy_async(transport: str = "grpc_asyncio"):
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = iam_policy.SetIamPolicyRequest()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_iam_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
policy.Policy(version=774, etag=b"etag_blob",)
)
response = await client.set_iam_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the response is the type that we expect.
assert isinstance(response, policy.Policy)
assert response.version == 774
assert response.etag == b"etag_blob"
def test_set_iam_policy_field_headers():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = iam_policy.SetIamPolicyRequest()
request.resource = "resource/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
call.return_value = policy.Policy()
client.set_iam_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_set_iam_policy_field_headers_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = iam_policy.SetIamPolicyRequest()
request.resource = "resource/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.set_iam_policy), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy())
await client.set_iam_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
def test_set_iam_policy_from_dict():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = policy.Policy()
response = client.set_iam_policy(
request={
"resource": "resource_value",
"policy": policy.Policy(version=774),
}
)
call.assert_called()
@pytest.mark.asyncio
async def test_set_iam_policy_from_dict_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
policy.Policy()
)
response = await client.set_iam_policy(
request={
"resource": "resource_value",
"policy": policy.Policy(version=774),
}
)
call.assert_called()
def test_get_iam_policy(transport: str = "grpc"):
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = iam_policy.GetIamPolicyRequest()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = policy.Policy(version=774, etag=b"etag_blob",)
response = client.get_iam_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the response is the type that we expect.
assert isinstance(response, policy.Policy)
assert response.version == 774
assert response.etag == b"etag_blob"
@pytest.mark.asyncio
async def test_get_iam_policy_async(transport: str = "grpc_asyncio"):
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = iam_policy.GetIamPolicyRequest()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_iam_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
policy.Policy(version=774, etag=b"etag_blob",)
)
response = await client.get_iam_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the response is the type that we expect.
assert isinstance(response, policy.Policy)
assert response.version == 774
assert response.etag == b"etag_blob"
def test_get_iam_policy_field_headers():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = iam_policy.GetIamPolicyRequest()
request.resource = "resource/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
call.return_value = policy.Policy()
client.get_iam_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_iam_policy_field_headers_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = iam_policy.GetIamPolicyRequest()
request.resource = "resource/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_iam_policy), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy())
await client.get_iam_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
def test_get_iam_policy_from_dict():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = policy.Policy()
response = client.get_iam_policy(
request={
"resource": "resource_value",
"options": options.GetPolicyOptions(requested_policy_version=2598),
}
)
call.assert_called()
@pytest.mark.asyncio
async def test_get_iam_policy_from_dict_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
policy.Policy()
)
response = await client.get_iam_policy(
request={
"resource": "resource_value",
"options": options.GetPolicyOptions(requested_policy_version=2598),
}
)
call.assert_called()
def test_test_iam_permissions(transport: str = "grpc"):
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = iam_policy.TestIamPermissionsRequest()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.test_iam_permissions), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = iam_policy.TestIamPermissionsResponse(
permissions=["permissions_value"],
)
response = client.test_iam_permissions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the response is the type that we expect.
assert isinstance(response, iam_policy.TestIamPermissionsResponse)
assert response.permissions == ["permissions_value"]
@pytest.mark.asyncio
async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"):
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = iam_policy.TestIamPermissionsRequest()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.test_iam_permissions), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
iam_policy.TestIamPermissionsResponse(permissions=["permissions_value"],)
)
response = await client.test_iam_permissions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the response is the type that we expect.
assert isinstance(response, iam_policy.TestIamPermissionsResponse)
assert response.permissions == ["permissions_value"]
def test_test_iam_permissions_field_headers():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = iam_policy.TestIamPermissionsRequest()
request.resource = "resource/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.test_iam_permissions), "__call__"
) as call:
call.return_value = iam_policy.TestIamPermissionsResponse()
client.test_iam_permissions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_test_iam_permissions_field_headers_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = iam_policy.TestIamPermissionsRequest()
request.resource = "resource/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.test_iam_permissions), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
iam_policy.TestIamPermissionsResponse()
)
await client.test_iam_permissions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
def test_test_iam_permissions_from_dict():
client = SubscriberClient(
credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.test_iam_permissions), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = iam_policy.TestIamPermissionsResponse()
response = client.test_iam_permissions(
request={
"resource": "resource_value",
"permissions": ["permissions_value"],
}
)
call.assert_called()
@pytest.mark.asyncio
async def test_test_iam_permissions_from_dict_async():
client = SubscriberAsyncClient(
credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.test_iam_permissions), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
iam_policy.TestIamPermissionsResponse()
)
response = await client.test_iam_permissions(
request={
"resource": "resource_value",
"permissions": ["permissions_value"],
}
)
call.assert_called()
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
efe05d84e80b3c205eaad6cbb005cf317016866d | 5f6cd57c692191acfea18c1af9d87e7db8e873f1 | /devito/ir/iet/scheduler.py | 7f2bf50f2985af84bbae530639c9d5aeae566821 | [
"MIT"
] | permissive | Antongk/devito | 684b0f4928d47bb6acc6469cc0471b5122c34561 | a50c0a8337497cf6d7603cf9beff4a3231e63bee | refs/heads/master | 2020-04-19T17:00:18.380108 | 2019-01-30T09:16:38 | 2019-01-30T09:16:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,330 | py | from collections import OrderedDict
from devito.cgen_utils import Allocator
from devito.ir.iet import (Expression, Increment, LocalExpression, Element, Iteration,
List, Conditional, Section, HaloSpot, ExpressionBundle,
MapExpressions, Transformer, FindNodes, FindSymbols, XSubs,
iet_analyze, filter_iterations)
from devito.symbolics import IntDiv, xreplace_indices
from devito.tools import as_mapper
from devito.types import ConditionalDimension
__all__ = ['iet_build', 'iet_insert_C_decls']
def iet_build(stree):
"""
Create an Iteration/Expression tree (IET) from a :class:`ScheduleTree`.
The nodes in the returned IET are decorated with properties deriving from
data dependence analysis.
"""
# Schedule tree -> Iteration/Expression tree
iet = iet_make(stree)
# Data dependency analysis. Properties are attached directly to nodes
iet = iet_analyze(iet)
# Turn DerivedDimensions into lower-level Dimensions or Symbols
iet = iet_lower_dimensions(iet)
return iet
def iet_make(stree):
"""Create an IET from a :class:`ScheduleTree`."""
nsections = 0
queues = OrderedDict()
for i in stree.visit():
if i == stree:
# We hit this handle at the very end of the visit
return List(body=queues.pop(i))
elif i.is_Exprs:
exprs = [Increment(e) if e.is_Increment else Expression(e) for e in i.exprs]
body = [ExpressionBundle(i.shape, i.ops, i.traffic, body=exprs)]
elif i.is_Conditional:
body = [Conditional(i.guard, queues.pop(i))]
elif i.is_Iteration:
# Order to ensure deterministic code generation
uindices = sorted(i.sub_iterators, key=lambda d: d.name)
# Generate Iteration
body = [Iteration(queues.pop(i), i.dim, i.dim._limits, offsets=i.limits,
direction=i.direction, uindices=uindices)]
elif i.is_Section:
body = [Section('section%d' % nsections, body=queues.pop(i))]
nsections += 1
elif i.is_Halo:
body = [HaloSpot(hs) for hs in i.halo_scheme.components] + queues.pop(i)
queues.setdefault(i.parent, []).extend(body)
assert False
def iet_lower_dimensions(iet):
"""
Replace all :class:`DerivedDimension`s within the ``iet``'s expressions with
lower-level symbolic objects (other :class:`Dimension`s, or :class:`sympy.Symbol`).
* Array indices involving :class:`SteppingDimension`s are turned into
:class:`ModuloDimension`s.
Example: ``u[t+1, x] = u[t, x] + 1 >>> u[t1, x] = u[t0, x] + 1``
        * Array indices involving :class:`ConditionalDimension`s are turned into
integer-division expressions.
Example: ``u[t_sub, x] = u[time, x] >>> u[time / 4, x] = u[time, x]``
"""
# Lower SteppingDimensions
for i in FindNodes(Iteration).visit(iet):
if not i.uindices:
# Be quick: avoid uselessy reconstructing nodes
continue
# In an expression, there could be `u[t+1, ...]` and `v[t+1, ...]`, where
# `u` and `v` are TimeFunction with circular time buffers (save=None) *but*
# different modulo extent. The `t+1` indices above are therefore conceptually
# different, so they will be replaced with the proper ModuloDimension through
# two different calls to `xreplace`
groups = as_mapper(i.uindices, lambda d: d.modulo)
for k, v in groups.items():
mapper = {d.origin: d for d in v}
rule = lambda i: i.function.is_TimeFunction and i.function._time_size == k
replacer = lambda i: xreplace_indices(i, mapper, rule)
iet = XSubs(replacer=replacer).visit(iet)
# Lower ConditionalDimensions
cdims = [d for d in FindSymbols('free-symbols').visit(iet)
if isinstance(d, ConditionalDimension)]
mapper = {d: IntDiv(d.index, d.factor) for d in cdims}
iet = XSubs(mapper).visit(iet)
return iet
def iet_insert_C_decls(iet, external=None):
"""
Given an IET, build a new tree with the necessary symbol declarations.
Declarations are placed as close as possible to the first symbol occurrence.
Parameters
----------
iet : Node
The input Iteration/Expression tree.
external : tuple, optional
The symbols defined in some outer Callable, which therefore must not
be re-defined.
"""
external = external or []
# Classify and then schedule declarations to stack/heap
allocator = Allocator()
mapper = OrderedDict()
for k, v in MapExpressions().visit(iet).items():
if k.is_Expression:
if k.is_scalar_assign:
# Inline declaration
mapper[k] = LocalExpression(**k.args)
continue
objs = [k.write]
elif k.is_Call:
objs = k.params
for i in objs:
try:
if i.is_LocalObject:
# On the stack
site = v[-1] if v else iet
allocator.push_stack(site, i)
elif i.is_Array:
if i in external:
# The Array is to be defined in some foreign IET
continue
elif i._mem_stack:
# On the stack
key = lambda i: not i.is_Parallel
site = filter_iterations(v, key=key, stop='asap') or [iet]
allocator.push_stack(site[-1], i)
else:
# On the heap, as a tensor that must be globally accessible
allocator.push_heap(i)
except AttributeError:
# E.g., a generic SymPy expression
pass
# Introduce declarations on the stack
for k, v in allocator.onstack:
mapper[k] = tuple(Element(i) for i in v)
iet = Transformer(mapper, nested=True).visit(iet)
# Introduce declarations on the heap (if any)
if allocator.onheap:
decls, allocs, frees = zip(*allocator.onheap)
iet = List(header=decls + allocs, body=iet, footer=frees)
return iet
| [
"[email protected]"
] | |
e1816c2710b111f0d03effc960902ea50e60d696 | 32226e72c8cbaa734b2bdee081c2a2d4d0322702 | /experiments/murtaza/multiworld/camera_ready/pusher/offline_vae_twin_sac.py | 621da28adc1a2950fe2cadb66df88816f479d492 | [
"MIT"
] | permissive | Asap7772/rail-rl-franka-eval | 2b1cbad7adae958b3b53930a837df8a31ab885dc | 4bf99072376828193d05b53cf83c7e8f4efbd3ba | refs/heads/master | 2022-11-15T07:08:33.416025 | 2020-07-12T22:05:32 | 2020-07-12T22:05:32 | 279,155,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,361 | py | import railrl.misc.hyperparameter as hyp
from multiworld.envs.mujoco.cameras import sawyer_pusher_camera_upright_v1
from railrl.launchers.launcher_util import run_experiment
from railrl.torch.grill.launcher import grill_her_td3_full_experiment, grill_her_twin_sac_full_experiment
if __name__ == "__main__":
variant = dict(
imsize=48,
init_camera=sawyer_pusher_camera_upright_v1,
env_id='SawyerPushAndReachEnvEasy-v0',
grill_variant=dict(
save_video=True,
save_video_period=100,
qf_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_kwargs=dict(
hidden_sizes=[400, 300],
),
vf_kwargs=dict(
hidden_sizes=[400, 300],
),
algo_kwargs=dict(
base_kwargs=dict(
num_epochs=1005,
num_steps_per_epoch=1000,
num_steps_per_eval=1000,
min_num_steps_before_training=4000,
batch_size=128,
max_path_length=100,
discount=0.99,
num_updates_per_env_step=1,
collection_mode='online-parallel',
parallel_env_params=dict(
num_workers=1,
),
reward_scale=1,
),
her_kwargs=dict(
),
twin_sac_kwargs=dict(
train_policy_with_reparameterization=True,
soft_target_tau=1e-3, # 1e-2
policy_update_period=1,
target_update_period=1, # 1
use_automatic_entropy_tuning=True,
),
),
replay_buffer_kwargs=dict(
max_size=int(1e6),
fraction_goals_are_rollout_goals=0.5,
fraction_resampled_goals_are_env_goals=0.5,
),
algorithm='RIG-HER-TD3',
normalize=False,
render=False,
exploration_noise=0,
exploration_type='ou',
training_mode='train',
testing_mode='test',
reward_params=dict(
type='latent_distance',
),
observation_key='latent_observation',
desired_goal_key='latent_desired_goal',
vae_wrapped_env_kwargs=dict(
sample_from_true_prior=True,
)
),
train_vae_variant=dict(
vae_path=None,
representation_size=16,
beta=2.5,
num_epochs=1000,
dump_skew_debug_plots=False,
generate_vae_dataset_kwargs=dict(
test_p=.9,
N=5000,
oracle_dataset_using_set_to_goal=True,
use_cached=True,
vae_dataset_specific_kwargs=dict(
),
show=False,
),
vae_kwargs=dict(
input_channels=3,
),
algo_kwargs=dict(
do_scatterplot=False,
use_linear_dynamics=False,
is_auto_encoder=False,
batch_size=64,
lr=1e-3,
),
decoder_activation='sigmoid',
save_period=100,
),
)
search_space = {
'grill_variant.exploration_noise':[0, .1, .3],
'env_id':['SawyerPushAndReachSmallArenaEnv-v0', 'SawyerPushAndReachSmallArenaResetFreeEnv-v0', 'SawyerPushAndReachEnvEasy-v0']
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
# n_seeds = 1
# mode = 'local'
# exp_prefix = 'test'
n_seeds = 3
mode = 'gcp'
exp_prefix = 'sawyer_pusher_offline_vae_twin_sac_easier_envs'
for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
for _ in range(n_seeds):
run_experiment(
grill_her_twin_sac_full_experiment,
exp_prefix=exp_prefix,
mode=mode,
variant=variant,
use_gpu=True,
num_exps_per_instance=1,
gcp_kwargs=dict(
zone='us-west2-b',
)
)
| [
"[email protected]"
] | |
a86265864b4893259754738fe02624202d5d2073 | eb166976684cf7c3e6ce613e17b270334bcdd837 | /configs/DOTA/faster_rcnn_r50_fpn_1x_dota.py | 7e905b2c0cc4fa43e1cd7846b589a845fdeb687c | [
"Apache-2.0"
] | permissive | dlrudco/AerialDetection | d987c3a301737911e29197065da00c2b8230e423 | e9381016113ddf77a09d07209341bb2208481850 | refs/heads/master | 2023-04-13T16:02:26.331775 | 2021-04-15T07:00:40 | 2021-04-15T07:00:40 | 290,456,718 | 0 | 0 | Apache-2.0 | 2020-08-26T09:43:20 | 2020-08-26T09:43:19 | null | UTF-8 | Python | false | false | 5,037 | py | # model settings
model = dict(
type='FasterRCNN',
pretrained='modelzoo://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=16,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssignerCy',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssignerCy',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
# score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=1000)
        score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=2000)
# soft-nms is also supported for rcnn testing
# e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05)
)
# dataset settings
dataset_type = 'DOTADataset'
data_root = 'data/dota1_1024/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data = dict(
imgs_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'trainval1024/DOTA_trainval1024.json',
img_prefix=data_root + 'trainval1024/images/',
img_scale=(1024, 1024),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0.5,
with_mask=False,
with_crowd=True,
with_label=True),
val=dict(
type=dataset_type,
ann_file=data_root + 'trainval1024/DOTA_trainval1024.json',
img_prefix=data_root + 'trainval1024/images',
img_scale=(1024, 1024),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=False,
with_crowd=True,
with_label=True),
test=dict(
type=dataset_type,
ann_file=data_root + 'test1024/DOTA_test1024.json',
img_prefix=data_root + 'test1024/images',
img_scale=(1024, 1024),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=False,
with_label=False,
test_mode=True))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=12)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/faster_rcnn_r50_fpn_1x_dota'
load_from = None
resume_from = None
workflow = [('train', 1)]
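# Usage note (added; not part of the original config): with an mmdetection-style
# codebase such as this one, a config like this is typically handed to the training
# entry point, e.g. `python tools/train.py configs/DOTA/faster_rcnn_r50_fpn_1x_dota.py`.
# The exact script name/path is an assumption and may differ in this repository.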
| [
"[email protected]"
] | |
d2974097e777a4e406eabe78dffd4f18f27f2d4f | 2500a2ab1f43c649fb0b4fe3b9e3420efa017efa | /MPK_mini/config.py | 9bd08eee27ccd0fff60ef4fb0ba3349531d15488 | [] | no_license | cappytan3/AbletonLive9_RemoteScripts | 0ce3e2d728190ba2ff5d2422cd03ae8a5df9d46f | 65d08fd4ccdadd8366eca6f3c0fa7932516147bf | refs/heads/master | 2021-01-15T11:50:14.152579 | 2014-04-11T17:37:22 | 2014-04-11T17:37:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,275 | py | #Embedded file name: /Users/versonator/Hudson/live/Projects/AppLive/Resources/MIDI Remote Scripts/MPK_mini/config.py
from consts import *
TRANSPORT_CONTROLS = {'STOP': -1,
'PLAY': -1,
'REC': -1,
'LOOP': -1,
'RWD': -1,
'FFWD': -1}
DEVICE_CONTROLS = (GENERIC_ENC1,
GENERIC_ENC2,
GENERIC_ENC3,
GENERIC_ENC4,
GENERIC_ENC5,
GENERIC_ENC6,
GENERIC_ENC7,
GENERIC_ENC8)
VOLUME_CONTROLS = ((-1, -1),
(-1, -1),
(-1, -1),
(-1, -1),
(-1, -1),
(-1, -1),
(-1, -1),
(-1, -1))
TRACKARM_CONTROLS = (-1, -1, -1, -1, -1, -1, -1, -1)
BANK_CONTROLS = {'TOGGLELOCK': -1,
'BANKDIAL': -1,
'NEXTBANK': -1,
'PREVBANK': -1,
'BANK1': -1,
'BANK2': -1,
'BANK3': -1,
'BANK4': -1,
'BANK5': -1,
'BANK6': -1,
'BANK7': -1,
'BANK8': -1}
PAD_TRANSLATION = ((0, 0, 36, 0),
(1, 0, 37, 0),
(2, 0, 38, 0),
(3, 0, 39, 0),
(0, 1, 32, 0),
(1, 1, 33, 0),
(2, 1, 34, 0),
(3, 1, 35, 0),
(0, 2, 48, 0),
(1, 2, 49, 0),
(2, 2, 50, 0),
(3, 2, 51, 0),
(0, 3, 44, 0),
(1, 3, 45, 0),
(2, 3, 46, 0),
(3, 3, 47, 0))
CONTROLLER_DESCRIPTION = {'INPUTPORT': 'MPK mini',
'OUTPUTPORT': 'MPK mini',
'CHANNEL': -1,
'PAD_TRANSLATION': PAD_TRANSLATION}
MIXER_OPTIONS = {'NUMSENDS': 2,
'SEND1': (-1, -1, -1, -1, -1, -1, -1, -1),
'SEND2': (-1, -1, -1, -1, -1, -1, -1, -1),
'MASTERVOLUME': -1} | [
"[email protected]"
] | |
745ce5c114180c8dfd4cfc9986795a097c916ddb | 353def93fa77384ee3a5e3de98cfed318c480634 | /.history/week01/homework02/maoyanspiders/maoyanspiders/spiders/movies_20200627213700.py | 713340873e0307f55fcf3881bebea59a02cdfb52 | [] | no_license | ydbB/Python001-class01 | d680abc3ea1ccaeb610751e3488421417d381156 | ad80037ccfc68d39125fa94d2747ab7394ac1be8 | refs/heads/master | 2022-11-25T11:27:45.077139 | 2020-07-19T12:35:12 | 2020-07-19T12:35:12 | 272,783,233 | 0 | 0 | null | 2020-06-16T18:28:15 | 2020-06-16T18:28:15 | null | UTF-8 | Python | false | false | 741 | py | # -*- coding: utf-8 -*-
import scrapy
from maoyanspiders.items import MaoyanspidersItem
# import lxml.etree
from bs4 import BeautifulSoup as bs
class MoviesSpider(scrapy.Spider):
name = 'movies'
allowed_domains = ['maoyan.com']
start_urls = ['http://maoyan.com/board/4']
# def parse(self, response):
# pass
def start_requests(self):
url = f'https://maoyan.com/board/4'
print(url)
yield scrapy.Request(url=url,callback=self.parse)
    def parse(self, response):
        soup = bs(response.text, 'html.parser')
        print(soup.text)
        # the early `return soup` made the item-building code below unreachable;
        # build the item and yield it instead (field values are still the draft placeholders)
        item = MaoyanspidersItem()
        item['films_name'] = 'name'
        item['release_time'] = "tiome"
        yield item
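# Example (assumed): from the Scrapy project root this spider can be run with
# `scrapy crawl movies -o movies.csv`; the name "movies" comes from the spider's
# `name` attribute above.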
| [
"[email protected]"
] | |
0eeceafe6472fa14ed5584e9b820ca5caf6bcd8f | f6af4dee160aed43afae42c6c7d92542265b26e7 | /backend/views/product_views.py | 9d4eaf4c842da2b6afc8b31ddc8c0a43395c8aa5 | [] | no_license | jasimdipu/django_1st_batch_final_project | 8fcd0aad0fe9dc7922ea0692b24679546796748b | c75a80402967a6492433f75392dd0d8a20314601 | refs/heads/main | 2023-04-07T14:15:03.199353 | 2021-04-03T04:42:51 | 2021-04-03T04:42:51 | 353,403,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,355 | py | from django.shortcuts import render
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework.response import Response
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from ..models import Product, Review
from ..serializers import ProductSerializer, ReviewSeralizer
from rest_framework import status
@api_view(['GET'])
def getProducts(request):
query = request.query_params.get('keyword')
if query == None:
query = ""
products = Product.objects.filter(product_name__icontains=query).order_by('-created_at')
page = request.query_params.get('page')
paginator = Paginator(products, 6)
try:
products = paginator.page((page))
except PageNotAnInteger:
products = paginator.page(1)
except EmptyPage:
products = paginator.page(paginator.num_pages)
if page == None:
page = 1
page = int(page)
print('Page', page)
serializers = ProductSerializer(products, many=True)
return Response({'products': serializers.data, 'page': page, "pages": paginator.num_pages})
@api_view(["POST"])
def createProduct(request):
pass
@api_view(["POST"])
def updateProduct(request, pk):
pass
@api_view(["Delete"])
def deleteProduct(request, pk):
pass
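# Sketch (assumed, not from this project): these views would typically be exposed
# through a urls.py, for example:
# from django.urls import path
# from backend.views import product_views as views
# urlpatterns = [
#     path('products/', views.getProducts, name='products'),
#     path('products/create/', views.createProduct, name='product-create'),
# ]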
| [
"[email protected]"
] | |
037fc945b94f2fda69201f2375319345568518c0 | 38ac429d63369922e12e19cdda042b08b8123027 | /test/test_saml_api.py | 26846e6f2fcda7d9165535256227fcc7e7e394f6 | [] | no_license | aviv-julienjehannet/collibra_apiclient | 0dfebe5df2eb929645b87eba42fab4c06ff0a6be | 10a89e7acaf56ab8c7417698cd12616107706b6b | refs/heads/master | 2021-09-12T16:52:19.803624 | 2018-04-19T01:35:20 | 2018-04-19T01:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,001 | py | # coding: utf-8
"""
\"Data Governance Center: REST API v2\"
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.api.saml_api import SAMLApi # noqa: E501
from swagger_client.rest import ApiException
class TestSAMLApi(unittest.TestCase):
"""SAMLApi unit test stubs"""
def setUp(self):
self.api = swagger_client.api.saml_api.SAMLApi() # noqa: E501
def tearDown(self):
pass
def test_resource_saml_resource_get_sp_metadata_as_string_get(self):
"""Test case for resource_saml_resource_get_sp_metadata_as_string_get
        Returns the SAML Service Provider metadata for this instance.  # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
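# Note (assumption): besides `python test/test_saml_api.py`, these generated stubs
# can usually also be collected by pytest, e.g. `python -m pytest test/test_saml_api.py`,
# once the swagger_client package is importable.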
| [
"[email protected]"
] | |
8b1820d27a95d40776ef0c21fe1b3fc18a519080 | b8cc6d34ad44bf5c28fcca9e0df01d9ebe0ee339 | /Pandas_study/p25.py | a87f99f1a432299ee2ff02de0f557417bca76b3c | [] | no_license | python-yc/pycharm_script | ae0e72898ef44a9de47e7548170a030c0a752eb5 | c8947849090c71e131df5dc32173ebe9754df951 | refs/heads/master | 2023-01-05T06:16:33.857668 | 2020-10-31T08:09:53 | 2020-10-31T08:09:53 | 296,778,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | # -*- coding: utf-8 -*-
"""
PyCharm cannot render the conditional formatting produced below; Jupyter can, so this file only contains the code.
"""
import pandas as pd
def low_score_red(s):
color = 'red' if s < 20 else 'black'
return f'color:{color}'
def highest_score_green(col):
return ['background-color:lime' if s == col.max()
            else 'background-color:white' for s in col]
students = pd.read_excel('./excel/Students-25-26.xlsx')
# print(students)
students.style.applymap(low_score_red, subset=['Test_1', 'Test_2', 'Test_3'])
students.style.applymap(low_score_red, subset=['Test_1', 'Test_2', 'Test_3'])\
.apply(highest_score_green, subset=['Test_1', 'Test_2', 'Test_3'])
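# A minimal sketch (assumes openpyxl is installed) of saving the styled frame so the
# conditional formatting is visible outside Jupyter:
# styled = students.style.applymap(low_score_red, subset=['Test_1', 'Test_2', 'Test_3'])\
#     .apply(highest_score_green, subset=['Test_1', 'Test_2', 'Test_3'])
# styled.to_excel('./excel/Students-25-26-styled.xlsx', engine='openpyxl', index=False)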
| [
"15655982512.com"
] | 15655982512.com |
f63d3b091f50788337d5c3cb0719c39c23f0dfba | 376b6933872b9110765154094d2c77713da2c853 | /assemblies/bad.assemblies/abyss/3abyss.assembly.24.py | 61ae7ee6df58578b2948a7ad2b7ef9fbaa8bf97b | [] | no_license | markphuong/geographus-genome | 46b037e7789641895f1a99b8bf6dee3418887600 | a0ff439fbc0c350279359a51321e40e7778f5170 | refs/heads/master | 2020-03-19T07:21:29.297458 | 2018-06-05T04:15:18 | 2018-06-05T04:15:18 | 136,107,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,554 | py | #!/usr/bin/env python
#flash manual: http://ccb.jhu.edu/software/FLASH/MANUAL
#this script runs an ABySS (abyss-pe, k=24) genome assembly for each sample listed in the map file, combining the paired-end, mate-pair and long-read libraries copied in below
import os
import sys
import argparse
import multiprocessing
# an arguments portion in the code represents necessary inputs to give to the script. I usually use this to give the program a file that contains all the unique sample IDs which should be in the read file names
def get_args():
parser = argparse.ArgumentParser(description="run blastx")
required = parser.add_argument_group("required arguments")
required.add_argument("--map", help="textfile with samples to run and what fasta file to match it to", required=True) #A map file with the sample ID and the fasta file it goes to
return parser.parse_args()
def align(element):
#the adapters file should have both forward and reverse, and the universal adapters
    #this variables dict specifies the names for the input/output files
variables = dict(
sampleID = element) #name your output
commands = """
cp /pylon2/bi4s86p/phuong/geographus.genome/regular/2trim/*final*.fq ./
cp /pylon2/bi4s86p/phuong/geographus.genome/fosmids/4scaffolding/longreads.fa ./
cp /pylon2/bi4s86p/phuong/geographus.genome/matepair/2trim/*.fastq ./
abyss-pe np=60 k=24 name={sampleID}_kmer24 lib='pe1 pe2 pe3 pe4' mp='mp1 mp2 mp3 mp4' pe1='UU0018MY.final1.fq UU0018MY.final2.fq' pe2='10361X3.final1.fq 10361X3.final2.fq' pe3='10361X1.final1.fq 10361X1.final2.fq' pe4='9988X1.final1.fq 9988X1.final2.fq' se='UU0018MY.finalunpaired.fq 10361X3.finalunpaired.fq 10361X1.finalunpaired.fq 9988X1.finalunpaired.fq' mp1='11308X4_A_R1.fastq 11308X4_A_R2.fastq' mp2='11308X4_B_R1.fastq 11308X4_B_R2.fastq' mp3='11308X4_C_R1.fastq 11308X4_C_R2.fastq' mp4='11308X4_D_R1.fastq 11308X4_D_R2.fastq'
cp -r *{sampleID}_kmer* /pylon2/bi4s86p/phuong/geographus.genome/assemblies/abyss
""".format(**variables)
#this bit of code executes the command
cmd_list = commands.split("\n")
for cmd in cmd_list:
os.system(cmd)
mylist = []
args = get_args() #this is where the arguments from the def args code gets called upon
with open(args.map) as rfile:
for line in rfile:
line = line.strip()
mylist.append(line)
#this bit is really not necessary. I could have done this by not having 'def main()' and just starting with the args=get_args() line, but the following code follows the logic of what preceded it.
pool = multiprocessing.Pool(1)
pool.map(align, mylist)
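# Example invocation (file name assumed): `python 3abyss.assembly.24.py --map samples.txt`,
# where samples.txt lists one sample ID per line; each ID becomes the {sampleID} prefix
# of the ABySS outputs copied back at the end of align().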
| [
"[email protected]"
] | |
0e043652c0c6321e999e76558a46f1c3bb9d060d | 98f7a31ee122cea4b9ed61300c8ee3be456b4850 | /ws-tests/test_study_get.py | 4a483310814437cf889f8e5e8a4583590d922dfe | [
"BSD-2-Clause"
] | permissive | BioinformaticsArchive/phylesystem-api | 4bd30bd32fba29497ca4c4df4a4cc5e85f0c7dfc | 08a77e3f49a1607ec1bc5d835977b1d2c365e291 | refs/heads/master | 2021-01-16T21:38:30.651003 | 2015-04-09T18:08:18 | 2015-04-09T18:08:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | #!/usr/bin/env python
import sys, os
from opentreetesting import test_http_json_method, config
DOMAIN = config('host', 'apihost')
SUBMIT_URI = DOMAIN + '/v1/study/10'
data = {'output_nexml2json':'1.2'}
if test_http_json_method(SUBMIT_URI, 'GET', data=data, expected_status=200):
sys.exit(0)
sys.exit(1) | [
"[email protected]"
] | |
8d7793e068d44fdf86b78fc3ce1096b6349256b3 | df15792f5a82fcea25e62714b13e2d502485eb00 | /peil/peil/migrations/0009_auto_20170507_2253.py | a180183bba7ba0b86a466b593a969a1c33e7bac5 | [] | no_license | acaciawater/peilstok | c6fcdab232d938a3f281e9843c81cfbf59cc4eea | 179e1981a656e78a8ebdac6e50ef73f852df7630 | refs/heads/master | 2020-12-30T17:11:27.871514 | 2017-12-08T13:14:35 | 2017-12-08T13:14:35 | 91,066,224 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-07 20:53
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('peil', '0008_auto_20170507_2159'),
]
operations = [
migrations.RemoveField(
model_name='basemodule',
name='devid',
),
migrations.RemoveField(
model_name='basemodule',
name='serial',
),
]
| [
"[email protected]"
] | |
d9d098d9fb7eebef77cb4e80498df3812f35e9be | e0045eec29aab56212c00f9293a21eb3b4b9fe53 | /mail_bot/models/mail_channel.py | b3486e0f9c67d8c5f70a64944b335d665f4eec2a | [] | no_license | tamam001/ALWAFI_P1 | a3a9268081b9befc668a5f51c29ce5119434cc21 | 402ea8687c607fbcb5ba762c2020ebc4ee98e705 | refs/heads/master | 2020-05-18T08:16:50.583264 | 2019-04-30T14:43:46 | 2019-04-30T14:43:46 | 184,268,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,363 | py | # -*- coding: utf-8 -*-
# Part of ALWAFI. See LICENSE file for full copyright and licensing details.
from odoo import api, models, _
class Channel(models.Model):
_inherit = 'mail.channel'
def _execute_command_help(self, **kwargs):
super(Channel, self)._execute_command_help(**kwargs)
        self.env['mail.bot']._apply_logic(self, kwargs, command="help")  # kwargs are not useful but...
@api.model
def init_odoobot(self):
if self.env.user.odoobot_state == 'not_initialized':
partner = self.env.user.partner_id
odoobot_id = self.env['ir.model.data'].xmlid_to_res_id("base.partner_root")
channel = self.with_context({"mail_create_nosubscribe": True}).create({
'channel_partner_ids': [(4, partner.id), (4, odoobot_id)],
'public': 'private',
'channel_type': 'chat',
'email_send': False,
'name': 'ALWAFIBot'
})
message = _("Hello,<br/>ALWAFI's chat helps employees collaborate efficiently. I'm here to help you discover its features.<br/><b>Try to send me an emoji :)</b>")
channel.sudo().message_post(body=message, author_id=odoobot_id, message_type="comment", subtype="mail.mt_comment")
self.env.user.odoobot_state = 'onboarding_emoji'
return channel
| [
"[email protected]"
] | |
5d49fd435ae7ec0bbe55d65716280de38585f320 | 66c3ff83c3e3e63bf8642742356f6c1817a30eca | /.vim/tmp/neocomplete/include_cache/=+home=+abel=+.virtualenvs=+django=+lib=+python2.7=+site-packages=+django=+utils=+decorators.py | 114714f9021f8e730c05ca6ac3837a9848baf0d0 | [] | no_license | pacifi/vim | 0a708e8bc741b4510a8da37da0d0e1eabb05ec83 | 22e706704357b961acb584e74689c7080e86a800 | refs/heads/master | 2021-05-20T17:18:10.481921 | 2020-08-06T12:38:58 | 2020-08-06T12:38:58 | 30,074,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,345 | py | [{'word': '_dec', 'menu': '[I]', 'kind': 'f', 'abbr': 'def _dec(func):'}, {'word': '_decorator', 'menu': '[I]', 'kind': 'f', 'abbr': 'def _decorator(view_func):'}, {'word': '_make_decorator', 'menu': '[I]', 'kind': 'f', 'abbr': 'def _make_decorator(*m_args, **m_kwargs):'}, {'word': '_wrapped_view', 'menu': '[I]', 'kind': 'f', 'abbr': 'def _wrapped_view(request, *args, **kwargs):'}, {'word': '_wrapper', 'menu': '[I]', 'kind': 'f', 'abbr': 'def _wrapper(self, *args, **kwargs):'}, {'word': 'available_attrs', 'menu': '[I]', 'kind': 'f', 'abbr': 'def available_attrs(fn):'}, {'word': 'bound_func', 'menu': '[I]', 'kind': 'f', 'abbr': 'bound_func'}, {'word': 'classonlymethod', 'menu': '[I]', 'kind': 'c', 'abbr': 'classonlymethod(classmethod): <class>'}, {'word': 'decorator_from_middleware', 'menu': '[I]', 'kind': 'f', 'abbr': 'def decorator_from_middleware(middleware_class):'}, {'word': 'decorator_from_middleware_with_args', 'menu': '[I]', 'kind': 'f', 'abbr': 'def decorator_from_middleware_with_args(middleware_class):'}, {'word': 'dummy', 'menu': '[I]', 'kind': 'f', 'abbr': 'def dummy(*args, **kwargs):'}, {'word': 'make_middleware_decorator', 'menu': '[I]', 'kind': 'f', 'abbr': 'def make_middleware_decorator(middleware_class):'}, {'word': 'method_decorator', 'menu': '[I]', 'kind': 'f', 'abbr': 'def method_decorator(decorator):'}]
| [
"[email protected]"
] | |
43daa355f596415d794bf815f82a5c47f5f64f4e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03268/s110149697.py | 306d09930e8015c8cc774fe726aa8960b05be863 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | n, k = map(int, input().split())
if k % 2 == 0:
d = k//2
ans = ((n // d)-(n//k)) ** 3
else:
ans = 0
ans += (n // k) ** 3
print(ans) | [
"[email protected]"
] | |
acba81390f9885788faef08ef3a0b042ce1e6ced | f5cf699c09c4bf2fd94285004ebc7766b9464532 | /scripts2/has_release.py | aebe6aa3b0abba1d3f9fea32914f2aa20c91398c | [] | no_license | marcoacierno/autodeploy-tests | 056f7351a94d3316d9a984e9307c4d42b0848be6 | f3bc97235cc17481ecc9529149ac7b361fbbe701 | refs/heads/master | 2022-01-08T02:52:41.418328 | 2019-05-11T10:46:33 | 2019-05-11T10:46:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | import sys
import os
import re
sys.path.append(os.path.dirname(__file__)) # noqa
from datetime import datetime
from base import run_process, RELEASE_FILE, CHANGELOG_FILE
if __name__ == "__main__":
if not os.path.exists(RELEASE_FILE):
print("Not releasing a new version because there isn't a RELEASE.md file.")
run_process(["circleci", "step", "halt"])
| [
"[email protected]"
] | |
e291e05c5bc7d1bb9602b61922781b54f61194f1 | cc2a00ce7e05245327ce8da85d0e3aa01d9635b9 | /P_controller/Tank_2/models/environment.py | 4b71bb3680bde2d87dcab15ee3e7ea14fb3ac54f | [] | no_license | puttak/Reinforcement-Learning-in-Process-Control | f7c05a0ed41826cb1d7248caffdb3c47bbe66df0 | 852967e97b2fb0b6c5022365c9ef62906c099832 | refs/heads/master | 2020-05-03T21:56:33.515929 | 2019-03-21T08:28:36 | 2019-03-21T08:28:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,962 | py | from models.tank_model.tank import Tank
from visualize.window import Window
import matplotlib.pyplot as plt
from drawnow import drawnow
class Environment:
"Parameters are set in the params.py file"
def __init__(self, TANK_PARAMS_LIST, TANK_DIST_LIST, MAIN_PARAMS):
self.model = []
for i, TANK_PARAMS in enumerate(TANK_PARAMS_LIST):
tank = Tank(
height=TANK_PARAMS["height"],
radius=TANK_PARAMS["width"],
max_level=TANK_PARAMS["max_level"],
min_level=TANK_PARAMS["min_level"],
pipe_radius=TANK_PARAMS["pipe_radius"],
init_level=TANK_PARAMS["init_level"],
dist=TANK_DIST_LIST[i],
)
self.model.append(tank)
self.running = True
self.episode = 0
self.all_rewards = []
self.terminated = False
self.show_rendering = MAIN_PARAMS["RENDER"]
self.live_plot = MAIN_PARAMS["LIVE_REWARD_PLOT"]
if self.show_rendering:
self.window = Window(self.model)
if self.live_plot:
plt.ion() # enable interactivity
plt.figure(num="Rewards per episode") # make a figure
def get_next_state(self, z, i, t, q_out):
"""
Calculates the dynamics of the agents action and
gives back the next state
"""
dldt, q_out = self.model[i].get_dhdt(z, t, q_out)
self.model[i].change_level(dldt)
# Check terminate state
if self.model[i].level < self.model[i].min:
self.terminated = True
self.model[i].level = self.model[i].min
elif self.model[i].level > self.model[i].max:
self.terminated = True
self.model[i].level = self.model[i].max
return self.model[i].level, q_out
def render(self, action):
"Draw the water level of the tank in pygame"
        if self.show_rendering:  # `self.render` is the method itself and is always truthy; check the flag instead
running = self.window.Draw(action)
if not running:
self.running = False
    def get_reward(self, h):
        # NOTE: self.model is a list of Tank objects, so `self.model.h` will fail;
        # this normalisation presumably wants the height of the tank being scored.
        h = h / self.model.h
        reward = (h - 0.5) ** 2
        return reward
        # the discrete reward table below is unreachable (kept from an earlier version)
if h > 0.49 and h < 0.51:
return 5
if h > 0.45 and h < 0.55:
return 4
if h > 0.4 and h < 0.6:
return 3
if h > 0.3 and h < 0.7:
return 2
if h > 0.2 and h < 0.8:
return 1
else:
return 0
def plot_rewards(self):
"drawnow plot of the reward"
plt.plot(
self.all_rewards,
label="Exploration rate: {} %".format(self.epsilon * 100),
)
plt.legend()
def plot(self, all_rewards, epsilon):
"Live plot of the reward"
self.all_rewards = all_rewards
self.epsilon = round(epsilon, 4)
try:
drawnow(self.plot_rewards)
except KeyboardInterrupt:
print("Break")
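# Minimal usage sketch (parameter values are illustrative assumptions; the real values
# live in params.py):
# TANK_PARAMS_LIST = [{'height': 10, 'width': 3, 'max_level': 0.9, 'min_level': 0.1,
#                      'pipe_radius': 0.3, 'init_level': 0.5}]
# TANK_DIST_LIST = [None]
# MAIN_PARAMS = {'RENDER': False, 'LIVE_REWARD_PLOT': False}
# env = Environment(TANK_PARAMS_LIST, TANK_DIST_LIST, MAIN_PARAMS)
# level, q_out = env.get_next_state(z=0.5, i=0, t=0, q_out=0)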
| [
"[email protected]"
] | |
ba858d836612f5c7033f224fdde159303b0860e2 | ef2e354ae06e9994b7bc65f9685f8769ec56dc28 | /offerride/urls.py | 62a8e3f206ab88fb18f3719babe863841c06b08e | [] | no_license | akhilpatil123/FareShare | 45e634b07749f507a40eeb08be710b2090844ab9 | a0d89ba324ef5cf74fe5c54cf641f0d3625bd373 | refs/heads/master | 2020-04-30T00:21:56.041455 | 2019-03-19T11:55:56 | 2019-03-19T11:55:56 | 176,501,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | from django.conf.urls import url
from . import views
app_name = 'offerride'
urlpatterns = [
url(r'^$',views.index, name='index'),
url(r'^rides/$', views.rides, name='rides'),
url(r'^addride/$', views.addride, name='addride'),
url(r'^submitride/$', views.submitride, name='submitride'),
url(r'^join/$', views.join, name='join'),
url(r'^addcar/$', views.addcar, name='addcar'),
url(r'^submitcar/$', views.submitcar, name='submitcar'),
url(r'^seat/$', views.seat, name='seat'),
url(r'^maps/$', views.maps, name='maps'),
] | [
"[email protected]"
] | |
70026a40fd339a0274b5f4b28aa1d9800b33d7aa | 673f9b85708affe260b892a4eb3b1f6a0bd39d44 | /Botnets/App/App Web/PDG-env/lib/python3.6/site-packages/scipy/integrate/_ivp/dop853_coefficients.py | 5cc5c9e4c41c8bc9fa2287554b1308777aaa5172 | [
"GPL-3.0-or-later",
"BSD-3-Clause",
"GPL-3.0-only",
"BSD-3-Clause-Open-MPI",
"BSD-2-Clause",
"GCC-exception-3.1",
"Qhull",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"Python-2.0"
] | permissive | i2tResearch/Ciberseguridad_web | feee3fe299029bef96b158d173ce2d28ef1418e4 | e6cccba69335816442c515d65d9aedea9e7dc58b | refs/heads/master | 2023-07-06T00:43:51.126684 | 2023-06-26T00:53:53 | 2023-06-26T00:53:53 | 94,152,032 | 14 | 0 | MIT | 2023-09-04T02:53:29 | 2017-06-13T00:21:00 | Jupyter Notebook | UTF-8 | Python | false | false | 7,303 | py | from __future__ import absolute_import, division, print_function
import numpy as np
N_STAGES = 12
N_STAGES_EXTENDED = 16
INTERPOLATOR_POWER = 7
C = np.array([0.0,
0.526001519587677318785587544488e-01,
0.789002279381515978178381316732e-01,
0.118350341907227396726757197510,
0.281649658092772603273242802490,
0.333333333333333333333333333333,
0.25,
0.307692307692307692307692307692,
0.651282051282051282051282051282,
0.6,
0.857142857142857142857142857142,
1.0,
1.0,
0.1,
0.2,
0.777777777777777777777777777778])
A = np.zeros((N_STAGES_EXTENDED, N_STAGES_EXTENDED))
A[1, 0] = 5.26001519587677318785587544488e-2
A[2, 0] = 1.97250569845378994544595329183e-2
A[2, 1] = 5.91751709536136983633785987549e-2
A[3, 0] = 2.95875854768068491816892993775e-2
A[3, 2] = 8.87627564304205475450678981324e-2
A[4, 0] = 2.41365134159266685502369798665e-1
A[4, 2] = -8.84549479328286085344864962717e-1
A[4, 3] = 9.24834003261792003115737966543e-1
A[5, 0] = 3.7037037037037037037037037037e-2
A[5, 3] = 1.70828608729473871279604482173e-1
A[5, 4] = 1.25467687566822425016691814123e-1
A[6, 0] = 3.7109375e-2
A[6, 3] = 1.70252211019544039314978060272e-1
A[6, 4] = 6.02165389804559606850219397283e-2
A[6, 5] = -1.7578125e-2
A[7, 0] = 3.70920001185047927108779319836e-2
A[7, 3] = 1.70383925712239993810214054705e-1
A[7, 4] = 1.07262030446373284651809199168e-1
A[7, 5] = -1.53194377486244017527936158236e-2
A[7, 6] = 8.27378916381402288758473766002e-3
A[8, 0] = 6.24110958716075717114429577812e-1
A[8, 3] = -3.36089262944694129406857109825
A[8, 4] = -8.68219346841726006818189891453e-1
A[8, 5] = 2.75920996994467083049415600797e1
A[8, 6] = 2.01540675504778934086186788979e1
A[8, 7] = -4.34898841810699588477366255144e1
A[9, 0] = 4.77662536438264365890433908527e-1
A[9, 3] = -2.48811461997166764192642586468
A[9, 4] = -5.90290826836842996371446475743e-1
A[9, 5] = 2.12300514481811942347288949897e1
A[9, 6] = 1.52792336328824235832596922938e1
A[9, 7] = -3.32882109689848629194453265587e1
A[9, 8] = -2.03312017085086261358222928593e-2
A[10, 0] = -9.3714243008598732571704021658e-1
A[10, 3] = 5.18637242884406370830023853209
A[10, 4] = 1.09143734899672957818500254654
A[10, 5] = -8.14978701074692612513997267357
A[10, 6] = -1.85200656599969598641566180701e1
A[10, 7] = 2.27394870993505042818970056734e1
A[10, 8] = 2.49360555267965238987089396762
A[10, 9] = -3.0467644718982195003823669022
A[11, 0] = 2.27331014751653820792359768449
A[11, 3] = -1.05344954667372501984066689879e1
A[11, 4] = -2.00087205822486249909675718444
A[11, 5] = -1.79589318631187989172765950534e1
A[11, 6] = 2.79488845294199600508499808837e1
A[11, 7] = -2.85899827713502369474065508674
A[11, 8] = -8.87285693353062954433549289258
A[11, 9] = 1.23605671757943030647266201528e1
A[11, 10] = 6.43392746015763530355970484046e-1
A[12, 0] = 5.42937341165687622380535766363e-2
A[12, 5] = 4.45031289275240888144113950566
A[12, 6] = 1.89151789931450038304281599044
A[12, 7] = -5.8012039600105847814672114227
A[12, 8] = 3.1116436695781989440891606237e-1
A[12, 9] = -1.52160949662516078556178806805e-1
A[12, 10] = 2.01365400804030348374776537501e-1
A[12, 11] = 4.47106157277725905176885569043e-2
A[13, 0] = 5.61675022830479523392909219681e-2
A[13, 6] = 2.53500210216624811088794765333e-1
A[13, 7] = -2.46239037470802489917441475441e-1
A[13, 8] = -1.24191423263816360469010140626e-1
A[13, 9] = 1.5329179827876569731206322685e-1
A[13, 10] = 8.20105229563468988491666602057e-3
A[13, 11] = 7.56789766054569976138603589584e-3
A[13, 12] = -8.298e-3
A[14, 0] = 3.18346481635021405060768473261e-2
A[14, 5] = 2.83009096723667755288322961402e-2
A[14, 6] = 5.35419883074385676223797384372e-2
A[14, 7] = -5.49237485713909884646569340306e-2
A[14, 10] = -1.08347328697249322858509316994e-4
A[14, 11] = 3.82571090835658412954920192323e-4
A[14, 12] = -3.40465008687404560802977114492e-4
A[14, 13] = 1.41312443674632500278074618366e-1
A[15, 0] = -4.28896301583791923408573538692e-1
A[15, 5] = -4.69762141536116384314449447206
A[15, 6] = 7.68342119606259904184240953878
A[15, 7] = 4.06898981839711007970213554331
A[15, 8] = 3.56727187455281109270669543021e-1
A[15, 12] = -1.39902416515901462129418009734e-3
A[15, 13] = 2.9475147891527723389556272149
A[15, 14] = -9.15095847217987001081870187138
B = A[N_STAGES, :N_STAGES]
E3 = np.zeros(N_STAGES + 1)
E3[:-1] = B.copy()
E3[0] -= 0.244094488188976377952755905512
E3[8] -= 0.733846688281611857341361741547
E3[11] -= 0.220588235294117647058823529412e-1
E5 = np.zeros(N_STAGES + 1)
E5[0] = 0.1312004499419488073250102996e-1
E5[5] = -0.1225156446376204440720569753e+1
E5[6] = -0.4957589496572501915214079952
E5[7] = 0.1664377182454986536961530415e+1
E5[8] = -0.3503288487499736816886487290
E5[9] = 0.3341791187130174790297318841
E5[10] = 0.8192320648511571246570742613e-1
E5[11] = -0.2235530786388629525884427845e-1
# First 3 coefficients are computed separately.
D = np.zeros((INTERPOLATOR_POWER - 3, N_STAGES_EXTENDED))
D[0, 0] = -0.84289382761090128651353491142e+1
D[0, 5] = 0.56671495351937776962531783590
D[0, 6] = -0.30689499459498916912797304727e+1
D[0, 7] = 0.23846676565120698287728149680e+1
D[0, 8] = 0.21170345824450282767155149946e+1
D[0, 9] = -0.87139158377797299206789907490
D[0, 10] = 0.22404374302607882758541771650e+1
D[0, 11] = 0.63157877876946881815570249290
D[0, 12] = -0.88990336451333310820698117400e-1
D[0, 13] = 0.18148505520854727256656404962e+2
D[0, 14] = -0.91946323924783554000451984436e+1
D[0, 15] = -0.44360363875948939664310572000e+1
D[1, 0] = 0.10427508642579134603413151009e+2
D[1, 5] = 0.24228349177525818288430175319e+3
D[1, 6] = 0.16520045171727028198505394887e+3
D[1, 7] = -0.37454675472269020279518312152e+3
D[1, 8] = -0.22113666853125306036270938578e+2
D[1, 9] = 0.77334326684722638389603898808e+1
D[1, 10] = -0.30674084731089398182061213626e+2
D[1, 11] = -0.93321305264302278729567221706e+1
D[1, 12] = 0.15697238121770843886131091075e+2
D[1, 13] = -0.31139403219565177677282850411e+2
D[1, 14] = -0.93529243588444783865713862664e+1
D[1, 15] = 0.35816841486394083752465898540e+2
D[2, 0] = 0.19985053242002433820987653617e+2
D[2, 5] = -0.38703730874935176555105901742e+3
D[2, 6] = -0.18917813819516756882830838328e+3
D[2, 7] = 0.52780815920542364900561016686e+3
D[2, 8] = -0.11573902539959630126141871134e+2
D[2, 9] = 0.68812326946963000169666922661e+1
D[2, 10] = -0.10006050966910838403183860980e+1
D[2, 11] = 0.77771377980534432092869265740
D[2, 12] = -0.27782057523535084065932004339e+1
D[2, 13] = -0.60196695231264120758267380846e+2
D[2, 14] = 0.84320405506677161018159903784e+2
D[2, 15] = 0.11992291136182789328035130030e+2
D[3, 0] = -0.25693933462703749003312586129e+2
D[3, 5] = -0.15418974869023643374053993627e+3
D[3, 6] = -0.23152937917604549567536039109e+3
D[3, 7] = 0.35763911791061412378285349910e+3
D[3, 8] = 0.93405324183624310003907691704e+2
D[3, 9] = -0.37458323136451633156875139351e+2
D[3, 10] = 0.10409964950896230045147246184e+3
D[3, 11] = 0.29840293426660503123344363579e+2
D[3, 12] = -0.43533456590011143754432175058e+2
D[3, 13] = 0.96324553959188282948394950600e+2
D[3, 14] = -0.39177261675615439165231486172e+2
D[3, 15] = -0.14972683625798562581422125276e+3
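# The arrays above are the Butcher tableau and dense-output coefficients of Hairer's
# DOP853 Runge-Kutta method; user code normally selects the method through the
# high-level API rather than reading these directly, e.g. (sketch):
# from scipy.integrate import solve_ivp
# sol = solve_ivp(lambda t, y: -y, (0.0, 1.0), [1.0], method='DOP853')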
| [
"[email protected]"
] | |
8a2e361d427501a46d5935226ee01779753093ca | eef3fd0eba25725aa045f4913304c4d2dd93ba7e | /deeplearning_tensorflow_p/p58_transpose_resnet.py | 2463164107c0a9da57793cd2bb02a1bff48cb30e | [] | no_license | provenclei/tensorflow_cv | c613e686ab6827a5eedcbaf00ef1317da0b94e81 | c8827e74e0db42fa617c91f1d14b71abcff8780a | refs/heads/master | 2022-12-01T05:52:31.365257 | 2020-08-16T00:24:11 | 2020-08-16T00:24:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,078 | py | # -*- coding: utf-8 -*-
"""
@Author : LEITENG
@Version :
------------------------------------
@File : p56_ResNet.py
@Description :
@CreateTime : 2020/7/10 09:21
------------------------------------
@ModifyTime : transposed (reverse) ResNet framework
"""
import tensorflow as tf
RESNET18 = 'ResNet18'
RESNET34 = 'ResNet34'
RESNET50 = 'ResNet50'
RESNET101 = 'ResNet101'
RESNET152 = 'ResNet152'
SETTING = {
RESNET18: {"bottleneck": False, 'repeats': [2, 2, 2, 2]},
RESNET34: {"bottleneck": False, 'repeats': [3, 4, 6, 3]},
RESNET50: {"bottleneck": True, 'repeats': [3, 4, 6, 3]},
RESNET101: {"bottleneck": True, 'repeats': [3, 4, 23, 3]},
RESNET152: {"bottleneck": True, 'repeats': [3, 8, 36, 3]},
}
_name_id = 1
class TransposeResNet:
def __init__(self, name):
self.bottleneck = SETTING[name]['bottleneck']
self.repeats = SETTING[name]['repeats']
def __call__(self, x, size: int, training: bool, name=None):
'''
        Run the transposed-convolution (decoder) pass.
        :param x: input tensor of shape [-1, -1]
        :param size: output size; must be a multiple of 32 (224 by default)
:param training:
:param name:
:return:
'''
height, width = _check(size)
if name is None:
global _name_id
name = 'transpose_resnet_%d' % _name_id
_name_id += 1
with tf.variable_scope(name):
filters = 2048 if self.bottleneck else 512
            # [-1, 2048] or [-1, 512]
x = tf.layers.dense(x, filters, name='fc', activation=tf.nn.relu)
            # [-1, 1, 1, 2048] or [-1, 1, 1, 512]
x = tf.reshape(x, [-1, 1, 1, filters])
            # [-1, 7, 7, 2048] or [-1, 7, 7, 512]
x = tf.layers.conv2d_transpose(x, filters, (height // 32, width // 32), 1,
name='deconv1', activation=tf.nn.relu)
# -> [-1, 56, 56, 64]
x = self._repeat(x, training)
            # the decoder counterpart of pooling is a strided transposed convolution
# x: [-1, 56, 56, 64] -> [-1, 112, 112, 64]
x = tf.layers.conv2d_transpose(x, 64, 3, 2, 'same', name='decov2', activation=tf.nn.relu)
# [-1, 112, 112, 64] -> [-1, 224, 224, 3]
x = tf.layers.conv2d_transpose(x, 3, (height // 32, width // 32), 2, 'same', name='decov3')
return x
def _repeat(self, x, training):
        # [-1, 7, 7, 2048] or [-1, 7, 7, 512] -> [-1, 56, 56, 64]
filters = x.shape[-1].value
for num_i, num in zip(range(len(self.repeats) - 1, -1, -1), reversed(self.repeats)):
for i in range(num-1, -1, -1):
x = self._transpose_residual(x, num_i, i, filters, training)
filters //= 2
return x
def _transpose_residual(self, x, num_i, i, filters, training):
strides = 2 if num_i > 0 and i == 0 else 1
if self.bottleneck:
left = _my_deconv(x, filters, 1, 1, 'same', name='res_%d_%d_left_myconv1' % (num_i, i),
training=training)
filters //= 4
left = _my_deconv(left, filters, 3, 1, 'same', name='res_%d_%d_left_myconv2' % (num_i, i),
training=training)
left = _my_deconv(left, filters, 1, strides, 'same', name='res_%d_%d_left_myconv3' % (num_i, i),
training=training, active=False)
else:
left = _my_deconv(x, filters, 3, 1, 'same', name='res_%d_%d_left_myconv1' % (num_i, i),
training=training)
left = _my_deconv(left, filters, 3, strides, 'same', name='res_%d_%d_left_myconv2' % (num_i, i),
training=training)
if filters != x.shape[-1].value or strides > 1:
            # if the shortcut branch differs in channels or spatial size, project it with a transposed convolution
right = _my_deconv(x, filters, 1, strides, 'same', name='res_%d_%d_right_myconv1' % (num_i, i),
training=training, active=False)
else:
right = x
return tf.nn.relu(left + right)
def _my_deconv(x, filters, kernel_size, strides, padding, name, training, active: bool=True):
with tf.variable_scope(name):
x = tf.layers.conv2d_transpose(x, filters, kernel_size, strides, padding, name='deconv')
x = tf.layers.batch_normalization(x, [1, 2, 3], epsilon=1e-6, training=training, name='bn')
if active:
x = tf.nn.relu(x)
return x
def _check(size):
if type(size) == int:
size = (size, size)
height, width = size
assert height % 32 == 0
assert width % 32 == 0
return height, width
def main():
net = TransposeResNet(RESNET50)
    # calling the instance invokes __call__
    x = net(tf.random_normal([20, 123]), 224, True)  # using () invokes the magic method __call__
print(x.shape)
net = TransposeResNet(RESNET101)
    x = net(tf.random_normal([20, 123]), 224, True)  # using () invokes the magic method __call__
print(x.shape)
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
9a1ee8365a695e26f0101708baa476b7bd48369c | dac498d66ec02ad9b52c4c3b074b3bd68d4aee00 | /joke/login.py | 694a6614380599af7dfe3625d1387b7a6e54a06c | [] | no_license | wangjian2254/haha | 3f95c22166af0495098783a40cd1a5d5326cc6e6 | d856cf43bbb12d49334078432a74cbe1ef47cf98 | refs/heads/master | 2016-09-05T20:41:38.188010 | 2014-03-22T11:22:24 | 2014-03-22T11:22:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,027 | py | #coding=utf-8
from datetime import datetime
import urllib
from pubweibo import weibo
from t4py.tblog.tblog import TBlog
from tools.page import Page
import webSetting
from models.model import User
__author__ = 'Administrator'
#from weibopy import OAuthHandler, oauth, WeibopError
from google.appengine.api import memcache
from qqweibo import OAuthHandler as qqOAuthHandler
#
#class WebOAuthHandler(OAuthHandler):
# user_id=None
# def get_authorization_url_with_callback(self, callback, signin_with_twitter=False):
# """Get the authorization URL to redirect the user"""
# try:
# # get the request token
# self.request_token = self._get_request_token()
#
# # build auth request and return as url
# if signin_with_twitter:
# url = self._get_oauth_url('authenticate')
# else:
# url = self._get_oauth_url('authorize')
# request = oauth.OAuthRequest.from_token_and_callback(
# token=self.request_token, callback=callback, http_url=url
# )
# return request.to_url()
# except Exception, e:
# raise WeibopError(e)
#class MainPage(PublicPage):
# def get(self):
##def _get_referer_url(request):
# referer_url = request.META.get('HTTP_REFERER', '/')
# host = request.META['HTTP_HOST']
# if referer_url.startswith('http') and host not in referer_url:
# referer_url = '/' # 避免外站直接跳到登录页而发生跳转错误
# return referer_url
#
#def _oauth():
# """获取oauth认证类"""
# return WebOAuthHandler(webSetting.xlconsumer_key, webSetting.xlconsumer_secret)
class Login(Page):
def get(self):
username=self.request.get('username')
website=self.request.get('website')
if not username:
return
        # build the OAuth authorization url
if 'sina'==website:
login_backurl =webSetting.WEIBOURL+'/login_check?username='+username+'&data='+str(datetime.now())+'&website=sina'
auth_client = weibo.APIClient(webSetting.xlconsumer_key, webSetting.xlconsumer_secret, login_backurl)
auth_url = auth_client.get_authorize_url()
            # save the request_token; it is needed to fetch the access_token after the user logs in
            # memcache.Client().set(username+"_request_token1",auth_client.request_token,36000)
            # redirect to the login page
return self.redirect(auth_url)
elif 'wy'==website:
login_backurl =webSetting.WEIBOURL+'/login_check?username='+username+'&data='+str(datetime.now())+'&website=wy'
t = TBlog(webSetting.wyconsumer_key, webSetting.wyconsumer_secret)
t.get_request_token()
url=t.get_auth_url(login_backurl)
            # save the request_token; it is needed to fetch the access_token after the user logs in
memcache.Client().set(username+"_request_token3",t,36000)
return self.redirect(url)
elif 'teng'==website:
login_backurl=webSetting.WEIBOURL+'/login_check?username='+username+'&data='+str(datetime.now())+'&website=teng'
auth=qqOAuthHandler(webSetting.qqconsumer_key,webSetting.qqconsumer_secret,callback=login_backurl)
url=auth.get_authorization_url()
            # save the request_token; it is needed to fetch the access_token after the user logs in
memcache.Client().set(username+"_request_token4",auth,36000)
return self.redirect(url)
class Login_check(Page):
def get(self):
website=self.request.get('website')
username=self.request.get('username')
user=User.get_by_key_name('u'+username)
msg=u'您开通了'
# userAccessToken=UserAccessToken().all().filter('username =',username).fetch(1)
# if userAccessToken:
# userAccessToken=userAccessToken[0]
# else:
# userAccessToken=UserAccessToken()
# userAccessToken.username=username
        if 'sina'==website:
            """Called back after the user successfully logs in and authorizes; fetches the access_token to complete the authorization."""
# http://mk2.com/?oauth_token=c30fa6d693ae9c23dd0982dae6a1c5f9&oauth_verifier=603896
# verifier = self.request.get('oauth_verifier', None)
# if not username:
# return
# auth_client = _oauth()
# # 设置之前保存在session的request_token
# # request_token = request.session['oauth_request_token']
# request_token=memcache.Client().get(username+"_request_token1")
# if not request_token:
# return
# memcache.Client().delete(username+"_request_token1")
# # del request.session['oauth_request_token']
#
# auth_client.set_request_token(request_token.key, request_token.secret)
# access_token = auth_client.get_access_token(verifier)
code=self.request.get('code','')
client = weibo.APIClient(webSetting.xlconsumer_key, webSetting.xlconsumer_secret,webSetting.WEIBOURL+'/login_check?username='+username+'&data='+str(datetime.now())+'&website=sina')
r = client.request_access_token(code)
# logging.info('access token: %s' % json.dumps(r))
access_token, expires_in, uid = r.access_token, r.expires_in, r.uid
# client.set_access_token(access_token, expires_in)
# logging.info('access token: %s' % json.dumps(access_token))
# u = client.users.show.get(uid=uid)
            # save the access_token; later requests only need the access_token
# userAccessToken.sinaSecret=access_token.secret
# userAccessToken.sinaToken=access_token.key
# userAccessToken.sinaExpires=expires_in
# userAccessToken.sinauserid=uid
# userAccessToken.sinaisright=True
# userAccessToken.put()
            # save the access_token; later requests only need the access_token
user.sinaSecret=access_token
user.sinaExpires=str(expires_in)
user.put()
# pam={'key0':'sinaSecret','value0':access_token.secret,'key1':'sinaToken','value1':access_token.key}
# syncMogu(username,pam)
# syncWeiboShouQuan(username,'sina')
msg+=u'新浪'
# userAccessToken.sinaSecret=access_token.secret
# userAccessToken.sinaToken=access_token.key
# userAccessToken.sinauserid=auth_client.user_id
# userAccessToken.sinaisright=True
# userAccessToken.put()
# sinaadd=memcache.Client().get("SinaAdd")
# if not sinaadd:
# sinaadd= []
# sinaadd.append(username)
# memcache.Client().set("SinaAdd",sinaadd,360000)
elif 'wy'==website:
# t = TBlog(weboSetting.wyconsumer_key, weboSetting.wyconsumer_secret)
request_token=memcache.Client().get(username+"_request_token3")
if not request_token:
return
memcache.Client().delete(username+"_request_token3")
# t._request_handler.request_token=request_token
# request_token.get_auth_url()
# pin=self.request.get('pin', None)
pin=self.request.get('oauth_token', None)
s=request_token.get_access_token(pin)
user.wySecret=s.secret
user.wyToken=s.key
user.put()
# pam={'key0':'wySecret','value0':s.secret,'key1':'wyToken','value1':s.key}
# syncMogu(username,pam)
# syncWeiboShouQuan(username,'wy')
msg+=u'网易'
# userAccessToken.wySecret=s.secret
# userAccessToken.wyToken=s.key
# userAccessToken.wyisright=True
# userAccessToken.put()
# wyadd=memcache.Client().get("WyAdd")
# if not wyadd:
# wyadd= []
# wyadd.append(username)
# memcache.Client().set("WyAdd",wyadd,360000)
elif 'teng'==website:
request_token=memcache.Client().get(username+"_request_token4")
if not request_token:
return
memcache.Client().delete(username+"_request_token4")
verifier = self.request.get('oauth_verifier', None)
access_token = request_token.get_access_token(verifier)
user.tengSecret=access_token.secret
user.tengToken=access_token.key
user.put()
# pam={'key0':'qqSecret','value0':access_token.secret,'key1':'qqToken','value1':access_token.key}
# syncMogu(username,pam)
# syncWeiboShouQuan(username,'teng')
msg+=u'腾讯'
# userAccessToken.qqSecret=access_token.secret
# userAccessToken.qqToken=access_token.key
# #userAccessToken.qquserid=auth_client.user_id
# userAccessToken.qqisright=True
# userAccessToken.put()
# qqadd=memcache.Client().get("QQAdd")
# if not qqadd:
# qqadd= []
# qqadd.append(username)
# memcache.Client().set("QQAdd",qqadd,360000)
msg+=u'微博。'
p={'msg':msg.encode('utf-8')}
return self.redirect(str('joke://jokeweibo/weibo'+'?msg='+urllib.urlencode(p).split('=')[1]+'&website='+website))
#def syncMogu(userName,pam={}):
# pam['UserName']=userName
# pam['appid']=weboSetting.WEIBOAPPCODE
#
# result = urlfetch.fetch(
# url =weboSetting.DBPHOTOWEBURL+'/changeAppData',
# payload = urllib.urlencode(pam),
# method = urlfetch.POST,
# headers = {'Content-Type':'application/x-www-form-urlencoded',
# 'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.2) Gecko/20100115 Firefox/3.6'},
# follow_redirects = False,deadline=30)
# if result.status_code==200:
# return True
# else:
# return False
#def syncWeiboShouQuan(userName,web,do=None):
# login_url =weboSetting.DBPHOTOWEBURL+ '/WeiboCheck'
# pam={'username':userName,'web':web}
# if do:
# pam['do']='del'
# login_data = urllib.urlencode(pam)
# result = urlfetch.fetch(
# url =login_url,
# payload = urllib.urlencode(pam),
# method = urlfetch.POST,
# headers = {'Content-Type':'application/x-www-form-urlencoded',
# 'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.2) Gecko/20100115 Firefox/3.6'},
# follow_redirects = False,deadline=20)
# if result.status_code==200:
# return True
# else:
# return False
| [
"[email protected]"
] | |
2f05ffdb5318a803ae57b08a2a6dfa42c899af36 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03644/s583061806.py | 4f4c5fa1513661aa6021ecf76f19f6b4993b52f6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | N = int(input())
result = []
for i in range(1, N+1):
bin_i = bin(i)
bin_i_c = bin_i.count("0")-1
result.append(bin_i_c)
sorted_result = sorted(result, reverse = True)
print(2 ** sorted_result[0]) | [
"[email protected]"
] | |
3f75c5564960d37842a25e9698020572528ca25f | 3a7359459d10b417540bc62facddcce9fee08323 | /pui.py | 02f28d5ee62cf3b4fad57e7499c37ebf1b1ce120 | [
"MIT"
] | permissive | tuxnani/pywikipedia-scripts | f98bb5155bb6d056aa7d4e67be8cfe715d1bf900 | 40795597e94970395c9f9e4ea9641731e7950a4f | refs/heads/master | 2021-01-17T22:19:10.974908 | 2013-02-02T08:21:40 | 2013-02-02T08:21:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,474 | py | #!/usr/bin/python
# -*- coding: utf-8 -*
#$ -m ae
"""
Copyright (C) 2008-2012 Legoktm
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
import re
import pywikibot
site = pywikibot.Site()
page = pywikibot.Page(site,'Wikipedia:Possibly unfree files')
wikitext = page.get()
search = re.compile(r'\n==New listings==', re.IGNORECASE)
wikitext = search.sub(r'\n*[[/{{subst:#time:Y F j|-8 days}}]]\n==New listings==', wikitext)
EditMsg = 'Adding new day to holding cell'
page.put(wikitext, EditMsg)
| [
"[email protected]"
] | |
3d67d667f7c635238b62f637bbb7bca5a7604a8d | dbd8180d9c02c22b42baa5227437714ff352fd8e | /1-100/L250.py | 5488dcdc339fc4b78d91939d00a3f4171395ad11 | [] | no_license | k8godzilla/-Leetcode | 92953dfffc0f06907fa7bd0beea7bc27b16f9efa | 58d5384155f481b1d1b0a7ca69566245dd779554 | refs/heads/master | 2020-06-12T15:35:43.380979 | 2019-08-07T11:14:49 | 2019-08-07T11:14:49 | 194,348,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,115 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 28 17:29:57 2019
@author: sunyin
"""
'''
Given a binary tree, count the number of uni-value subtrees.
A uni-value subtree means every node of that subtree has the same value.
Example:
Input: root = [5,1,5,5,5,null,5]
              5
             / \
            1   5
           / \   \
          5   5   5
Output: 4
Source: LeetCode
Link: https://leetcode-cn.com/problems/count-univalue-subtrees
Copyright belongs to LeetCode; commercial reprints require official authorization, non-commercial reprints must cite the source.
'''
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
    def countUnivalSubtrees(self, root: TreeNode) -> int:
        if root is None:  # an empty tree has no uni-value subtrees
            return 0
        self.res = 0
        b, v = self.helper(root)
        return self.res
def helper(self, node: TreeNode):
if node.left is None and node.right is None:
self.res += 1
return True, node.val
elif node.left is None:
b, v = self.helper(node.right)
if b and v == node.val:
self.res += 1
return b, v
else:
return False, -1
elif node.right is None:
b, v = self.helper(node.left)
if b and v == node.val:
self.res += 1
return b, v
else:
return False, -1
else:
bRight, vRight = self.helper(node.right)
bLeft, vLeft = self.helper(node.left)
if bRight and bLeft and vRight == node.val and vLeft == node.val:
self.res += 1
return True, node.val
else:
return False, -1
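if __name__ == '__main__':
    # Quick self-check against the example above: build [5,1,5,5,5,null,5] and expect 4.
    root = TreeNode(5)
    root.left = TreeNode(1)
    root.right = TreeNode(5)
    root.left.left = TreeNode(5)
    root.left.right = TreeNode(5)
    root.right.right = TreeNode(5)
    print(Solution().countUnivalSubtrees(root))  # -> 4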
| [
"[email protected]"
] | |
a32a139770dcea6321e2c857c8f1d62509740d59 | dd1b38d6d953fae7ace7b9c5f86821ac24936b1d | /stutorial/items.py | 2376d466616a39f112cdf068f151165ddf3d4b94 | [] | no_license | ranafge/scrapy_project | 81e1345cc793e65061ba8a43afa78ec91c0680a9 | 8b921a64f9e499ac56cb985b6ccaf680258a2b2f | refs/heads/master | 2020-04-11T04:58:28.982076 | 2018-12-12T19:22:12 | 2018-12-12T19:22:12 | 161,533,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class StutorialItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
| [
"[email protected]"
] | |
81a10088604bb25b97c23ba1ef2a1604af597a9f | 0c3cd4e933afef9dd15c2983d6b1a8413063ae80 | /alpaca_paper/tests/test_screeners.py | f9d9c8afeb1f5fc0c0b87f8372bca7a71743e206 | [] | no_license | webclinic017/paper_trading_bot | 405ae2cad7fd50b393509a90973d674b5f59ce8c | a172ce2dc150183be4ddb5b218dfcb7006027f69 | refs/heads/master | 2023-07-24T06:31:12.093572 | 2021-07-20T01:32:32 | 2021-07-20T01:32:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 574 | py | import pytest
from alpaca_paper.screeners import MarketWatch
@pytest.fixture
def mw():
return MarketWatch()
def test_premarket(mw):
pre_market = mw.pre_market()
assert isinstance(pre_market, (dict,))
assert isinstance(pre_market['gainers'], (list,))
assert isinstance(pre_market['loosers'], (list,))
assert isinstance(pre_market['most_actives'], (list,))
def test_vol_to_float():
assert 221770 == MarketWatch.vol_to_float('221.77K')
assert 2189000 == MarketWatch.vol_to_float('2.189M')
assert 3316 == MarketWatch.vol_to_float('3,316') | [
"[email protected]"
] | |
566b7fac7c1540a03e7ba419a8e74227534f307e | fb124e51024917d6479fa626d9607ff10f7a3aba | /storm-control/storm_control/hal4000/illumination/button_editor_ui.py | 6340c37f39723a6c6e0a3e99238c3c19bfe496a8 | [
"MIT"
] | permissive | BehnamAbaie/storm-control | 054bd7bbd903ed9635e4d1121c30544f58473c4f | 0c686321142eccad62ce3365eae22c3b69229b0d | refs/heads/main | 2023-06-18T08:04:01.108874 | 2021-07-14T00:51:15 | 2021-07-14T00:51:15 | 342,049,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,726 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'button_editor.ui'
#
# Created by: PyQt5 UI code generator 5.7
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(493, 380)
self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)
self.verticalLayout.setObjectName("verticalLayout")
self.scrollArea = QtWidgets.QScrollArea(Dialog)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 473, 327))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.verticalLayout.addWidget(self.scrollArea)
self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.verticalLayout.addWidget(self.buttonBox)
self.retranslateUi(Dialog)
self.buttonBox.accepted.connect(Dialog.accept)
self.buttonBox.rejected.connect(Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Power Button Editor"))
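# Typical use of a pyuic5-generated class like this one (sketch, not part of the
# generated output): instantiate a QDialog and call setupUi on it.
# app = QtWidgets.QApplication([])
# dialog = QtWidgets.QDialog()
# ui = Ui_Dialog()
# ui.setupUi(dialog)
# dialog.exec_()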
| [
"[email protected]"
] | |
07b979d778c19399b58b80724820c6b94da8d9ad | 30f6633a24d799fddd51672c528e4baee649d8cd | /6.01/designLab09/simulator/core/search/__init__.py | a5aa67e832e04dcfdfb63b10e576b88f6c972342 | [] | no_license | Rajpratik71/mit-courses | e12c864435a1af2c8b7034af956fd2f53d559cfc | 86a06a3192e17230a05c5c7beeed5699df73be22 | refs/heads/master | 2023-06-22T21:05:37.240985 | 2023-01-26T06:44:49 | 2023-01-26T06:44:49 | 192,182,074 | 0 | 2 | null | 2023-04-05T04:00:47 | 2019-06-16T11:15:24 | TeX | UTF-8 | Python | false | false | 72 | py | """
core.search
"""
__author__ = '[email protected] (Michael Mekonnen)'
| [
"[email protected]"
] | |
73ce4b0aa5bd1da2a82a5945ab40c2a984e8af2e | f2f26e263da6d3cfe66e78fc326744ac512ff7f9 | /boostedhiggs/htautauprocessor.py | bb52be16b21bb403e7687867bb2a4f342a7a3abf | [] | no_license | drankincms/boostedhiggs | 64b231a18abd49eee82630501df1ae815d58569f | 31d15d7dc165c3eb081793d79a3770973b7abc21 | refs/heads/dev | 2023-06-25T18:04:01.088815 | 2021-10-26T15:36:13 | 2021-10-26T15:36:13 | 227,409,855 | 0 | 4 | null | 2022-04-15T17:41:58 | 2019-12-11T16:22:26 | Python | UTF-8 | Python | false | false | 36,333 | py | from functools import partial
import numpy as np
from coffea import processor, hist
from uproot_methods import TLorentzVectorArray
import awkward
from copy import deepcopy
from .common import (
getBosons,
matchedBosonFlavor,
matchedBosonFlavorLep,
getHTauTauDecayInfo,
isOverlap,
)
from .corrections import (
corrected_msoftdrop,
n2ddt_shift,
add_pileup_weight,
add_VJets_NLOkFactor,
add_jetTriggerWeight,
add_TriggerWeight,
)
#from .btag import BTagEfficiency, BTagCorrector
# for old pancakes
from coffea.nanoaod.methods import collection_methods, FatJet
collection_methods['CustomAK8Puppi'] = FatJet
collection_methods['CustomAK8PuppiSubjet'] = FatJet
FatJet.subjetmap['CustomAK8Puppi'] = 'CustomAK8PuppiSubjet'
class HtautauProcessor(processor.ProcessorABC):
def __init__(self, year='2017'):
self._year = year
#self._btagSF = BTagCorrector(year, 'medium')
self._btagWPs = {
'medium': {
'2016': 0.6321,
'2017': 0.4941,
'2018': 0.4184,
},
}
self._metFilters = {
'2016': [
"goodVertices",
"globalSuperTightHalo2016Filter",
"HBHENoiseFilter",
"HBHENoiseIsoFilter",
"EcalDeadCellTriggerPrimitiveFilter",
"BadPFMuonFilter",
],
'2017': [
"goodVertices",
"globalSuperTightHalo2016Filter",
"HBHENoiseFilter",
"HBHENoiseIsoFilter",
"EcalDeadCellTriggerPrimitiveFilter",
"BadPFMuonFilter",
"BadChargedCandidateFilter",
"eeBadScFilter",
"ecalBadCalibFilter",
],
'2018': [
"goodVertices",
"globalSuperTightHalo2016Filter",
"HBHENoiseFilter",
"HBHENoiseIsoFilter",
"EcalDeadCellTriggerPrimitiveFilter",
"BadPFMuonFilter",
"BadChargedCandidateFilter",
"eeBadScFilter",
"ecalBadCalibFilterV2",
],
}
self._hadel_triggers = {
'2016': [
#'Ele35_WPTight_Gsf',
'Ele50_CaloIdVT_GsfTrkIdT_PFJet165','Ele115_CaloIdVT_GsfTrkIdT',
#"Ele15_IsoVVVL_PFHT450_PFMET50",
"Ele15_IsoVVVL_PFHT600",
'PFHT800',
'PFHT900',
'AK8PFJet360_TrimMass30',
'AK8PFHT700_TrimR0p1PT0p03Mass50',
'PFHT650_WideJetMJJ950DEtaJJ1p5',
'PFHT650_WideJetMJJ900DEtaJJ1p5',
#'AK8DiPFJet280_200_TrimMass30_BTagCSV_p20',
'PFJet450',
],
'2017': [
#'Ele35_WPTight_Gsf',
'Ele50_CaloIdVT_GsfTrkIdT_PFJet165','Ele115_CaloIdVT_GsfTrkIdT',
#"Ele15_IsoVVVL_PFHT450_PFMET50",
"Ele15_IsoVVVL_PFHT600",
#'AK8PFJet330_PFAK8BTagCSV_p17',
'PFHT1050',
'AK8PFJet400_TrimMass30',
'AK8PFJet420_TrimMass30',
'AK8PFHT800_TrimMass50',
'PFJet500',
'AK8PFJet500',
],
'2018': [
#'Ele35_WPTight_Gsf',
'Ele50_CaloIdVT_GsfTrkIdT_PFJet165','Ele115_CaloIdVT_GsfTrkIdT',
#"Ele15_IsoVVVL_PFHT450_PFMET50",
"Ele15_IsoVVVL_PFHT600",
'AK8PFJet400_TrimMass30',
'AK8PFJet420_TrimMass30',
'AK8PFHT800_TrimMass50',
'PFHT1050',
'PFJet500',
'AK8PFJet500',
# 'AK8PFJet330_PFAK8BTagCSV_p17', not present in 2018D?
#'AK8PFJet330_TrimMass30_PFAK8BoostedDoubleB_np4',
#'AK4PFJet30',
],
}
self._hadmu_triggers = {
'2016': [
'Mu50','Mu55',
#"Mu15_IsoVVVL_PFHT450_PFMET50",
"Mu15_IsoVVVL_PFHT600",
'PFHT800',
'PFHT900',
'AK8PFJet360_TrimMass30',
'AK8PFHT700_TrimR0p1PT0p03Mass50',
'PFHT650_WideJetMJJ950DEtaJJ1p5',
'PFHT650_WideJetMJJ900DEtaJJ1p5',
#'AK8DiPFJet280_200_TrimMass30_BTagCSV_p20',
'PFJet450',
],
'2017': [
'Mu50',#'Mu55',
#"Mu15_IsoVVVL_PFHT450_PFMET50",
"Mu15_IsoVVVL_PFHT600",
#'AK8PFJet330_PFAK8BTagCSV_p17',
'PFHT1050',
'AK8PFJet400_TrimMass30',
'AK8PFJet420_TrimMass30',
'AK8PFHT800_TrimMass50',
'PFJet500',
'AK8PFJet500',
],
'2018': [
'Mu50',#'Mu55',
#"Mu15_IsoVVVL_PFHT450_PFMET50",
"Mu15_IsoVVVL_PFHT600",
'AK8PFJet400_TrimMass30',
'AK8PFJet420_TrimMass30',
'AK8PFHT800_TrimMass50',
'PFHT1050',
'PFJet500',
'AK8PFJet500',
# 'AK8PFJet330_PFAK8BTagCSV_p17', not present in 2018D?
#'AK8PFJet330_TrimMass30_PFAK8BoostedDoubleB_np4',
#'AK4PFJet30',
],
}
self._hadhad_triggers = {
'2016': [
'PFHT800',
'PFHT900',
'AK8PFJet360_TrimMass30',
'AK8PFHT700_TrimR0p1PT0p03Mass50',
'PFHT650_WideJetMJJ950DEtaJJ1p5',
'PFHT650_WideJetMJJ900DEtaJJ1p5',
#'AK8DiPFJet280_200_TrimMass30_BTagCSV_p20',
'PFJet450',
'DoubleMediumChargedIsoPFTau35_Trk1_TightID_eta2p1_Reg',
'DoubleMediumChargedIsoPFTau35_Trk1_eta2p1_Reg',
'DoubleMediumChargedIsoPFTau40_Trk1_TightID_eta2p1_Reg',
'DoubleMediumChargedIsoPFTau40_Trk1_eta2p1_Reg',
],
'2017': [
#'AK8PFJet330_PFAK8BTagCSV_p17',
'PFHT1050',
'AK8PFJet400_TrimMass30',
'AK8PFJet420_TrimMass30',
'AK8PFHT800_TrimMass50',
'PFJet500',
'AK8PFJet500',
'DoubleMediumChargedIsoPFTau35_Trk1_TightID_eta2p1_Reg',
'DoubleMediumChargedIsoPFTau35_Trk1_eta2p1_Reg',
'DoubleMediumChargedIsoPFTau40_Trk1_TightID_eta2p1_Reg',
'DoubleMediumChargedIsoPFTau40_Trk1_eta2p1_Reg',
'MediumChargedIsoPFTau180HighPtRelaxedIso_Trk50_eta2p1',
'MediumChargedIsoPFTau180HighPtRelaxedIso_Trk50_eta2p1_1pr',
],
'2018': [
'AK8PFJet400_TrimMass30',
'AK8PFJet420_TrimMass30',
'AK8PFHT800_TrimMass50',
'PFHT1050',
'PFJet500',
'AK8PFJet500',
# 'AK8PFJet330_PFAK8BTagCSV_p17', not present in 2018D?
#'AK8PFJet330_TrimMass30_PFAK8BoostedDoubleB_np4',
#'AK4PFJet30',
'DoubleMediumChargedIsoPFTauHPS35_Trk1_TightID_eta2p1_Reg',
'DoubleMediumChargedIsoPFTauHPS35_Trk1_eta2p1_Reg',
'DoubleMediumChargedIsoPFTauHPS40_Trk1_TightID_eta2p1_Reg',
'DoubleMediumChargedIsoPFTauHPS40_Trk1_eta2p1_Reg',
'MediumChargedIsoPFTau180HighPtRelaxedIso_Trk50_eta2p1',
'MediumChargedIsoPFTau180HighPtRelaxedIso_Trk50_eta2p1_1pr',
'MediumChargedIsoPFTau200HighPtRelaxedIso_Trk50_eta2p1',
'MediumChargedIsoPFTau220HighPtRelaxedIso_Trk50_eta2p1',
],
}
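        # Histogram axes shared by the output histograms defined in the accumulator below.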
jet_pt_bin = hist.Bin('jet_pt', r'Jet $p_{T}$ [GeV]', 20, 200, 1200)
jet_eta_bin = hist.Bin('jet_eta', r'Jet $\eta$', 20, -3., 3.)
jet_msd_bin = hist.Bin('jet_msd', r'Jet $m_{sd}$ [GeV]', 34, 40, 210.)
nn_disc_bin = hist.Bin('nn_disc',r'$NN$', [0.,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.96,0.97,0.98,0.99,0.995,1.])
mt_lepmet_bin = hist.Bin('mt_lepmet', r'$m_{T}(\ell, MET)$', 30, 0., 150.)
oppbjet_pt_bin = hist.Bin('oppbjet_pt', r'Max opp. deepCSV-bjet $p_{T}$ [GeV]', 20, 0., 500)
oppbtag_bin = hist.Bin('oppbtag', r'Max opp. deepCSV-b ', 20, 0., 1)
lep_pt_bin = hist.Bin('lep_pt', r'Lepton $p_{T}$ [GeV]', 40, 0, 800)
lep_eta_bin = hist.Bin('lep_eta', r'Lepton $\eta$', 20, -3., 3.)
jet_lsf3_bin = hist.Bin('lsf3', r'Jet LSF$_3$', 20, 0., 1.)
lep_jet_dr_bin = hist.Bin('lep_jet_dr', r'$\Delta R(jet,lepton)$', 40, 0., 4.)
#lep_miso_bin = hist.Bin('miso', r'Lepton miniIso', 20, 0., 0.1)
lep_miso_bin = hist.Bin('miso', r'Lepton miniIso', [0.,0.01,0.02,0.03,0.04,0.05,0.06,0.07,0.08,0.09,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.])
jet_jetlep_m_bin = hist.Bin('jetlep_m', r'Jet+lepton $m$ [GeV]', 20, 0, 600.)
jet_jetmet_m_bin = hist.Bin('jetmet_m', r'Jet+MET $m$ [GeV]', 20, 0, 600.)
jet_jetlepmet_m_bin = hist.Bin('jetlepmet_m', r'Jet+lepton+MET $m$ [GeV]', 20, 0, 600.)
jetmet_dphi_bin = hist.Bin('jetmet_dphi', r'$\Delta\phi(jet,MET)$', 20, 0., 2.)
met_pt_bin = hist.Bin('met_pt', r'MET [GeV]', 20, 0, 800)
h_pt_bin = hist.Bin('h_pt', r'h $p_{T}$ [GeV]', 20, 200, 1200)
ntau_bin = hist.Bin('ntau',r'Number of taus',64,-0.5,63.5)
genhtt_bin = hist.Bin('genhtt',r'hh,eh,mh,em,ee,mm (- for dr > 0.8)',13,-6.5,6.5)
gentau1had_bin = hist.Bin('gentau1had',r'1pr,1pr+pi0,3pr',4,-0.5,3.5)
gentau2had_bin = hist.Bin('gentau2had',r'1pr,1pr+pi0,3pr',4,-0.5,3.5)
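        # Output accumulator: per-dataset sum of generator weights, per-channel
        # weighted cutflows, and multi-dimensional kinematic histograms.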
self._accumulator = processor.dict_accumulator({
# dataset -> sumw
'sumw': processor.defaultdict_accumulator(float),
# dataset -> cut -> count
'cutflow_hadhad': processor.defaultdict_accumulator(partial(processor.defaultdict_accumulator, float)),
'cutflow_hadhad_cr_mu': processor.defaultdict_accumulator(partial(processor.defaultdict_accumulator, float)),
'cutflow_hadel': processor.defaultdict_accumulator(partial(processor.defaultdict_accumulator, float)),
'cutflow_hadmu': processor.defaultdict_accumulator(partial(processor.defaultdict_accumulator, float)),
'cutflow_hadel_cr_b': processor.defaultdict_accumulator(partial(processor.defaultdict_accumulator, float)),
'cutflow_hadmu_cr_b': processor.defaultdict_accumulator(partial(processor.defaultdict_accumulator, float)),
'cutflow_hadel_cr_w': processor.defaultdict_accumulator(partial(processor.defaultdict_accumulator, float)),
'cutflow_hadmu_cr_w': processor.defaultdict_accumulator(partial(processor.defaultdict_accumulator, float)),
'cutflow_hadel_cr_qcd': processor.defaultdict_accumulator(partial(processor.defaultdict_accumulator, float)),
'cutflow_hadmu_cr_qcd': processor.defaultdict_accumulator(partial(processor.defaultdict_accumulator, float)),
#'btagWeight': hist.Hist('Events', hist.Cat('dataset', 'Dataset'), hist.Bin('val', 'BTag correction', 50, 0, 2)), #FIXME
'jet_kin': hist.Hist(
'Events',
hist.Cat('dataset', 'Dataset'),
hist.Cat('region', 'Region'),
jet_pt_bin, jet_eta_bin, jet_msd_bin
),
'b_kin': hist.Hist(
'Events',
hist.Cat('dataset', 'Dataset'),
hist.Cat('region', 'Region'),
jet_pt_bin, oppbjet_pt_bin, oppbtag_bin,
),
'lep_kin': hist.Hist(
'Events',
hist.Cat('dataset', 'Dataset'),
hist.Cat('region', 'Region'),
lep_pt_bin, lep_jet_dr_bin, lep_miso_bin,
),
'mass_kin': hist.Hist(
'Events',
hist.Cat('dataset', 'Dataset'),
hist.Cat('region', 'Region'),
jet_pt_bin, jet_msd_bin, genhtt_bin, #jet_jetmet_m_bin, jet_jetlepmet_m_bin,
),
'evt_kin': hist.Hist(
'Events',
hist.Cat('dataset', 'Dataset'),
hist.Cat('region', 'Region'),
met_pt_bin, lep_pt_bin, jet_pt_bin, #h_pt_bin,
),
})
@property
def accumulator(self):
return self._accumulator
def process(self, events):
dataset = events.metadata['dataset']
isRealData = 'genWeight' not in events.columns
selection = processor.PackedSelection()
weights = processor.Weights(len(events))
output = self.accumulator.identity()
if not isRealData:
output['sumw'][dataset] += events.genWeight.sum()
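        # OR together the per-channel HLT decisions for this year; data additionally
        # gets overlap removal between primary datasets, and MET filters apply to all events.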
trigger_hadhad = np.zeros(events.size, dtype='bool')
for t in self._hadhad_triggers[self._year]:
trigger_hadhad = trigger_hadhad | events.HLT[t]
trigger_hadmu = np.zeros(events.size, dtype='bool')
for t in self._hadmu_triggers[self._year]:
trigger_hadmu = trigger_hadmu | events.HLT[t]
trigger_hadel = np.zeros(events.size, dtype='bool')
for t in self._hadel_triggers[self._year]:
trigger_hadel = trigger_hadel | events.HLT[t]
#print(np.histogram(trigger))
if (isRealData): overlap_removal = isOverlap(events,dataset,self._hadhad_triggers[self._year]+self._hadmu_triggers[self._year]+self._hadel_triggers[self._year])
else: overlap_removal = np.ones(events.size, dtype='bool')
met_filters = np.ones(events.size, dtype='bool')
for t in self._metFilters[self._year]:
met_filters = met_filters & events.Flag[t]
selection.add('hadhad_trigger', trigger_hadhad & overlap_removal & met_filters)
selection.add('hadmu_trigger', trigger_hadmu & overlap_removal & met_filters)
selection.add('hadel_trigger', trigger_hadel & overlap_removal & met_filters)
try:
fatjets = events.FatJet
except AttributeError:
# early pancakes
fatjets = events.CustomAK8Puppi
fatjets['msdcorr'] = corrected_msoftdrop(fatjets)
fatjets['rho'] = 2 * np.log(fatjets.msdcorr / fatjets.pt)
fatjets['n2ddt'] = fatjets.n2b1 - n2ddt_shift(fatjets, year=self._year)
candidatejets = fatjets[
# https://github.com/DAZSLE/BaconAnalyzer/blob/master/Analyzer/src/VJetLoader.cc#L269
(fatjets.pt > 300)
#& (abs(fatjets.eta) < 2.5)
#& (fatjets.isTight)
]#[:, :2]
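        # The candidate jet is chosen below as the accepted AK8 jet closest in
        # delta-phi to the MET.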
        met_p4 = TLorentzVectorArray.from_ptetaphim(
            awkward.JaggedArray.fromiter([[v] for v in events.MET.pt]),
            awkward.JaggedArray.fromiter([[v] for v in np.zeros(events.size)]),
            awkward.JaggedArray.fromiter([[v] for v in events.MET.phi]),
            awkward.JaggedArray.fromiter([[v] for v in np.zeros(events.size)]))
ak8_met_pair = candidatejets.cross(met_p4)
ak8_met_dphi = abs(ak8_met_pair.i0.delta_phi(ak8_met_pair.i1))
#aligned_jet = ak8_met_dphi == ak8_met_dphi.min()
#best_jet_idx = (ak8_met_pair.i0 + aligned_jet * ak8_met_pair.i1).pt.argmax()
best_jet_idx = ak8_met_dphi.argmin()
#best_jet_idx = candidatejets.pt.argmax()
candidatejet = candidatejets[best_jet_idx]
jetmet_dphi = ak8_met_dphi[best_jet_idx]
nn_disc_hadhad = awkward.JaggedArray.fromiter([[v] for v in events.IN.hadhad_v4p1])[candidatejet.pt.pad(1, clip=True).fillna(0.)>300.]
nn_disc_hadel = awkward.JaggedArray.fromiter([[v] for v in events.GRU.hadel_v6p1])[candidatejet.pt.pad(1, clip=True).fillna(0.)>300.]
nn_disc_hadmu = awkward.JaggedArray.fromiter([[v] for v in events.GRU.hadmu_v6p1])[candidatejet.pt.pad(1, clip=True).fillna(0.)>300.]
candidatejet_rho = 2 * np.log(candidatejet.msdcorr / candidatejet.pt)
selection.add('jetacceptance', (
(candidatejet.pt > 300)
& (candidatejet.msdcorr > 40.)
& (abs(candidatejet.eta) < 2.4)
& (candidatejet_rho > -6.)
#& (candidatejet_rho < -2.1)
& (candidatejet_rho < -1.75)
).any())
selection.add('jetacceptance400', (
(candidatejet.pt > 400)
& (candidatejet.msdcorr > 40.)
& (abs(candidatejet.eta) < 2.4)
& (candidatejet_rho > -6.)
& (candidatejet_rho < -2.)
).any())
selection.add('jetacceptance450', (
(candidatejet.pt > 450)
& (candidatejet.msdcorr > 40.)
& (abs(candidatejet.eta) < 2.4)
& (candidatejet_rho > -6.)
& (candidatejet_rho < -2.1)
).any())
selection.add('jetid', (candidatejet.isTight).any())
selection.add('n2ddt', (candidatejet.n2ddt < 0.).any())
#print(np.histogram(candidatejet.pt.fillna(0).flatten()))
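        # AK4 jets (pt > 30, |eta| < 2.5, tight ID) used for the b-tag veto opposite
        # the candidate jet and for the b-tagged selection at dR > 0.8 from it.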
jets = events.Jet[
(events.Jet.pt > 30.)
& (abs(events.Jet.eta) < 2.5)
& (events.Jet.isTight)
]
# only consider first 4 jets to be consistent with old framework
jets = jets[:, :4]
ak4_ak8_pair = jets.cross(candidatejet, nested=True)
ak4_ak8_dphi = abs(ak4_ak8_pair.i0.delta_phi(ak4_ak8_pair.i1))
ak4_ak8_dr = ak4_ak8_pair.i0.delta_r(ak4_ak8_pair.i1)
ak4_opposite = jets[(ak4_ak8_dphi > np.pi / 2).all()]
#selection.add('antiak4btagMediumOppHem', ak4_opposite.btagDeepB.max() < BTagEfficiency.btagWPs[self._year]['medium'])
selection.add('antiak4btagMediumOppHem', ak4_opposite.btagDeepB.max() < self._btagWPs['medium'][self._year])
ak4_away = jets[(ak4_ak8_dr > 0.8).all()]
#selection.add('ak4btagMedium08', ak4_away.btagDeepB.max() > BTagEfficiency.btagWPs[self._year]['medium'])
selection.add('ak4btagMedium08', ak4_away.btagDeepB.max() > self._btagWPs['medium'][self._year])
selection.add('met', events.MET.pt > 50.)
selection.add('methard', events.MET.pt > 150.)
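        # Decode the electron cut-based ID bitmap (3 bits per cut) and require every
        # cut except the relative-isolation one (index 7) at the loose/tight level.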
el_loose_cuts = [(np.bitwise_and(np.right_shift(events.Electron.vidNestedWPBitmap,events.Electron.vidNestedWPBitmap.ones_like()*(3*k)),events.Electron.vidNestedWPBitmap.ones_like()*7) >= events.Electron.LOOSE) for k in range(10) if k != 7]
el_tight_cuts = [(np.bitwise_and(np.right_shift(events.Electron.vidNestedWPBitmap,events.Electron.vidNestedWPBitmap.ones_like()*(3*k)),events.Electron.vidNestedWPBitmap.ones_like()*7) >= events.Electron.TIGHT) for k in range(10) if k != 7]
#el_veto_cuts = [(np.bitwise_and(np.right_shift(events.Electron.vidNestedWPBitmap,events.Electron.vidNestedWPBitmap.ones_like()*(3*k)),events.Electron.vidNestedWPBitmap.ones_like()*7) >= events.Electron.VETO) for k in range(10) if k != 7]
# (MinPtCut,GsfEleSCEtaMultiRangeCut,GsfEleDEtaInSeedCut,GsfEleDPhiInCut,GsfEleFull5x5SigmaIEtaIEtaCut,GsfEleHadronicOverEMEnergyScaledCut,GsfEleEInverseMinusPInverseCut,GsfEleRelPFIsoScaledCut,GsfEleConversionVetoCut,GsfEleMissingHitsCut)
# 0 ,1 ,2 ,3 ,4 ,5 ,6 ,7 ,8 ,9
elmask_loose = el_loose_cuts[0].ones_like().astype(bool)
for m in el_loose_cuts: elmask_loose = elmask_loose & m
elmask_tight = el_tight_cuts[0].ones_like().astype(bool)
for m in el_tight_cuts: elmask_tight = elmask_tight & m
#elmask_veto = el_veto_cuts[0].ones_like().astype(bool)
#for m in el_veto_cuts: elmask_veto = elmask_veto & m
goodmuon = (
(events.Muon.pt > 25)
& (np.abs(events.Muon.eta) < 2.4)
#& (events.Muon.sip3d < 4)
#& (np.abs(events.Muon.dz) < 0.1)
#& (np.abs(events.Muon.dxy) < 0.02)
& (events.Muon.mediumId).astype(bool)
#& (events.Muon.highPtId).astype(bool)
)
ngoodmuons = goodmuon.sum()
leadingmuon = events.Muon[goodmuon].pad(1, clip=True)
goodelec = (
(events.Electron.pt > 25)
& (abs(events.Electron.eta) < 2.5)
#& (events.Electron.cutBased >= events.Electron.TIGHT)
#& (events.Electron.cutBased_HEEP).astype(bool)
#& elmask_tight
& events.Electron.mvaFall17V2noIso_WP80
)
ngoodelecs = goodelec.sum()
leadingelec = events.Electron[goodelec].pad(1, clip=True)
nmuons = (
(events.Muon.pt > 10)
& (abs(events.Muon.eta) < 2.4)
#& (events.Muon.pfRelIso04_all < 0.25)
#& (np.abs(events.Muon.dz) < 0.1)
#& (np.abs(events.Muon.dxy) < 0.05)
& (events.Muon.looseId).astype(bool)
#& (events.Muon.highPtId).astype(bool)
).sum()
nelectrons = (
(events.Electron.pt > 10)
& (abs(events.Electron.eta) < 2.5)
& (events.Electron.cutBased >= events.Electron.LOOSE)
#& (events.Electron.cutBased_HEEP).astype(bool)
#& elmask_loose
).sum()
if self._year=='2018':
tauAntiEleId = events.Tau.idAntiEle2018
else:
tauAntiEleId = events.Tau.idAntiEle
goodtaus = (
(events.Tau.pt > 20)
& (abs(events.Tau.eta) < 2.3)
& (tauAntiEleId >= 8)
& (events.Tau.idAntiMu >= 1)
)
taus_p4 = TLorentzVectorArray.from_ptetaphim(events.Tau[goodtaus].pt.fillna(0),events.Tau[goodtaus].eta.fillna(0),events.Tau[goodtaus].phi.fillna(0),events.Tau[goodtaus].mass.fillna(0))
tau_ak8_pair = taus_p4.cross(candidatejet)
taus_dr = (tau_ak8_pair.i0.delta_r(tau_ak8_pair.i1) < 0.8).any()
selection.add('antiLepId',taus_dr)
#ntaus = (
# (events.Tau.pt > 20)
# & (events.Tau.idDecayMode).astype(bool)
# # bacon iso looser than Nano selection
#).sum()
ntaus = np.zeros(events.size, dtype='bool')
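        # Lepton bookkeeping: the lepton channels require exactly one good muon or one
        # good electron, with looser-ID leptons only used as vetoes; ntaus is effectively
        # disabled (always zero) because the counting above is commented out.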
lepsel = ((nmuons <= 1) & (nelectrons == 0) & (ntaus == 0) & (ngoodelecs == 0) & (ngoodmuons == 1)) | ((nmuons == 0) & (nelectrons <= 1) & (ntaus == 0) & (ngoodmuons == 0) & (ngoodelecs == 1))
mu_p4 = TLorentzVectorArray.from_ptetaphim(leadingmuon.pt.fillna(0)*lepsel,leadingmuon.eta.fillna(0)*lepsel,leadingmuon.phi.fillna(0)*lepsel,leadingmuon.mass.fillna(0)*lepsel)
#[(goodmuon & ((nmuons == 1) & (nelectrons == 0) & (ntaus == 0) & (ngoodmuons == 1)))]
muon_ak8_pair = mu_p4.cross(candidatejet, nested=True)
el_p4 = TLorentzVectorArray.from_ptetaphim(leadingelec.pt.fillna(0)*lepsel,leadingelec.eta.fillna(0)*lepsel,leadingelec.phi.fillna(0)*lepsel,leadingelec.mass.fillna(0)*lepsel)
#[(goodelec & ((nmuons == 0) & (nelectrons == 1) & (ntaus == 0) & (ngoodelecs == 1)))]
elec_ak8_pair = el_p4.cross(candidatejet, nested=True)
#leadinglep = awkward.concatenate([mu_p4, el_p4], axis=1).pad(1, clip=True)
leadinglep = mu_p4 + el_p4
mu_miso = leadingmuon.miniPFRelIso_all.fillna(0)*lepsel
el_miso = leadingelec.miniPFRelIso_all.fillna(0)*lepsel
leadinglep_miso = mu_miso + el_miso
leadinglep_miso = leadinglep_miso.pad(1, clip=True)
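        # Transverse mass of the lepton + MET system: mT = sqrt(2 pT(lep) MET (1 - cos dphi)).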
mt_lepmet = np.sqrt(2.*leadinglep.pt*met_p4.pt*(leadinglep.pt.ones_like()-np.cos(leadinglep.delta_phi(met_p4))))
selection.add('mt_lepmet', (mt_lepmet.flatten() < 80.))
selection.add('mt_lepmetInv', (mt_lepmet.flatten() >= 80.))
selection.add('noleptons', (nmuons == 0) & (nelectrons == 0) & (ntaus == 0) & (ngoodmuons == 0) & (ngoodelecs == 0))
selection.add('onemuon', (nmuons <= 1) & (nelectrons == 0) & (ntaus == 0) & (ngoodelecs == 0) & (ngoodmuons == 1))
selection.add('oneelec', (nmuons == 0) & (nelectrons <= 1) & (ntaus == 0) & (ngoodmuons == 0) & (ngoodelecs == 1))
selection.add('muonkin', (
(leadingmuon.pt > 25.)
& (abs(leadingmuon.eta) < 2.1)
).all())
selection.add('muonkinhard', (
(leadingmuon.pt > 60.)
& (abs(leadingmuon.eta) < 2.1)
).all())
selection.add('muonDphiAK8', (
abs(muon_ak8_pair.i0.delta_phi(muon_ak8_pair.i1)) > 2*np.pi/3
).all().all())
selection.add('eleckin', (
(leadingelec.pt > 25.)
& (abs(leadingelec.eta) < 2.4)
).all())
selection.add('eleckinhard', (
(leadingelec.pt > 60.)
& (abs(leadingelec.eta) < 2.4)
).all())
selection.add('elecDphiAK8', (
abs(elec_ak8_pair.i0.delta_phi(elec_ak8_pair.i1)) > 2*np.pi/3
).all().all())
lep_ak8_pair = leadinglep.cross(candidatejet)#, nested=True)
selection.add('lepDrAK8', (
(lep_ak8_pair.i0.delta_r(lep_ak8_pair.i1) < 0.8).all()
#(lep_ak8_pair.i0.delta_r(lep_ak8_pair.i1) < 99.0).all()
))
#selection.add('jetlsf', (
# (candidatejet.lsf3 > 0.7).any()
#))
selection.add('miniIso', (
(leadinglep_miso < 0.1).any()
))
selection.add('miniIsoInv', (
(leadinglep_miso >= 0.1).any()
))
jet_lep_p4 = lep_ak8_pair.i0 + lep_ak8_pair.i1
met_jl_pair = met_p4.cross(jet_lep_p4)#, nested=True)
jet_lep_met_p4 = met_jl_pair.i0 + met_jl_pair.i1
jet_met_p4 = ak8_met_pair.i0[best_jet_idx] + ak8_met_pair.i1[best_jet_idx]
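        # Gen-level information and event weights: data gets placeholder zeros, while MC
        # gets the generator weight, pileup reweighting, the V+jets NLO k-factor, the
        # matched boson flavour and the gen H->tautau decay categorisation.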
if isRealData:
genflavor = candidatejet.pt.zeros_like()
w_hadhad = deepcopy(weights)
w_hadel = deepcopy(weights)
w_hadmu = deepcopy(weights)
genHTauTauDecay = candidatejet.pt.zeros_like()
genHadTau1Decay = candidatejet.pt.zeros_like()
            genHadTau2Decay = candidatejet.pt.zeros_like()
gentautaudecay = candidatejet.pt.zeros_like()
else:
weights.add('genweight', events.genWeight)
add_pileup_weight(weights, events.Pileup.nPU, self._year, dataset)
bosons = getBosons(events)
genBosonPt = bosons.pt.pad(1, clip=True).fillna(0)
add_VJets_NLOkFactor(weights, genBosonPt, self._year, dataset)
genflavor = matchedBosonFlavor(candidatejet, bosons)
genHTauTauDecay, genHadTau1Decay, genHadTau2Decay = getHTauTauDecayInfo(events)
gentautaudecay = awkward.JaggedArray.fromiter([[v] for v in genHTauTauDecay])
w_hadhad = deepcopy(weights)
w_hadel = deepcopy(weights)
w_hadmu = deepcopy(weights)
#add_TriggerWeight(w_hadhad, candidatejet.msdcorr, candidatejet.pt, leadinglep.pt, self._year, "hadhad")
#add_TriggerWeight(w_hadel, candidatejet.msdcorr, candidatejet.pt, leadinglep.pt, self._year, "hadel")
#add_TriggerWeight(w_hadmu, candidatejet.msdcorr, candidatejet.pt, leadinglep.pt, self._year, "hadmu")
#output['btagWeight'].fill(dataset=dataset, val=self._btagSF.addBtagWeight(weights, ak4_away)) #FIXME
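        # Region definitions: each region is the logical AND of the listed selection cuts.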
regions = {
'hadhad_signal': ['jetacceptance450', 'hadhad_trigger', 'jetid', 'antiak4btagMediumOppHem', 'met', 'noleptons', 'antiLepId'],
'hadhad_cr_mu': ['jetacceptance400', 'hadmu_trigger', 'jetid', 'ak4btagMedium08', 'met', 'onemuon', 'muonkinhard', 'muonDphiAK8','antiLepId'],#,'jetlsf'],
'hadmu_signal': ['jetacceptance', 'hadmu_trigger', 'jetid', 'antiak4btagMediumOppHem', 'met', 'onemuon', 'muonkin', 'lepDrAK8', 'antiLepId', 'mt_lepmet', 'miniIso'],#, 'jetlsf'],
'hadel_signal': ['jetacceptance', 'hadel_trigger', 'jetid', 'antiak4btagMediumOppHem', 'met', 'oneelec', 'eleckin', 'lepDrAK8', 'antiLepId', 'mt_lepmet', 'miniIso'],#, 'jetlsf'],
'hadmu_cr_qcd': ['jetacceptance', 'hadmu_trigger', 'jetid', 'antiak4btagMediumOppHem', 'met', 'onemuon', 'muonkin', 'lepDrAK8', 'antiLepId', 'mt_lepmet', 'miniIsoInv'],#,'jetlsf'],
'hadel_cr_qcd': ['jetacceptance', 'hadel_trigger', 'jetid', 'antiak4btagMediumOppHem', 'met', 'oneelec', 'eleckin', 'lepDrAK8', 'antiLepId', 'mt_lepmet', 'miniIsoInv'],#,'jetlsf'],
'hadmu_cr_b': ['jetacceptance', 'hadmu_trigger', 'jetid', 'ak4btagMedium08', 'met', 'onemuon', 'muonkin', 'lepDrAK8', 'antiLepId', 'mt_lepmet', 'miniIso'],#,'jetlsf'],
'hadel_cr_b': ['jetacceptance', 'hadel_trigger', 'jetid', 'ak4btagMedium08', 'met', 'oneelec', 'eleckin', 'lepDrAK8', 'antiLepId', 'mt_lepmet', 'miniIso'],#,'jetlsf'],
'hadmu_cr_w': ['jetacceptance', 'hadmu_trigger', 'jetid', 'antiak4btagMediumOppHem', 'met', 'onemuon', 'muonkin', 'lepDrAK8', 'antiLepId', 'mt_lepmetInv', 'miniIso'],#,'jetlsf'],
'hadel_cr_w': ['jetacceptance', 'hadel_trigger', 'jetid', 'antiak4btagMediumOppHem', 'met', 'oneelec', 'eleckin', 'lepDrAK8', 'antiLepId', 'mt_lepmetInv', 'miniIso'],#,'jetlsf'],
#'noselection': [],
}
w_dict = {
'hadhad_signal': w_hadhad,
'hadhad_cr_mu': w_hadmu,
'hadmu_signal': w_hadmu,
'hadel_signal': w_hadel,
'hadmu_cr_qcd': w_hadmu,
'hadel_cr_qcd': w_hadel,
'hadmu_cr_b': w_hadmu,
'hadel_cr_b': w_hadel,
'hadmu_cr_w': w_hadmu,
'hadel_cr_w': w_hadel,
}
allcuts_hadel = set()
allcuts_hadmu = set()
allcuts_hadel_cr_b = set()
allcuts_hadmu_cr_b = set()
allcuts_hadel_cr_w = set()
allcuts_hadmu_cr_w = set()
allcuts_hadel_cr_qcd = set()
allcuts_hadmu_cr_qcd = set()
allcuts_hadhad = set()
allcuts_hadhad_cr_mu = set()
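        # Cumulative weighted cutflows: region cuts are added one at a time and the
        # surviving weighted yield is recorded after each cut.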
output['cutflow_hadel'][dataset]['none'] += float(w_dict['hadel_signal'].weight().sum())
output['cutflow_hadmu'][dataset]['none'] += float(w_dict['hadmu_signal'].weight().sum())
output['cutflow_hadel_cr_b'][dataset]['none'] += float(w_dict['hadel_cr_b'].weight().sum())
output['cutflow_hadmu_cr_b'][dataset]['none'] += float(w_dict['hadmu_cr_b'].weight().sum())
output['cutflow_hadel_cr_w'][dataset]['none'] += float(w_dict['hadel_cr_w'].weight().sum())
output['cutflow_hadmu_cr_w'][dataset]['none'] += float(w_dict['hadmu_cr_w'].weight().sum())
output['cutflow_hadel_cr_qcd'][dataset]['none'] += float(w_dict['hadel_cr_qcd'].weight().sum())
output['cutflow_hadmu_cr_qcd'][dataset]['none'] += float(w_dict['hadmu_cr_qcd'].weight().sum())
output['cutflow_hadhad'][dataset]['none'] += float(w_dict['hadhad_signal'].weight().sum())
output['cutflow_hadhad_cr_mu'][dataset]['none'] += float(w_dict['hadhad_cr_mu'].weight().sum())
for cut in regions['hadel_signal']:
allcuts_hadel.add(cut)
output['cutflow_hadel'][dataset][cut] += float(w_dict['hadel_signal'].weight()[selection.all(*allcuts_hadel)].sum())
for cut in regions['hadmu_signal']:
allcuts_hadmu.add(cut)
output['cutflow_hadmu'][dataset][cut] += float(w_dict['hadmu_signal'].weight()[selection.all(*allcuts_hadmu)].sum())
for cut in regions['hadel_cr_b']:
allcuts_hadel_cr_b.add(cut)
output['cutflow_hadel_cr_b'][dataset][cut] += float(w_dict['hadel_cr_b'].weight()[selection.all(*allcuts_hadel_cr_b)].sum())
for cut in regions['hadmu_cr_b']:
allcuts_hadmu_cr_b.add(cut)
output['cutflow_hadmu_cr_b'][dataset][cut] += float(w_dict['hadmu_cr_b'].weight()[selection.all(*allcuts_hadmu_cr_b)].sum())
for cut in regions['hadel_cr_w']:
allcuts_hadel_cr_w.add(cut)
output['cutflow_hadel_cr_w'][dataset][cut] += float(w_dict['hadel_cr_w'].weight()[selection.all(*allcuts_hadel_cr_w)].sum())
for cut in regions['hadmu_cr_w']:
allcuts_hadmu_cr_w.add(cut)
output['cutflow_hadmu_cr_w'][dataset][cut] += float(w_dict['hadmu_cr_w'].weight()[selection.all(*allcuts_hadmu_cr_w)].sum())
for cut in regions['hadel_cr_qcd']:
allcuts_hadel_cr_qcd.add(cut)
output['cutflow_hadel_cr_qcd'][dataset][cut] += float(w_dict['hadel_cr_qcd'].weight()[selection.all(*allcuts_hadel_cr_qcd)].sum())
for cut in regions['hadmu_cr_qcd']:
allcuts_hadmu_cr_qcd.add(cut)
output['cutflow_hadmu_cr_qcd'][dataset][cut] += float(w_dict['hadmu_cr_qcd'].weight()[selection.all(*allcuts_hadmu_cr_qcd)].sum())
for cut in regions['hadhad_signal']:
allcuts_hadhad.add(cut)
output['cutflow_hadhad'][dataset][cut] += float(w_dict['hadhad_signal'].weight()[selection.all(*allcuts_hadhad)].sum())
for cut in regions['hadhad_cr_mu']:
allcuts_hadhad_cr_mu.add(cut)
output['cutflow_hadhad_cr_mu'][dataset][cut] += float(w_dict['hadhad_cr_mu'].weight()[selection.all(*allcuts_hadhad_cr_mu)].sum())
systematics = [
None,
#'jet_triggerUp',
#'jet_triggerDown',
#'btagWeightUp',
#'btagWeightDown',
#'btagEffStatUp',
#'btagEffStatDown',
]
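        # Fill all output histograms for one region and one systematic variation;
        # normalize() applies the region cut and flattens jagged per-event values.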
def fill(region, systematic, wmod=None):
selections = regions[region]
cut = selection.all(*selections)
sname = 'nominal' if systematic is None else systematic
if wmod is None:
weight = w_dict[region].weight(modifier=systematic)[cut]
else:
weight = w_dict[region].weight()[cut] * wmod[cut]
def normalize(val):
return val[cut].pad(1, clip=True).fillna(0).flatten()
if 'hadhad' in region:
nn_disc = nn_disc_hadhad
if 'hadel' in region:
nn_disc = nn_disc_hadel
if 'hadmu' in region:
nn_disc = nn_disc_hadmu
output['jet_kin'].fill(
dataset=dataset,
region=region,
jet_pt=normalize(candidatejet.pt),
jet_eta=normalize(candidatejet.eta),
jet_msd=normalize(candidatejet.msdcorr),
weight=weight,
)
bmaxind = ak4_opposite.btagDeepB.argmax()
output['b_kin'].fill(
dataset=dataset,
region=region,
jet_pt=normalize(candidatejet.pt),
oppbjet_pt=normalize(ak4_opposite[bmaxind].pt),
oppbtag=normalize(ak4_opposite[bmaxind].btagDeepB),
weight=weight,
)
output['lep_kin'].fill(
dataset=dataset,
region=region,
lep_pt=normalize(leadinglep.pt),
#lep_eta=normalize(leadinglep.eta),
#lsf3=normalize(candidatejet.lsf3),
lep_jet_dr=normalize(lep_ak8_pair.i0.delta_r(lep_ak8_pair.i1)),
miso=normalize(leadinglep_miso),
weight=weight,
)
output['mass_kin'].fill(
dataset=dataset,
region=region,
jet_pt=normalize(candidatejet.pt),
jet_msd=normalize(candidatejet.msdcorr),
genhtt=normalize(gentautaudecay),
#jetlep_m=normalize(jet_lep_p4.mass),
#jetmet_m=normalize(jet_met_p4.mass),
#jetlepmet_m=normalize(jet_lep_met_p4.mass),
weight=weight,
)
output['evt_kin'].fill(
dataset=dataset,
region=region,
met_pt=normalize(met_p4.pt),
lep_pt=normalize(leadinglep.pt),
jet_pt=normalize(candidatejet.pt),
#h_pt=normalize(bosons[events.GenPart.pdgId==25].pt),
weight=weight,
)
for region in regions:
for systematic in systematics:
fill(region, systematic)
# if 'GluGluHToTauTau' in dataset:
# for i in range(9):
# fill(region, 'LHEScale_%d' % i, events.LHEScaleWeight[:, i])
# for c in events.LHEWeight.columns[1:]:
# fill(region, 'LHEWeight_%s' % c, events.LHEWeight[c])
return output
def postprocess(self, accumulator):
return accumulator
| [
"[email protected]"
] | |
eab876d87c22914ef3b369338b868419b7af5f42 | cb94a4cdd7a9df17f9c6f1a03f8f4ff12c916cf3 | /Learning_Python_Generators/Exercise_Files/Ch3/03_04/coroutine_decorator.py | a55630fb3f470ad6df497cd864cbf4893c44f0e7 | [] | no_license | sedstan/LinkedIn-Learning-Python-Course | 2b936d0f00703a6e66a872220ed47572123dc7fd | b4584218355bf07aa3d2939b950911eae67adb0b | refs/heads/master | 2021-10-11T10:19:13.675662 | 2019-01-24T17:55:20 | 2019-01-24T17:55:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | py |
def coroutine_decorator(func):
def wrap(*args, **kwargs):
cr = func(*args, **kwargs)
        # advance the coroutine to its first yield so it is ready to receive values;
        # next(cr) works on both Python 2 and 3 (cr.next() exists only on Python 2)
        next(cr)
return cr
return wrap
@coroutine_decorator
def coroutine_example():
while True:
x = yield
#do something with x
print (x)
| [
"[email protected]"
] | |
f87cd3733e9279862ac119d90af6bf4ea02c10ae | 627cca9406c31ce30c493ff7502f79eb4c57eee3 | /xcha/wallet/lineage_proof.py | ca16b4fed04cad1847319ac28818c391c418eb37 | [
"Apache-2.0"
] | permissive | blockchiansea/xcha-blockchain | 40c6d36813f671e94316a522904238f495f39f6b | 7de0ba89056236e30069aef12fe25843f6093bcf | refs/heads/master | 2023-07-26T02:36:57.654196 | 2021-09-06T06:04:21 | 2021-09-06T06:04:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | from dataclasses import dataclass
from typing import Optional
from xcha.types.blockchain_format.sized_bytes import bytes32
from xcha.util.ints import uint64
from xcha.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class LineageProof(Streamable):
parent_name: bytes32
inner_puzzle_hash: Optional[bytes32]
amount: uint64
| [
"[email protected]"
] | |
71dc32b622749cfff8398ad2dde76627857ea6a3 | 17e3234ab01fd93233cc453f1495d50424c3bd8f | /latte/dashboard/doctype/dashboard_data_slice/__init__.py | b3c7ec22aa81e9680d69eb7ddacef402ae0c4349 | [
"MIT"
] | permissive | sunnyakaxd/latte | 8943dbf70ce934e04e51b147a54e6dd02dfe43db | de74065122a1f858bd75f8e1a36fca3b23981f4c | refs/heads/master | 2023-06-11T10:25:31.217047 | 2021-07-06T06:40:19 | 2021-07-06T06:40:19 | 383,363,137 | 0 | 0 | NOASSERTION | 2021-07-06T06:26:49 | 2021-07-06T06:26:49 | null | UTF-8 | Python | false | false | 1,568 | py | import frappe
import jwt
import time
from latte.utils.caching import cache_me_if_you_can
@frappe.whitelist()
def run(slice_name=None, data_source_name=None, filters=None):
return run_cached(slice_name, data_source_name, filters)
# @cache_me_if_you_can(expiry=20)
def run_cached(slice_name=None, data_source_name=None, filters=None):
if not slice_name:
frappe.throw('Dashboard Name Required')
dataslice_doc = frappe.get_doc('Dashboard Data Slice', slice_name)
response, status = dataslice_doc.execute(data_source_name, filters)
return frappe._dict({
'response': response,
'status': status
})
@frappe.whitelist()
def get_metabase_url(name, resource_type, metabase_site_url=None, metabase_secret_key=None):
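    # Build a signed Metabase embed URL: the resource id and a 100-minute expiry are
    # encoded into a JWT signed with the configured secret key.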
if frappe.conf.metabase_site_url:
metabase_site_url = frappe.conf.metabase_site_url
if frappe.conf.metabase_secret_key:
metabase_secret_key = frappe.conf.metabase_secret_key
payload = {
'resource': {resource_type: int(name)},
'params': {},
'exp': round(time.time()) + (60 * 100) # 100 minute expiration
}
token = jwt.encode(payload, metabase_secret_key, algorithm='HS256')
iframeUrl = metabase_site_url + '/embed/'+ resource_type +'/' + token.decode('utf8') + '#bordered=true&titled=false'
return iframeUrl
@frappe.whitelist()
def save_chart_config(data_slice, config):
data_slice_doc = frappe.get_doc("Dashboard Data Slice", data_slice)
data_slice_doc.chart_default_config = config
data_slice_doc.save()
return "Success" | [
"[email protected]"
] | |
77ee9b20a0817d31a073534a684b32b631dcca13 | e41651d8f9b5d260b800136672c70cb85c3b80ff | /Notification_System/temboo/Library/Google/Gmail/InboxFeed.py | 8764414118792dd2115f21eea4921bec6e6e5563 | [] | no_license | shriswissfed/GPS-tracking-system | 43e667fe3d00aa8e65e86d50a4f776fcb06e8c5c | 1c5e90a483386bd2e5c5f48f7c5b306cd5f17965 | refs/heads/master | 2020-05-23T03:06:46.484473 | 2018-10-03T08:50:00 | 2018-10-03T08:50:00 | 55,578,217 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,883 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# InboxFeed
# Allows you to access a read-only Gmail feed that contains a list of unread emails.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class InboxFeed(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the InboxFeed Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(InboxFeed, self).__init__(temboo_session, '/Library/Google/Gmail/InboxFeed')
def new_input_set(self):
return InboxFeedInputSet()
def _make_result_set(self, result, path):
return InboxFeedResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return InboxFeedChoreographyExecution(session, exec_id, path)
class InboxFeedInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the InboxFeed
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((optional, string) A valid Access Token retrieved during the OAuth process. This is required unless you provide the ClientID, ClientSecret, and RefreshToken to generate a new Access Token.)
"""
super(InboxFeedInputSet, self)._set_input('AccessToken', value)
def set_ClientID(self, value):
"""
Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Google. Required unless providing a valid AccessToken.)
"""
super(InboxFeedInputSet, self)._set_input('ClientID', value)
def set_ClientSecret(self, value):
"""
Set the value of the ClientSecret input for this Choreo. ((conditional, string) The Client Secret provided by Google. Required unless providing a valid AccessToken.)
"""
super(InboxFeedInputSet, self)._set_input('ClientSecret', value)
def set_Label(self, value):
"""
Set the value of the Label input for this Choreo. ((optional, string) The name of a Gmail Label to retrieve messages from (e.g., important, starred, sent, junk-e-mail, all).)
"""
super(InboxFeedInputSet, self)._set_input('Label', value)
def set_Mode(self, value):
"""
Set the value of the Mode input for this Choreo. ((optional, string) Used when an XPath query is provided. Valid values are "select" or "recursive". Select mode will return the first match of the query. In recursive mode, the XPath query will be applied within a loop.)
"""
super(InboxFeedInputSet, self)._set_input('Mode', value)
def set_Password(self, value):
"""
Set the value of the Password input for this Choreo. ((optional, password) A Google App-specific password that you've generated after enabling 2-Step Verification (Note: authenticating with OAuth credentials is the preferred authentication method).)
"""
super(InboxFeedInputSet, self)._set_input('Password', value)
def set_RefreshToken(self, value):
"""
Set the value of the RefreshToken input for this Choreo. ((conditional, string) An OAuth Refresh Token used to generate a new Access Token when the original token is expired. Required unless providing a valid AccessToken.)
"""
super(InboxFeedInputSet, self)._set_input('RefreshToken', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format for the response. Valid values are JSON and XML. This will be ignored when providng an XPath query because results are returned as a string or JSON depending on the Mode specified.)
"""
super(InboxFeedInputSet, self)._set_input('ResponseFormat', value)
def set_Username(self, value):
"""
Set the value of the Username input for this Choreo. ((optional, string) Your full Google email address e.g., [email protected] (Note: authenticating with OAuth credentials is the preferred authentication method).)
"""
super(InboxFeedInputSet, self)._set_input('Username', value)
def set_XPath(self, value):
"""
Set the value of the XPath input for this Choreo. ((optional, string) An XPATH query to run.)
"""
super(InboxFeedInputSet, self)._set_input('XPath', value)
class InboxFeedResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the InboxFeed Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_FullCount(self):
"""
Retrieve the value for the "FullCount" output from this Choreo execution. ((integer) The number of unread messages. This is parsed from the Google XML response. Note that when using the Label input to retrieve messages from a particular Gmail label, the full count element may be 0.)
"""
return self._output.get('FullCount', None)
def get_NewAccessToken(self):
"""
Retrieve the value for the "NewAccessToken" output from this Choreo execution. ((string) Contains a new AccessToken when the RefreshToken is provided.)
"""
return self._output.get('NewAccessToken', None)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Google. This will contain the data from the Gmail feed, or if the XPath input is provided, it will contain the result of the XPath query.)
"""
return self._output.get('Response', None)
class InboxFeedChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return InboxFeedResultSet(response, path)
| [
"[email protected]"
] | |
97d98121e3aad07d0cc73ab82f9883743bfc3be7 | 556db265723b0cc30ad2917442ed6dad92fd9044 | /tensorflow/python/ops/ragged/ragged_print_op_test.py | 2b612d463d0eb70eebb13c4486f0d5d159173771 | [
"MIT",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | graphcore/tensorflow | c1669b489be0e045b3ec856b311b3139858de196 | 085b20a4b6287eff8c0b792425d52422ab8cbab3 | refs/heads/r2.6/sdk-release-3.2 | 2023-07-06T06:23:53.857743 | 2023-03-14T13:04:04 | 2023-03-14T13:48:43 | 162,717,602 | 84 | 17 | Apache-2.0 | 2023-03-25T01:13:37 | 2018-12-21T13:30:38 | C++ | UTF-8 | Python | false | false | 7,777 | py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.print with ragged tensors.
Note: ragged support for tf.print is implemented by RaggedPrintV2Dispatcher in
ragged_dispatch.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import tempfile
from absl.testing import parameterized
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_string_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedPrintV2Test(test_util.TensorFlowTestCase, parameterized.TestCase):
# pylint: disable=g-long-lambda
@parameterized.named_parameters([
dict(
testcase_name='2d_int_values',
inputs=lambda: [ragged_factory_ops.constant([[1, 2], [3]])],
expected='[[1, 2], [3]]\n'),
dict(
testcase_name='3d_int_values',
inputs=lambda: [ragged_factory_ops.constant([[[1, 2], [3]], [[4]]])],
expected='[[[1, 2], [3]], [[4]]]\n'),
dict(
testcase_name='2d_str_values',
inputs=lambda: [ragged_factory_ops.constant([['a', 'b'], ['c']])],
expected="[['a', 'b'], ['c']]\n"),
dict(
testcase_name='2d_str_values_with_escaping',
inputs=lambda: [ragged_factory_ops.constant([["a'b"], ['c"d']])],
expected="[['a\\'b'], ['c\"d']]\n"),
dict(
testcase_name='two_ragged_values',
inputs=lambda: [
ragged_factory_ops.constant([[1, 2], [3]]),
ragged_factory_ops.constant([[5], [], [6, 7, 8]])
],
expected='[[1, 2], [3]] [[5], [], [6, 7, 8]]\n'),
dict(
testcase_name='ragged_value_and_non_tensor_values',
inputs=lambda:
['a', 5, True,
ragged_factory_ops.constant([[1, 2], [3]]), 'c'],
expected='a 5 True [[1, 2], [3]] c\n'),
dict(
testcase_name='ragged_value_and_dense_value',
inputs=lambda: [
ragged_factory_ops.constant([[1, 2], [3]]),
constant_op.constant([[1, 2], [3, 4]])
],
expected='[[1, 2], [3]] [[1 2]\n [3 4]]\n'),
dict(
testcase_name='ragged_value_and_sparse_value',
inputs=lambda: [
ragged_factory_ops.constant([[1, 2], [3]]),
sparse_ops.from_dense([[1]])
],
expected=(
'[[1, 2], [3]] '
"'SparseTensor(indices=[[0 0]], values=[1], shape=[1 1])'\n")),
dict(
testcase_name='summarize_default',
inputs=lambda: [
ragged_factory_ops.constant([[1, 2, 3, 4, 5, 6, 7, 8, 9], [10], [
], [], [], [], [11, 12]])
],
expected=('[[1, 2, 3, ..., 7, 8, 9], [10], [], '
'..., '
'[], [], [11, 12]]\n')),
dict(
testcase_name='summarize_2',
inputs=lambda: [
ragged_factory_ops.constant([[1, 2, 3, 4, 5, 6, 7, 8, 9], [10], [
], [], [], [], [11, 12]])
],
summarize=2,
expected='[[1, 2, ..., 8, 9], [10], ..., [], [11, 12]]\n'),
dict(
testcase_name='summarize_neg1',
inputs=lambda: [
ragged_factory_ops.constant([[1, 2, 3, 4, 5, 6, 7, 8, 9], [10], [
], [], [], [], [11, 12]])
],
summarize=-1,
expected=('[[1, 2, 3, 4, 5, 6, 7, 8, 9], [10], '
'[], [], [], [], [11, 12]]\n')),
])
def testRaggedPrint(self, inputs, expected, summarize=None):
if callable(inputs):
inputs = inputs()
with tempfile.TemporaryDirectory() as tmpdirname:
path = os.path.join(tmpdirname, 'print_output')
kwargs = {'output_stream': 'file://{}'.format(path)}
if summarize is not None:
kwargs.update(summarize=summarize)
self.evaluate(logging_ops.print_v2(*inputs, **kwargs))
actual = open(path, 'r').read()
self.assertEqual(repr(actual), repr(expected))
@test_util.run_all_in_graph_and_eager_modes
class RaggedToStringTest(test_util.TensorFlowTestCase, parameterized.TestCase):
@parameterized.named_parameters([
('2d_int', [[1, 2], [], [3, 4, 5]], '[[1, 2], [], [3, 4, 5]]'),
('2d_str', [['a'], ['b'], ['c', 'd']], "[['a'], ['b'], ['c', 'd']]"),
('3d_int', [[[1, 2], []], [[3, 4, 5]]], '[[[1, 2], []], [[3, 4, 5]]]'),
('escape', [["a'b"], [r'c\d']], r"[['a\'b'], ['c\\d']]"),
dict(testcase_name='2d_empty', rt=[], ragged_rank=1, expected='[]'),
dict(testcase_name='3d_empty', rt=[], ragged_rank=2, expected='[]'),
dict(
testcase_name='3d_rrank1',
rt=[[[1, 2], [3, 4]], [], [[5, 6]]],
ragged_rank=1,
expected='[[[1, 2], [3, 4]], [], [[5, 6]]]'),
dict(
testcase_name='2d_empty_row', rt=[[]], ragged_rank=1,
expected='[[]]'),
dict(
testcase_name='3d_empty_row', rt=[[]], ragged_rank=2,
expected='[[]]'),
dict(
testcase_name='summarize_1',
rt=[[1, 2, 3, 4, 5], [], [6], [7], [8, 9]],
summarize=1,
expected='[[1, ..., 5], ..., [8, 9]]'),
dict(
testcase_name='summarize_2',
rt=[[1, 2, 3, 4, 5], [], [6], [7], [8, 9]],
summarize=2,
expected='[[1, 2, ..., 4, 5], [], ..., [7], [8, 9]]'),
])
def testRaggedToString(self, rt, expected, summarize=None, ragged_rank=None):
rt = ragged_factory_ops.constant(rt, ragged_rank=ragged_rank)
actual = ragged_string_ops.ragged_tensor_to_string(rt, summarize=summarize)
self.assertAllEqual(actual, expected)
@parameterized.named_parameters([
('maxelts_BadType', [[1]], "Expected summarize .*, got 'foo'", 'foo'),
('maxelts_0', [[1]], 'Expected summarize to be .*, got 0', 0),
('maxelts_Neg2', [[1]], 'Expected summarize to be .*, got -2', -2),
])
def testRaggedToStringErrors(self,
rt,
error,
summarize=None,
exception=ValueError):
rt = ragged_factory_ops.constant(rt)
with self.assertRaisesRegex(exception, error):
self.evaluate(
ragged_string_ops.ragged_tensor_to_string(rt, summarize=summarize))
def testRaggedToStringUnknownRank(self):
@def_function.function(
input_signature=[ragged_tensor.RaggedTensorSpec(ragged_rank=1)])
def f(rt):
return ragged_string_ops.ragged_tensor_to_string(rt)
with self.assertRaisesRegex(
ValueError, 'RaggedTensor to_string requires '
'that rt.shape.rank is not None'):
f(ragged_factory_ops.constant([[1, 2], [3]]))
if __name__ == '__main__':
googletest.main()
| [
"[email protected]"
] | |
723eeca76900ebe35f0b732286e7bd9845b4ffac | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/powerbi/v20200601/__init__.py | ce0d971ca28040740542ec1f335f01eed4ab4889 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from ... import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .get_private_endpoint_connection import *
from .power_bi_resource import *
from .private_endpoint_connection import *
from ._inputs import *
from . import outputs
| [
"[email protected]"
] | |
fe1d3e30f4cdfd8651a3f9176b4620a6452e5d01 | 4a9995871447a406a7e6307a030503700cd41226 | /script/testCase/Y3me项目/人力资源/薪资核算/审批流_定调薪.py | 6a0c29d1fa8b09c17d7ae30c81a1f876d8ab676d | [] | no_license | juntaoh1234/12122003 | 96a107ce22d930e8d9517810736d8f6ce92dc7ad | 4bee39286c3708d7a0df3001e0daa9da51478170 | refs/heads/master | 2020-10-01T18:20:01.572599 | 2019-12-12T12:04:08 | 2019-12-12T12:04:08 | 227,596,967 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,960 | py | # -*-CodeIng:utf-8 -*-
# @time :2019/10/31 20:02
# @author:HuangJunTao
# @email:[email protected]
# @file:审批流_薪资.py
# @SoftWare:PyCharm
from time import sleep
from SRC.common.decorator import codeException_dec
from SRC.unittest.case import TestCase
from SRC.webdriver.switchTo import SwitchTo
from script.common import utils
from selenium.webdriver import ActionChains
class EasyCase(TestCase):
str_regular = None
def __init__(self, webDriver, paramsList):
        # Do not modify this method 124421
super(EasyCase, self).__init__(webDriver, paramsList)
@codeException_dec('3')
def runTest(self):
driver = self.getDriver()
driver.implicitly_wait(30)
param = self.param
tool = utils
driver.refresh()
# driver.find_element_by_xpath('//*[@id="_dropdown_popcontainer"]/div/i').click()
# driver.find_element_by_xpath('//*[@id="home_header"]//div[text()="UI测试专属"]').click()
# driver.find_element_by_xpath(
# '//button[@class="u-button btn__style___37bsb u8c_primary__style___RFibc btn__style___20DQM "]').click()
# sleep(2)
        # Common menu node in the top-left corner
driver.find_element_by_class_name('lebra-navbar-left-icon').click()
sleep(2)
        # # Enter social collaboration
# driver.find_element_by_xpath('//*[text()="数字化建模"]').click()
# sleep(2)
        # Enter the first-level node
menu2 = driver.find_element_by_css_selector('span[title="流程管理"]')
actions = ActionChains(driver)
actions.move_to_element(menu2)
actions.click(menu2)
actions.perform()
sleep(1)
        # Enter the second-level node
driver.find_element_by_xpath('//li[@title="模型管理"]').click()
sleep(1)
        # Switch to the model management iframe
iframe = driver.find_element_by_id('XTLCZX0006')
# driver.switch_to.frame(iframe)
SwitchTo(driver).frame(iframe)
sleep(1)
        # Click the node in the tree on the left
driver.find_element_by_xpath('//*[@id="app"]//span[text()="人力资源"]').click()
sleep(1)
        # Click the node in the tree on the left
driver.find_element_by_xpath('//*[@id="app"]//span[text()="薪资核算"]').click()
sleep(1)
wins0 = driver.window_handles
        # Select the salary setting/adjustment (定调薪) node
driver.find_element_by_xpath('//*[@id="app"]//span[text()="定调薪"]').click()
        # Click the Add button
driver.find_element_by_xpath('//div[@class="btns-wrapper"]//button[1]').click()
sleep(2)
        # Enter the workflow name
driver.find_element_by_xpath('//label[text()="名称"]/following-sibling::div//input').send_keys('定调薪流程')
        # Enter the remark text
driver.find_element_by_xpath('//textarea').send_keys("备注信息0002")
        # Click the OK button
driver.find_element_by_xpath(
'//span[text()="新增流程模型"]/ancestor::div[@class="el-dialog__wrapper"]//button[2]').click()
sleep(1)
        # Assert that creation succeeded
self.assertEqual("创建成功", driver.find_element_by_xpath('//p[text()="创建成功"]').text)
        # Click the Design button
driver.find_element_by_xpath(
'//*[@id="app"]//table[@class="el-table__body"]/tbody/tr/td[4]/div/span[2]').click()
        # Switch to the newly opened window
wins = driver.window_handles
driver.switch_to_window(wins[-1])
sleep(2)
        # Double-click the supervisor approval node
# driver.find_element_by_xpath('//*[@id="app"]/div/div/div/div[1]/div[2]/button[2]').click()
# driver.find_element_by_xpath('//span[@title="主管审批"]').doubleClick()
driver.find_element_by_xpath('//div[@id="designer"]//div/span[1]/span').doubleClick()
sleep(2)
        # Enter the node name
driver.find_element_by_xpath('//input[@placeholder="请输入流程环节名称"]').send_keys("提交")
        # Move the scrollbar
action = ActionChains(driver)
ele = driver.find_element_by_xpath(
'//*[@id="app"]/div/div/div/div[4]/div/div/div[2]/div[2]/div[1]/div/div[4]/div')
action.drag_and_drop_by_offset(ele, 1, 110)
action.perform()
sleep(1)
        # Select the initiator of the approval flow
driver.find_element_by_xpath(
'//*[@id="app"]/div/div/div/div[4]/div/div/div[2]/div[2]/div[1]/div/div[2]/div[2]/div[2]/label[1]/span[2]').click()
        # Click Save
driver.find_element_by_xpath('//button[@class="yy-btn yy-btn-primary"]').click()
        # Click Save and Publish
driver.find_element_by_xpath('//button[@class="right-run yy-btn yy-btn-primary"]').click()
driver.close()
        # Switch back to the original window
win1 = driver.window_handles
driver.switch_to_window(win1[0])
        # Close the current page
sleep(2)
driver.switch_to.default_content()
sleep(1)
driver.find_element_by_xpath('//*[@id="home_header"]/div/div[3]/ul/li/div').click() | [
"[email protected]"
] | |
dba9081536b727de5f6fa261fcca19e44c3acbc6 | 5b4312ddc24f29538dce0444b7be81e17191c005 | /autoware.ai/1.12.0/devel/lib/python2.7/dist-packages/vector_map_msgs/msg/_LaneArray.py | 75571a42fa4d6bf0fb1530dd40fb1e960b288b9f | [
"MIT"
] | permissive | muyangren907/autoware | b842f1aeb2bfe7913fb2be002ea4fc426b4e9be2 | 5ae70f0cdaf5fc70b91cd727cf5b5f90bc399d38 | refs/heads/master | 2020-09-22T13:08:14.237380 | 2019-12-03T07:12:49 | 2019-12-03T07:12:49 | 225,167,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | /home/myr907/autoware.ai/devel/.private/vector_map_msgs/lib/python2.7/dist-packages/vector_map_msgs/msg/_LaneArray.py | [
"[email protected]"
] | |
17a92b1f3e8481c39a88bfcce5206a41d042f85e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03003/s061738428.py | e296bd1653a5eb16609ce574bdaa3369516bc4d2 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,199 | py | #ABC130-E Common Subsequence
"""
Problem:
Two integer sequences s and t are given.
Count the pairs of subsequences of s and t that are equal, including the empty subsequence.
Approach:
This is the "sum" version of LCS instead of the "max" version, and
since it is a counting problem, double counting has to be eliminated.
Concretely, with
dp[i][j]: the state after looking at the first i elements of s and the first j elements of t,
dp0: the total when transitions go horizontally (the j direction) first and only then vertically (the i direction),
dp1: the total over both the vertical and the horizontal transitions,
the answer is dp1[-1][-1].
"""
import sys
readline = sys.stdin.buffer.readline
def even(n): return 1 if n%2==0 else 0
n,m = map(int,readline().split())
s = list(map(int,readline().split())) + [-1]
t = list(map(int,readline().split())) + [-2]
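# Distinct sentinels (-1 and -2) are appended so s[i] == t[j] can never hold at the padded ends.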
mod = 10**9+7
#dp table
dp0 = [[0]*(m+2) for _ in range(n+2)]
dp1 = [[0]*(m+2) for _ in range(n+2)]
dp0[0][0] = 1
#process1
for i in range(n+1):
for j in range(m+1):
dp0[i+1][j] += dp0[i][j]%mod
dp1[i][j] += dp0[i][j]%mod
dp1[i][j+1] += dp1[i][j]%mod
if s[i] == t[j]:
dp0[i+1][j+1] += dp1[i][j]%mod
print(dp1[n][m]%mod)
| [
"[email protected]"
] | |
d6c44a3ffa12b15844a15eec93e1fb8552b1bdaa | c09decad4cb64b151913c25192eaa13348f63833 | /228_middle_of_linked_list.py | a6d7fca7e7cbd173247de187ff3f6bccce225a7d | [] | no_license | YI-DING/Lintcode | ef890e53282b9668064a7306329ecd0599dd114b | 89a4cf703eb7a79bd62b6cc41f87242489692b88 | refs/heads/master | 2020-06-17T12:57:44.184391 | 2019-07-19T16:31:32 | 2019-07-19T16:31:32 | 195,931,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | """
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
"""
class Solution:
"""
@param head: the head of linked list.
@return: a middle node of the linked list
"""
def middleNode(self, head):
if not head:
return None
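        # Two-pointer walk from a shared dummy node: fast moves two steps per
        # iteration, slow moves one, so slow stops on the middle node (the left
        # middle for even-length lists).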
slow = fast = ListNode(0)
slow.next = head
while fast:
if not fast.next:
break
slow = slow.next
fast = fast.next.next
return slow
| [
"[email protected]"
] | |
500fc4e6c97a72dc6703594efe00e2d77f4fdff6 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_201/1935.py | 793ff52e55241fc12bc0b977c6c0e8c244a5f7aa | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,812 | py | from itertools import groupby
import math
"""
I KNOW ITS SLOW BUT I WANTED TO TRY THIS SOLUTION ANYWAY
"""
FREE = False
OCCUPIED = True
def get_stall_value(stalls, stall_index):
left_free_space = right_free_space = 0
tmp_index = stall_index
while True:
tmp_index -= 1
if stalls[tmp_index] == OCCUPIED:
break
left_free_space += 1
tmp_index = stall_index
while True:
tmp_index += 1
if stalls[tmp_index] == OCCUPIED:
break
right_free_space += 1
return left_free_space, right_free_space
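# Simulate one person entering: take the longest free run of stalls, sit in its middle,
# and return the larger/smaller numbers of free stalls left on either side.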
def go_into_next_stall(stalls):
final_index = 0
grouped = groupby(stalls)
max_len = 0
for key, group in groupby(stalls):
if key == FREE:
max_len = max(max_len, len(list(group)))
for key, group in grouped:
group = list(group)
group_len = len(group)
if key == OCCUPIED or group_len != max_len:
final_index += group_len
else:
final_index += int((group_len - 1) / 2)
l_val, r_val = math.ceil((group_len - 1) / 2), math.floor((group_len - 1) / 2)
break
stalls[final_index] = OCCUPIED
return l_val, r_val
def get_values(nbr_stalls, nbr_people):
stalls = [FREE] * nbr_stalls
stalls = [OCCUPIED] + stalls + [OCCUPIED]
for people in range(nbr_people):
l_val, r_val = go_into_next_stall(stalls)
return l_val, r_val
def main():
nbr_rows = int(input())
for nbr_row in range(1, nbr_rows + 1):
nbr_stalls, nbr_people = map(int, input().split())
l_val, r_val = get_values(nbr_stalls, nbr_people)
print("Case #{nbr_rows}: {l_val} {r_val}".format(
nbr_rows=nbr_row, l_val=l_val, r_val=r_val))
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
aa358d4290d3d085e65216cf41db3ad6bfd413da | 3888104cebd79de74f33dda628505b491e32be09 | /lcs4.py | f5845eba44ced2722b53996fef018e3d81623f78 | [] | no_license | babiswas/Dynamic-Programming | 788f7c35aa927228a728da6025657554487285f5 | 957e150577fd5bbccde33cb393c78dcad07860c1 | refs/heads/master | 2022-12-24T05:24:03.282098 | 2020-09-27T14:55:05 | 2020-09-27T14:55:05 | 299,054,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | def lcs(str1,str2,m,n,T):
for i in range(m+1):
for j in range(n+1):
if i==0 or j==0:
T[i][j]=0
for i in range(1,m+1):
for j in range(1,n+1):
if str1[i-1]==str2[j-1]:
T[i][j]=1+T[i-1][j-1]
elif str1[i-1]!=str2[j-1]:
T[i][j]=max(T[i-1][j],T[i][j-1])
return T[m][n]
def lcs_util(str1,str2,m,n):
T=[[-1 for i in range(n+1)] for j in range(m+1)]
return lcs(str1,str2,m,n,T)
if __name__=="__main__":
print(lcs_util("abcdgh","abedfhr",len("abcdgh"),len("abedfhr")))
| [
"[email protected]"
] | |
67f9eb131a4fe209142b2e9cde4c78e0d5898318 | f0cddf6fb1b58f4e80e169eda4897a3ab864cd48 | /login/app.py | aee1a989b6b7a52299d82633094ed169ca07511c | [] | no_license | skrstv123/LEARNING-FLASK | 8a3134bf2198051601a2ff8f92df8cd2a2ed7b90 | 2d3912fd251b763deb5f7f7468d9a5e79bf7ef4f | refs/heads/master | 2022-12-10T19:06:28.623200 | 2020-01-19T20:08:28 | 2020-01-19T20:08:28 | 229,042,034 | 0 | 0 | null | 2022-12-08T03:27:21 | 2019-12-19T11:38:17 | Python | UTF-8 | Python | false | false | 2,279 | py | from myproject import app,db
from flask import render_template, redirect, request, url_for, flash,abort
from flask_login import login_user,login_required,logout_user
from myproject.models import User
from myproject.forms import LoginForm, RegistrationForm
from werkzeug.security import generate_password_hash, check_password_hash
@app.route('/')
def home():
return render_template('home.html')
@app.route('/welcome')
@login_required
def welcome_user():
return render_template('welcome_user.html')
@app.route('/logout')
@login_required
def logout():
logout_user()
flash('You logged out!')
return redirect(url_for('home'))
@app.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
# Grab the user from our User Models table
user = User.query.filter_by(email=form.email.data).first()
# Check that the user was supplied and the password is right
# The verify_password method comes from the User object
# https://stackoverflow.com/questions/2209755/python-operation-vs-is-not
if user is not None and user.check_password(form.password.data):
#Log in the user
login_user(user)
flash('Logged in successfully.')
# If a user was trying to visit a page that requires a login
# flask saves that URL as 'next'.
next = request.args.get('next')
# So let's now check if that next exists, otherwise we'll go to
# the welcome page.
if next == None or not next[0]=='/':
next = url_for('welcome_user')
return redirect(next)
return render_template('login.html', form=form)
@app.route('/register', methods=['GET', 'POST'])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email=form.email.data,
username=form.username.data,
password=form.password.data)
db.session.add(user)
db.session.commit()
flash('Thanks for registering! Now you can login!')
return redirect(url_for('login'))
return render_template('register.html', form=form)
if __name__ == '__main__':
app.run(debug=True)
| [
"[email protected]"
] | |
0ae899d23ae015fa404ce12fddaeb90360443dcc | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_374/ch26_2020_03_23_11_49_24_338094.py | cc35e8b2f881012697174064d14183bc52630ce9 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | val = int(input("Digite o valor da casa "))
sal = int(input("Digite o valor do seu salário"))
ano = int(input("Digite o tempo em anos que pretende pagar "))
calculo = (val/ano)
if calculo <= 0.3*sal:
print("Empréstimo não aprovado")
else:
print("Empréstimo aprovado") | [
"[email protected]"
] | |
d194acc581ca1a2dabbfb09e565826189cda4fbc | 1fe8d4133981e53e88abf633046060b56fae883e | /venv/lib/python3.8/site-packages/tensorflow 2/python/ops/array_ops.py | 752790e486e2c8177d53c2e0e801774b2ff01fb4 | [] | no_license | Akira331/flask-cifar10 | 6c49db8485038731ce67d23f0972b9574746c7a7 | 283e7a2867c77d4b6aba7aea9013bf241d35d76c | refs/heads/master | 2023-06-14T16:35:06.384755 | 2021-07-05T14:09:15 | 2021-07-05T14:09:15 | 382,864,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | version https://git-lfs.github.com/spec/v1
oid sha256:c3fab6161a6ca581784be67954e7b9c2792e559bfc3a79286f79410909df8ec9
size 225087
| [
"[email protected]"
] | |
0eec7a8cf3a3e4a155feae3b08a5c930173d74bf | 9452f681ea486fc53ad88d05392aed5fc450805c | /data_language_all/python/python_420.txt | f69fb14b5ec0f087ef9410f16cbb9d2d0193f595 | [] | no_license | CoryCollins/src-class | 11a6df24f4bd150f6db96ad848d7bfcac152a695 | f08a2dd917f740e05864f51ff4b994c368377f97 | refs/heads/master | 2023-08-17T11:53:28.754781 | 2021-09-27T21:13:23 | 2021-09-27T21:13:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,245 | txt | #!/usr/bin/env python
__all__ = ['baomihua_download', 'baomihua_download_by_id']
from ..common import *
import urllib
def baomihua_download_by_id(id, title=None, output_dir='.', merge=True, info_only=False, **kwargs):
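    """Resolve the direct video URL for the given flvid via getvideourl.aspx, print its info and download it unless info_only is set."""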
html = get_html('http://play.baomihua.com/getvideourl.aspx?flvid=%s&devicetype=phone_app' % id)
host = r1(r'host=([^&]*)', html)
assert host
type = r1(r'videofiletype=([^&]*)', html)
assert type
vid = r1(r'&stream_name=([^&]*)', html)
assert vid
dir_str = r1(r'&dir=([^&]*)', html).strip()
url = "http://%s/%s/%s.%s" % (host, dir_str, vid, type)
_, ext, size = url_info(url)
print_info(site_info, title, type, size)
if not info_only:
download_urls([url], title, ext, size, output_dir, merge = merge)
def baomihua_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
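    """Scrape the title and flvid from a baomihua.com video page, then delegate to baomihua_download_by_id."""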
html = get_html(url)
title = r1(r'<title>(.*)</title>', html)
assert title
id = r1(r'flvid\s*=\s*(\d+)', html)
assert id
baomihua_download_by_id(id, title, output_dir=output_dir, merge=merge, info_only=info_only)
site_info = "baomihua.com"
download = baomihua_download
download_playlist = playlist_not_supported('baomihua')
| [
"[email protected]"
] | |
2db97a28852186e87dec880bb875aaf5529e8812 | 500bca3e22bd0c30c79b74918e9847742b3c428e | /sdk/python/jobs/pipelines/1j_pipeline_with_pipeline_component/nyc_taxi_data_regression_with_pipeline_component/train_pipeline/predict_src/predict.py | fde23606901aec040fa25345734e835f96c02c9f | [
"MIT"
] | permissive | Azure/azureml-examples | 2304c862fd2e36e6640ecc4d09f69c5ed93b48ab | e5f7b247d4753f115a8f7da30cbe25294f71f9d7 | refs/heads/main | 2023-08-31T00:10:14.107509 | 2023-08-30T17:29:22 | 2023-08-30T17:29:22 | 289,334,021 | 1,219 | 1,074 | MIT | 2023-09-14T16:00:55 | 2020-08-21T18:04:26 | Jupyter Notebook | UTF-8 | Python | false | false | 2,022 | py | import argparse
import pandas as pd
import os
from pathlib import Path
from sklearn.linear_model import LinearRegression
import mlflow
mlflow.sklearn.autolog()
parser = argparse.ArgumentParser("predict")
parser.add_argument("--model_input", type=str, help="Path of input model")
parser.add_argument("--test_data", type=str, help="Path to test data")
parser.add_argument("--predictions", type=str, help="Path of predictions")
args = parser.parse_args()
print("hello scoring world...")
lines = [
f"Model path: {args.model_input}",
f"Test data path: {args.test_data}",
f"Predictions path: {args.predictions}",
]
for line in lines:
print(line)
# Load and split the test data
print("mounted_path files: ")
arr = os.listdir(args.test_data)
print(arr)
test_data = pd.read_csv(Path(args.test_data) / "test_data.csv")
testy = test_data["cost"]
# testX = test_data.drop(['cost'], axis=1)
testX = test_data[
[
"distance",
"dropoff_latitude",
"dropoff_longitude",
"passengers",
"pickup_latitude",
"pickup_longitude",
"store_forward",
"vendor",
"pickup_weekday",
"pickup_month",
"pickup_monthday",
"pickup_hour",
"pickup_minute",
"pickup_second",
"dropoff_weekday",
"dropoff_month",
"dropoff_monthday",
"dropoff_hour",
"dropoff_minute",
"dropoff_second",
]
]
print(testX.shape)
print(testX.columns)
# Load the model from input port
model = mlflow.sklearn.load_model(args.model_input)
# Make predictions on testX data and record them in a column named predicted_cost
predictions = model.predict(testX)
testX["predicted_cost"] = predictions
print(testX.shape)
# Compare predictions to actuals (testy)
output_data = pd.DataFrame(testX)
output_data["actual_cost"] = testy
# Save the output data with feature columns, predicted cost, and actual cost in csv file
output_data.to_csv(Path(args.predictions) / "predictions.csv")
| [
"[email protected]"
] | |
a6b07925ad745b8be7937bfeb0c1c2786ded3dab | e87d793b3a5facc6e54e0263fbd67703e1fbb382 | /duckietown-world-venv/lib/python3.6/site-packages/compmake/utils/system_stats.py | b91659969d09931f398a5f0c7b510618abb69b60 | [] | no_license | llingg/behaviour-benchmarking | a860bbe709309e13f3e1133d916944882199a40f | 85bbf1a9c2c628ba74480fe7abac3804d6afdac4 | refs/heads/v1 | 2022-10-06T08:21:29.068329 | 2020-06-11T07:02:46 | 2020-06-11T07:02:46 | 259,622,704 | 0 | 0 | null | 2020-06-02T17:52:46 | 2020-04-28T11:52:08 | C++ | UTF-8 | Python | false | false | 3,677 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
__all__ = [
'AvgSystemStats',
]
try:
import psutil # @UnusedImport
except ImportError:
from compmake import logger
logger.warning('Package "psutil" not found; load balancing '
'and system stats (CPU, MEM) not available.')
class AvgSystemStats(object):
""" Collects average statistics about the system using psutil. """
def __init__(self, interval, history_len):
"""
:param interval: Collect statistics according to this interval.
:param history_len: Use this many to compute avg/max statistics.
"""
self.interval = interval
self.history_len = history_len
try:
import psutil # @UnresolvedImport @Reimport
except:
self._available = False
else:
self._available = True
self.cpu = Collect('cpu', lambda: psutil.cpu_percent(interval=0),
interval, history_len)
try:
# new in 0.8
psutil.virtual_memory().percent
get_mem = lambda: psutil.virtual_memory().percent
except:
get_mem = lambda: psutil.phymem_usage().percent
self.mem = Collect('mem', get_mem, interval, history_len)
try:
# new in 0.8
psutil.swap_memory().percent
get_mem = lambda: psutil.swap_memory().percent
except:
get_mem = lambda: psutil.virtmem_usage().percent
self.swap_mem = Collect('swap', get_mem, interval, history_len)
def avg_cpu_percent(self):
self._check_available()
return self.cpu.get_avg()
def max_cpu_percent(self):
self._check_available()
return self.cpu.get_max()
def avg_phymem_usage_percent(self):
self._check_available()
return self.mem.get_avg()
def cur_phymem_usage_percent(self):
self._check_available()
return self.mem.get_cur()
def cur_virtmem_usage_percent(self):
self._check_available()
return self.swap_mem.get_cur()
def available(self):
""" returns false if psutil is not installed """
return self._available
def _check_available(self):
if not self._available:
msg = 'Sorry, psutil not available.'
raise ValueError(msg)
class Collect(object):
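    """ Samples a value via `function` at most once per `interval` seconds and keeps the last `history_len` samples. """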
def __init__(self, name, function, interval, history_len):
self.name = name
self.function = function
self.interval = interval
self.history_len = history_len
self.last_time = None
self.values = []
def get_cur(self):
""" Returns the last value. """
self.update_if_necessary()
return self.values[-1]
def get_min(self):
self.update_if_necessary()
return min(self.values)
def get_max(self):
self.update_if_necessary()
return max(self.values)
def get_avg(self):
self.update_if_necessary()
return sum(self.values) * 1.0 / len(self.values)
def update_if_necessary(self):
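        """ Take a new sample only if at least `interval` seconds have passed since the last one. """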
if self.values and self.time_from_last() < self.interval:
return
self.values.append(self.function())
self.last_time = time.time()
if len(self.values) > self.history_len:
self.values.pop(0)
# print('%s: %s' % (self.name, self.values))
def time_from_last(self):
if self.last_time is None:
return self.interval * self.history_len * 2
else:
return time.time() - self.last_time
| [
"[email protected]"
] | |
02d1bcf15ae7ebbed8bbbdb8e3525273dfec8001 | 71acb7214efd91c0d327f6d8958e1798eadb4401 | /locations/spiders/mediamarkt_be.py | 237c2ef3f09038fd1a8b6bec4254303932a2be83 | [
"CC0-1.0",
"MIT"
] | permissive | alltheplaces/alltheplaces | 21b9f8b4ace1352e52ae7b8f8825a930d2cb033e | 1bcbb55cfcf06f2c714465570711f6e83f205c22 | refs/heads/master | 2023-08-30T19:45:35.098658 | 2023-08-30T17:51:54 | 2023-08-30T17:51:54 | 61,166,935 | 453 | 176 | NOASSERTION | 2023-09-14T17:16:40 | 2016-06-15T01:09:18 | Python | UTF-8 | Python | false | false | 1,477 | py | import re
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from locations.hours import DAYS_FR, OpeningHours
from locations.structured_data_spider import StructuredDataSpider
class MediaMarktBESpider(CrawlSpider, StructuredDataSpider):
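    """Crawl the market selection page and parse each linked store page via its structured data."""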
name = "media_markt_be"
item_attributes = {"brand": "MediaMarkt", "brand_wikidata": "Q2381223"}
start_urls = ["https://www.mediamarkt.be/fr/marketselection.html"]
rules = [Rule(LinkExtractor(restrict_css=".all-markets-list"), callback="parse_sd")]
def post_process_item(self, item, response, ld_data, **kwargs):
name = response.xpath('//*[@id="my-market-content"]/h1/text()').get()
if name:
item["name"] = name
opening_hours = self.parse_hours(ld_data)
if opening_hours:
item["opening_hours"] = opening_hours
yield item
@staticmethod
def parse_hours(ld_data: dict):
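        """Build OpeningHours from French day abbreviations and HH:MM ranges found in ld_data["openingHours"]."""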
opening_hours = OpeningHours()
regex = re.compile(r"(lu|ma|me|je|ve|sa|su)\s+(\d{2}:\d{2})\s*-(\d{2}:\d{2})")
for hours_str in ld_data["openingHours"]:
match = re.search(regex, hours_str)
if match:
day_of_week = match.group(1).capitalize()
open_time = match.group(2)
close_time = match.group(3)
opening_hours.add_range(day=DAYS_FR[day_of_week], open_time=open_time, close_time=close_time)
return opening_hours
| [
"[email protected]"
] | |