hexsha (stringlengths 40..40) | size (int64 5..2.06M) | ext (stringclasses, 10 values) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths 3..248) | max_stars_repo_name (stringlengths 5..125) | max_stars_repo_head_hexsha (stringlengths 40..78) | max_stars_repo_licenses (listlengths 1..10) | max_stars_count (int64 1..191k, ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24..24, ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24..24, ⌀) | max_issues_repo_path (stringlengths 3..248) | max_issues_repo_name (stringlengths 5..125) | max_issues_repo_head_hexsha (stringlengths 40..78) | max_issues_repo_licenses (listlengths 1..10) | max_issues_count (int64 1..67k, ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24..24, ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24..24, ⌀) | max_forks_repo_path (stringlengths 3..248) | max_forks_repo_name (stringlengths 5..125) | max_forks_repo_head_hexsha (stringlengths 40..78) | max_forks_repo_licenses (listlengths 1..10) | max_forks_count (int64 1..105k, ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24..24, ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24..24, ⌀) | content (stringlengths 5..2.06M) | avg_line_length (float64 1..1.02M) | max_line_length (int64 3..1.03M) | alphanum_fraction (float64 0..1) | count_classes (int64 0..1.6M) | score_classes (float64 0..1) | count_generators (int64 0..651k) | score_generators (float64 0..1) | count_decorators (int64 0..990k) | score_decorators (float64 0..1) | count_async_functions (int64 0..235k) | score_async_functions (float64 0..1) | count_documentation (int64 0..1.04M) | score_documentation (float64 0..1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d00b60aaa781272c43b31aa8c0398a217c133f07 | 1,863 | py | Python | admin_reskin/templatetags/sort_menu_items.py | cuongnb14/django-admin-reskin | 9245b60195892e8a3d51294ec70692714452bc29 | [
"MIT"
]
| null | null | null | admin_reskin/templatetags/sort_menu_items.py | cuongnb14/django-admin-reskin | 9245b60195892e8a3d51294ec70692714452bc29 | [
"MIT"
]
| null | null | null | admin_reskin/templatetags/sort_menu_items.py | cuongnb14/django-admin-reskin | 9245b60195892e8a3d51294ec70692714452bc29 | [
"MIT"
]
| null | null | null |
from django import template
from django.conf import settings
from ..models import Bookmark
register = template.Library()
RESKIN_MENU_APP_ORDER = settings.RESKIN_MENU_APP_ORDER
RESKIN_MENU_MODEL_ORDER = settings.RESKIN_MENU_MODEL_ORDER
RESKIN_APP_ICON = settings.RESKIN_APP_ICON
@register.filter
def sort_apps(apps):
max_index = len(apps)
for app in apps:
if app['app_label'] == 'auth':
app['name'] = 'Groups'
if RESKIN_APP_ICON.get(app['app_label']):
app['icon'] = RESKIN_APP_ICON.get(app['app_label'])
else:
app['icon'] = 'fas fa-layer-group'
apps.sort(
key=lambda x:
RESKIN_MENU_APP_ORDER.index(x['app_label'])
if x['app_label'] in RESKIN_MENU_APP_ORDER
else max_index
)
bookmarks = Bookmark.objects.filter(is_active=True).order_by('order')
bookmarks_model = []
for bookmark in bookmarks:
item = {
'name': bookmark.name,
'object_name': bookmark.name,
'perms': {'add': False, 'change': False, 'delete': False, 'view': True},
'admin_url': bookmark.url,
'view_only': True,
}
bookmarks_model.append(item)
if bookmarks_model:
bookmark_app = {
'name': 'Bookmark',
'icon': 'fas fa-bookmark',
'app_label': 'admin_reskin_bookmark',
'app_url': '/admin/admin_reskin/bookmark',
'has_module_perms': True,
'models': bookmarks_model,
}
apps = [bookmark_app] + apps
return apps
@register.filter
def sort_models(models):
max_index = len(models)
models.sort(
key=lambda x:
RESKIN_MENU_MODEL_ORDER.index(x['object_name'])
if x['object_name'] in RESKIN_MENU_MODEL_ORDER
else max_index
)
return models
| 27.397059 | 84 | 0.607085 | 0 | 0 | 0 | 0 | 1,576 | 0.845947 | 0 | 0 | 353 | 0.189479 |
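The two filters above read three values from Django settings and are loaded like any other template tag library. A minimal sketch of the settings they expect follows; every app label, model name, and icon class in it is a placeholder, not something django-admin-reskin prescribes.

```python
# settings.py (illustrative values only)
RESKIN_MENU_APP_ORDER = ["auth", "admin_reskin_bookmark", "shop"]   # apps listed first, in this order
RESKIN_MENU_MODEL_ORDER = ["Group", "User", "Bookmark"]             # models listed first within an app
RESKIN_APP_ICON = {
    "auth": "fas fa-users-cog",      # apps without an entry fall back to 'fas fa-layer-group'
    "shop": "fas fa-shopping-cart",
}
```

In a template the filters would then be applied to the admin app list after `{% load sort_menu_items %}`, e.g. `app_list|sort_apps`.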
d00bef4cf659464b2641f10ea3856a63d0a1dab5 | 1,537 | py | Python | fda/db.py | tsbischof/fda510k | 40065cc873547ceaf992bd0f51e24fe2b2ea4387 | [
"BSD-2-Clause"
]
| null | null | null | fda/db.py | tsbischof/fda510k | 40065cc873547ceaf992bd0f51e24fe2b2ea4387 | [
"BSD-2-Clause"
]
| 3 | 2021-08-31T14:00:17.000Z | 2021-09-01T20:47:06.000Z | fda/db.py | tsbischof/fda | 40065cc873547ceaf992bd0f51e24fe2b2ea4387 | [
"BSD-2-Clause"
]
| null | null | null |
import os
import io
import urllib.request
import zipfile
import pandas
import fda
def get_510k_db(root_dir=os.path.join(fda.root_db_dir, "510k"),
force_download=False):
if not os.path.exists(root_dir):
os.makedirs(root_dir)
db_urls = [
"http://www.accessdata.fda.gov/premarket/ftparea/pmnlstmn.zip",
"http://www.accessdata.fda.gov/premarket/ftparea/pmn96cur.zip",
"http://www.accessdata.fda.gov/premarket/ftparea/pmn9195.zip",
"http://www.accessdata.fda.gov/premarket/ftparea/pmn8690.zip",
"http://www.accessdata.fda.gov/premarket/ftparea/pmn8185.zip",
"http://www.accessdata.fda.gov/premarket/ftparea/pmn7680.zip"
]
db = pandas.concat(map(lambda url: load_510k_db(url, root_dir,
force_download=force_download),
db_urls))
db = db.drop_duplicates().reset_index().drop("index", axis=1)
return(db)
def load_510k_db(url, root_dir, force_download=False):
db_filename = os.path.join(root_dir, os.path.basename(url))
if force_download or not os.path.exists(db_filename):
urllib.request.urlretrieve(url, db_filename)
frames = list()
with zipfile.ZipFile(db_filename) as db:
for filename in db.filelist:
raw = db.read(filename).decode("iso8859_2")
data = pandas.read_csv(io.StringIO(raw), delimiter="|")
frames.append(data)
return(pandas.concat(frames))
| 34.155556 | 84 | 0.63175 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 395 | 0.256994 |
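A short usage sketch, assuming the `fda` package above is importable (it supplies `root_db_dir`) and the accessdata.fda.gov archives are reachable; the printout is only illustrative.

```python
from fda import db

# Downloads the 510(k) zip archives on first use (cached afterwards) and
# concatenates every pipe-delimited file into a single DataFrame.
premarket = db.get_510k_db(force_download=False)
print(premarket.shape)
print(premarket.head())
```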
d00e57b669e23409bb8d461e39ac2d007f53bbe7 | 4,657 | py | Python | inpr/get_num_plate.py | patrickn699/INPR | 737a3454a4b83e51e50937bb227ac7f8bc01d0e9 | [
"MIT"
]
| 2 | 2021-09-25T06:00:40.000Z | 2021-10-14T13:24:43.000Z | inpr/get_num_plate.py | patrickn699/INPR | 737a3454a4b83e51e50937bb227ac7f8bc01d0e9 | [
"MIT"
]
| null | null | null | inpr/get_num_plate.py | patrickn699/INPR | 737a3454a4b83e51e50937bb227ac7f8bc01d0e9 | [
"MIT"
]
| 1 | 2022-01-27T11:39:10.000Z | 2022-01-27T11:39:10.000Z |
import numpy as np
import matplotlib.pyplot as plt
import re as r
import easyocr
#import os
#os.environ['KMP_DUPLICATE_LIB_OK']='True'
re = easyocr.Reader(['en'])
#pl = []
chk = []
a = ''
a1 = ''
#pl = []
#sym = ['{', ']', '[', '}']
class get_number_plate:
def get_bboxes_from(self, output):
""" returns list of bboxes """
return output["instances"].__dict__['_fields']['pred_boxes'].__dict__['tensor']
def crop(self, bbox, in_img):
""" bbox is a list with xmin, ymin, xmax, ymax """
xmin, ymin, xmax, ymax = bbox
cropped_im = in_img[int(ymin):int(ymax), int(xmin):int(xmax)]
return cropped_im
def ocr(self, imm):
op = re.readtext(imm, add_margin=0.1, canvas_size=960000)
# stt = []
for i in range(len(op)):
for j in range(len(op[i])):
if type(op[i][j]) == str:
return (op[i][j].replace(" ", ""))
def remove_un(self, arr):
pl = []
sym = ['{', ']', '[', '}','.','/','(',')','-']
arr = list(arr)
#print(arr)
        # drop '.' characters; a comprehension avoids mutating the list while iterating over it
        arr = [ch for ch in arr if ch != '.']
arr = ''.join(arr)
#print(arr)
'''
for j in arr:
if len(j) == 10 or len(j) >= 9:
for p in j:
#pl.append(p)
print(p)
'''
if len(arr) == 10 or len(arr) >= 9:
            arr = list(arr)
#print(type(arr))
for j in arr:
if j in sym:
arr.remove(j)
arr = (''.join(arr))
#print (arr)
return arr
def get_num_plate(self, lis, show_plates=False):
#print(lis)
#pl = []
sta = ['AP','AR','AS','BR','CG','GA','GJ','HR' ,'HP' ,'JK','JH','KA','KL','MP','MH','MN','ML','MZ','NL' ,'OD','PB' ,'RJ','SK','TN','TS','TR','UA','UK','UP','WB','AN','CH','DN','DD','DL' ,'LD','PY']
opp = []
#sym = ['{', ']', '[', '}']
#print(lis) # prints list of cropped num plate imgs
for p in range(len(lis)):
#print(lis[p])
pl1 = self.remove_un(lis[p])
#print(pl1)
pattern_1 = r.compile(r'\w\w\d\d\w\w\d\d\d\d')
pattern_2 = r.compile(r'\w\w\d\d\w\d\d\d\d')
global a
if r.search(pattern_1, pl1) or r.search(pattern_2, pl1):
a = 'pattern matched!!'
else:
a = 'pattern not matched!!'
global a1
a1 = pl1
#print(a1)
for p in sta:
if p == pl1[:2].upper():
#print('correct state')
opp.append(pl1.upper())
#print(opp) # prints a list of cleaned number plates
try:
opp.remove(a1)
except Exception as e:
print(' ')
#print(set(opp))
return set(opp)
'''
for i in lis:
if len(i) == 10 or len(i) >= 9:
for p in i:
pl.append(p)
for j in pl:
if j in sym:
pl.remove(j)
pl1 = (''.join(pl))
print(pl)
pattern_1 = r.compile(r'\w\w\d\d\w\w\d\d\d\d')
pattern_2 = r.compile(r'\w\w\d\d\w\d\d\d\d')
global a
if r.search(pattern_1, pl1) or r.search(pattern_2, pl1):
a = 'pattern matched!!'
else:
a = 'pattern not matched!!'
sta = ['AP','AR','AS','BR','CG','GA','GJ','HR' ,'HP' ,'JK','JH','KA','KL','MP','MH','MN','ML','MZ','NL' ,'OD','PB' ,'RJ','SK','TN','TS','TR','UA','UK','UP','WB','AN','CH','DN','DD','DL' ,'LD','PY']
for p in sta:
if p == pl1[:2].upper():
#print('correct state')
return pl1.upper(), a
'''
def disp(self, im):
oc = self.ocr(im)
#plt.imshow(im) # shows cropped num plate imgs
plt.show()
plt.close()
return oc
def run_easy_ocr(self, output, im, show_plates=False):
bboxes = self.get_bboxes_from(output)
#print(bboxes)
for bbox in bboxes:
crop_im = self.crop(bbox, in_img=im)
# display cropped image
ocr_op = self.disp(crop_im)
chk.append(ocr_op)
nump = self.get_num_plate(chk,show_plates = show_plates)
#print(nump) # prints list of num plates
return nump
| 26.01676 | 205 | 0.428817 | 4,418 | 0.948679 | 0 | 0 | 0 | 0 | 0 | 0 | 2,010 | 0.431608 |
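A rough usage sketch. It assumes the module is importable as `inpr.get_num_plate` (per the repository path) and that `detector_output` comes from a Detectron2-style detector whose `output['instances']` carries `pred_boxes`, which is what `get_bboxes_from` expects; both names and the image path are placeholders.

```python
import cv2
from inpr.get_num_plate import get_number_plate

image_bgr = cv2.imread("car.jpg")      # path is illustrative
image_rgb = image_bgr[:, :, ::-1]      # the pipeline works on RGB frames

extractor = get_number_plate()
plates = extractor.run_easy_ocr(detector_output, image_rgb, show_plates=False)
print(plates)                          # e.g. {'KA01AB1234'} after cleaning and state-code checks
```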
d00eac7a88a79181fbec1ff905386e4e480a89db | 3,632 | py | Python | client/nodes/detector_docker/sign_filter_node.py | CanboYe/BusEdge | 2e53e1d1d82559fc3e9f0029b2f0faf4e356b210 | [
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
]
| 2 | 2021-08-17T14:14:28.000Z | 2022-02-02T02:09:33.000Z | client/nodes/detector_docker/sign_filter_node.py | cmusatyalab/gabriel-BusEdge | 528a6ee337882c6e709375ecd7ec7e201083c825 | [
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
]
| null | null | null | client/nodes/detector_docker/sign_filter_node.py | cmusatyalab/gabriel-BusEdge | 528a6ee337882c6e709375ecd7ec7e201083c825 | [
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
]
| 1 | 2021-09-01T16:18:29.000Z | 2021-09-01T16:18:29.000Z |
# SPDX-FileCopyrightText: 2021 Carnegie Mellon University
#
# SPDX-License-Identifier: Apache-2.0
import logging
import cv2
from busedge_protocol import busedge_pb2
from gabriel_protocol import gabriel_pb2
from sign_filter import SignFilter
logger = logging.getLogger(__name__)
import argparse
import multiprocessing
import time
import rospy
from cv_bridge import CvBridge
from sensor_msgs.msg import CompressedImage, Image, NavSatFix
from std_msgs.msg import UInt8MultiArray
DEFAULT_SOURCE_NAME = "sign_filter3"
CUR_GPS = NavSatFix()
def run_node(source_name):
cam_id = source_name[-1]
camera_name = "camera" + cam_id
rospy.init_node(camera_name + "_sign_filter_node")
rospy.loginfo("Initialized node sign_filter for " + camera_name)
model_dir = "./model/ssd_mobilenet_v1_mtsd_hunter/saved_model"
model = SignFilter(model_dir)
pub = rospy.Publisher(source_name, UInt8MultiArray, queue_size=1)
image_sub = rospy.Subscriber(
camera_name + "/image_raw/compressed",
CompressedImage,
img_callback,
callback_args=(model, camera_name, pub),
queue_size=1,
buff_size=2 ** 24,
)
gps_sub = rospy.Subscriber("/fix", NavSatFix, gps_callback, queue_size=1)
# spin() simply keeps python from exiting until this node is stopped
rospy.spin()
def img_callback(image, args):
global CUR_GPS
model = args[0]
camera_name = args[1]
pub = args[2]
camera_id = int(camera_name[-1])
bridge = CvBridge()
frame = bridge.compressed_imgmsg_to_cv2(
image, desired_encoding="passthrough"
) # BGR images
frame = frame[:, :, ::-1] # BGR to RGB
frame_copy = frame.copy()
# FILTER
# send_flag = model.send(frame_copy, show_flag = True)
min_score_thresh = 0.75
output_dict = model.detect(frame_copy, min_score_thresh)
send_flag = output_dict["num_detections"] > 0
if send_flag == True:
_, jpeg_frame = cv2.imencode(".jpg", frame)
input_frame = gabriel_pb2.InputFrame()
input_frame.payload_type = gabriel_pb2.PayloadType.IMAGE
input_frame.payloads.append(jpeg_frame.tostring())
engine_fields = busedge_pb2.EngineFields()
engine_fields.gps_data.latitude = CUR_GPS.latitude
engine_fields.gps_data.longitude = CUR_GPS.longitude
engine_fields.gps_data.altitude = CUR_GPS.altitude
secs = image.header.stamp.secs
nsecs = image.header.stamp.nsecs
time_stamps = "_{:0>10d}_{:0>9d}".format(secs, nsecs)
image_filename = camera_name + time_stamps + ".jpg"
engine_fields.image_filename = image_filename
input_frame.extras.Pack(engine_fields)
serialized_message = input_frame.SerializeToString()
rospy.loginfo(
"Sent image msg with size {:.2f} KB".format(len(serialized_message) / 1024)
)
pub_data = UInt8MultiArray()
pub_data.data = serialized_message
pub.publish(pub_data)
time.sleep(0.1)
else:
pass
def gps_callback(data):
global CUR_GPS
if data.status.status == -1:
rospy.logdebug("Sign filter node cannot get valid GPS data")
else:
CUR_GPS = data
if __name__ == "__main__":
# run_node('camera3')
parser = argparse.ArgumentParser()
parser.add_argument(
"-n",
"--source-name",
nargs="+",
default=[DEFAULT_SOURCE_NAME],
help="Set source name for this pipeline",
)
args = parser.parse_args()
for source in args.source_name:
multiprocessing.Process(target=run_node, args=(source,)).start()
| 28.155039 | 87 | 0.681718 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 632 | 0.174009 |
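For reference, a hypothetical receiver-side sketch (not part of this node) showing how the message serialized above could be unpacked again; it only uses standard protobuf calls plus the two message types already imported by the node.

```python
from busedge_protocol import busedge_pb2
from gabriel_protocol import gabriel_pb2


def decode_frame(serialized_message: bytes):
    # Reverse of the publisher above: parse the InputFrame, then unpack the
    # EngineFields message that was stored in the Any-typed `extras` field.
    input_frame = gabriel_pb2.InputFrame()
    input_frame.ParseFromString(serialized_message)
    engine_fields = busedge_pb2.EngineFields()
    input_frame.extras.Unpack(engine_fields)
    jpeg_bytes = input_frame.payloads[0]
    gps = engine_fields.gps_data
    return jpeg_bytes, (gps.latitude, gps.longitude, gps.altitude), engine_fields.image_filename
```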
d01001102fad7912a59abc8be03d31f0294830cb | 3,095 | py | Python | collector/cli.py | mvinii94/aws-lambda-log-collector | 682850f282b70aa18663699c7e5e32bc4f6a8be1 | [
"MIT"
]
| 4 | 2019-11-13T12:49:31.000Z | 2020-11-19T06:59:45.000Z | collector/cli.py | mvinii94/aws-lambda-log-collector | 682850f282b70aa18663699c7e5e32bc4f6a8be1 | [
"MIT"
]
| null | null | null | collector/cli.py | mvinii94/aws-lambda-log-collector | 682850f282b70aa18663699c7e5e32bc4f6a8be1 | [
"MIT"
]
| null | null | null |
import click
from pathlib import Path
# Local imports
from .__init__ import *
from .utils import parse_time, create_dir, write_file, get_profiles, compress, INVALID_PROFILE, INVALID_DATES
from .lambda_log_collector import LambdaLogCollector
@click.command()
@click.version_option()
@click.option("--function-name", "-f", type=str, help="i.e. HelloWorld", required=True)
@click.option("--profile", "-p", type=str, help="AWS profile name (i.e. dev)", required=True)
@click.option("--region", "-r", type=str, help="AWS region (i.e. eu-west-1)", required=True)
@click.option("--output", "-o", type=click.Path(dir_okay=True, resolve_path=True), help="i.e. /tmp/", required=True)
@click.option("--start-time", "-s", type=str, help="2019-10-30T12:00:00", required=True)
@click.option("--end-time", "-e", type=str, help="2019-10-31T12:00:00", required=True)
@click.option("--pattern", type=str, help="ERROR", required=True)
@click.option("--log-level", type=click.Choice(['INFO', 'ERROR', 'DEBUG']), help='logging level', default='INFO')
def cli(function_name, profile, region, output, start_time, end_time, pattern, log_level):
define_log_level(log_level)
# get start and end time in epoch
epoch_start_time = parse_time(start_time)
epoch_end_time = parse_time(end_time)
if epoch_start_time > epoch_end_time:
raise Exception(INVALID_DATES)
available_profiles = get_profiles()
if profile not in available_profiles:
raise Exception(INVALID_PROFILE % available_profiles)
# initiate LambdaLogCollector class
lambda_log_collector = LambdaLogCollector(region, profile, function_name, epoch_start_time, epoch_end_time, pattern)
# get lambda function configuration
lambda_configuration = lambda_log_collector.get_function_configuration()
if lambda_configuration is not False:
# find CloudWatch Logs between start_time and end_time
streams = lambda_log_collector.find_log_streams()
# collect logs from filtered log streams
logs = lambda_log_collector.collect_logs()
# replacing timestamp strings : to _ (windows support)
start_time = start_time.replace(":", "_")
end_time = end_time.replace(":", "_")
# create output dir
output_path = Path(output)
new_dir_name = function_name + "-" + start_time + "-" + end_time
new_dir = Path(output_path / new_dir_name)
create_dir(new_dir)
# write lambda config file
lambda_fn_config_file = function_name + "-config.json"
write_file(new_dir, lambda_fn_config_file, lambda_configuration)
# write streams file
if streams is not False:
lambda_fn_streams_file = function_name + "-streams-" + start_time + "-" + end_time + ".json"
write_file(new_dir, lambda_fn_streams_file, streams)
# write logs file
if logs is not False:
lambda_fn_logs_file = function_name + "-logs-" + start_time + "-" + end_time + ".json"
write_file(new_dir, lambda_fn_logs_file, logs)
compress(new_dir, new_dir_name)
| 41.824324 | 120 | 0.695315 | 0 | 0 | 0 | 0 | 2,850 | 0.92084 | 0 | 0 | 718 | 0.231987 |
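A small invocation sketch using Click's test runner; the profile, region, dates, and output directory are placeholders and must match a real AWS profile for the command to succeed.

```python
from click.testing import CliRunner

from collector.cli import cli

runner = CliRunner()
result = runner.invoke(cli, [
    "--function-name", "HelloWorld",
    "--profile", "dev",
    "--region", "eu-west-1",
    "--output", "/tmp/",
    "--start-time", "2019-10-30T12:00:00",
    "--end-time", "2019-10-31T12:00:00",
    "--pattern", "ERROR",
    "--log-level", "INFO",
])
print(result.exit_code, result.output)
```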
d0108ea8045cdfac005ba75c029402f4c357985e | 78 | py | Python | Chapter11/flask-app.py | arifmudi/Data-Processing-with-Optimus | 4f909c893e51a55714a85cf379b8e817b995612d | [
"MIT"
]
| 6 | 2020-12-08T15:21:08.000Z | 2022-03-14T03:39:14.000Z | Chapter11/flask-app.py | arifmudi/Data-Processing-with-Optimus | 4f909c893e51a55714a85cf379b8e817b995612d | [
"MIT"
]
| null | null | null | Chapter11/flask-app.py | arifmudi/Data-Processing-with-Optimus | 4f909c893e51a55714a85cf379b8e817b995612d | [
"MIT"
]
| 2 | 2021-06-24T14:09:38.000Z | 2021-08-09T08:02:00.000Z |
import sys
sys.path.append('../../../optimus')
from optimus.server import app
| 19.5 | 35 | 0.705128 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 0.230769 |
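The file name suggests `app` is a Flask application object, but the lines above do not show it; assuming that holds, a conventional way to run it locally would be:

```python
if __name__ == "__main__":
    # host and port are placeholders; `app` is assumed to be a Flask instance
    app.run(host="0.0.0.0", port=5000)
```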
d0159e9e7bbcc76b698e4bc18244e26a32e8736f | 1,068 | py | Python | rllib/environment/mdps/__init__.py | shenao-zhang/DCPU | 0da9aa2b7878b54ba4ee4dca894c2e86cdc0d559 | [
"MIT"
]
| 8 | 2020-10-23T07:52:19.000Z | 2022-03-06T13:35:12.000Z | rllib/environment/mdps/__init__.py | shenao-zhang/DCPU | 0da9aa2b7878b54ba4ee4dca894c2e86cdc0d559 | [
"MIT"
]
| 3 | 2021-03-04T13:44:01.000Z | 2021-03-23T09:57:50.000Z | rllib/environment/mdps/__init__.py | shenao-zhang/DCPU | 0da9aa2b7878b54ba4ee4dca894c2e86cdc0d559 | [
"MIT"
]
| 3 | 2021-03-18T08:23:56.000Z | 2021-07-06T11:20:12.000Z |
"""Common MDPs in RL literature."""
from gym.envs.registration import register
from .baird_star import BairdStar
from .boyan_chain import BoyanChain
from .double_chain import DoubleChainProblem
from .grid_world import EasyGridWorld
from .random_mdp import RandomMDP
from .single_chain import SingleChainProblem
from .two_state import TwoStateProblem
register(id="BairdStar-v0", entry_point="rllib.environment.mdps.baird_star:BairdStar")
register(
id="BoyanChain-v0", entry_point="rllib.environment.mdps.boyan_chain:BoyanChain"
)
register(
id="DoubleChainProblem-v0",
entry_point="rllib.environment.mdps.double_chain:DoubleChainProblem",
)
register(
id="EasyGridWorld-v0", entry_point="rllib.environment.mdps.grid_world:EasyGridWorld"
)
register(id="RandomMDP-v0", entry_point="rllib.environment.mdps.random_mdp:RandomMDP")
register(
id="SingleChainProblem-v0",
entry_point="rllib.environment.mdps.single_chain:SingleChainProblem",
)
register(
id="TwoStateProblem-v0",
entry_point="rllib.environment.mdps.two_state:TwoStateProblem",
)
| 33.375 | 88 | 0.801498 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 510 | 0.477528 |
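Importing this module runs the `register` calls, after which the environments resolve through Gym's registry. A minimal sketch, assuming the `rllib` package from this repository is on the Python path and the pre-0.26 Gym step API is in use (as the code's vintage suggests):

```python
import gym
import rllib.environment.mdps  # noqa: F401  -- side effect: registers the MDP ids above

env = gym.make("EasyGridWorld-v0")
state = env.reset()
next_state, reward, done, info = env.step(env.action_space.sample())
print(next_state, reward, done)
```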
d01640b2fef264dfd84ea3721e0ecaa46ce8a2a7 | 2,072 | py | Python | common/data_helper.py | ThisIsSoSteve/Project-Tensorflow-Cars | 6cdfedceffa56ac0885ce2253dae4549859b2dbf | [
"MIT"
]
| 1 | 2017-05-11T06:01:46.000Z | 2017-05-11T06:01:46.000Z | common/data_helper.py | ThisIsSoSteve/Project-Tensorflow-Cars | 6cdfedceffa56ac0885ce2253dae4549859b2dbf | [
"MIT"
]
| 2 | 2017-05-11T10:03:16.000Z | 2017-06-21T18:25:00.000Z | common/data_helper.py | ThisIsSoSteve/Project-Tensorflow-Cars | 6cdfedceffa56ac0885ce2253dae4549859b2dbf | [
"MIT"
]
| null | null | null | import glob
import pickle
from shutil import copy
from tqdm import tqdm
class DataHelper:
"""
    helpers to transform and move data around; add more as needed.
"""
def copy_specific_training_data_to_new_folder(self, source_folder_path, destination_folder_path,
track_name, track_variation):
"""
Copies filtered raw data from source_folder_path to destination_folder_path.
Keyword arguments:
        source_folder_path -- where the data will be read from
destination_folder_path -- where the filtered data will be saved
track_name -- filter by track name
track_variation -- filter by track variation (e.g short)
"""
listing = glob.glob(source_folder_path + '/*.png')
for filename in tqdm(listing):
filename = filename.replace('\\', '/')
filename = filename.replace('-image.png', '')
with open(filename + '-data.pkl', 'rb') as data:
project_cars_state = pickle.load(data)
#controller_state = pickle.load(data)
# only do Watkins Glen International track data
current_track = str(project_cars_state.mTrackLocation).replace(
"'", "").replace("b", "")
current_track_variation = str(
project_cars_state.mTrackVariation).replace("'", "").replace("b", "")
                # skip this file unless both the track name and the variation match
                # (variation examples: 'Short Circuit', 'Grand Prix')
                if current_track != track_name or current_track_variation != track_variation:
                    continue
copy(filename + '-data.pkl', destination_folder_path)
copy(filename + '-image.png', destination_folder_path)
#copy_specific_training_data_to_new_folder('F:/Project_Cars_Data/Raw',
#'F:/Project_Cars_Data/Watkins Glen International - Short Circuit',
# 'Watkins Glen International', 'Short Circuit')
# b'Watkins Glen International'
# b'Short Circuit'
# b'Watkins Glen International'
# b'Grand Prix'
| 35.118644 | 100 | 0.639479 | 1,706 | 0.823359 | 0 | 0 | 0 | 0 | 0 | 0 | 993 | 0.479247 |
d017493522e0d4e934860f36259d7cd6e8ff4de0 | 1,009 | py | Python | swifitool/faults/flp.py | chenoya/swifi-tool | 9386fab56e12d83cbe14024b5d5edac0fd1e3baf | [
"MIT"
]
| null | null | null | swifitool/faults/flp.py | chenoya/swifi-tool | 9386fab56e12d83cbe14024b5d5edac0fd1e3baf | [
"MIT"
]
| null | null | null | swifitool/faults/flp.py | chenoya/swifi-tool | 9386fab56e12d83cbe14024b5d5edac0fd1e3baf | [
"MIT"
]
| null | null | null |
from faults.faultmodel import FaultModel
from utils import *
class FLP(FaultModel):
name = 'FLP'
docs = ' FLP addr significance \t flip one specific bit'
nb_args = 2
def __init__(self, config, args):
super().__init__(config, args)
self.addr = parse_addr(args[0])
check_or_fail(len(self.addr) == 1, "FLP does not support address range")
try:
self.significance = int(args[1], 0)
check_or_fail(0 <= self.significance < 8,
"Significance must be between 0 and 7 : " + str(self.significance))
except ValueError:
check_or_fail(False, "Wrong significance format : " + args[1])
def edited_file_locations(self):
return [self.addr[0] * 8 + self.significance]
def apply(self, opened_file):
opened_file.seek(self.addr[0])
prev_value = ord(opened_file.read(1))
prev_value ^= (1 << self.significance)
set_bytes(opened_file, self.addr[0], prev_value)
| 34.793103 | 93 | 0.617443 | 945 | 0.936571 | 0 | 0 | 0 | 0 | 0 | 0 | 164 | 0.162537 |
d0199dc840ae15f108dd63b1047696f29f1a9218 | 1,488 | py | Python | tests/test_warp.py | Krande/ipygany | 471355d043e3952ac68052613135fd5a5ee3a41b | [
"BSD-3-Clause"
]
| 450 | 2019-11-29T07:19:42.000Z | 2022-03-27T08:38:18.000Z | tests/test_warp.py | Krande/ipygany | 471355d043e3952ac68052613135fd5a5ee3a41b | [
"BSD-3-Clause"
]
| 47 | 2019-12-14T00:57:14.000Z | 2022-01-27T16:14:40.000Z | tests/test_warp.py | Krande/ipygany | 471355d043e3952ac68052613135fd5a5ee3a41b | [
"BSD-3-Clause"
]
| 51 | 2019-11-29T07:19:48.000Z | 2022-03-25T13:07:19.000Z | import pytest
from traitlets import TraitError
from ipygany import PolyMesh, Warp
from .utils import get_test_assets
def test_default_input():
vertices, triangles, data_1d, data_3d = get_test_assets()
poly = PolyMesh(vertices=vertices, triangle_indices=triangles, data=[data_1d, data_3d])
warped_mesh = Warp(poly)
assert warped_mesh.input == (('1d', 'x'), 0, 0)
poly = PolyMesh(vertices=vertices, triangle_indices=triangles, data=[data_3d])
warped_mesh = Warp(poly)
assert warped_mesh.input == '3d'
def test_input():
vertices, triangles, data_1d, data_3d = get_test_assets()
poly = PolyMesh(vertices=vertices, triangle_indices=triangles, data=[data_1d, data_3d])
warped_mesh = Warp(poly)
with pytest.raises(TraitError):
warped_mesh.input = (('1d', 'x'), 0)
warped_mesh.input = ('1d', 0, 0)
assert warped_mesh.input == (('1d', 'x'), 0, 0)
warped_mesh.input = ('1d', 0, 32)
assert warped_mesh.input == (('1d', 'x'), 0, 32)
warped_mesh.input = (0, 0, '1d')
assert warped_mesh.input == (0, 0, ('1d', 'x'))
with pytest.raises(TraitError):
warped_mesh.input = ('3d', 0, 0)
warped_mesh = Warp(poly, input=('1d', 0, 0))
assert warped_mesh.input == (('1d', 'x'), 0, 0)
warped_mesh = Warp(poly, input=(0, 0, '1d'))
assert warped_mesh.input == (0, 0, ('1d', 'x'))
warped_mesh = Warp(warped_mesh, input=(0, '1d', 0))
assert warped_mesh.input == (0, ('1d', 'x'), 0)
| 26.571429 | 91 | 0.636425 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 88 | 0.05914 |
d01a949b661519f2a818675ee51e8c4ae04571b0 | 3,120 | py | Python | MLGame/games/snake/ml/rule.py | Liuian/1092_INTRODUCTION-TO-MACHINE-LEARNING-AND-ITS-APPLICATION-TO-GAMING | f4a58d0d9f5832a77a4a86352e084065dc7bae50 | [
"MIT"
]
| null | null | null | MLGame/games/snake/ml/rule.py | Liuian/1092_INTRODUCTION-TO-MACHINE-LEARNING-AND-ITS-APPLICATION-TO-GAMING | f4a58d0d9f5832a77a4a86352e084065dc7bae50 | [
"MIT"
]
| null | null | null | MLGame/games/snake/ml/rule.py | Liuian/1092_INTRODUCTION-TO-MACHINE-LEARNING-AND-ITS-APPLICATION-TO-GAMING | f4a58d0d9f5832a77a4a86352e084065dc7bae50 | [
"MIT"
]
| null | null | null | """
The template of the script for playing the game in the ml mode
"""
class MLPlay:
def __init__(self):
"""
Constructor
"""
        self.direction = 0  # up/down/left/right: 1, 2, 3, 4
self.current_x = 0
self.current_y = 0
self.last_x = 0
self.last_y = 0
self.x_dir = 0
self.y_dir = 0
#pass
def update(self, scene_info):
"""
Generate the command according to the received scene information
"""
if scene_info["status"] == "GAME_OVER":
snake_body = scene_info["snake_body"]
print(len(snake_body))
return "RESET"
snake_head = scene_info["snake_head"]
food = scene_info["food"]
snake_body = scene_info["snake_body"]
if scene_info["frame"] == 0:
            self.direction = 0  # up/down/left/right: 1, 2, 3, 4
self.current_x = snake_head[0]
self.current_y = snake_head[1]
self.last_x = snake_head[0]
self.last_y = snake_head[1]
self.x_dir = 0
self.y_dir = 0
else:
self.current_x = snake_head[0]
self.current_y = snake_head[1]
self.x_dir = self.current_x - self.last_x
self.y_dir = self.current_y - self.last_y
if self.x_dir > 0 and self.y_dir == 0:#right
self.direction = 4
if self.x_dir < 0 and self.y_dir == 0:#left
self.direction = 3
if self.x_dir == 0 and self.y_dir > 0:#down
self.direction = 2
if self.x_dir == 0 and self.y_dir < 0:#up
self.direction = 1
self.last_x = snake_head[0]
self.last_y = snake_head[1]
        # traverse the whole playing field in a fixed, repeating pattern
if (self.current_x != 0 and self.current_x != 10 and self.current_x != 290):
if(((self.current_y / 10) % 2) == 0):
return "RIGHT"
elif(((self.current_y / 10) % 2) == 1):
return "LEFT"
elif (self.current_x == 0):
if (self.current_y == 0):
return "RIGHT"
else:
return "UP"
elif (self.current_x == 10):
if(self.current_y == 290):
return "LEFT"
elif((self.current_y / 10) % 2 == 0):
return "RIGHT"
elif((self.current_y / 10) % 2 == 1):
return "DOWN"
elif (self.current_x == 290):
if((self.current_y / 10) % 2 == 0):
return "DOWN"
elif((self.current_y / 10) % 2 == 1):
return "LEFT"
if snake_head[0] > food[0]:
return "LEFT"
elif snake_head[0] < food[0]:
return "RIGHT"
elif snake_head[1] > food[1]:
return "UP"
elif snake_head[1] < food[1]:
return "DOWN"
def reset(self):
"""
Reset the status if needed
"""
pass
| 32.5 | 88 | 0.458013 | 3,079 | 0.97684 | 0 | 0 | 0 | 0 | 0 | 0 | 492 | 0.156091 |
d01ad5a73de06c489b92a116216a85d95752401d | 856 | py | Python | CodingInterviews/python/37_get_num_of_k_2.py | YorkFish/git_study | 6e023244daaa22e12b24e632e76a13e5066f2947 | [
"MIT"
]
| null | null | null | CodingInterviews/python/37_get_num_of_k_2.py | YorkFish/git_study | 6e023244daaa22e12b24e632e76a13e5066f2947 | [
"MIT"
]
| null | null | null | CodingInterviews/python/37_get_num_of_k_2.py | YorkFish/git_study | 6e023244daaa22e12b24e632e76a13e5066f2947 | [
"MIT"
]
| null | null | null |
#!/usr/bin/env python3
# coding:utf-8
class Solution:
def GetNumberOfK(self, data, k):
if data == [] or k > data[-1]:
return 0
def binSearch(data, num):
left = 0
right = len(data) - 1
while left < right:
mid = left + (right - left) // 2
if data[mid] < num:
left = mid + 1
else:
right = mid
return left
if data[-1] == k:
stop = len(data)
else:
stop = binSearch(data, k + 0.5)
return stop - binSearch(data, k - 0.5)
if __name__ == "__main__":
# data = [1, 3, 3, 3, 3, 4, 5]
# k = 2
# k = 3
# k = 4
# k = 6
data = [1, 2, 3, 3, 3, 3]
k = 3
s = Solution()
ans = s.GetNumberOfK(data, k)
print(ans)
| 20.878049 | 48 | 0.408879 | 593 | 0.692757 | 0 | 0 | 0 | 0 | 0 | 0 | 104 | 0.121495 |
d01b679428fa012cfcf64fb9a0547f3b7aa6be3a | 2,850 | py | Python | sborl/errors.py | canonical/sborl | f821ecfcbf977d0605def66dca19ea5e8e39b5a3 | [
"Apache-2.0"
]
| null | null | null | sborl/errors.py | canonical/sborl | f821ecfcbf977d0605def66dca19ea5e8e39b5a3 | [
"Apache-2.0"
]
| null | null | null | sborl/errors.py | canonical/sborl | f821ecfcbf977d0605def66dca19ea5e8e39b5a3 | [
"Apache-2.0"
]
| null | null | null |
# Copyright 2022 Canonical Ltd.
# See LICENSE file for licensing details.
from typing import Union
from ops.model import Application, Relation, Unit
class BaseException(Exception):
"""Base exception for exceptions from this library."""
class InvalidRoleError(BaseException):
"""The specified role is not one of "provides", "requires", or "peer".
The following property is available:
* role: The role which was invalid.
"""
def __init__(self, role):
super().__init__(role)
self.role = role
class SchemaError(BaseException):
"""Base class for errors with the schema."""
class SchemaParseError(SchemaError):
"""There was an error parsing the schemas YAML."""
class InvalidSchemaError(SchemaError):
"""An invalid schema (not a valid JSONSchema) was found in the schema doc.
The following property is available:
* version: The version of the schema which was invalid.
"""
def __init__(self, version):
super().__init__(version)
self.version = version
class InvalidSchemaVersionError(InvalidSchemaError):
"""An invalid version (not an int, nor str of form "vX" where X is an int) was found
in the schema doc.
The following property is available:
* version: The schema version which was invalid.
"""
class RelationException(BaseException):
"""Base exception for relation exceptions from this library."""
def __init__(self, relation: Relation):
super().__init__(f"{relation.name}:{relation.id}")
self.relation = relation
class UnversionedRelation(RelationException):
"""The relation is not yet complete due to missing remote version info."""
class IncompleteRelation(RelationException):
"""The relation is not yet complete due to missing remote data."""
class RelationError(RelationException):
"""Base class for actual errors from this library."""
class IncompatibleVersionsError(RelationError):
"""The remote application does not support any common schema versions."""
class RelationParseError(RelationError):
"""An error was encountered parsing data from the relation."""
def __init__(self, relation: Relation, entity: Union[Application, Unit], key: str):
super().__init__(relation)
self.args = (f"{relation.name}:{relation.id} {entity.name} '{key}'",)
self.entity = entity
self.key = key
class RelationDataError(RelationError):
"""An error was encountered validating data against the schema."""
def __init__(self, relation: Relation, entity: Union[Application, Unit]):
super().__init__(relation)
self.args = (f"{relation.name}:{relation.id} {entity.name}",)
self.entity = entity
class RelationPermissionError(RelationDataError):
"""An attempt to write data to a disallowed bucket."""
| 28.217822 | 88 | 0.698246 | 2,658 | 0.932632 | 0 | 0 | 0 | 0 | 0 | 0 | 1,432 | 0.502456 |
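A small sketch of how charm code built on this module might catch these exceptions; `relation` is assumed to be an `ops.model.Relation` from the charm's event context, and `endpoint.unwrap(...)` is a stand-in for whatever read-and-validate call the library exposes.

```python
from sborl import errors


def load_remote_data(endpoint, relation):
    try:
        return endpoint.unwrap(relation)   # stand-in for a real read/validate call
    except errors.UnversionedRelation:
        return None                        # remote side has not published its schema version yet
    except errors.RelationDataError as exc:
        # exc.relation and exc.entity identify where the invalid data came from
        raise RuntimeError(f"bad data on {exc.relation.name}:{exc.relation.id}") from exc
```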
d01bbe0df932770a9de781be883abde7e781fb15 | 23,356 | py | Python | PerceptualLoss.py | kirill-pinigin/DeepImageDenoiser | 9a228c821bd3960688a4ed35f47f4767d226b57c | [
"Apache-2.0"
]
| null | null | null | PerceptualLoss.py | kirill-pinigin/DeepImageDenoiser | 9a228c821bd3960688a4ed35f47f4767d226b57c | [
"Apache-2.0"
]
| null | null | null | PerceptualLoss.py | kirill-pinigin/DeepImageDenoiser | 9a228c821bd3960688a4ed35f47f4767d226b57c | [
"Apache-2.0"
]
| null | null | null |
import torch
import torch.nn as nn
from torchvision import models
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from DeepImageDenoiser import LR_THRESHOLD, DIMENSION, LEARNING_RATE
from NeuralModels import SpectralNorm
ITERATION_LIMIT = int(1e6)
SQUEEZENET_CONFIG = {'dnn' : models.squeezenet1_1(pretrained=True).features, 'features' : [2, 5, 8, 13]}
VGG_16_CONFIG = {'dnn' : models.vgg16(pretrained=True).features, 'features' : [4, 9, 16, 23]}
VGG_16_BN_CONFIG = {'dnn' : models.vgg16_bn(pretrained=True).features, 'features' : [6, 13, 23, 33] }
VGG_19_CONFIG = {'dnn' : models.vgg19(pretrained=True).features, 'features' : [ 4, 9, 18, 36] }
VGG_19_BN_CONFIG = {'dnn': models.vgg19_bn(pretrained=True).features, 'features' : [6, 13, 23, 52]}
class BasicFeatureExtractor(nn.Module):
def __init__(self, vgg_config , feature_limit = 9):
super(BasicFeatureExtractor, self).__init__()
if DIMENSION == 3:
self.mean = Parameter(torch.tensor([0.485, 0.456, 0.406]).view(-1, 1, 1))
self.std = Parameter(torch.tensor([0.229, 0.224, 0.225]).view(-1, 1, 1))
elif DIMENSION == 1:
self.mean = Parameter(torch.tensor([0.449]).view(-1, 1, 1))
self.std = Parameter(torch.tensor([0.226]).view(-1, 1, 1))
else:
self.mean = Parameter(torch.zeros(DIMENSION).view(-1, 1, 1))
self.std = Parameter(torch.ones(DIMENSION).view(-1, 1, 1))
vgg_pretrained = vgg_config['dnn']
conv = BasicFeatureExtractor.configure_input(DIMENSION, vgg_pretrained)
self.slice1 = nn.Sequential(conv)
for x in range(1, feature_limit):
self.slice1.add_module(str(x), vgg_pretrained[x])
@staticmethod
def configure_input(dimension, vgg):
conv = nn.Conv2d(dimension, 64, kernel_size=3, padding=1)
if dimension == 1 or dimension == 3:
weight = torch.FloatTensor(64, DIMENSION, 3, 3)
parameters = list(vgg.parameters())
for i in range(64):
if DIMENSION == 1:
weight[i, :, :, :] = parameters[0].data[i].mean(0)
else:
weight[i, :, :, :] = parameters[0].data[i]
conv.weight.data.copy_(weight)
conv.bias.data.copy_(parameters[1].data)
return conv
def forward(self, x):
if DIMENSION == 1 or DIMENSION == 3:
            if self.mean.device != x.device:
                self.mean.data = self.mean.data.to(x.device)
            if self.std.device != x.device:
                self.std.data = self.std.data.to(x.device)
x = (x - self.mean) / self.std
return self.slice1(x)
class BasicMultiFeatureExtractor(BasicFeatureExtractor):
def __init__(self, vgg_config , requires_grad):
super(BasicMultiFeatureExtractor, self).__init__(vgg_config, vgg_config['features'][0])
vgg_pretrained = vgg_config['dnn']
self.slice2 = torch.nn.Sequential()
for x in range(vgg_config['features'][0], vgg_config['features'][1]):
self.slice2.add_module(str(x), vgg_pretrained[x])
self.slice3 = torch.nn.Sequential()
for x in range(vgg_config['features'][1], vgg_config['features'][2]):
self.slice3.add_module(str(x), vgg_pretrained[x])
self.slice4 = torch.nn.Sequential()
for x in range(vgg_config['features'][2], vgg_config['features'][3]):
self.slice4.add_module(str(x), vgg_pretrained[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, x):
h_relu1 = super(BasicMultiFeatureExtractor, self).forward(x)
h_relu2 = self.slice2(h_relu1)
h_relu3 = self.slice3(h_relu2)
h_relu4 = self.slice4(h_relu3)
return h_relu1, h_relu2, h_relu3, h_relu4
class FastNeuralStyleExtractor(BasicMultiFeatureExtractor):
def __init__(self, requires_grad=False , bn = True):
features = VGG_16_BN_CONFIG if bn else VGG_16_CONFIG
super(FastNeuralStyleExtractor, self).__init__(features, requires_grad)
class FastNeuralStylePerceptualLoss(nn.Module):
def __init__(self, weight:float = 1e-3):
super(FastNeuralStylePerceptualLoss, self).__init__()
self.factors = [1e0 , 1e-1, 1e-2 , 1e-3]
self.weight = weight
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.cudas = list(range(torch.cuda.device_count()))
self.features = FastNeuralStyleExtractor()
self.features.eval()
self.features.to(self.device)
self.criterion = nn.MSELoss()
def compute_gram_matrix(self, x):
b, ch, h, w = x.size()
f = x.view(b, ch, w * h)
f_T = f.transpose(1, 2)
G = f.bmm(f_T) / (h * w * ch)
return G
def forward(self, actual, desire):
actuals = torch.nn.parallel.data_parallel(module=self.features, inputs=actual, device_ids=self.cudas)
desires = torch.nn.parallel.data_parallel(module=self.features, inputs=desire, device_ids=self.cudas)
closs = 0.0
for i in range(len(actuals)):
closs += self.factors[i] * self.criterion(actuals[i], desires[i])
        sloss = 0.0
        if self.weight != 0:
            # style term: MSE between Gram matrices of the deepest feature maps
            sloss = self.weight * self.criterion(self.compute_gram_matrix(actuals[i]),
                                                 self.compute_gram_matrix(desires[i]))
self.loss = closs + sloss
return self.loss
def backward(self, retain_variables=True):
return self.loss.backward(retain_variables=retain_variables)
class FluentExtractor(BasicMultiFeatureExtractor):
def __init__(self):
super(BasicFeatureExtractor, self).__init__()
self.mean = Parameter(torch.zeros(DIMENSION).view(-1, 1, 1))
self.std = Parameter(torch.ones(DIMENSION).view(-1, 1, 1))
self.slice1 = torch.nn.Sequential(
nn.Conv2d(in_channels=DIMENSION, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
)
self.slice2 = torch.nn.Sequential(
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(128),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(128),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(256),
nn.LeakyReLU(0.2, inplace=True),
)
self.slice3 = torch.nn.Sequential(
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(256),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.2, inplace=True),
)
self.slice4 = torch.nn.Sequential(
nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.2, inplace=True),
)
class AdaptivePerceptualLoss(nn.Module):
def __init__(self):
super(AdaptivePerceptualLoss, self).__init__()
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.cudas = list(range(torch.cuda.device_count()))
self.features = FluentExtractor()
self.factors = [1e0, 1e-1, 1e-2, 1e-3]
self.predictor = nn.Sequential()
self.predictor.add_module('conv_9', nn.Conv2d(in_channels=512, out_channels=8, kernel_size=3, stride=2, padding=1, bias=False))
self.predictor.add_module('lrelu_9', nn.LeakyReLU(0.2))
self.predictor.add_module('fc', nn.Conv2d(8, 1, 1, 1, 0, bias=False))
self.predictor.add_module('sigmoid', nn.Sigmoid())
self.features.to(self.device)
self.predictor.to(self.device)
self.optimizer = torch.optim.Adam(self.parameters(), lr=LEARNING_RATE)
self.ContentCriterion = nn.L1Loss()
self.AdversarialCriterion = nn.BCELoss()
self.loss = None
self.counter = int(0)
self.best_loss = float(100500)
self.current_loss = float(0)
self.relu = nn.ReLU()
self.margin = 1.0
def evaluate(self, actual, desire):
actual_features = torch.nn.parallel.data_parallel(module=self.features, inputs=actual, device_ids=self.cudas)
desire_features = torch.nn.parallel.data_parallel(module=self.features, inputs=desire, device_ids=self.cudas)
ploss = 0.0
for i in range(len(desire_features)):
ploss += self.factors[i]*self.ContentCriterion(actual_features[i], desire_features[i])
return actual_features, desire_features, ploss
def meta_optimize(self, lossD, length):
self.current_loss += float(lossD.item()) / length
if self.counter > ITERATION_LIMIT:
self.current_loss = self.current_loss / float(ITERATION_LIMIT)
if self.current_loss < self.best_loss:
self.best_loss = self.current_loss
print('! best_loss !', self.best_loss)
else:
for param_group in self.optimizer.param_groups:
lr = param_group['lr']
if lr >= LR_THRESHOLD:
param_group['lr'] = lr * 0.2
print('! Decrease LearningRate in Perceptual !', lr)
self.counter = int(0)
self.current_loss = float(0)
self.counter += int(1)
def pretrain(self, dataloaders, num_epochs=20):
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
for phase in ['train', 'val']:
if phase == 'train':
self.features.train(True)
self.predictor.train(True)
else:
self.features.train(False)
self.predictor.train(False)
running_loss = 0.0
running_corrects = 0
for data in dataloaders[phase]:
inputs, targets = data
targets = targets.float()
inputs = Variable(inputs.to(self.device))
targets = Variable(targets.to(self.device))
self.optimizer.zero_grad()
features = torch.nn.parallel.data_parallel(module=self.features, inputs=inputs, device_ids=self.cudas)
outputs = torch.nn.parallel.data_parallel(module=self.predictor, inputs=features[-1].detach(), device_ids=self.cudas).view(-1)
loss = self.AdversarialCriterion(outputs, targets)
if phase == 'train':
loss.backward()
self.optimizer.step()
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(torch.round(outputs.data) == targets.data)
self.meta_optimize(loss, float(targets.size(0)))
epoch_loss = float(running_loss) / float(len(dataloaders[phase].dataset))
epoch_acc = float(running_corrects) / float(len(dataloaders[phase].dataset))
print(' epoch_acc ', epoch_acc, ' epoch_loss ', epoch_loss)
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
print('curent best_acc ', best_acc)
self.optimizer = torch.optim.Adam(self.parameters(), lr=LEARNING_RATE)
def fit(self, actual, desire):
self.features.train()
self.predictor.train()
self.optimizer.zero_grad()
actual_features, desire_features, ploss = self.evaluate(actual, desire)
fake = torch.nn.parallel.data_parallel(module=self.predictor, inputs=actual_features[-1].detach(),device_ids=self.cudas).view(-1)
zeros = Variable(torch.zeros(fake.shape).to(self.device))
real = torch.nn.parallel.data_parallel(module=self.predictor, inputs=desire_features[-1].detach(), device_ids=self.cudas).view(-1)
ones = Variable(torch.ones(real.shape).to(self.device))
lossDreal = self.AdversarialCriterion(real, ones)
lossDfake = self.AdversarialCriterion(fake, zeros)
lossD = lossDreal + lossDfake + self.relu(self.margin - ploss).mean()
lossD.backward(retain_graph=True)
self.optimizer.step()
self.meta_optimize(lossD, float(actual.size(0)))
def forward(self, actual, desire):
self.predictor.eval()
self.features.eval()
actual_features, _, ploss = self.evaluate(actual, desire)
rest = self.predictor(actual_features[-1]).view(-1)
ones = Variable(torch.ones(rest.shape).to(self.device))
aloss = self.AdversarialCriterion(rest, ones)
self.loss = ploss + aloss + self.ContentCriterion(actual, desire)
self.fit(actual, desire)
return self.loss
def backward(self, retain_variables=True):
return self.loss.backward(retain_variables=retain_variables)
class MobileExtractor(BasicMultiFeatureExtractor):
def __init__(self, requires_grad=False, bn = True):
features = VGG_19_BN_CONFIG if bn else VGG_19_CONFIG
super(MobileExtractor, self).__init__(features, requires_grad)
class MobilePerceptualLoss(nn.Module):
def __init__(self):
super(MobilePerceptualLoss, self).__init__()
self.factors = [1e0, 1e-1, 1e-2, 1e-3]
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.cudas = list(range(torch.cuda.device_count()))
self.features = MobileExtractor()
self.features.eval()
self.features.to(self.device)
self.criterion = nn.MSELoss()
def forward(self, actual, desire):
actuals = torch.nn.parallel.data_parallel(module=self.features, inputs=actual, device_ids=self.cudas)
desires = torch.nn.parallel.data_parallel(module=self.features, inputs=desire, device_ids=self.cudas)
loss = 0.0
for i in range(len(actuals)):
loss += self.factors[i]*self.criterion(actuals[i], desires[i])
self.loss = loss
return self.loss
def backward(self, retain_variables=True):
return self.loss.backward(retain_variables=retain_variables)
class SimpleExtractor(BasicFeatureExtractor):
def __init__(self, feat=1, bn = True):
features_list = VGG_19_BN_CONFIG['features'] if bn else VGG_19_CONFIG['features']
features_limit = features_list[1]
super(SimpleExtractor, self).__init__(VGG_19_CONFIG, features_limit)
class SimplePerceptualLoss(nn.Module):
def __init__(self, feat : int = 2):
super(SimplePerceptualLoss, self).__init__()
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.cudas = list(range(torch.cuda.device_count()))
self.features = SimpleExtractor(feat)
self.features.eval()
self.features.to(self.device)
self.criterion = nn.MSELoss()
def forward(self, actual, desire):
actuals = torch.nn.parallel.data_parallel(module=self.features, inputs=actual, device_ids=self.cudas)
desires = torch.nn.parallel.data_parallel(module=self.features, inputs=desire, device_ids=self.cudas)
loss = self.criterion(actuals, desires)
self.loss = loss
return self.loss
def backward(self, retain_variables=True):
return self.loss.backward(retain_variables=retain_variables)
class SqueezeExtractor(BasicMultiFeatureExtractor):
def __init__(self, requires_grad=False):
super(SqueezeExtractor, self).__init__(SQUEEZENET_CONFIG, requires_grad)
class SqueezeAdaptivePerceptualLoss(AdaptivePerceptualLoss):
def __init__(self):
super(SqueezeAdaptivePerceptualLoss, self).__init__()
self.features = SqueezeExtractor(requires_grad=True)
self.features.to(self.device)
self.predictor.to(self.device)
class SpectralFluentExtractor(BasicMultiFeatureExtractor):
def __init__(self):
super(BasicFeatureExtractor, self).__init__()
self.mean = Parameter(torch.zeros(DIMENSION).view(-1, 1, 1))
self.std = Parameter(torch.ones(DIMENSION).view(-1, 1, 1))
self.slice1 = torch.nn.Sequential(
nn.Conv2d(in_channels=DIMENSION, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
)
self.slice2 = torch.nn.Sequential(
SpectralNorm(nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, padding=1, bias=False)),
nn.LeakyReLU(0.2, inplace=True),
SpectralNorm(nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1, bias=False)),
nn.LeakyReLU(0.2, inplace=True),
SpectralNorm(nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1, bias=False)),
nn.LeakyReLU(0.2, inplace=True),
)
self.slice3 = torch.nn.Sequential(
SpectralNorm(nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1, bias=False)),
nn.LeakyReLU(0.2, inplace=True),
SpectralNorm(nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2, padding=1, bias=False)),
nn.LeakyReLU(0.2, inplace=True),
)
self.slice4 = torch.nn.Sequential(
SpectralNorm(nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1, bias=False)),
nn.LeakyReLU(0.2, inplace=True),
SpectralNorm(nn.Conv2d(in_channels=512, out_channels=8, kernel_size=3, stride=2, padding=1, bias=False)),
nn.LeakyReLU(0.2, inplace=True),
)
class SpectralAdaptivePerceptualLoss(AdaptivePerceptualLoss):
def __init__(self):
super(SpectralAdaptivePerceptualLoss, self).__init__()
self.features = SpectralFluentExtractor()
self.predictor = nn.Sequential()
self.predictor.add_module('fc', SpectralNorm(nn.Conv2d(8, 1, 1, 1, 0, bias=False)))
self.features.to(self.device)
self.predictor.to(self.device)
def fit(self, actual, desire):
self.features.train()
self.predictor.train()
self.optimizer.zero_grad()
actual_features, desire_features, ploss = self.evaluate(actual, desire)
fake = torch.nn.parallel.data_parallel(module=self.predictor, inputs=actual_features[-1].detach(),
device_ids=self.cudas).view(-1)
real = torch.nn.parallel.data_parallel(module=self.predictor, inputs=desire_features[-1].detach(),
device_ids=self.cudas).view(-1)
lossDreal = self.relu(1.0 - real).mean()
lossDfake = self.relu(1.0 + fake).mean()
lossD = lossDreal + lossDfake + self.relu(self.margin - ploss).mean()
lossD.backward(retain_graph=True)
self.optimizer.step()
self.meta_optimize(lossD, float(actual.size(0)))
def forward(self, actual, desire):
self.predictor.eval()
self.features.eval()
actual_features, _, ploss = self.evaluate(actual, desire)
self.loss = ploss - self.predictor(actual_features[-1]).view(-1).mean() + self.ContentCriterion(actual, desire)
self.fit(actual, desire)
return self.loss
class WassersteinAdaptivePerceptualLoss(SpectralAdaptivePerceptualLoss):
def __init__(self):
super(WassersteinAdaptivePerceptualLoss, self).__init__()
self.predictor.add_module('sigmoid', nn.Sigmoid())
self.predictor.to(self.device)
def forward(self, actual, desire):
self.predictor.eval()
self.features.eval()
actual_features, _, ploss = self.evaluate(actual, desire)
result = self.predictor(actual_features[-1]).view(-1)
self.loss = ploss - result.view(-1).mean() + torch.nn.functional.binary_cross_entropy(result, torch.ones_like(result))
self.fit(actual, desire)
return self.loss
def fit(self, actual, desire):
self.features.train()
self.predictor.train()
self.optimizer.zero_grad()
actual_features, desire_features, ploss = self.evaluate(actual, desire)
fake = torch.nn.parallel.data_parallel(module=self.predictor, inputs=actual_features[-1].detach(),
device_ids=self.cudas).view(-1)
real = torch.nn.parallel.data_parallel(module=self.predictor, inputs=desire_features[-1].detach(),
device_ids=self.cudas).view(-1)
real_loss = torch.nn.functional.binary_cross_entropy(real, Variable(torch.ones_like(real)).to(self.device))
fake_loss = torch.nn.functional.binary_cross_entropy(fake, Variable(torch.zeros_like(fake)).to(self.device))
wgan_loss = fake.mean() - real.mean()
interpolates = 0.5 * desire + (1 - 0.5) * actual
interpolates = Variable(interpolates.clone(), requires_grad=True).to(self.device)
interpolatesl_features = torch.nn.parallel.data_parallel(module=self.features, inputs=interpolates, device_ids=self.cudas)
interpolates_discriminator_out = torch.nn.parallel.data_parallel(module=self.predictor, inputs=interpolatesl_features[-1], device_ids=self.cudas).view(-1)
buffer = Variable(torch.ones_like(interpolates_discriminator_out), requires_grad=True).to(self.device)
gradients = torch.autograd.grad(outputs=interpolates_discriminator_out, inputs=interpolates,
grad_outputs=buffer,
retain_graph=True,
create_graph=True)[0]
gradient_penalty = ((gradients.view(gradients.size(0), -1).norm(2, dim=1) - 1) ** 2).mean()
lossD = (real_loss + fake_loss) / 2.0 + wgan_loss + 1e-2*gradient_penalty
lossD.backward()
self.optimizer.step()
self.current_loss += float(lossD.item()) / float(actual.size(0))
if self.counter > ITERATION_LIMIT:
self.current_loss = self.current_loss / float(ITERATION_LIMIT)
if self.current_loss < self.best_loss:
self.best_loss = self.current_loss
print('! best_loss !', self.best_loss)
else:
for param_group in self.optimizer.param_groups:
lr = param_group['lr']
if lr >= LR_THRESHOLD:
param_group['lr'] = lr * 0.2
print('! Decrease LearningRate in Perceptual !', lr)
self.counter = int(0)
self.current_loss = float(0)
self.counter += int(1)
| 45.088803 | 163 | 0.630074 | 22,527 | 0.964506 | 0 | 0 | 614 | 0.026289 | 0 | 0 | 490 | 0.02098 |
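A minimal usage sketch for the simplest of these losses. It assumes `DIMENSION == 3`, that torchvision can fetch the pretrained VGG weights, and that at least one CUDA device is visible, since the loss routes its extractor through `data_parallel` with the detected GPU ids; shapes and the weight value are illustrative.

```python
import torch
from PerceptualLoss import FastNeuralStylePerceptualLoss

criterion = FastNeuralStylePerceptualLoss(weight=1e-3)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

prediction = torch.rand(2, 3, 128, 128, device=device, requires_grad=True)  # denoiser output
target = torch.rand(2, 3, 128, 128, device=device)                          # clean reference

loss = criterion(prediction, target)   # content term plus Gram-matrix style term
loss.backward()
print(float(loss))
```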
d01c9a230584278712c3050c6c3bb720320fb106 | 682 | py | Python | app/main/views.py | diellamutoni8/NewsAPPS | 09701c96f687e08de2992e56da4f261b1aadf93c | [
"MIT"
]
| null | null | null | app/main/views.py | diellamutoni8/NewsAPPS | 09701c96f687e08de2992e56da4f261b1aadf93c | [
"MIT"
]
| null | null | null | app/main/views.py | diellamutoni8/NewsAPPS | 09701c96f687e08de2992e56da4f261b1aadf93c | [
"MIT"
]
| null | null | null |
from flask import render_template,request,redirect,url_for
from .import main
from ..request import get_sources,get_articles
from ..models import News_article,News_source
@main.route('/')
def index():
'''
Home page function returns news sources
'''
news_sources = get_sources()
title = "Welcome"
return render_template('index.html',title = title, sources = news_sources)
@main.route('/articles/<articles>')
def articles(articles):
'''
Displays articles from a specific news source
'''
news_article = get_articles(articles)
    title = "news-app articles"
    return render_template('articles.html', title=title, articles=news_article)
| 25.259259 | 82 | 0.717009 | 0 | 0 | 0 | 0 | 509 | 0.746334 | 0 | 0 | 197 | 0.288856 |
d01da04f511cea7e2cb3c255526d51bbef8b8016 | 724 | py | Python | models/todo.py | chidaobanjiu/Flask_Web | 7f8d33086ca307ae7f1b998ed7d52e27fc625388 | [
"MIT"
]
| 1 | 2017-02-01T07:13:37.000Z | 2017-02-01T07:13:37.000Z | models/todo.py | chidaobanjiu/mana2077 | 7f8d33086ca307ae7f1b998ed7d52e27fc625388 | [
"MIT"
]
| null | null | null | models/todo.py | chidaobanjiu/mana2077 | 7f8d33086ca307ae7f1b998ed7d52e27fc625388 | [
"MIT"
]
| null | null | null |
from models import Mongua
class Todo(Mongua):
__field__ = Mongua.__fields__ + [
('title', str, ''),
('completed', bool, False),
]
@classmethod
def update(cls, id, form):
t = cls.find(id)
valid_names = [
'title',
'completed'
]
for key in form:
            # only update the keys we explicitly allow
if key in valid_names:
setattr(t, key, form[key])
t.save()
return t
@classmethod
def complete(cls, id, completed=True):
"""
        Convenient usage:
Todo.complete(1)
Todo.complete(2, False)
"""
t = cls.find(id)
t.completed = completed
t.save()
return t
| 20.685714 | 42 | 0.476519 | 737 | 0.962141 | 0 | 0 | 598 | 0.780679 | 0 | 0 | 184 | 0.240209 |
d01f7d7cb1aafa6f0cf4dd66656aaf36d58e3686 | 371 | py | Python | CanTingSystem/Party.py | scottyyf/oodoop | d254b8a355c985c7672381495ab8a44d7d55e153 | [
"MIT"
]
| 2 | 2021-12-14T10:46:14.000Z | 2021-12-14T10:47:00.000Z | CanTingSystem/Party.py | scottyyf/oodoop | d254b8a355c985c7672381495ab8a44d7d55e153 | [
"MIT"
]
| 9 | 2021-12-06T06:16:15.000Z | 2021-12-20T06:39:50.000Z | CanTingSystem/Party.py | scottyyf/oodoop | d254b8a355c985c7672381495ab8a44d7d55e153 | [
"MIT"
]
| null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: Party.py
Author: Scott Yang(Scott)
Email: [email protected]
Copyright: Copyright (c) 2021, Skybility Software Co.,Ltd. All rights reserved.
Description:
"""
class Party(object):
def __init__(self, capacity):
self._capacity = capacity
def get_capacity(self):
return self._capacity
| 19.526316 | 79 | 0.684636 | 147 | 0.396226 | 0 | 0 | 0 | 0 | 0 | 0 | 217 | 0.584906 |
d0205b5caed2d6f638ffecd766f2e084e27abd9b | 11,517 | py | Python | Python/Unittest/Fixtures/tests.py | Gjacquenot/training-material | 16b29962bf5683f97a1072d961dd9f31e7468b8d | [
"CC-BY-4.0"
]
| 115 | 2015-03-23T13:34:42.000Z | 2022-03-21T00:27:21.000Z | Python/Unittest/Fixtures/tests.py | Gjacquenot/training-material | 16b29962bf5683f97a1072d961dd9f31e7468b8d | [
"CC-BY-4.0"
]
| 56 | 2015-02-25T15:04:26.000Z | 2022-01-03T07:42:48.000Z | Python/Unittest/Fixtures/tests.py | Gjacquenot/training-material | 16b29962bf5683f97a1072d961dd9f31e7468b8d | [
"CC-BY-4.0"
]
| 59 | 2015-11-26T11:44:51.000Z | 2022-03-21T00:27:22.000Z |
#!/usr/bin/env python
import os
import shutil
import sqlite3
import unittest
import init_db
'''name of database to use as master'''
master_name = 'projects.db'
def setUpModule():
'''create and fill the database'''
conn = sqlite3.connect(master_name)
init_db.execute_file(conn, 'create_db.sql')
init_db.execute_file(conn, 'fill_db.sql')
def tearDownModule():
'''remove database file once testing is done'''
os.remove(master_name)
class ContentsTest(unittest.TestCase):
test_name = 'test.db'
@classmethod
def setUpClass(cls):
'''copy original database'''
shutil.copyfile(master_name, cls.test_name)
def setUp(self):
'''open connection, create cursor'''
self._conn = sqlite3.connect(self.__class__.test_name)
self._conn.row_factory = sqlite3.Row
self._cursor = self._conn.cursor()
def tearDown(self):
'''close database connection'''
self._conn.close()
@classmethod
def tearDownClass(cls):
'''remove test database'''
os.remove(cls.test_name)
def test_num_projects(self):
'''test whether the projects table has the expected number of
rows'''
expected = 3
self._cursor.execute(''' SELECT COUNT(*) FROM projects;''')
nr_rows = self._cursor.fetchone()[0]
self.assertEqual(expected, nr_rows)
def test_num_researchers(self):
'''test whether the researchers table has the expected number of
rows'''
expected = 3
self._cursor.execute(''' SELECT COUNT(*) FROM researchers;''')
nr_rows = self._cursor.fetchone()[0]
self.assertEqual(expected, nr_rows)
def test_num_assignments(self):
'''test whether the number of staff assignments is the expected
number'''
expected = 2
self._cursor.execute(''' SELECT COUNT(*) FROM staff_assignments;''')
nr_rows = self._cursor.fetchone()[0]
self.assertEqual(expected, nr_rows)
def test_view_consistency(self):
'''test whether the staff assignments table has the same number
of rows as the project staffing view'''
self._cursor.execute(''' SELECT COUNT(*) FROM staff_assignments;''')
nr_table_rows = self._cursor.fetchone()[0]
self._cursor.execute(''' SELECT COUNT(*) FROM project_staffing;''')
nr_view_rows = self._cursor.fetchone()[0]
self.assertEqual(nr_table_rows, nr_view_rows)
def test_researcher_bob(self):
'''test whether there is a researcher named Bob, and whether
his last name is Dreary'''
expected_last_name = 'Dreary'
expected_nr_rows = 1
self._cursor.execute(
'''SELECT last_name FROM researchers
WHERE first_name = ?;''',
('Bob', )
)
nr_rows = 0
last_name = None
for row in self._cursor:
nr_rows += 1
last_name = row['last_name']
self.assertEqual(expected_nr_rows, nr_rows)
self.assertEqual(expected_last_name, last_name)
def test_projects_start_date(self):
'''test whether the projects table has the correct number of
projects starting before November 1, 2014, and check those are
the expected projects'''
expected_projects = ['project 1', 'project 2']
self._cursor.execute(
'''SELECT project_name FROM projects
WHERE start_date < ?
ORDER BY project_name ASC;''',
('2014-11-01', )
)
projects = []
for row in self._cursor:
projects.append(row['project_name'])
self.assertListEqual(expected_projects, projects)
def test_unassigned_researchers(self):
'''test whether the number of unassigned researchers is the
expected one, and that Carol is idle'''
expected_researchers = ['Carol']
self._cursor.execute(
'''SELECT first_name FROM researchers
WHERE researcher_id IN (
SELECT researcher_id
FROM researchers
EXCEPT SELECT researcher_id
FROM staff_assignments);'''
)
researchers = []
for row in self._cursor:
researchers.append(row['first_name'])
self.assertListEqual(expected_researchers, researchers)
def test_assigned_projects(self):
'''test whether the expected number of projects has been
assigned'''
expected_nr_assignments = 2
self._cursor.execute(
'''SELECT COUNT(DISTINCT project_id) FROM staff_assignments;'''
)
nr_assignments = self._cursor.fetchone()[0]
self.assertEqual(expected_nr_assignments, nr_assignments)
def test_samples_per_project(self):
        '''test whether each project has the correct number of samples
associated with it'''
expected_samples = {
'project 1': {'homo sapiens', 'felis catus'},
'project 2': {'felis catus'},
'project 3': set(),
}
self._cursor.execute(
'''SELECT p.project_name AS 'project_name',
COUNT(s.sample_id) AS 'nr_samples'
FROM projects AS p, samples AS s
WHERE s.project_id = p.project_id
GROUP BY p.project_id;'''
)
for row in self._cursor:
self.assertEqual(len(expected_samples[row['project_name']]),
row['nr_samples'])
for project_name in expected_samples:
self._cursor.execute(
'''SELECT s.organism AS organism
FROM projects AS p, samples AS s
WHERE p.project_name = ? AND
p.project_id = s.project_id;''',
(project_name, )
)
samples = set()
for row in self._cursor:
samples.add(row['organism'])
self.assertSetEqual(expected_samples[project_name], samples)
class ConstraintsTest(unittest.TestCase):
'''tests the table constraints and triggers defined on the schema'''
test_name = 'test.db'
def setUp(self):
'''copy original database, and open connection, create cursor'''
shutil.copyfile(master_name, self.__class__.test_name)
self._conn = sqlite3.connect(self.__class__.test_name)
self._conn.row_factory = sqlite3.Row
self._cursor = self._conn.cursor()
def tearDown(self):
'''close database connection and remove test database'''
self._conn.close()
os.remove(self.__class__.test_name)
def test_project_end_date(self):
        '''inserting a project with an invalid end date should fail'''
project_name = 'project 4'
start_date = '2015-01-05'
end_date = '2014-12-15'
with self.assertRaises(sqlite3.IntegrityError):
self._cursor.execute(
'''INSERT INTO projects (project_name, start_date, end_date)
VALUES (?, ?, ?);''',
(project_name, start_date, end_date)
)
def test_project_name_uniqueness(self):
'''inserting a project with a name that is already in the table
should fail'''
project_name = 'project 2'
start_date = '2015-01-05'
end_date = '2015-12-15'
with self.assertRaises(sqlite3.IntegrityError):
self._cursor.execute(
'''INSERT INTO projects (project_name, start_date, end_date)
VALUES (?, ?, ?);''',
(project_name, start_date, end_date)
)
def test_double_assignment(self):
'''assigning a researcher to a project twice should fail'''
project_name = 'project 1'
first_name = 'Bob'
with self.assertRaises(sqlite3.IntegrityError):
self._cursor.execute(
'''INSERT INTO staff_assignments
(project_id, researcher_id)
SELECT p.project_id AS 'project_id',
r.researcher_id AS 'researcher_id'
FROM projects AS p, researchers AS r
WHERE p.project_name = ? AND
r.first_name = ?;''',
(project_name, first_name)
)
def test_researcher_delete_trigger(self):
'''when a researcher is deleted, the staff assignments for this
person should be deleted as well'''
expected_nr_rows = 1
project_name = 'project 2'
first_name = 'Bob'
# first, assign Bob to project 2 as well
self._cursor.execute(
'''INSERT INTO staff_assignments
(project_id, researcher_id)
SELECT p.project_id AS 'project_id',
r.researcher_id AS 'researcher_id'
FROM projects AS p, researchers AS r
WHERE p.project_name = ? AND
r.first_name = ?;''',
(project_name, first_name)
)
self._cursor.execute(
'''DELETE FROM researchers WHERE first_name = ?;''',
(first_name, )
)
self._cursor.execute(
'''SELECT COUNT(*) FROM staff_assignments;'''
)
nr_rows = 0
for row in self._cursor:
nr_rows += 1
self.assertEqual(expected_nr_rows, nr_rows)
self._cursor.execute(
'''SELECT COUNT(*) FROM project_staffing;'''
)
nr_rows = 0
for row in self._cursor:
nr_rows += 1
self.assertEqual(expected_nr_rows, nr_rows)
def test_project_delete_trigger(self):
'''when a project is deleted, the staff assignments for this
project should be deleted as well'''
project_name = 'project 1'
expected_staffed_projects = {'project 2'}
self._cursor.execute(
'''DELETE FROM projects
WHERE project_name = ?;''',
(project_name, )
)
self._cursor.execute(
'''SELECT p.project_name AS 'project_name'
FROM projects AS p, staff_assignments AS s
WHERE p.project_id = s.project_id;'''
)
staffed_projects = set()
for row in self._cursor:
staffed_projects.add(row['project_name'])
self.assertSetEqual(expected_staffed_projects, staffed_projects)
def test_sample_update_trigger(self):
'''when a project is deleted, samples for that project should
refer to NULL'''
project_name = 'project 1'
expected_nr_samples = 3
expected_nr_null_ref_samples = 2
self._cursor.execute(
'''DELETE FROM projects
WHERE project_name = ?;''',
(project_name, )
)
self._cursor.execute(
'''SELECT COUNT(*) FROM samples
WHERE project_id IS NULL;'''
)
nr_null_ref_samples = self._cursor.fetchone()[0]
self.assertEqual(expected_nr_null_ref_samples, nr_null_ref_samples)
self._cursor.execute(
'''SELECT COUNT(*) FROM samples;'''
)
nr_samples = self._cursor.fetchone()[0]
self.assertEqual(expected_nr_samples, nr_samples)
if __name__ == '__main__':
unittest.main()
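# How-to sketch (assumes create_db.sql and fill_db.sql sit next to init_db.py):
#   python3 -m unittest -v tests
# Fixtures run in the order setUpModule -> setUpClass -> setUp for each test,
# and are torn down in the reverse order.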
| 36.33123 | 76 | 0.581488 | 11,001 | 0.955197 | 0 | 0 | 234 | 0.020318 | 0 | 0 | 5,024 | 0.436225 |
d021199fc85a8a81bc13417b44056945e03b66e3 | 3,668 | py | Python | backend/ids/views/ids.py | block-id/wallet | b5479df7df0e5b5733f0ae262ffc17f9b923347d | [
"Apache-2.0"
]
| null | null | null | backend/ids/views/ids.py | block-id/wallet | b5479df7df0e5b5733f0ae262ffc17f9b923347d | [
"Apache-2.0"
]
| null | null | null | backend/ids/views/ids.py | block-id/wallet | b5479df7df0e5b5733f0ae262ffc17f9b923347d | [
"Apache-2.0"
]
| 1 | 2021-12-31T17:27:44.000Z | 2021-12-31T17:27:44.000Z | import json
from django.http.response import JsonResponse
from django.db.models import Q
from django.contrib.auth import authenticate
from rest_framework import viewsets, mixins
from rest_framework.permissions import IsAuthenticated
from rest_framework.exceptions import ValidationError, AuthenticationFailed
from rest_framework.decorators import action
from jsonschema.exceptions import ValidationError as JsonValidationError
from ids.models import Id
from ids.serializers.id.create import IdCreateSerializer
from ids.serializers.id.list import IdListSerializer
from ids.actions import create_verifiable_presentation
from ids.utils import verify_json_id
from lib.json_ids.validate import validate_json_id
from lib.drf.pagination import DefaultPageNumberPagination
class IdViewset(
mixins.CreateModelMixin,
mixins.ListModelMixin,
mixins.RetrieveModelMixin,
mixins.DestroyModelMixin,
viewsets.GenericViewSet,
):
permission_classes = [IsAuthenticated]
pagination_class = DefaultPageNumberPagination
def get_queryset(self):
qs = Id.objects.filter(owner=self.request.user)
if self.action == "list":
query = self.request.GET.get("query")
if query:
qs = qs.filter(
Q(id_name__icontains=query) | Q(issuer_name__icontains=query)
)
type = self.request.GET.get("type")
if type:
qs = qs.filter(Q(type=type))
qs = qs.order_by("-id")
return qs
def get_serializer_class(self):
if self.action == "list":
return IdListSerializer
else:
return IdCreateSerializer
def create(self, request, *args, **kwargs):
if request.content_type != "application/json":
json_data = request.data.get("json")
json_id = json.loads(json_data)
else:
json_id = request.data.get("json")
# Validation
try:
validate_json_id(json_id)
except JsonValidationError as e:
error_path = "json." + ".".join(map(str, e.path))
raise ValidationError(f"{error_path}: {e.message}")
except (AssertionError, ValueError) as e:
raise ValidationError(str(e))
# ID signature verification
try:
verify_json_id(json_id)
except AssertionError as e:
raise ValidationError(str(e))
# Create ID
serializer = self.get_serializer(
data={
"owner": request.user.id,
"type": json_id["data"]["idType"],
"issuer_name": json_id["data"]["issuer"]["name"],
"id_name": json_id["data"]["idName"],
"verifiable_id": json_id,
}
)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
return JsonResponse(serializer.data)
@action(
methods=["post"],
detail=True,
url_path="create-vp",
permission_classes=[IsAuthenticated],
)
def create_vp(self, request, pk):
id = self.get_object()
attribute_groups = set(request.data.get("attribute_groups", []))
password = request.data.get("password")
entropy = request.data.get("entropy", "")
if not authenticate(request, username=request.user.username, password=password):
raise AuthenticationFailed("Invalid password")
presentation = create_verifiable_presentation(
id,
attribute_groups,
password,
entropy,
)
return JsonResponse(presentation)
| 33.045045 | 88 | 0.634133 | 2,898 | 0.790076 | 0 | 0 | 723 | 0.19711 | 0 | 0 | 324 | 0.088332 |
d0229c062e76ef7372542bd68ae2fdd99d5d9b15 | 1,257 | py | Python | pattern.py | surajwate/textpattern | 79869f932717bec47fc4a0e3e968c5a8321d8038 | [
"MIT"
]
| null | null | null | pattern.py | surajwate/textpattern | 79869f932717bec47fc4a0e3e968c5a8321d8038 | [
"MIT"
]
| null | null | null | pattern.py | surajwate/textpattern | 79869f932717bec47fc4a0e3e968c5a8321d8038 | [
"MIT"
]
| null | null | null | def plusdash(plus, dash):
for i in range((plus-1)*dash + plus):
if i%(dash+1)==0:
print('+', end='')
else:
print('-', end='')
print('')
def pipe(pipe, space):
for i in range((pipe-1)*space + pipe):
if i % (space+1) == 0:
print('|', end='')
else:
print(' ', end='')
print('')
def wordinbar(word, space):
pipe = len(word) + 1
j = 0
for i in range((pipe-1)*space + pipe):
if i % (space+1) == 0:
print('|', end='')
elif i % (space//2 + 1) == 0:
print(word[j], end='')
j += 1
else:
print(' ', end='')
print('')
def wordinbox(word, space):
plusdash(len(word)+1, space)
for i in range(1):
wordinbar(word, space)
plusdash(len(word)+1, space)
def wordinsquare(word, height):
if height % 2 == 0:
space = height + 1
else:
space = height
space = space * 3
plusdash(len(word)+1, space)
for i in range(space//6):
pipe(len(word)+1, space)
for i in range(1):
wordinbar(word, space)
for i in range(space//6):
pipe(len(word)+1, space)
plusdash(len(word)+1, space)
wordinsquare('SURAJ W', 3)
| 22.854545 | 42 | 0.478123 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 47 | 0.037391 |
d028aa49515cb0d7956170029a3d7c9b7460dad7 | 2,624 | py | Python | src/apps/analysis/gen/edgeWeightBipartiteGaphGenerator.py | JacobFV/mln-analysis | f78a6531e5126f29e6895e9b8e4b4600110b3858 | [
"MIT"
]
| null | null | null | src/apps/analysis/gen/edgeWeightBipartiteGaphGenerator.py | JacobFV/mln-analysis | f78a6531e5126f29e6895e9b8e4b4600110b3858 | [
"MIT"
]
| null | null | null | src/apps/analysis/gen/edgeWeightBipartiteGaphGenerator.py | JacobFV/mln-analysis | f78a6531e5126f29e6895e9b8e4b4600110b3858 | [
"MIT"
]
| null | null | null | import os
def get_comm_no(community_id, community_dict):
community_id = str(community_id)
if community_id in community_dict:
return community_dict[community_id]
else:
return 0
def edgeWeightBipartiteGraphGenerator(
layer1,
layer2,
layer1CommunityFile,
layer2CommunityFile,
layer12InterEdgeFile,
resultFile
):
# it looks like this was written for the IMDb dataset in particular.
# shouldn't it be more general?
director_community_info = []
director_community_dict = {}
movie_community_info = []
movie_community_dict = {}
no_of_vertices_in_movie_communities = {}
no_of_vertices_in_director_communities = {}
with open(layer1CommunityFile) as f:
for line in f:
director_community_info.append(line.split(' '))
for i in director_community_info:
if len(i) == 3:
director_community_dict[i[0]] = i[1]
if i[1] not in no_of_vertices_in_director_communities:
no_of_vertices_in_director_communities[i[1]] = 1
else:
no_of_vertices_in_director_communities[i[1]] += 1
with open(layer2CommunityFile) as f:
for line in f:
movie_community_info.append(line.split(' '))
for i in movie_community_info:
if (len(i) == 3):
movie_community_dict[i[0]] = i[1]
if i[1] not in no_of_vertices_in_movie_communities:
no_of_vertices_in_movie_communities[i[1]] = 1
else:
no_of_vertices_in_movie_communities[i[1]] += 1
edges_between_director_movie_communities = {}
with open(layer12InterEdgeFile) as b:
for line in b:
item = line.strip().split(',')
comm1 = get_comm_no(item[0], director_community_dict)
comm2 = get_comm_no(item[1], movie_community_dict)
if comm1 in no_of_vertices_in_director_communities:
ver1 = no_of_vertices_in_director_communities[comm1]
else:
ver1 = 0
if comm2 in no_of_vertices_in_movie_communities:
ver2 = no_of_vertices_in_movie_communities[comm2]
else:
ver2 = 0
if ver1 > 0 and ver2 > 0:
if (comm1, comm2) not in edges_between_director_movie_communities:
edges_between_director_movie_communities[(comm1, comm2)] = 1
else:
edges_between_director_movie_communities[(comm1, comm2)] += 1
if not os.path.exists(os.path.dirname(resultFile)):
os.makedirs(os.path.dirname(resultFile))
fs = open(resultFile, "w")
for k, v in edges_between_director_movie_communities.items():
fs.write("{0},{1},{2},{3},{4}\n".format("1", k[0], "0", k[1], v, "\n"))
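# Illustrative call sketch (file names below are placeholders, not taken from
# the original project):
#   edgeWeightBipartiteGraphGenerator(
#       layer1='directors', layer2='movies',
#       layer1CommunityFile='director_communities.txt',
#       layer2CommunityFile='movie_communities.txt',
#       layer12InterEdgeFile='director_movie_edges.csv',
#       resultFile='out/bipartite_edge_weights.csv')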
| 35.459459 | 76 | 0.671113 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 146 | 0.05564 |
d029c8b65c82f8223b70d8ea031a22a8434f3b04 | 5,171 | py | Python | pubs/utils.py | WIPACrepo/publication-web-db | f5d77f43c89377449f4fbe952f6b1dcfc458c91a | [
"MIT"
]
| null | null | null | pubs/utils.py | WIPACrepo/publication-web-db | f5d77f43c89377449f4fbe952f6b1dcfc458c91a | [
"MIT"
]
| 16 | 2020-09-26T00:49:56.000Z | 2021-09-09T19:03:42.000Z | pubs/utils.py | WIPACrepo/publication-web-db | f5d77f43c89377449f4fbe952f6b1dcfc458c91a | [
"MIT"
]
| null | null | null | from datetime import datetime
import logging
import json
import csv
from io import StringIO
import pymongo
from bson.objectid import ObjectId
from . import PUBLICATION_TYPES, PROJECTS, SITES
def nowstr():
return datetime.utcnow().isoformat()
def date_format(datestring):
if 'T' in datestring:
if '.' in datestring:
date = datetime.strptime(datestring, "%Y-%m-%dT%H:%M:%S.%f")
else:
date = datetime.strptime(datestring, "%Y-%m-%dT%H:%M:%S")
else:
date = datetime.strptime(datestring, "%Y-%m-%d")
return date.strftime("%d %B %Y")
def create_indexes(db_url, db_name, background=True):
db = pymongo.MongoClient(db_url)[db_name]
indexes = db.publications.index_information()
if 'projects_index' not in indexes:
logging.info('creating projects_index')
db.publications.create_index('projects', name='projects_index', background=background)
if 'date_index' not in indexes:
logging.info('creating date_index')
db.publications.create_index('date', name='date_index', background=background)
if 'text_index' not in indexes:
logging.info('creating text_index')
db.publications.create_index([('title', pymongo.TEXT), ('authors', pymongo.TEXT), ('citation', pymongo.TEXT)],
weights={'title': 10, 'authors': 5, 'citation': 1},
name='text_index', background=background)
def validate(title, authors, pub_type, citation, date, downloads, projects, sites):
assert isinstance(title, str)
assert isinstance(authors, list)
for a in authors:
assert isinstance(a, str)
assert pub_type in PUBLICATION_TYPES
assert isinstance(citation, str)
assert isinstance(date, str)
date_format(date)
assert isinstance(downloads, list)
for d in downloads:
assert isinstance(d, str)
assert projects
assert isinstance(projects, list)
for p in projects:
assert p in PROJECTS
for s in sites:
assert s in SITES
async def add_pub(db, title, authors, pub_type, citation, date, downloads, projects, sites=None):
if not sites:
sites = []
validate(title, authors, pub_type, citation, date, downloads, projects, sites)
data = {
"title": title,
"authors": authors,
"type": pub_type,
"citation": citation,
"date": date,
"downloads": downloads,
"projects": projects,
"sites": sites,
}
await db.publications.insert_one(data)
async def edit_pub(db, mongo_id, title=None, authors=None, pub_type=None, citation=None, date=None, downloads=None, projects=None, sites=None):
match = {'_id': ObjectId(mongo_id)}
update = {}
if title:
assert isinstance(title, str)
update['title'] = title
if authors:
assert isinstance(authors, list)
for a in authors:
assert isinstance(a, str)
update['authors'] = authors
if pub_type:
assert pub_type in PUBLICATION_TYPES
update['type'] = pub_type
if citation:
assert isinstance(citation, str)
update['citation'] = citation
if date:
assert isinstance(date, str)
date_format(date)
update['date'] = date
if downloads:
assert isinstance(downloads, list)
for d in downloads:
assert isinstance(d, str)
update['downloads'] = downloads
if projects:
assert isinstance(projects, list)
for p in projects:
assert p in PROJECTS
update['projects'] = projects
if sites:
assert isinstance(sites, list)
for s in sites:
assert s in SITES
update['sites'] = sites
await db.publications.update_one(match, {'$set': update})
async def try_import_file(db, data):
"""
    Try importing publications from file data (csv or json).
"""
# parse the data
try:
pubs = json.loads(data)
if 'publications' in pubs:
pubs = pubs['publications']
except json.JSONDecodeError:
try:
def parse_csv(row):
for k in row:
val = row[k]
if k in ('downloads', 'projects', 'sites'):
row[k] = val.split(',') if val else []
return row
with StringIO(data) as f:
reader = csv.DictReader(f)
pubs = [parse_csv(row) for row in reader]
except csv.Error:
raise Exception('File is not in a recognizable format. Only json or csv are valid.')
# now validate
for p in pubs:
if isinstance(p['authors'], str):
p['authors'] = [p['authors']]
try:
validate(p['title'], p['authors'], p['type'], p['citation'], p['date'], p['downloads'], p['projects'], p['sites'])
except AssertionError:
raise Exception(f'Error validating pub with title {p["title"][:100]}')
    # now add to db
for p in pubs:
await db.publications.replace_one({'title': p['title'], 'authors': p['authors'], 'date': p['date']}, p, upsert=True)
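# Illustrative usage sketch (not part of the original module; `db` is assumed to
# be an async MongoDB handle, and the literal values below are placeholders that
# must match entries in PUBLICATION_TYPES / PROJECTS):
#   await add_pub(db, title='Example paper', authors=['A. Author'],
#                 pub_type='journal', citation='Journal 1 (2020) 1',
#                 date='2020-01-01', downloads=[], projects=['icecube'])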
| 34.704698 | 143 | 0.602591 | 0 | 0 | 0 | 0 | 0 | 0 | 3,103 | 0.600077 | 853 | 0.164958 |
d02b8e08d67537a0237e5ce61fdce30861d1d5de | 2,534 | py | Python | JTL/__init__.py | AgalmicVentures/JTL | 967bc670bf696e0214a69bad619cf0148fec2fe6 | [
"MIT"
]
| 3 | 2017-12-06T04:35:24.000Z | 2020-01-29T14:29:57.000Z | JTL/__init__.py | AgalmicVentures/JTL | 967bc670bf696e0214a69bad619cf0148fec2fe6 | [
"MIT"
]
| null | null | null | JTL/__init__.py | AgalmicVentures/JTL | 967bc670bf696e0214a69bad619cf0148fec2fe6 | [
"MIT"
]
| 2 | 2018-01-06T13:18:07.000Z | 2019-09-01T01:24:04.000Z | #!/usr/bin/env python3
# Copyright (c) 2015-2021 Agalmic Ventures LLC (www.agalmicventures.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import json
import sys
def main():
"""
Runs the main JTL program.
:return: int
"""
#Parse arguments
parser = argparse.ArgumentParser(description='JSON Transformation Language')
parser.add_argument('-i', '--indent', default=4, type=int, help='Indentation amount.')
parser.add_argument('-t', '--transform-file', help='The name of the JSON file containing the transformation to run.')
parser.add_argument('transform', nargs='?', help='The transformation to run.')
arguments = parser.parse_args(sys.argv[1:])
#Load the transformation
if arguments.transform is None and arguments.transform_file is not None:
#From a file
with open(arguments.transform_file, 'r') as f:
transformStr = f.read()
elif arguments.transform is not None and arguments.transform_file is None:
#From the command line
transformStr = arguments.transform
else:
print('ERROR: Specify either a transform file or a transform')
return 1
transformData = json.loads(transformStr)
#Read the JSON in from stdin
#TODO: error handling
data = json.loads(sys.stdin.read())
#Transform the JSON
#TODO: cleaner way to do this
sys.path.append('.')
import Interpreter
result = Interpreter.transformJson(data, transformData)
#Output the result
print(json.dumps(result, indent=arguments.indent, sort_keys=True))
return 0
if __name__ == '__main__':
sys.exit(main())
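# Example invocation sketch (assumes this file is run as a script; the transform
# file syntax itself is defined elsewhere in the JTL project):
#   cat input.json | python3 JTL/__init__.py --transform-file transform.json --indent 2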
| 35.194444 | 118 | 0.757695 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,638 | 0.646409 |
d02bbe745d7a87ef8769d267b8913fe863e1ddc6 | 545 | py | Python | tests/conftest.py | disktnk/onnx-chainer | e4542568009e63e7da83aa0f11b2cb5504e8cef8 | [
"MIT"
]
| null | null | null | tests/conftest.py | disktnk/onnx-chainer | e4542568009e63e7da83aa0f11b2cb5504e8cef8 | [
"MIT"
]
| null | null | null | tests/conftest.py | disktnk/onnx-chainer | e4542568009e63e7da83aa0f11b2cb5504e8cef8 | [
"MIT"
]
| null | null | null | import chainer
import pytest
def pytest_addoption(parser):
parser.addoption(
'--value-check-runtime',
dest='value-check-runtime', default='onnxruntime',
choices=['skip', 'onnxruntime', 'mxnet'], help='select test runtime')
@pytest.fixture(scope='function')
def disable_experimental_warning():
org_config = chainer.disable_experimental_feature_warning
chainer.disable_experimental_feature_warning = True
try:
yield
finally:
chainer.disable_experimental_feature_warning = org_config
| 27.25 | 77 | 0.722936 | 0 | 0 | 255 | 0.46789 | 289 | 0.530275 | 0 | 0 | 114 | 0.209174 |
d02ea29aa2c01102b027e646e76a227470fdeefe | 200 | py | Python | applications/physbam/physbam-lib/External_Libraries/Archives/boost/tools/build/v2/test/test1.py | schinmayee/nimbus | 170cd15e24a7a88243a6ea80aabadc0fc0e6e177 | [
"BSD-3-Clause"
]
| 20 | 2017-07-03T19:09:09.000Z | 2021-09-10T02:53:56.000Z | applications/physbam/physbam-lib/External_Libraries/Archives/boost/tools/build/v2/test/test1.py | schinmayee/nimbus | 170cd15e24a7a88243a6ea80aabadc0fc0e6e177 | [
"BSD-3-Clause"
]
| null | null | null | applications/physbam/physbam-lib/External_Libraries/Archives/boost/tools/build/v2/test/test1.py | schinmayee/nimbus | 170cd15e24a7a88243a6ea80aabadc0fc0e6e177 | [
"BSD-3-Clause"
]
| 9 | 2017-09-17T02:05:06.000Z | 2020-01-31T00:12:01.000Z | #!/usr/bin/python
import BoostBuild
t = BoostBuild.Tester()
t.write("test.jam","""
actions unbuilt { } unbuilt all ;
ECHO "Hi" ;
""")
t.run_build_system("-ftest.jam", stdout="Hi\n")
t.pass_test()
| 14.285714 | 47 | 0.66 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 98 | 0.49 |
d02f44da90d4e56a611cc2675d108a518117aaf4 | 295 | py | Python | micro-benchmark/snippets/classes/tuple_assignment/main.py | WenJinfeng/PyCG | b45e8e04fe697d8301cf27222a8f37646d69f168 | [
"Apache-2.0"
]
| 121 | 2020-12-16T20:31:37.000Z | 2022-03-21T20:32:43.000Z | micro-benchmark/snippets/classes/tuple_assignment/main.py | WenJinfeng/PyCG | b45e8e04fe697d8301cf27222a8f37646d69f168 | [
"Apache-2.0"
]
| 24 | 2021-03-13T00:04:00.000Z | 2022-03-21T17:28:11.000Z | micro-benchmark/snippets/classes/tuple_assignment/main.py | WenJinfeng/PyCG | b45e8e04fe697d8301cf27222a8f37646d69f168 | [
"Apache-2.0"
]
| 19 | 2021-03-23T10:58:47.000Z | 2022-03-24T19:46:50.000Z | class MyClass:
def __init__(self):
pass
def func1(self):
pass
def func2(self):
pass
def func3(self):
pass
class MyClass2:
def __init__(self):
pass
a, b = MyClass(), MyClass2()
c, (d, e) = a.func1, (a.func2, a.func3)
c()
d()
e()
| 11.8 | 39 | 0.515254 | 208 | 0.705085 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d02fffe94e43114a1dbd3bf9abf8e18b0238b4a0 | 72 | py | Python | stweet/model/__init__.py | markowanga/stweet | 7f103b5c88fcef1d993d8cdc99cec358e55293f7 | [
"MIT"
]
| 101 | 2020-11-22T16:44:25.000Z | 2022-03-30T08:42:07.000Z | stweet/model/__init__.py | markowanga/stweet | 7f103b5c88fcef1d993d8cdc99cec358e55293f7 | [
"MIT"
]
| 53 | 2020-11-21T19:40:36.000Z | 2022-03-02T10:09:52.000Z | stweet/model/__init__.py | markowanga/stweet | 7f103b5c88fcef1d993d8cdc99cec358e55293f7 | [
"MIT"
]
| 16 | 2020-12-12T23:02:51.000Z | 2022-03-01T12:10:32.000Z | from .language import Language
from .user_tweet_raw import UserTweetRaw
| 24 | 40 | 0.861111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d030a03f345b6f7f695002177f49aa4bf23d3d3c | 2,471 | py | Python | src/ColorfulData_Package/ColorfulData.py | Alex8695/Colored | f72a5f5da041b73a8771c1b0f6ef80d5e0e83e7b | [
"MIT"
]
| null | null | null | src/ColorfulData_Package/ColorfulData.py | Alex8695/Colored | f72a5f5da041b73a8771c1b0f6ef80d5e0e83e7b | [
"MIT"
]
| null | null | null | src/ColorfulData_Package/ColorfulData.py | Alex8695/Colored | f72a5f5da041b73a8771c1b0f6ef80d5e0e83e7b | [
"MIT"
]
| null | null | null | import numpy as np
from math import ceil,floor
class ColorfulData:
"""
    Create custom evenly distributed color palette
\n`Get_Colors_Matched`: key,value relationship evenly distributed for given unique values
\n`Get_Colors`: Evenly distributed for given length
"""
@staticmethod
def Get_Colors_Matched(items:list([any]),colorPalette:dict[int,any])->np.array:
"""
Returns 2d ndarray of unique `items` for given `colorPalette`
        \nIf `colorPalette` is larger than unique `items`: \n\treturned values are equally spaced from start to end of `colorPalette`
        \n\nIf `colorPalette` is smaller than unique `items`: \n\t`colorPalette` is expanded by repeating colors, in given order, then equally spaced from start to end
"""
_items = np.unique(np.array(items))
_itemcount = len(_items)
_ret = ColorfulData.Get_Colors(_itemcount,colorPalette=colorPalette)
_ret = np.column_stack(
[np.array(_items),
_ret])
return _ret
@staticmethod
def Get_Colors(count:int,colorPalette:dict[int,any])->np.array:
"""
Returns ndarray of given `count` for given `colorPalette`
        \nIf `colorPalette` is larger than `count`: \n\treturned values are equally spaced from start to end of `colorPalette`
        \n\nIf `colorPalette` is smaller than `count`: \n\t`colorPalette` is expanded by repeating colors, in given order, then equally spaced from start to end
"""
_paletteCount = len(colorPalette)
_colorsCount = count
_repeat = ceil(_colorsCount/_paletteCount)
_colorsIn = np.repeat(np.array(colorPalette),_repeat)
_remainder = len(_colorsIn)-_colorsCount
_colorIndex = _colorsIn
_skip = floor(_remainder/_colorsCount)
_index = np.arange(start=0,stop=_paletteCount,step= _skip if _skip>1 else 1)
if _skip > 0:
_colorIndex = \
[_colorsIn[x] for x in (_index)][:_colorsCount]
print('')
else:
_colorIndex = \
_colorsIn[:_colorsCount]
#print(f'{str(_colorsCount).rjust(5)}:'
#+f' x{_repeat}'
#+f' new palette: {str(len(_colorsIn)).rjust(5)}'
#+f' remainder: {str(_remainder).rjust(5)}'
#+f' skip:{str(_skip).rjust(3)}'
#+f' color index:{str(len(_colorIndex)).rjust(5)}')
return _colorIndex
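if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module); the hex palette
    # below is an arbitrary example, any sequence of color values works.
    example_palette = ['#ff0000', '#00ff00', '#0000ff']
    print(ColorfulData.Get_Colors(5, colorPalette=example_palette))
    print(ColorfulData.Get_Colors_Matched(['a', 'b', 'a'], colorPalette=example_palette))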
| 39.222222 | 167 | 0.628086 | 2,413 | 0.976528 | 0 | 0 | 2,166 | 0.876568 | 0 | 0 | 1,197 | 0.484419 |
d031d9ffaf0e038bf3ce7cef8d63034738a6cd8f | 6,453 | py | Python | Algorithms/SPO2CART.py | rtm2130/SPOTree | 0b92946a2d14202a1ca251201ddbb07892951e78 | [
"MIT"
]
| 15 | 2020-03-06T23:07:09.000Z | 2022-03-30T09:46:30.000Z | Algorithms/SPO2CART.py | Tobias272727/SPOTree | 88e2e8423cb133f6c521bae5b8c7a0acba01ccab | [
"MIT"
]
| 1 | 2020-09-14T14:32:03.000Z | 2020-10-16T02:39:24.000Z | Algorithms/SPO2CART.py | Tobias272727/SPOTree | 88e2e8423cb133f6c521bae5b8c7a0acba01ccab | [
"MIT"
]
| 13 | 2020-04-04T16:43:56.000Z | 2022-03-27T05:28:19.000Z | """
Encodes SPOT MILP as the structure of a CART tree in order to apply CART's pruning method
Also supports traverse() which traverses the tree
"""
import numpy as np
from mtp_SPO2CART import MTP_SPO2CART
from decision_problem_solver import*
from scipy.spatial import distance
def truncate_train_x(train_x, train_x_precision):
return(np.around(train_x, decimals=train_x_precision))
class SPO2CART(object):
'''
This function initializes the SPO tree
Parameters:
max_depth: the maximum depth of the pre-pruned tree (default = Inf: no depth limit)
min_weight_per_node: the mininum number of observations (with respect to cumulative weight) per node
min_depth: the minimum depth of the pre-pruned tree (default: set equal to max_depth)
min_diff: if depth > min_depth, stop splitting if improvement in fit does not exceed min_diff
binary_splits: if True, use binary splits when building the tree, else consider multiway splits
(i.e., when splitting on a variable, split on all unique vals)
debias_splits/frac_debias_set/min_debias_set_size: Additional params when binary_splits = True. If debias_splits = True, then in each node,
hold out frac_debias_set of the training set (w.r.t. case weights) to evaluate the error of the best splitting point for each feature.
Stop bias-correcting when we have insufficient data; i.e. the total weight in the debias set < min_debias_set_size.
Note: after finding best split point, we then refit the model on all training data and recalculate the training error
quant_discret: continuous variable split points are chosen from quantiles of the variable corresponding to quant_discret,2*quant_discret,3*quant_discret, etc..
run_in_parallel: if set to True, enables parallel computing among num_workers threads. If num_workers is not
specified, uses the number of cpu cores available.
'''
def __init__(self, a,b,**kwargs):
kwargs["SPO_weight_param"] = 1.0
if "SPO_full_error" not in kwargs:
kwargs["SPO_full_error"] = True
self.SPO_weight_param = kwargs["SPO_weight_param"]
self.SPO_full_error = kwargs["SPO_full_error"]
self.tree = MTP_SPO2CART(a,b,**kwargs)
'''
This function fits the tree on data (X,C,weights).
X: The feature data used in tree splits. Can either be a pandas data frame or numpy array, with:
(a) rows of X = observations
(b) columns of X = features
C: the cost vectors used in the leaf node models. Must be a numpy array, with:
(a) rows of C = observations
(b) columns of C = cost vector components
weights: a numpy array of case weights. Is 1-dimensional, with weights[i] yielding weight of observation i
feats_continuous: If False, all feature are treated as categorical. If True, all feature are treated as continuous.
feats_continuous can also be a boolean vector of dimension = num_features specifying how to treat each feature
verbose: if verbose=True, prints out progress in tree fitting procedure
'''
def fit(self, X, C, train_x_precision,
weights=None, feats_continuous=True, verbose=False, refit_leaves=False,
**kwargs):
self.decision_kwargs = kwargs
X = truncate_train_x(X, train_x_precision)
num_obs = C.shape[0]
A = np.array(range(num_obs))
if self.SPO_full_error == True and self.SPO_weight_param != 0.0:
for i in range(num_obs):
A[i] = find_opt_decision(C[i,:].reshape(1,-1),**kwargs)['objective'][0]
if self.SPO_weight_param != 0.0 and self.SPO_weight_param != 1.0:
if self.SPO_full_error == True:
SPO_loss_bound = -float("inf")
for i in range(num_obs):
SPO_loss = -find_opt_decision(-C[i,:].reshape(1,-1),**kwargs)['objective'][0] - A[i]
if SPO_loss >= SPO_loss_bound:
SPO_loss_bound = SPO_loss
else:
c_max = np.max(C,axis=0)
SPO_loss_bound = -find_opt_decision(-c_max.reshape(1,-1),**kwargs)['objective'][0]
#Upper bound for MSE loss: maximum pairwise difference between any two elements
dists = distance.cdist(C, C, 'sqeuclidean')
MSE_loss_bound = np.max(dists)
else:
SPO_loss_bound = 1.0
MSE_loss_bound = 1.0
#kwargs["SPO_loss_bound"] = SPO_loss_bound
#kwargs["MSE_loss_bound"] = MSE_loss_bound
self.tree.fit(X,A,C,
weights=weights, feats_continuous=feats_continuous, verbose=verbose, refit_leaves=refit_leaves,
SPO_loss_bound = SPO_loss_bound, MSE_loss_bound = MSE_loss_bound,
**kwargs)
'''
Prints out the tree.
Required: call tree fit() method first
Prints pruned tree if prune() method has been called, else prints unpruned tree
verbose=True prints additional statistics within each leaf
'''
def traverse(self, verbose=False):
self.tree.traverse(verbose=verbose)
'''
Prunes the tree. Set verbose=True to track progress
'''
def prune(self, Xval, Cval,
weights_val=None, one_SE_rule=True,verbose=False,approx_pruning=False):
num_obs = Cval.shape[0]
Aval = np.array(range(num_obs))
if self.SPO_full_error == True and self.SPO_weight_param != 0.0:
for i in range(num_obs):
Aval[i] = find_opt_decision(Cval[i,:].reshape(1,-1),**self.decision_kwargs)['objective'][0]
self.tree.prune(Xval,Aval,Cval,
weights_val=weights_val,one_SE_rule=one_SE_rule,verbose=verbose,approx_pruning=approx_pruning)
'''
Produces decision given data Xnew
Required: call tree fit() method first
Uses pruned tree if pruning method has been called, else uses unpruned tree
Argument alpha controls level of pruning. If not specified, uses alpha trained from the prune() method
As a step in finding the estimated decisions for data (Xnew), this function first finds
the leaf node locations corresponding to each row of Xnew. It does so by a top-down search
starting at the root node 0.
If return_loc=True, est_decision will also return the leaf node locations for the data, in addition to the decision.
'''
def est_decision(self, Xnew, alpha=None, return_loc=False):
return self.tree.predict(Xnew, np.array(range(0,Xnew.shape[0])), alpha=alpha, return_loc=return_loc)
def est_cost(self, Xnew, alpha=None, return_loc=False):
return self.tree.predict(Xnew, np.array(range(0,Xnew.shape[0])), alpha=alpha, return_loc=return_loc, get_cost=True) | 44.8125 | 162 | 0.712692 | 6,066 | 0.940028 | 0 | 0 | 0 | 0 | 0 | 0 | 3,616 | 0.56036 |
d037b0f6bf8c9bdca8f41dcdf3788289e4161b30 | 2,954 | py | Python | lib/m96_visualization.py | jaenrig-ifx/MID | a7284f50105575ed6675daeb8a70e144784a0550 | [
"MIT"
]
| 2 | 2020-12-13T11:52:32.000Z | 2022-01-06T20:41:24.000Z | lib/m96_visualization.py | jaenrig-ifx/MID | a7284f50105575ed6675daeb8a70e144784a0550 | [
"MIT"
]
| null | null | null | lib/m96_visualization.py | jaenrig-ifx/MID | a7284f50105575ed6675daeb8a70e144784a0550 | [
"MIT"
]
| null | null | null | # This package uses tk to create a simple graphical
# output representing the iDrive state
import tkinter as tk
import numpy as np
# why not use the numpy native? but whatever
def rotate_2D(vector, angle):
r = np.array([[np.cos(angle), np.sin(angle)], [-np.sin(angle), np.cos(angle)]])
return r.dot(vector)
# this class initializes the canvas and all geometrical
# objects drawn onto it. The method setState simply
# adjusts the color of the respective objects
class IDriveVisualizer:
def __init__(self,root):
self.root = root
self.state = [0,0]
cnvs_height = 400
cnvs_width = 400
rect_cn = (400,400)
rect_c = (80,80)
BG = '#F5F5DC'
self.cnvs = tk.Canvas(self.root, bg=BG, height=cnvs_height, width=cnvs_width)
self.cnvs.pack()
delta = 20.
x1a,y1a,x2a,y2a,x3a,y3a = 150.0+delta, 150.0, 200.0, 100.0+delta, 250.0-delta, 150.0
x1b,y1b,x2b,y2b,x3b,y3b = 150.0+delta, 250.0, 250.0-delta, 250.0, 200.0, 300.0-delta
x1c,y1c,x2c,y2c,x3c,y3c = 250.0, 150.0+delta, 300.0-delta, 200.0, 250.0, 250.0-delta
x1d,y1d,x2d,y2d,x3d,y3d = 100.0+delta, 200.0, 150.0, 150.0+delta, 150.0, 250.0-delta
SB1 = '#CCCCCC'
C0 = '#8B8878'
self.h_arrow = [
self.cnvs.create_oval(0,0,0,0,fill='blue'),
self.cnvs.create_polygon(x1a,y1a,x2a,y2a,x3a,y3a,fill=SB1,outline=C0,width=1),
self.cnvs.create_polygon(x1b,y1b,x2b,y2b,x3b,y3b,fill=SB1,outline=C0,width=1),
self.cnvs.create_polygon(x1c,y1c,x2c,y2c,x3c,y3c,fill=SB1,outline=C0,width=1),
self.cnvs.create_polygon(x1d,y1d,x2d,y2d,x3d,y3d,fill=SB1,outline=C0,width=1),
self.cnvs.create_oval(200-rect_c[1]/2.,200-rect_c[0]/2.,200+rect_c[1]/2.,200+rect_c[0]/2.,fill=SB1,outline=C0,width=1)]
r = 120
d = 16
self.h_circle = []
for i in range(0,72):
x1 = rect_cn[1]/2. + rotate_2D([0,-r], i*2*np.pi/72)[0] - d/2.
y1 = rect_cn[0]/2. + rotate_2D([0,-r], i*2*np.pi/72)[1] - d/2.
x2 = rect_cn[1]/2. + rotate_2D([0,-r], i*2*np.pi/72)[0] + d/2.
y2 = rect_cn[0]/2. + rotate_2D([0,-r], i*2*np.pi/72)[1] + d/2.
self.h_circle.append(self.cnvs.create_oval(x1, y1, x2, y2, fill=SB1,outline=C0,width=1))
def setState(self, state):
SB1 = '#CCCCCC'
RED = '#FF3030'
GREEN = '#9acd32'
if self.state[0] != state[0]:
self.cnvs.itemconfig(self.h_arrow[self.state[0]],fill = SB1)
self.cnvs.itemconfig(self.h_arrow[state[0]],fill = GREEN)
self.state[0] = state[0]
if self.state[1] != state[1]:
self.cnvs.itemconfig(self.h_circle[self.state[1]],fill = SB1)
self.cnvs.itemconfig(self.h_circle[state[1]],fill = RED)
self.state[1] = state[1]
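# Minimal usage sketch (not part of the original module; requires a display):
#   root = tk.Tk()
#   viz = IDriveVisualizer(root)
#   viz.setState([1, 10])   # highlight arrow segment 1 and ring position 10
#   root.mainloop()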
| 38.868421 | 132 | 0.574475 | 2,459 | 0.832431 | 0 | 0 | 0 | 0 | 0 | 0 | 353 | 0.119499 |
d039ac9e3ce8ea272819341ba9dcf26eae196cff | 2,054 | py | Python | popoff/atom_types.py | pzarabadip/PopOff | 4a9db1ff264ab96196014388721a832aea0f7325 | [
"MIT"
]
| 4 | 2021-06-18T12:22:50.000Z | 2021-12-27T16:00:31.000Z | popoff/atom_types.py | pzarabadip/PopOff | 4a9db1ff264ab96196014388721a832aea0f7325 | [
"MIT"
]
| 1 | 2021-06-27T23:02:23.000Z | 2021-08-02T10:07:46.000Z | popoff/atom_types.py | pzarabadip/PopOff | 4a9db1ff264ab96196014388721a832aea0f7325 | [
"MIT"
]
| 2 | 2021-06-22T10:39:06.000Z | 2021-12-27T17:52:16.000Z | class AtomType():
"""
Class for each atom type.
"""
def __init__( self, atom_type_index, label, element_type, mass, charge, core_shell=None ):
"""
Initialise an instance for each atom type in the structure.
Args:
atom_type_index (int): Integer index for this atom type.
label (str): Label used to identify this atom type.
element_type (str): Elemental symbol for atom type.
mass (float): Mass of the atom type.
charge(float): Charge of the atom type.
core_shell (optional:str): 'core' or 'shell'. Default is None.
Returns:
None
"""
if not isinstance(atom_type_index, int) or isinstance(atom_type_index, bool):
raise TypeError('The atom type index must be an integer.')
if not isinstance(label, str):
raise TypeError('The label must be of type string.')
if not isinstance(element_type, str):
raise TypeError('The element type must be of type string.')
if not isinstance(mass, float):
raise TypeError('The mass must be a float.')
if not isinstance(charge, float):
raise TypeError('The charge must be a float.')
if core_shell not in ['core', 'shell', None]:
raise ValueError('core_shell argument should be "core" or "shell"')
self.atom_type_index = atom_type_index
self.label = label
self.element_type = element_type
self.mass = mass
self.charge = charge
self.formal_charge = charge
self.core_shell = core_shell
@property
def core_shell_string(self):
"""
Defines a string for a comment in a lammps input file format labelling cores/shells.
Args:
None
Returns:
str: Either 'core', 'shell', or '' if core_shell is None.
"""
if self.core_shell is None:
return ''
return self.core_shell
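if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module); the lithium values
    # below are illustrative only.
    li_core = AtomType(1, 'Li core', 'Li', 6.941, 1.0, core_shell='core')
    print(li_core.label, li_core.core_shell_string)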
| 36.678571 | 94 | 0.581792 | 2,053 | 0.999513 | 0 | 0 | 387 | 0.188413 | 0 | 0 | 1,046 | 0.50925 |
d03a2031481c07546efab82527c1c70cce0c1ac7 | 641 | py | Python | 30_day_leetcoding_challenge/2021_02/24-Score_of_Parentheses.py | QuenLo/leecode | ce861103949510dc54fd5cb336bd992c40748de2 | [
"MIT"
]
| 6 | 2018-06-13T06:48:42.000Z | 2020-11-25T10:48:13.000Z | 30_day_leetcoding_challenge/2021_02/24-Score_of_Parentheses.py | QuenLo/leecode | ce861103949510dc54fd5cb336bd992c40748de2 | [
"MIT"
]
| null | null | null | 30_day_leetcoding_challenge/2021_02/24-Score_of_Parentheses.py | QuenLo/leecode | ce861103949510dc54fd5cb336bd992c40748de2 | [
"MIT"
]
| null | null | null | class Solution:
def scoreOfParentheses(self, S: str) -> int:
stack, score = [], 0
for s in S:
if s == '(':
stack.append("(")
else:
last = stack[-1]
if last == '(':
stack.pop()
stack.append(1)
# nothing to match
else:
count = 0
while stack[-1] != '(':
count += stack.pop()
stack.pop()
stack.append( count*2 )
return sum(stack)
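# Illustrative check (not part of the original solution file):
#   Solution().scoreOfParentheses("(()(()))")  # -> 6, i.e. 2 * (1 + 2)
#   Solution().scoreOfParentheses("()()")      # -> 2, i.e. 1 + 1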
| 27.869565 | 48 | 0.318253 | 640 | 0.99844 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.046802 |
d03a3dde95a4d151a055d00333559975c2f67791 | 2,116 | py | Python | fastreg/ols.py | ajferraro/fastreg | 32cdb15908480bd8d5a084126968c78b17010189 | [
"MIT"
]
| null | null | null | fastreg/ols.py | ajferraro/fastreg | 32cdb15908480bd8d5a084126968c78b17010189 | [
"MIT"
]
| 1 | 2017-11-28T16:21:09.000Z | 2017-11-28T17:19:04.000Z | fastreg/ols.py | ajferraro/fastreg | 32cdb15908480bd8d5a084126968c78b17010189 | [
"MIT"
]
| 3 | 2017-11-28T16:56:25.000Z | 2021-02-18T18:18:46.000Z | import numpy as np
from scipy import stats
import utils
def fit(xdata, ydata):
"""Calculate 2D regression.
Args:
xdata (numpy.ndarray): 1D array of independent data [ntim],
where ntim is the number of time points (or other independent
points).
ydata (numpy.ndarray): 2D array of dependent data [ntim, nspat],
where nspat is the number of spatial points (or other dependent
points).
Returns:
numpy.ndarray of dimension [5, nspat]. The 5 outputs are: slope,
intercept, Pearson's correlation coefficient, two-sided p-value for
a hypothesis test with null hypothesis that the slope is zero,
standard error for the slope estimate.
"""
# Small number to prevent divide-by-zero errors
TINY = 1.0e-20
# Dimensions
ntim = xdata.shape[0]
nspat = ydata.shape[1]
# Add a constant (1) to the xdata to allow for intercept calculation
xdata_plus_const = utils.add_constant(xdata)
# Calculate parameters of the regression by solving the OLS problem
# in its matrix form
mat1 = np.swapaxes(np.dot(xdata_plus_const.T,
(xdata_plus_const[np.newaxis, :, :])), 0, 1)
mat2 = np.dot(xdata_plus_const.T, ydata)
beta = np.linalg.solve(mat1, mat2.T)
output = beta.T
# Pearson correlation coefficient
xm, ym = xdata-xdata.mean(0), ydata-ydata.mean(0)
r_num = np.dot(xm, ym)
r_den = np.sqrt(stats.ss(xm) * stats.ss(ym))
pearson_r = r_num / r_den
# Two-sided p-value for a hypothesis test whose null hypothesis is that
# the slope is zero.
df = ntim - 2
tval = pearson_r * np.sqrt(df / ((1.0 - pearson_r + TINY) *
(1.0 + pearson_r + TINY)))
pval = stats.distributions.t.sf(np.abs(tval), df)*2
# Standard error of the slope estimate
sst = np.sum(ym ** 2, 0)
ssr = (output[0, :] ** 2) * np.sum(xm ** 2)
se = np.sqrt((1. / df) * (sst - ssr))
stderr = se / np.sqrt(np.sum(xm ** 2))
return np.vstack([output, pearson_r, pval, stderr])
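# Illustrative usage sketch (not part of the original module; shapes follow the
# docstring above):
#   x = np.arange(24.0)              # [ntim]
#   y = np.random.rand(24, 100)      # [ntim, nspat]
#   slope, intercept, r, p, stderr = fit(x, y)   # each row has length nspat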
| 33.587302 | 75 | 0.614367 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,045 | 0.493856 |
d03b6aeb253fdd06dec81e7fe877f6830639e18f | 796 | py | Python | event/timeout.py | dannl/hunter-sim-classic | e32cccc8431cc3e78b08067dd58e10fec52aac6a | [
"MIT"
]
| null | null | null | event/timeout.py | dannl/hunter-sim-classic | e32cccc8431cc3e78b08067dd58e10fec52aac6a | [
"MIT"
]
| null | null | null | event/timeout.py | dannl/hunter-sim-classic | e32cccc8431cc3e78b08067dd58e10fec52aac6a | [
"MIT"
]
| null | null | null | from event import Event
class BuffTimeOut(Event):
def __init__(self, buff, rotation, engine, char_state, priority):
super().__init__('buff_time_out', priority)
self.buff = buff
self.rotation = rotation
self.engine = engine
self.char_state = char_state
def act(self):
if not self.engine.has_future_timeout(self.buff.name):
# self.buff.timeout(self.engine, self.char_state)
# self.buff.on_going = False
# self.rotation.statistics.add_end(self.buff.name, self.priority)
self.act_force()
def act_force(self):
self.buff.timeout(self.rotation, self.engine, self.char_state)
self.buff.on_going = False
self.rotation.statistics.add_end(self.buff.name, self.priority)
| 33.166667 | 77 | 0.653266 | 769 | 0.96608 | 0 | 0 | 0 | 0 | 0 | 0 | 157 | 0.197236 |
d03c4e907665dac0cd64374cfeb54bcf34b259de | 2,017 | py | Python | server.py | shawkyelshazly1/Chat-App | 7cb27e9ad0e014409407bc7f2053caf406236797 | [
"MIT"
]
| null | null | null | server.py | shawkyelshazly1/Chat-App | 7cb27e9ad0e014409407bc7f2053caf406236797 | [
"MIT"
]
| null | null | null | server.py | shawkyelshazly1/Chat-App | 7cb27e9ad0e014409407bc7f2053caf406236797 | [
"MIT"
]
| null | null | null | import socket
import threading
import json
PORT = 5000
SERVER = socket.gethostbyname(socket.gethostname())
ADDRESS = ('', PORT)
FORMAT = 'utf-8'
clients, names = [], []
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(ADDRESS)
def StartChat():
print(f'server is working on: {SERVER}')
b = b''
server.listen()
while True:
try:
conn, addr = server.accept()
try:
data = conn.recv(1024)
message_data = json.loads(data.decode(FORMAT))
if len(clients) >= 1:
join_message = {
'welcome_message': f'{message_data["username"]} Joined the chat'}
join_send_obj = json.dumps(join_message).encode(FORMAT)
broadcastMessage(join_send_obj)
except:
continue
message = {'welcome_message': 'Connected Successfully!'}
send_obj = json.dumps(message).encode(FORMAT)
conn.send(send_obj)
thread = threading.Thread(target=handle, args=(conn, addr))
thread.daemon = True
thread.start()
clients.append(conn)
print(f'active connections: {threading.active_count()-1}')
except:
continue
def handle(conn, addr):
print(f'new connection {addr}')
connected = True
while connected:
message = conn.recv(1024)
if message:
broadcastMessage(message)
else:
remove_connection(conn)
connected = False
print(f'active connections: {threading.active_count()-1}')
def remove_connection(conn):
for client in clients:
clients.remove(client)
def broadcastMessage(message):
for client in clients:
try:
print(message)
client.send(message)
except:
            print('oops')
remove_connection(client)
finally:
continue
StartChat()
| 23.729412 | 89 | 0.562717 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 281 | 0.139316 |
d03d74f1871e0c66c83e4056b90ca19bdbdf3fd6 | 99 | py | Python | mhs/outbound/outbound/request/synchronous/__init__.py | tomzo/integration-adaptors | d4f296d3e44475df6f69a78a27fac6ed5b67513b | [
"Apache-2.0"
]
| 15 | 2019-08-06T16:08:12.000Z | 2021-05-24T13:14:39.000Z | mhs/outbound/outbound/request/synchronous/__init__.py | tomzo/integration-adaptors | d4f296d3e44475df6f69a78a27fac6ed5b67513b | [
"Apache-2.0"
]
| 75 | 2019-04-25T13:59:02.000Z | 2021-09-15T06:05:36.000Z | mhs/outbound/outbound/request/synchronous/__init__.py | tomzo/integration-adaptors | d4f296d3e44475df6f69a78a27fac6ed5b67513b | [
"Apache-2.0"
]
| 7 | 2019-11-12T15:26:34.000Z | 2021-04-11T07:23:56.000Z | """Modules related specifically to the handling of synchronous requests from a supplier system."""
| 49.5 | 98 | 0.79798 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 98 | 0.989899 |
d040592f4a74f436af4d264afbb1ca2fae548dc3 | 3,636 | py | Python | WiredQT/examples/Module Example/6.Servo/PIGPIO/frmmain.py | chiptrontech/WiredQTv1.0 | 760948bb736867db4e772031b23ed9151e0364b9 | [
"MIT"
]
| 1 | 2021-12-18T09:17:08.000Z | 2021-12-18T09:17:08.000Z | WiredQT/examples/Module Example/6.Servo/PIGPIO/frmmain.py | chiptrontech/WiredQTv1.0 | 760948bb736867db4e772031b23ed9151e0364b9 | [
"MIT"
]
| null | null | null | WiredQT/examples/Module Example/6.Servo/PIGPIO/frmmain.py | chiptrontech/WiredQTv1.0 | 760948bb736867db4e772031b23ed9151e0364b9 | [
"MIT"
]
| 2 | 2021-12-18T09:15:53.000Z | 2022-01-19T15:10:14.000Z | import gi
import time
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk,GObject,Gdk,Pango,GLib
from wta_module import *
# Generated By WiredGTK for Python: by Rocky Nuarin, 2018 Phils
# #####################www.WireThemAll.com#####################
class Handler(usercontrol):
#WiredEvent def usercontrolevent(self,value) #add more events
#WiredProperty 'usercontrolproperty': 'sample only'
def __init__(self,*param):
initUI(self,param,w=400,h=400,title="WiredGTKV1.0",controlbox=True,startpos=(200,200),timeoutdestroy=-1)
self.GTKForms()
super().__init__(self.usercontrol)
self.sch=Scheduler(500)#500 ms
self.sch.Start()
self._text=''
self._usercontrolproperty=''
@property
def usercontrolproperty(self):
return self._usercontrolproperty
@usercontrolproperty.setter
def usercontrolproperty(self,value):
self._usercontrolproperty=value
def connect(self,ev,evusr):
self.wiredevents.update({ev:evusr})
def activeXcreated(self,*args):
pass
def unload(self,*args):
destroy=True
if destroy==True:
GLib.source_remove(self.timeout_id)
self._window.hide()
del self._window
#ExitApplication() #activate this if u want to destroy this window
return False
else:
self.window.Visible=False
return True
def loop(self, user_data):
if self.form_load==False:
self.form_load=True
if self.sch.Event():#timer routine
#code here
if self.timeoutdestroy!=-1:
self.timeoutdestroy-=1
if self.timeoutdestroy==0:
self.unload(None)
self.sch.Start()#restart scheduler
return True #return true so that main_loop can call it again
def create(self,prop,control,parent,event=[]):
createWidget(self,prop,control,parent,event)
def GTKForms(self):
self.create("{'BackColor': '(0, 0.45338815965065005, 0.401859108611177, 0.5)', 'Text': 'ctlServo1', 'Left': '135', 'Width': '30', 'ParentsType': '', 'Pin': '18', 'Name': 'ctlServo1', 'Tag': 'Activex', 'Top': '100', 'MinDutyCycle': '540', 'ForeColor': '(0,0,0,1)', 'Angle': '90', 'Events': '[]', 'Picture': '', 'Height': '30', 'Enable': 'True', 'Visible': 'True', 'Font': '', 'MaxDutyCycle': '2400'}","Servo","usercontrol","[]")
self.create("{'BackColor': '(0, 0.9944924427369468, 0.012752023212419639, 0.5)', 'Text': 'VScrollBarWTA1', 'Value': '90', 'Left': '50', 'Width': '20', 'ParentsType': '', 'Name': 'VScrollBarWTA1', 'Tag': 'Activex', 'Top': '40', 'ForeColor': '(0,0,0,1)', 'Events': '[change-value]', 'Picture': 'VScrollBarWTA.png', 'Height': '220', 'Enable': 'True', 'Visible': 'True', 'Font': '', 'Max': '180'}","VScrollBarWTA","usercontrol","[['change-value', 'self,value']]")
self.create("{'BackColor': '(1,1,1,1)', 'Text': 'Entry1', 'Left': '85', 'Width': '170', 'ParentsType': '', 'Alignment': '', 'Name': 'Entry1', 'Tag': '', 'Top': '195', 'ForeColor': '(0,0,0,1)', 'Events': '[]', 'Picture': '', 'Height': '30', 'Enable': 'True', 'Visible': 'True', 'Font': ''}","Entry","usercontrol","[]")
def Widget(self):
if self._usercontrol in self._mainlayout.get_children():
self._mainlayout.remove(self._usercontrol)
return self._usercontrol
def Hide(self):
self._window.hide()
def Show(self,modal=False,x=None,y=None):
if x!=None:
self._window.move(x,y)
if modal and self.caller!=None:
self._window.set_transient_for(self.caller._window)
self._window.set_modal(modal)
self._window.show()
Gtk.main()
return ""#put ur return value here upon closing this form
def VScrollBarWTA1_change_value(self,value):
self.Entry1.Text=int(float(value))
self.ctlServo1.write(self.Entry1.Text)
pass
if __name__ == "__main__":
_m = Handler()
_m._window.show()
Gtk.main() | 3,636 | 3,636 | 0.672717 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,513 | 0.966172 |
d041f4ae9fd51d426b42247db152f3d516a92484 | 561 | py | Python | slam_recognition/filters/rgby.py | SimLeek/pySILEnT | feec2d1fb654d7c8dc25f610916f4e9b202a1092 | [
"Apache-2.0",
"MIT"
]
| 5 | 2018-11-18T17:35:59.000Z | 2019-02-13T20:25:58.000Z | slam_recognition/filters/rgby.py | SimLeek/slam_recognition | feec2d1fb654d7c8dc25f610916f4e9b202a1092 | [
"Apache-2.0",
"MIT"
]
| 12 | 2018-10-31T01:57:55.000Z | 2019-02-07T05:49:36.000Z | slam_recognition/filters/rgby.py | SimLeek/pySILEnT | feec2d1fb654d7c8dc25f610916f4e9b202a1092 | [
"Apache-2.0",
"MIT"
]
| null | null | null | from slam_recognition.constant_convolutions.center_surround import rgby_3
from slam_recognition.util.get_dimensions import get_dimensions
import tensorflow as tf
def rgby_filter(tensor # type: tf.Tensor
):
n_dimensions = get_dimensions(tensor)
rgby = rgby_3(n_dimensions)
conv_rgby = tf.constant(rgby, dtype=tf.float32, shape=(3, 3, 3, 3))
compiled_rgby = tf.maximum(tf.nn.conv2d(input=tensor, filter=conv_rgby, strides=[1, 1, 1, 1],
padding='SAME'), [0])
return compiled_rgby
| 37.4 | 97 | 0.673797 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.040998 |
d042d23ac886c0996046b66ccaa7d239f4bcb644 | 6,293 | py | Python | source/preprocessing/lm_text_generator.py | lzzhaha/self_talk | 238e5583c0f6ca0ed8a4a035b74f366d376bcd6d | [
"Apache-2.0"
]
| 63 | 2020-04-14T03:40:12.000Z | 2022-03-30T07:10:20.000Z | source/preprocessing/lm_text_generator.py | lzzhaha/self_talk | 238e5583c0f6ca0ed8a4a035b74f366d376bcd6d | [
"Apache-2.0"
]
| 2 | 2021-07-10T04:10:18.000Z | 2022-03-22T20:33:18.000Z | source/preprocessing/lm_text_generator.py | lzzhaha/self_talk | 238e5583c0f6ca0ed8a4a035b74f366d376bcd6d | [
"Apache-2.0"
]
| 7 | 2020-12-06T03:22:17.000Z | 2022-03-25T09:27:19.000Z | """
Adapted from https://github.com/huggingface/transformers/blob/master/examples/run_generation.py
"""
import re
import torch
import logging
from typing import List
from collections import defaultdict
from transformers import GPT2Tokenizer, XLNetTokenizer, TransfoXLTokenizer, OpenAIGPTTokenizer
from transformers import GPT2LMHeadModel, XLNetLMHeadModel, TransfoXLLMHeadModel, OpenAIGPTLMHeadModel
logging.basicConfig(
format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO)
logger = logging.getLogger(__name__)
# Padding text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
# in https://github.com/rusiaaman/XLNet-gen#methodology
# and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e
PADDING_TEXT = """ In 1991, the remains of Russian Tsar Nicholas II and his family
(except for Alexei and Maria) are discovered.
The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the
remainder of the story. 1883 Western Siberia,
a young Grigori Rasputin is asked by his father and a group of men to perform magic.
Rasputin has a vision and denounces one of the men as a horse thief. Although his
father initially slaps him for making such an accusation, Rasputin watches as the
man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
with people, even a bishop, begging for his blessing. <eod> </s> <eos>"""
MODEL_CLASSES = {
'distilgpt2': (GPT2LMHeadModel, GPT2Tokenizer),
'gpt2': (GPT2LMHeadModel, GPT2Tokenizer),
'gpt2-medium': (GPT2LMHeadModel, GPT2Tokenizer),
'gpt2-large': (GPT2LMHeadModel, GPT2Tokenizer),
'gpt2-xl': (GPT2LMHeadModel, GPT2Tokenizer),
'openai-gpt': (OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
'xlnet-base-cased': (XLNetLMHeadModel, XLNetTokenizer),
'xlnet-large-cased': (XLNetLMHeadModel, XLNetTokenizer),
'transfo-xl-wt103': (TransfoXLLMHeadModel, TransfoXLTokenizer)
}
class LMTextGenerator:
"""
Generating text with a language model using the HuggingFace implementation.
"""
def __init__(self,
model_name: str,
device: torch.device = torch.device("cpu")) -> None:
logger.info("Loading the language model")
self.model_name = model_name
self.lm_head, self.tokenizer = init_model(model_name, device)
self.device = device
def generate(self,
prefixes: List[str],
p: float = 0.0,
k: float = 0.0,
temperature: float = 1.0,
length: int = 25,
num_samples: int = 1,
stop_token=None):
"""
Generate an ending for the beginning of the text
:param prefixes: text on which the generation is conditioned
:param p: p for nucleus sampling
:param k: k for top k sampling
:param temperature: default = 1
:param length: the maximum length to sample
:param num_samples: how many texts to generate at once
:param stop_token: if this token was generated, it's the end of the generated text.
:return: the text
"""
if "transfo-xl" in self.model_name or "xlnet" in self.model_name:
prefixes = [PADDING_TEXT + prefix for prefix in prefixes]
generated_strings = defaultdict(list)
reduce_spaces = lambda s: ' '.join(s.split())
for index, prefix in enumerate(prefixes):
out = self.generate_texts(
prompt_text=prefix, length=length, temperature=temperature,
k=k, p=p, num_samples=num_samples, stop_token=stop_token)
generated_strings[index] = [reduce_spaces(t) for t in out]
return generated_strings
def generate_texts(self,
length: int,
prompt_text: str,
num_samples: int = 1,
temperature: float = 1.0,
p: float = 0.0,
k: float = 0.0,
stop_token='?'):
"""
Generate an ending for the beginning of the text
:param prompt_text: text on which the generation is conditioned
:param p: p for nucleus sampling
:param temperature: default = 1
        :param length: the maximum length to sample
        :param num_samples: how many texts to generate at once
        :param k: k for top k sampling
        :param stop_token: if this token was generated, it's the end of the generated text.
        :return: the text
"""
eos_token_ids = self.tokenizer.encode(f"{stop_token} <eop> <eod>", add_special_tokens=False)
if "xlnet" in self.model_name and len(eos_token_ids) > 1:
eos_token_ids = eos_token_ids[1:]
k = k if k > 0 else None
p = p if p > 0 else None
context_tokens = self.tokenizer.encode(prompt_text)
max_length = length + len(context_tokens)
input_ids = torch.tensor(context_tokens, device=self.device).unsqueeze(0)
outputs = self.lm_head.generate(
input_ids=input_ids, max_length=max_length, do_sample=True, temperature=temperature,
num_return_sequences=num_samples, top_p=p, top_k=k, eos_token_ids=eos_token_ids, repetition_penalty=2.0)
if len(outputs.shape) == 3:
outputs = outputs[0]
outputs = outputs[:, len(context_tokens):]
outputs = [self.tokenizer.decode(text, clean_up_tokenization_spaces=True) for text in outputs]
if stop_token is not None:
outputs = [text[:text.find(stop_token)+1] for text in outputs if stop_token in text]
outputs = [re.sub(" +", " ", text).strip() for text in outputs]
outputs = set([text for text in outputs if len(text) > 0])
return outputs
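# Illustrative usage (editor's sketch, not part of the original module):
#   generator = LMTextGenerator("gpt2", device=torch.device("cpu"))
#   endings = generator.generate(["The weather today is"], p=0.9, num_samples=3)
#   # endings is a dict keyed by prefix index, e.g. {0: ["sunny and mild?", ...]}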
def init_model(model_name: str,
device: str):
"""
Initialize a pre-trained LM
:param model_name: from MODEL_CLASSES
:param device: CUDA / CPU device
:return: the model and tokenizer
"""
logger.info(f'Initializing {model_name}')
model_class, tokenizer_class = MODEL_CLASSES[model_name]
tokenizer = tokenizer_class.from_pretrained(model_name)
model = model_class.from_pretrained(model_name)
model.to(device)
model.eval()
return model, tokenizer
| 39.33125 | 116 | 0.656444 | 3,688 | 0.586048 | 0 | 0 | 0 | 0 | 0 | 0 | 2,423 | 0.385031 |
d045397b0181a2b9e3e22ff62f63868e6793ee64 | 79 | py | Python | src/semaphore/handlers/internal/__init__.py | lsst-sqre/semaphore | 73164618ac0c8d0a03f3122f3ffe8b3820fa43f0 | [
"MIT"
]
| null | null | null | src/semaphore/handlers/internal/__init__.py | lsst-sqre/semaphore | 73164618ac0c8d0a03f3122f3ffe8b3820fa43f0 | [
"MIT"
]
| 2 | 2022-03-08T00:01:27.000Z | 2022-03-28T17:45:01.000Z | src/semaphore/handlers/internal/__init__.py | lsst-sqre/semaphore | 73164618ac0c8d0a03f3122f3ffe8b3820fa43f0 | [
"MIT"
]
| null | null | null | from .handlers import router as internal_router
__all__ = ["internal_router"]
| 19.75 | 47 | 0.797468 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 0.21519 |
d04578120df1707824a754d31bbc073113fe0980 | 440 | py | Python | Python_ABC/2-7dictionary/countLetter.py | Chandler-Song/Python_Awesome | a44b8b79de7b429a00ac5798e7ecdc26c79a09ed | [
"MIT"
]
| null | null | null | Python_ABC/2-7dictionary/countLetter.py | Chandler-Song/Python_Awesome | a44b8b79de7b429a00ac5798e7ecdc26c79a09ed | [
"MIT"
]
| null | null | null | Python_ABC/2-7dictionary/countLetter.py | Chandler-Song/Python_Awesome | a44b8b79de7b429a00ac5798e7ecdc26c79a09ed | [
"MIT"
]
| null | null | null | import pprint
# message
message = '''
Books and doors are the same thing books.
You open them, and you go through into another world.
'''
# split message to words into a list
words = message.split()
# define dictionary counter
count = {}
# traverse every word and accumulate
for word in words:
if not word[-1].isalpha():
word = word[:-1]
word = word.lower()
count.setdefault(word, 0)
    count[word] += 1
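# An equivalent, more idiomatic tally (illustrative note, not in the original script)
# could use collections.Counter:
#   from collections import Counter
#   count = Counter((w[:-1] if not w[-1].isalpha() else w).lower() for w in words)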
# print
pprint.pprint(count) | 18.333333 | 53 | 0.702273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 219 | 0.497727 |
d0458729a238b401f69f7c90aa4a7ae3169c62fa | 1,959 | py | Python | clouddq/classes/dq_rule.py | charleskubicek/cloud-data-quality | 0f7dee7daeaaf974203ae154fc7ba729c05cee5d | [
"Apache-2.0"
]
| 1 | 2021-11-07T15:03:33.000Z | 2021-11-07T15:03:33.000Z | clouddq/classes/dq_rule.py | ant-laz/cloud-data-quality | 2f41ec692e7a0ec8988bb8e715fcf5e926fa47be | [
"Apache-2.0"
]
| null | null | null | clouddq/classes/dq_rule.py | ant-laz/cloud-data-quality | 2f41ec692e7a0ec8988bb8e715fcf5e926fa47be | [
"Apache-2.0"
]
| null | null | null | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""todo: add classes docstring."""
from __future__ import annotations
from dataclasses import dataclass
from clouddq.classes.rule_type import RuleType
@dataclass
class DqRule:
""" """
rule_id: str
rule_type: RuleType
params: dict | None = None
def resolve_sql_expr(self: DqRule) -> str:
return self.rule_type.to_sql(self.params).safe_substitute()
@classmethod
def from_dict(cls: DqRule, rule_id: str, kwargs: dict) -> DqRule:
"""
Args:
cls: DqRule:
rule_id: str:
kwargs: typing.Dict:
Returns:
"""
rule_type: RuleType = RuleType(kwargs.get("rule_type", ""))
params: dict = kwargs.get("params", dict())
return DqRule(rule_id=str(rule_id), rule_type=rule_type, params=params)
def to_dict(self: DqRule) -> dict:
"""
Args:
self: DqRule:
Returns:
"""
return dict(
{
f"{self.rule_id}": {
"rule_type": self.rule_type.name,
"params": self.params,
"rule_sql_expr": self.resolve_sql_expr(),
}
}
)
def dict_values(self: DqRule) -> dict:
"""
Args:
self: DqRule:
Returns:
"""
return dict(self.to_dict().get(self.rule_id))
| 23.890244 | 79 | 0.597754 | 1,216 | 0.620725 | 0 | 0 | 1,227 | 0.62634 | 0 | 0 | 948 | 0.48392 |
d0461663494469b9d6df96b164f06712f149aa53 | 749 | py | Python | py/minsk/analysis/syntax/expressions/parenthesized.py | Phytolizer/Minsk | 5fb59f0d5d8cf1f0046471b91e2f5d0c41fc874c | [
"MIT"
]
| null | null | null | py/minsk/analysis/syntax/expressions/parenthesized.py | Phytolizer/Minsk | 5fb59f0d5d8cf1f0046471b91e2f5d0c41fc874c | [
"MIT"
]
| 1 | 2022-03-23T03:34:48.000Z | 2022-03-24T06:47:30.000Z | py/minsk/analysis/syntax/expressions/parenthesized.py | Phytolizer/Minsk | 5fb59f0d5d8cf1f0046471b91e2f5d0c41fc874c | [
"MIT"
]
| null | null | null | from dataclasses import dataclass
from typing import Iterable
from minsk.analysis.syntax.expression import ExpressionSyntax
from minsk.analysis.syntax.kind import SyntaxKind
from minsk.analysis.syntax.node import SyntaxNode
from minsk.analysis.syntax.token import SyntaxToken
@dataclass(frozen=True)
class ParenthesizedExpressionSyntax(ExpressionSyntax):
open_parenthesis_token: SyntaxToken
expression: ExpressionSyntax
close_parenthesis_token: SyntaxToken
@property
def kind(self) -> SyntaxKind:
return SyntaxKind.ParenthesizedExpression
@property
def children(self) -> Iterable[SyntaxNode]:
yield self.open_parenthesis_token
yield self.expression
yield self.close_parenthesis_token
| 29.96 | 61 | 0.791722 | 445 | 0.594126 | 158 | 0.210948 | 469 | 0.626168 | 0 | 0 | 0 | 0 |
d04a7bba3d57ad48f159bb585e370285252259ef | 3,113 | py | Python | src/peachyprintertools.py | PeachyPrinter/tkpeachyprinter | d88dcb4891d19c4b81a7f4f072e120d05c02124c | [
"Apache-2.0"
]
| 1 | 2017-03-08T02:48:19.000Z | 2017-03-08T02:48:19.000Z | src/peachyprintertools.py | PeachyPrinter/tkpeachyprinter | d88dcb4891d19c4b81a7f4f072e120d05c02124c | [
"Apache-2.0"
]
| null | null | null | src/peachyprintertools.py | PeachyPrinter/tkpeachyprinter | d88dcb4891d19c4b81a7f4f072e120d05c02124c | [
"Apache-2.0"
]
| 6 | 2016-05-12T04:10:18.000Z | 2020-02-15T09:55:00.000Z | #!/usr/bin/python
# -*- coding: iso-8859-1 -*-
import logging
from peachyprinter import config, PrinterAPI
import argparse
import os
import sys
import time
from Tkinter import *
from ui.main_ui import MainUI
class PeachyPrinterTools(Tk):
def __init__(self, parent, path):
Tk.__init__(self, parent)
self.path = path
self.geometry("800x700")
self.title('Peachy Printer Tools')
if sys.platform != 'darwin':
self.setup_icon()
self.parent = parent
self._api = PrinterAPI()
self.start_main_window()
self.protocol("WM_DELETE_WINDOW", self.close)
def start_main_window(self):
MainUI(self, self._api)
def setup_icon(self):
img_file = os.path.join(self.path, 'resources', 'peachy.gif')
img = PhotoImage(file=img_file)
self.tk.call('wm', 'iconphoto', self._w, img)
def close(self):
self.destroy()
sys.exit(0)
def setup_logging(args):
if args.devmode:
timestr = time.strftime("%Y-%m-%d-%H%M%S")
logfile = os.path.join(config.PEACHY_PATH, 'peachyprinter-%s.log' % timestr)
else:
logfile = os.path.join(config.PEACHY_PATH, 'peachyprinter.log')
if os.path.isfile(logfile):
os.remove(logfile)
logging_format = '%(levelname)s: %(asctime)s %(module)s - %(message)s'
logging_level = getattr(logging, args.loglevel.upper(), "WARNING")
if not isinstance(logging_level, int):
raise ValueError('Invalid log level: %s' % args.loglevel)
if args.console:
rootLogger = logging.getLogger()
logFormatter = logging.Formatter(logging_format)
fileHandler = logging.FileHandler(logfile)
fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)
rootLogger.setLevel(logging_level)
else:
logging.basicConfig(filename=logfile, format=logging_format, level=logging_level)
if __name__ == "__main__":
if not os.path.exists(config.PEACHY_PATH):
os.makedirs(config.PEACHY_PATH)
parser = argparse.ArgumentParser("Configure and print with Peachy Printer")
parser.add_argument('-l', '--log', dest='loglevel', action='store', required=False, default="WARNING", help="Enter the loglevel [DEBUG|INFO|WARNING|ERROR] default: WARNING")
parser.add_argument('-c', '--console', dest='console', action='store_true', required=False, help="Logs to console not file")
parser.add_argument('-d', '--development', dest='devmode', action='store_true', required=False, help="Enable Developer Testing Mode")
args, unknown = parser.parse_known_args()
setup_logging(args)
if args.devmode:
config.devmode = True
if getattr(sys, 'frozen', False):
path = os.path.dirname(sys.executable)
else:
path = os.path.dirname(os.path.realpath(__file__))
app = PeachyPrinterTools(None, path)
app.title('Peachy Printer Tools')
app.mainloop()
| 34.208791 | 186 | 0.666881 | 745 | 0.239319 | 0 | 0 | 0 | 0 | 0 | 0 | 598 | 0.192098 |
d04d2d19a25223c8c1cc1c6c129d213851622ac0 | 813 | py | Python | db/db_create.py | dafarz/base-service | 95791beac06c1ac58e0fa2050aa2cf3a3a22d8d7 | [
"MIT"
]
| null | null | null | db/db_create.py | dafarz/base-service | 95791beac06c1ac58e0fa2050aa2cf3a3a22d8d7 | [
"MIT"
]
| null | null | null | db/db_create.py | dafarz/base-service | 95791beac06c1ac58e0fa2050aa2cf3a3a22d8d7 | [
"MIT"
]
| null | null | null | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from env_variables import SQL_ALCHEMY_URL
_db_url_without_db = '/'.join(SQL_ALCHEMY_URL.split('/')[:-1])
engine = create_engine(f'{_db_url_without_db}', isolation_level='AUTOCOMMIT', echo=True)
Session = sessionmaker(engine)
def create_database():
db_name = SQL_ALCHEMY_URL.split('/')[-1]
create_database_statement = f"""SELECT 'CREATE DATABASE {db_name}'
WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = '{db_name}')"""
with Session() as con:
result = con.execute(create_database_statement).fetchone()
if result:
            # result is a single-column row holding the CREATE DATABASE statement
            con.execute(result[0])
else:
print(f'{db_name} already exists')
if __name__ == '__main__':
create_database()
| 32.52 | 109 | 0.676507 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 230 | 0.282903 |
d04dee56b2ed832a66c6149983ca467bfbbbbafc | 404 | py | Python | [5]-RailFence-Cipher.py | anuj0809/Fundamentals-Of-Cryptography | e6f88dcbd5f63f6938ddf5825bf9395d5ede9fe1 | [
"Apache-2.0"
]
| null | null | null | [5]-RailFence-Cipher.py | anuj0809/Fundamentals-Of-Cryptography | e6f88dcbd5f63f6938ddf5825bf9395d5ede9fe1 | [
"Apache-2.0"
]
| null | null | null | [5]-RailFence-Cipher.py | anuj0809/Fundamentals-Of-Cryptography | e6f88dcbd5f63f6938ddf5825bf9395d5ede9fe1 | [
"Apache-2.0"
]
| null | null | null | def threeRailEncrypt(plainText):
plainText = plainText.lower()
cipherText = ""
rail1 = ""
rail2 = ""
rail3 = ""
for i in range(len(plainText)):
if i%3 == 0:
rail1 += plainText[i]
elif i%3 == 1:
rail2 += plainText[i]
else:
rail3 += plainText[i]
cipherText = rail1 + rail2 + rail3
return cipherText
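# A matching decoder (illustrative sketch, not part of the original file) would
# split the ciphertext back into its three rails and re-interleave them:
#   def threeRailDecrypt(cipherText):
#       n = len(cipherText)
#       sizes = [(n + 2) // 3, (n + 1) // 3, n // 3]
#       rails, start = [], 0
#       for size in sizes:
#           rails.append(cipherText[start:start + size])
#           start += size
#       return "".join(rails[i % 3][i // 3] for i in range(n))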
print(threeRailEncrypt("Vineet"))
| 19.238095 | 37 | 0.564356 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 0.039604 |
d04e92b69338a9a744afe83b7964f2f2ce880ffe | 2,382 | py | Python | util/data.py | arturb90/nl2pl | 2cd37bdd7c6f9f99349f1235001a1755ba169f4a | [
"MIT"
]
| null | null | null | util/data.py | arturb90/nl2pl | 2cd37bdd7c6f9f99349f1235001a1755ba169f4a | [
"MIT"
]
| null | null | null | util/data.py | arturb90/nl2pl | 2cd37bdd7c6f9f99349f1235001a1755ba169f4a | [
"MIT"
]
| 1 | 2021-07-16T09:21:15.000Z | 2021-07-16T09:21:15.000Z | import torch
from random import random
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import Dataset
def collate_fn(batch):
'''
Batch-wise preprocessing and padding.
:param batch: the current batch.
    :returns: padded sources, targets, alignments,
              stacks and corresponding real lengths.
'''
sources, targets, alignments, \
stacks, stack_lens = zip(*batch)
src_lens = [len(src) for src in sources]
tgt_lens = [len(tgt) for tgt in targets]
source_pad = pad_sequence(sources, padding_value=0)
target_pad = pad_sequence(targets, padding_value=0)
align_pad = pad_sequence(alignments, padding_value=0)
max_stack_len = max(s.size(1) for s in stacks)
max_target_len = target_pad.size(0)
    # Must be sent to the device.
stack_pad = torch.zeros(
[len(batch),
max_target_len,
max_stack_len]
).long()
for i in range(len(batch)):
stack = stacks[i]
stack_pad[i, :stack.size(0), :stack.size(1)] = stack
# Padding value is 1, for stacks that only contain
# start-of-sequence token. Ignored during forward pass
# since it corresponds to decoder padding targets.
stack_lens = pad_sequence(stack_lens, padding_value=1)
stack_lens = stack_lens.tolist()
return (source_pad,
target_pad,
src_lens,
tgt_lens,
align_pad,
stack_pad,
stack_lens)
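# Typical usage (sketch, not part of the original module): pass this function as
# the collate_fn of a DataLoader built over the Dataset defined below, e.g.
#   loader = torch.utils.data.DataLoader(dataset, batch_size=32, collate_fn=collate_fn)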
class Dataset(Dataset):
'''
Pytorch dataset object.
'''
def __init__(self, dataset, device, mask_ratio=0):
self.data = dataset
self.device = device
self.mask_ratio = mask_ratio
def __len__(self):
return len(self.data)
def __getitem__(self, i):
sample = self.data[i]
x = torch.LongTensor(sample['src_i']).to(self.device)
y = torch.LongTensor(sample['tgt_i']).to(self.device)
align = torch.LongTensor(sample['alignment']).to(self.device)
stacks = torch.LongTensor(sample['value_stacks']).to(self.device)
stack_lens = torch.LongTensor(sample['stack_lens'])
if self.mask_ratio:
for i in range(1, len(x)-1):
if random() <= self.mask_ratio:
# 3 is <UNK> token.
x[i] = 3
return x, y, align, stacks, stack_lens
| 27.697674 | 73 | 0.615869 | 897 | 0.376574 | 0 | 0 | 0 | 0 | 0 | 0 | 495 | 0.207809 |
d04f4962f6074bb45c68186f6ea8419d8e3eca30 | 649 | py | Python | stocklab/core/crawler.py | hchsiao/stocklab | 9c8f786249d5fd5eb954829f63549f0635a428ff | [
"MIT"
]
| 1 | 2020-03-26T14:49:31.000Z | 2020-03-26T14:49:31.000Z | stocklab/core/crawler.py | syoukore/stocklab | 9c8f786249d5fd5eb954829f63549f0635a428ff | [
"MIT"
]
| null | null | null | stocklab/core/crawler.py | syoukore/stocklab | 9c8f786249d5fd5eb954829f63549f0635a428ff | [
"MIT"
]
| 1 | 2020-06-16T16:56:23.000Z | 2020-06-16T16:56:23.000Z | from . import StocklabObject
class Crawler(StocklabObject):
"""The base class for stocklab Crawlers."""
def __init__(self):
super().__init__()
class CrawlerTrigger(Exception):
"""
A `Node` will raise this exception when the required data is not locally
available (e.g. not in the database). This object will also carry the
parameters for the corresponding crawler function
(`Crawler.crawler_entry`).
"""
def __init__(self, **kwargs):
super().__init__()
self.kwargs = kwargs
def __str__(self):
# TODO: Expose more information?
return f'CrawlerTrigger {self.kwargs}'
| 29.5 | 76 | 0.665639 | 616 | 0.949153 | 0 | 0 | 0 | 0 | 0 | 0 | 354 | 0.545455 |
d04fcbb17bf03785d02041e016915bdc95d6404c | 4,514 | py | Python | tiddlyweb/web/validator.py | tiddlyweb/tiddlyweb | 376bcad280e24d2de4d74883dc4d8369abcb2c28 | [
"BSD-3-Clause"
]
| 57 | 2015-02-01T21:03:34.000Z | 2021-12-25T12:02:31.000Z | tiddlyweb/web/validator.py | tiddlyweb/tiddlyweb | 376bcad280e24d2de4d74883dc4d8369abcb2c28 | [
"BSD-3-Clause"
]
| 6 | 2016-02-05T11:43:32.000Z | 2019-09-05T13:38:49.000Z | tiddlyweb/web/validator.py | tiddlyweb/tiddlyweb | 376bcad280e24d2de4d74883dc4d8369abcb2c28 | [
"BSD-3-Clause"
]
| 17 | 2015-05-12T08:53:23.000Z | 2021-12-21T15:56:30.000Z | """
A collection of routines for validating, sanitizing and otherwise messing
with content coming in from the web to be :py:class:`tiddlers
<tiddlyweb.model.tiddler.Tiddler>`, :py:class:`bags
<tiddlyweb.model.bag.Bag>` or :py:class:`recipes
<tiddlyweb.model.recipe.Recipe>`.
The validators can be extended by adding functions to the ``BAG_VALIDATORS``,
``RECIPE_VALIDATORS`` and ``TIDDLER_VALIDATORS``. The functions take an
entity object, and an optional WSGI ``environ`` dict.
"""
class InvalidTiddlerError(Exception):
"""
The provided :py:class:`tiddler <tiddlyweb.model.tiddler.Tiddler>`
has not passed a validation routine and has been rejected.
The caller should stop processing and return an error to calling
code or user-agent.
"""
pass
class InvalidBagError(Exception):
"""
The provided :py:class:`bag <tiddlyweb.model.bag.Bag>` has not passed
a validation routine and has been rejected. The caller should stop
processing and return an error to calling code or user-agent.
"""
pass
class InvalidRecipeError(Exception):
"""
The provided :py:class:`recipe <tiddlyweb.model.recipe.Recipe>` has
not passed a validation routine and has been rejected. The caller
should stop processing and return an error to calling code or
user-agent.
"""
pass
def sanitize_desc(entity, environ):
"""
Strip any dangerous HTML which may be present in a :py:class:`bag
<tiddlyweb.model.bag.Bag>` or :py:class:`recipe
<tiddlyweb.model.recipe.Recipe>` description.
"""
desc = entity.desc
entity.desc = sanitize_html_fragment(desc)
BAG_VALIDATORS = [
sanitize_desc,
]
TIDDLER_VALIDATORS = []
RECIPE_VALIDATORS = [
sanitize_desc,
]
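# For example (illustrative only, not part of this module), a plugin could
# register its own check alongside the defaults above:
#
#   def require_description(entity, environ):
#       if not entity.desc:
#           raise InvalidBagError('bags must carry a description')
#
#   BAG_VALIDATORS.append(require_description)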
def validate_tiddler(tiddler, environ=None):
"""
Pass the :py:class:`tiddler <tiddlyweb.model.tiddler.Tiddler>`
to each of the functions in ``TIDDLER_VALIDATORS``, in order,
either changing the content of the tiddler's attributes, or if
some aspect of the tiddler can not be accepted raising
:py:class:`InvalidTiddlerError`.
``TIDDLER_VALIDATORS`` is an empty list which may be extended
by plugins.
``validate_tiddler`` is called from :py:mod:`web handlers
<tiddlyweb.web.handler>`, when the ``accept`` constraint on
the :py:class:`policy <tiddlyweb.model.policy.Policy>` of the
:py:class:`bag <tiddlyweb.model.bag.Bag>` containing the
tiddler does not pass.
"""
_validate(tiddler, environ, TIDDLER_VALIDATORS)
def validate_bag(bag, environ=None):
"""
Pass the :py:class:`bag <tiddlyweb.model.bag.Bag>` to each of
the functions in ``BAG_VALIDATORS``, in order, either changing
the content of the bags's attributes, or if some aspect of the
bag can not be accepted raising :py:class:`InvalidBagError`.
``BAG_VALIDATORS`` may be extended by plugins.
``validate_bag`` is called whenever a bag is ``PUT`` via HTTP.
"""
_validate(bag, environ, BAG_VALIDATORS)
def validate_recipe(recipe, environ=None):
"""
Pass the :py:class:`recipe <tiddlyweb.model.recipe.Recipe>` to
each of the functions in ``RECIPE_VALIDATORS``, in order, either
changing the content of the recipes's attributes, or if some aspect
of the recipe can not be accepted raising :py:class:`InvalidRecipeError`.
``RECIPE_VALIDATORS`` may be extended by plugins.
``validate_recipe`` is called whenever a recipe is ``PUT`` via HTTP.
"""
_validate(recipe, environ, RECIPE_VALIDATORS)
def _validate(entity, environ, validators):
"""
Validate the provided entity against the list of functions
in validators.
"""
if environ is None:
environ = {}
for validator in validators:
validator(entity, environ)
def sanitize_html_fragment(fragment):
"""
Santize an HTML ``fragment``, returning a copy of the fragment
that has been cleaned up.
"""
if fragment:
import html5lib
from html5lib.sanitizer import HTMLSanitizer
from html5lib.serializer.htmlserializer import HTMLSerializer
parser = html5lib.HTMLParser(tokenizer=HTMLSanitizer)
parsed = parser.parseFragment(fragment)
walker = html5lib.treewalkers.getTreeWalker('etree')
stream = walker(parsed)
serializer = HTMLSerializer(quote_attr_values=True,
omit_optional_tags=False)
output = serializer.render(stream)
return output
else:
return fragment
| 31.347222 | 77 | 0.699158 | 843 | 0.186752 | 0 | 0 | 0 | 0 | 0 | 0 | 3,060 | 0.677891 |
d050c2f9fe46941d4dbe952021eec4b5d9528020 | 6,548 | py | Python | mth5/io/lemi424.py | kujaku11/mth5 | b7681335871f3cd1b652276fd93c08554c7538ff | [
"MIT"
]
| 5 | 2021-01-08T23:38:47.000Z | 2022-03-31T14:13:47.000Z | mth5/io/lemi424.py | kujaku11/mth5 | b7681335871f3cd1b652276fd93c08554c7538ff | [
"MIT"
]
| 76 | 2020-09-04T02:35:19.000Z | 2022-03-31T22:18:09.000Z | mth5/io/lemi424.py | kujaku11/mth5 | b7681335871f3cd1b652276fd93c08554c7538ff | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue May 11 15:31:31 2021
:copyright:
Jared Peacock ([email protected])
:license: MIT
"""
from pathlib import Path
import pandas as pd
import numpy as np
import logging
from mth5.timeseries import ChannelTS, RunTS
from mt_metadata.timeseries import Station, Run
class LEMI424:
"""
    Read in a LEMI424 file; this is a placeholder until IRIS finalizes
their reader.
"""
def __init__(self, fn=None):
self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")
self.fn = fn
self._has_data = False
self.sample_rate = 1.0
self.chunk_size = 10000
self.column_names = [
"year",
"month",
"day",
"hour",
"minute",
"second",
"bx",
"by",
"bz",
"temperature_e",
"temperature_h",
"e1",
"e2",
"e3",
"e4",
"battery",
"elevation",
"latitude",
"lat_hemisphere",
"longitude",
"lon_hemisphere",
"n_satellites",
"gps_fix",
"tdiff",
]
if self.fn:
self.read()
@property
def fn(self):
return self._fn
@fn.setter
def fn(self, value):
if value is not None:
value = Path(value)
if not value.exists():
raise IOError(f"Could not find {value}")
self._fn = value
@property
def start(self):
if self._has_data:
return "T".join(
[
"-".join(
[
f"{self._df.year.min()}",
f"{self._df.month.min():02d}",
f"{self._df.day.min():02d}",
]
),
":".join(
[
f"{self._df.hour.min():02d}",
f"{self._df.minute.min():02d}",
f"{self._df.second.min():02d}",
]
),
]
)
@property
def end(self):
if self._has_data:
return "T".join(
[
"-".join(
[
f"{self._df.year.max()}",
f"{self._df.month.max():02d}",
f"{self._df.day.max():02d}",
]
),
":".join(
[
f"{self._df.hour.max():02d}",
f"{self._df.minute.max():02d}",
f"{self._df.second.max():02d}",
]
),
]
)
@property
def latitude(self):
if self._has_data:
return np.rad2deg(self._df.latitude.median() / 3600)
@property
def longitude(self):
if self._has_data:
return np.rad2deg(self._df.longitude.median() / 3600)
@property
def elevation(self):
if self._has_data:
return self._df.elevation.median()
@property
def gps_lock(self):
if self._has_data:
return self._df.gps_fix.values
@property
def station_metadata(self):
s = Station()
if self._has_data:
s.location.latitude = self.latitude
s.location.longitude = self.longitude
s.location.elevation = self.elevation
s.time_period.start = self.start
s.time_period.end = self.end
return s
@property
def run_metadata(self):
r = Run()
r.sample_rate = self.sample_rate
r.data_logger.model = "LEMI424"
r.data_logger.manufacturer = "LEMI"
if self._has_data:
r.data_logger.power_source.voltage.start = self._df.battery.max()
r.data_logger.power_source.voltage.end = self._df.battery.min()
r.time_period.start = self.start
            r.time_period.end = self.end
        return r
def read(self, fn=None):
"""
Read a LEMI424 file using pandas
        :param fn: full path to the LEMI424 TXT file, defaults to None
        :type fn: str or :class:`pathlib.Path`, optional
        :return: None, the parsed data is stored on the object
        :rtype: None
"""
if fn is not None:
self.fn = fn
if not self.fn.exists():
msg = "Could not find file %s"
self.logger.error(msg, self.fn)
raise IOError(msg % self.fn)
        self._df = pd.read_csv(self.fn, delimiter=r"\s+", names=self.column_names)
self._has_data = True
def to_run_ts(self, fn=None, e_channels=["e1", "e2"]):
"""
Return a RunTS object from the data
        :param fn: currently unused, defaults to None
        :type fn: str or :class:`pathlib.Path`, optional
        :param e_channels: electric channel columns to include, defaults to ["e1", "e2"]
        :type e_channels: list of str, optional
        :return: all channels packaged with station and run metadata
        :rtype: :class:`mth5.timeseries.RunTS`
"""
ch_list = []
for comp in (
["bx", "by", "bz"] + e_channels + ["temperature_e", "temperature_h"]
):
if comp[0] in ["h", "b"]:
ch = ChannelTS("magnetic")
elif comp[0] in ["e"]:
ch = ChannelTS("electric")
else:
ch = ChannelTS("auxiliary")
ch.sample_rate = self.sample_rate
ch.start = self.start
ch.ts = self._df[comp].values
ch.component = comp
ch_list.append(ch)
return RunTS(
array_list=ch_list,
station_metadata=self.station_metadata,
run_metadata=self.run_metadata,
)
# =============================================================================
# define the reader
# =============================================================================
def read_lemi424(fn, e_channels=["e1", "e2"], logger_file_handler=None):
"""
Read a LEMI 424 TXT file.
:param fn: input file name
:type fn: string or Path
:param e_channels: A list of electric channels to read,
defaults to ["e1", "e2"]
:type e_channels: list of strings, optional
:return: A RunTS object with appropriate metadata
:rtype: :class:`mth5.timeseries.RunTS`
"""
txt_obj = LEMI424()
if logger_file_handler:
txt_obj.logger.addHandler(logger_file_handler)
txt_obj.read(fn)
return txt_obj.to_run_ts(e_channels=e_channels)
| 27.170124 | 81 | 0.464875 | 5,457 | 0.833384 | 0 | 0 | 2,902 | 0.443189 | 0 | 0 | 1,894 | 0.289249 |
d050d5f902907c952287689dc0a4c79b3535eea2 | 4,895 | py | Python | preprocessing/encoder.py | mjlaali/housing-model | 8f0286a4b1909b7e0218d9a8f1340b95d5b9463d | [
"Apache-2.0"
]
| null | null | null | preprocessing/encoder.py | mjlaali/housing-model | 8f0286a4b1909b7e0218d9a8f1340b95d5b9463d | [
"Apache-2.0"
]
| 3 | 2020-11-13T18:43:28.000Z | 2022-02-10T01:18:05.000Z | preprocessing/encoder.py | mjlaali/housing_model | 8f0286a4b1909b7e0218d9a8f1340b95d5b9463d | [
"Apache-2.0"
]
| null | null | null | import abc
import logging
import os
import pickle
from collections import Counter
from datetime import datetime
from typing import List, Union
import numpy as np
_logger = logging.getLogger(__name__)
class Transformation(abc.ABC):
@abc.abstractmethod
def analyze(self, raw: object) -> object:
pass
@abc.abstractmethod
def process(self, raw: object) -> object:
pass
def save(self):
pass
class StatelessTransformation(Transformation, abc.ABC):
def analyze(self, raw: object) -> object:
return self.process(raw)
def save(self):
pass
class WhitespaceTokenizer(StatelessTransformation):
def process(self, raw: str) -> list:
return raw.split(" ")
class CategoricalFeature(Transformation):
UNK = "unk"
def __init__(self, vocab_file, num_values):
self._vocab_file = vocab_file
self._token_id = {self.UNK: 0}
self._num_values = num_values
self._if_analyze = not os.path.exists(vocab_file)
if self._if_analyze:
self._vocabs = Counter()
else:
with open(self._vocab_file, "rb") as fin:
self._vocabs = pickle.load(fin)
_logger.warning(
f"The vocab file {vocab_file} already exist, hence, vocabs will not be computed again"
)
def analyze(self, raw: list) -> list:
res = []
for a_token in raw:
if self._if_analyze:
self._vocabs[a_token] += 1
res.append(0)
return res
def process(self, raw: list) -> list:
res = []
for a_token in raw:
token_id = self._token_id.get(a_token)
if token_id is None:
token_id = self._token_id[self.UNK]
res.append(token_id)
return res
def save(self):
if self._if_analyze:
with open(self._vocab_file, "wb") as fout:
pickle.dump(self._vocabs, fout)
total_tokens = sum(self._vocabs.values())
considered_token = 0
for token, freq in self._vocabs.most_common(self._num_values):
considered_token += freq
self._token_id[token] = len(self._token_id)
_logger.info(
f"{self._vocab_file} covers {considered_token/total_tokens:.2f} of tokens."
)
class ToList(StatelessTransformation):
def process(self, raw: object) -> list:
return [raw]
class Lowercase(StatelessTransformation):
def process(self, raw: str) -> str:
return raw.lower()
class DateTransformer(StatelessTransformation):
def __init__(self, template_format: str, base: str):
self._template_format = template_format
self._base = datetime.strptime(base, template_format).date()
def process(self, raw: str) -> int:
delta = datetime.strptime(raw, self._template_format).date() - self._base
return delta.days
class Scale(StatelessTransformation):
def __init__(self, scale: Union[float, int]):
self._scale = scale
def process(self, raw: float) -> float:
return self._scale * raw
class PositionEncoder(StatelessTransformation):
def __init__(self, dim, scale):
        # pair dimensions (2i, 2i+1) so the sin/cos pair below shares a frequency
        i = 2 * (np.arange(dim) // 2)
self._angle_rates = 1 / np.power(scale, i / np.float32(dim))
def process(self, raw: int) -> list:
angle_rads = raw * self._angle_rates
# apply sin to even indices in the array; 2i
angle_rads[0::2] = np.sin(angle_rads[0::2])
# apply cos to odd indices in the array; 2i+1
angle_rads[1::2] = np.cos(angle_rads[1::2])
return angle_rads
class Encoder(object):
def __init__(self, transformations: List[Transformation], dtype: str, dim: int):
self._transformations = transformations
self._mode = "analyze"
self._dtype = dtype
self._dim = dim
def __call__(self, raw_input):
assert raw_input is not None
in_val = raw_input
out_val = None
for a_transformation in self._transformations:
op = getattr(a_transformation, self._mode)
out_val = op(in_val)
if out_val is None:
raise ValueError(
f"{type(a_transformation)} generates None value for {in_val}"
)
in_val = out_val
try:
feature_value = np.asarray(out_val, dtype=self._dtype)
except TypeError as e:
raise ValueError(f"{out_val} is not compatible with {self._dtype}") from e
assert len(feature_value.shape) == self._dim
return feature_value
def save(self):
for a_transformation in self._transformations:
a_transformation.save()
self._mode = "process"
@property
def dtype(self):
return self._dtype
@property
def dim(self):
return self._dim
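# Example composition (illustrative sketch, not part of the original module):
#   days_encoder = Encoder(
#       [DateTransformer("%Y-%m-%d", base="2000-01-01"), Scale(1 / 365.0), ToList()],
#       dtype="float32", dim=1)
#   days_encoder("2000-12-31")   # analyze pass -> float32 array of shape (1,), ~[1.0]
#   days_encoder.save()          # switches all transformations to "process" mode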
| 28.459302 | 102 | 0.614913 | 4,663 | 0.952605 | 0 | 0 | 266 | 0.054341 | 0 | 0 | 394 | 0.08049 |
d05258e114b5375cb6f9a5865f707d6098504caa | 27 | py | Python | tests/unittests/__init__.py | p33t00/py-game | 9a77dc0daf3def1f17d97887281078d8a0918090 | [
"MIT"
]
| null | null | null | tests/unittests/__init__.py | p33t00/py-game | 9a77dc0daf3def1f17d97887281078d8a0918090 | [
"MIT"
]
| null | null | null | tests/unittests/__init__.py | p33t00/py-game | 9a77dc0daf3def1f17d97887281078d8a0918090 | [
"MIT"
]
| null | null | null | """Initialize unittest."""
| 13.5 | 26 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.962963 |
d0526bab2f2fcce625c5809ae54737f104402629 | 2,402 | py | Python | tests/test_anglicize.py | hugovk/python-anglicize | 1284ec72026f78d56ff5e995328547565ddb4f0b | [
"BSD-2-Clause"
]
| 1 | 2020-03-08T09:33:14.000Z | 2020-03-08T09:33:14.000Z | tests/test_anglicize.py | hugovk/python-anglicize | 1284ec72026f78d56ff5e995328547565ddb4f0b | [
"BSD-2-Clause"
]
| 2 | 2020-03-08T16:45:08.000Z | 2020-03-08T20:34:04.000Z | tests/test_anglicize.py | hugovk/python-anglicize | 1284ec72026f78d56ff5e995328547565ddb4f0b | [
"BSD-2-Clause"
]
| 1 | 2020-03-08T16:33:22.000Z | 2020-03-08T16:33:22.000Z | import pytest
from pytest import param as p
from anglicize import anglicize, build_mapping
@pytest.mark.parametrize(
"text, expected",
[
p("Abc 123", "Abc 123", id="noop"),
p("ĂaÂâÎîȘșȚț", "AaAaIiSsTt", id="romanian"),
p("ĄąĆćĘꣳŃńŹźŻż", "AaCcEeLlNnZzZz", id="polish"),
p("ÁáÉéÍíÓóÖöŐőÚúÜüŰű", "AaEeIiOoOoOoUuUuUu", id="hungarian"),
p("ÀàÆæÇçÊêËëÈèÉéÏïÔôŒœÙùÛûŸÿ", "AaAaCcEeEeEeEeIiOoOoUuUuYy", id="french"),
p("ÁáÉéÍíÓóÑñÚúÝý", "AaEeIiOoNnUuYy", id="spanish"),
p("ÁáÂâÃãÀàÇçÉéÊêÍíÓóÔôÕõÚú", "AaAaAaAaCcEeEeIiOoOoOoUu", id="portuguese"),
        # Don't be fooled by the similarities, these four swear they speak different languages:
p("ĆćČčĐ𩹮ž", "CcCcDdSsZz", id="bosnian-croatian-montenegrin-serbian"),
p("ÇçËë", "CcEe", id="albanian"),
p("ßÄäÖöÜü", "sAaOoUu", id="german"),
p("IJij", "Ii", id="dutch"),
p("Ëë", "Ee", id="luxembourgish"),
p("ÐðÉéÓóÚúÝýÞþÆæÖö", "DdEeOoUuYyPpAaOo", id="icelandic"),
p("ÆæÅ娸ÉéÈèÊêÓóÒòÂâÔô", "AaAaOoEeEeEeOoOoAaOo", id="norwegian"),
p("ÅåÄäÖö", "AaAaOo", id="swedish"),
p("ÅåÄäÖöŠšŽž", "AaAaOoSsZz", id="finnish"),
p("ŠšŽžÄäÖöÜü", "SsZzAaOoUu", id="estonian"),
p("ĀāČčĒēĢģĪīĶķĻļŅņŠšŪūŽž", "AaCcEeGgIiKkLlNnSsUuZz", id="latvian"),
p("ĄąČčĖėĘęĮįŠšŲųŪū", "AaCcEeEeIiSsUuUu", id="lithuanian"),
p("Ç窺ĞğIıİiÖöÜü", "CcSsGgIiIiOoUu", id="turkish"),
p("ÄäƏəÇçĞğIıİiKkÖöŞşÜü", "AaAaCcGgIiIiKkOoSsUu", id="azerbaijani"),
p("ÄäÇçĞğIıIiÍíÑñÖöŞşÜü", "AaCcGgIiIiIiNnOoSsUu", id="tatar"),
p("ÇçÄäŇňÖöŞşÜüÝýŽž", "CcAaNnOoSsUuYyZz", id="turkmen"),
p("ÄäÇçÊêIıİiÖöŞşŢţÜü", "AaCcEeIiIiOoSsTtUu", id="gagauz"),
p("ǍǎČčŠšŽž", "AaCcSsZz", id="bulgarian-transliteration"),
p("ᵻᶧ", "Ii", id="misc"),
],
)
def test_anglicize(text, expected):
assert anglicize(text) == expected
@pytest.mark.parametrize(
"mapping, expected",
[
(
{"A": "ĂÂ", "T": "Ț", "S": "Șß"},
{
"ă": "a",
"â": "a",
"ș": "s",
"ț": "t",
"Ă": "A",
"Â": "A",
"Ș": "S",
"Ț": "T",
"ß": "s",
},
),
],
)
def test_build_mapping(mapping, expected):
assert build_mapping(mapping) == expected
| 37.53125 | 94 | 0.562448 | 0 | 0 | 0 | 0 | 2,637 | 0.964168 | 0 | 0 | 1,596 | 0.583547 |
d053babddc7a2862293a9876d42fa1a7ffe1dc9e | 109 | py | Python | control/City.py | ChreSyr/kerys | 48749f15c8caed221598f350a2530f81dcf8f291 | [
"MIT"
]
| null | null | null | control/City.py | ChreSyr/kerys | 48749f15c8caed221598f350a2530f81dcf8f291 | [
"MIT"
]
| null | null | null | control/City.py | ChreSyr/kerys | 48749f15c8caed221598f350a2530f81dcf8f291 | [
"MIT"
]
| null | null | null |
import baopig as bp
import images as im
# TODO : a city defines the style, a district defines the content | 15.571429 | 65 | 0.752294 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 65 | 0.59633 |
d053ccfb39ce30bd9ced8dc52651dfaad639314a | 11,426 | py | Python | app/routes.py | mrtoronto/FAIPD | 8cb4df2577af515238ce6ee12e627b830bec67a6 | [
"MIT"
]
| null | null | null | app/routes.py | mrtoronto/FAIPD | 8cb4df2577af515238ce6ee12e627b830bec67a6 | [
"MIT"
]
| null | null | null | app/routes.py | mrtoronto/FAIPD | 8cb4df2577af515238ce6ee12e627b830bec67a6 | [
"MIT"
]
| null | null | null | from datetime import datetime
from flask import render_template, flash, redirect, url_for, request
from flask_login import login_user, logout_user, current_user, login_required
from werkzeug.urls import url_parse
from app import app, db
from app.forms import LoginForm, RegistrationForm, EditProfileForm, PostForm, \
ResetPasswordRequestForm, ResetPasswordForm, EditPostForm
from app.models import User, Post
from app.email import send_password_reset_email
@app.before_request
def before_request():
if current_user.is_authenticated:
current_user.last_seen = datetime.utcnow()
db.session.commit()
@app.route('/', methods=['GET', 'POST'])
@app.route('/index', methods=['GET', 'POST'])
def index():
return render_template('home.html')
@app.route('/posts/<post_target_type>', methods=['GET', 'POST'])
def feed(post_target_type):
page = request.args.get('page', 1, type=int)
if post_target_type in ['school', 'student', 'company', 'pair']:
posts = Post.query.filter_by(post_target_type=post_target_type).order_by(Post.timestamp.desc())\
.paginate(page, app.config['POSTS_PER_PAGE'], False)
if post_target_type == 'school':
page_header = 'Opportunities for Universities'
elif post_target_type == 'student':
page_header = 'Opportunities for Students'
elif post_target_type == 'company':
page_header = 'Opportunities for Companies'
elif post_target_type == 'pair':
page_header = 'Opportunities for Paired University-Companies'
elif post_target_type == 'feed':
if not current_user.is_authenticated:
return(redirect(url_for('feed', post_target_type = 'explore')))
posts = current_user.followed_posts().order_by(Post.timestamp.desc()).paginate(
page, app.config['POSTS_PER_PAGE'], False)
page_header = 'Followed Opportunities'
elif post_target_type == 'explore':
posts = Post.query.order_by(Post.timestamp.desc()).paginate(
page, app.config['POSTS_PER_PAGE'], False)
        page_header = 'All Opportunities'
    else:
        # unknown target types would otherwise hit an unbound 'posts' below
        return redirect(url_for('feed', post_target_type='explore'))
next_url = url_for('feed', page=posts.next_num, post_target_type = post_target_type) \
if posts.has_next else None
prev_url = url_for('feed', page=posts.prev_num, post_target_type = post_target_type) \
if posts.has_prev else None
return render_template('index.html',
post_target_type = post_target_type,
page_header=page_header,
posts=posts.items,
next_url=next_url,
prev_url=prev_url)
@app.route('/posts/create', methods=['GET', 'POST'])
@login_required
def make_a_post():
form = PostForm()
if current_user.user_type == 'student':
form.post_target_type.data = 'pair'
elif current_user.user_type == 'school':
form.post_target_type.data = 'company'
elif current_user.user_type == 'company':
form.post_target_type.data = 'school'
elif current_user.user_type == 'pair':
form.post_target_type.data = 'student'
else:
form.post_target_type.data = 'student'
if form.validate_on_submit() and current_user.is_authenticated:
post = Post(post_title = form.post_title.data,
body=form.body.data,
author=current_user,
post_origin_type = current_user.user_type,
post_target_type = form.post_target_type.data)
if (form.post_target_type.data == 'student' and current_user.user_type != 'pair') or \
(form.post_target_type.data == 'school' and current_user.user_type != 'company') or \
(form.post_target_type.data == 'company' and current_user.user_type != 'school') or \
(form.post_target_type.data == 'pair' and current_user.user_type != 'student'):
flash("Are you sure you set your user type correctly?")
return redirect(url_for('edit_profile'))
db.session.add(post)
db.session.commit()
flash('Your post is now live!')
return redirect(url_for('feed', post_target_type = 'explore'))
return render_template('make_a_post.html', form=form)
@app.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('index'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user is None or not user.check_password(form.password.data):
flash('Invalid username or password')
return redirect(url_for('login'))
login_user(user, remember=form.remember_me.data)
next_page = request.args.get('next')
if not next_page or url_parse(next_page).netloc != '':
next_page = url_for('index')
return redirect(next_page)
return render_template('login.html', title='Sign In', form=form)
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('index'))
@app.route('/register', methods=['GET', 'POST'])
def register():
if current_user.is_authenticated:
return redirect(url_for('index'))
form = RegistrationForm()
if form.validate_on_submit():
user = User(username=form.username.data,
email=form.email.data,
user_type = form.user_type.data,
display_name=form.display_name.data,
affiliation=form.affiliation.data)
user.set_password(form.password.data)
db.session.add(user)
db.session.commit()
flash('Congratulations, you are now a registered user!')
return redirect(url_for('login'))
return render_template('register.html', title='Register', form=form)
@app.route('/reset_password_request', methods=['GET', 'POST'])
def reset_password_request():
if current_user.is_authenticated:
return redirect(url_for('index'))
form = ResetPasswordRequestForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user:
send_password_reset_email(user)
flash('Check your email for the instructions to reset your password')
return redirect(url_for('login'))
return render_template('reset_password_request.html',
title='Reset Password', form=form)
@app.route('/reset_password/<token>', methods=['GET', 'POST'])
def reset_password(token):
if current_user.is_authenticated:
return redirect(url_for('index'))
user = User.verify_reset_password_token(token)
if not user:
return redirect(url_for('index'))
form = ResetPasswordForm()
if form.validate_on_submit():
user.set_password(form.password.data)
db.session.commit()
flash('Your password has been reset.')
return redirect(url_for('login'))
return render_template('reset_password.html', form=form)
@app.route('/user/<username>')
@login_required
def user(username):
user = User.query.filter_by(username=username).first_or_404()
page = request.args.get('page', 1, type=int)
posts = user.posts.order_by(Post.timestamp.desc()).paginate(
page, app.config['POSTS_PER_PAGE'], False)
next_url = url_for('user', username=user.username, page=posts.next_num) \
if posts.has_next else None
prev_url = url_for('user', username=user.username, page=posts.prev_num) \
if posts.has_prev else None
return render_template('user.html', user=user, posts=posts.items,
next_url=next_url, prev_url=prev_url)
@app.route('/post/<id>')
def post(id):
#page = request.args.get('page', 1, type=int)
post = Post.query.filter_by(id=id).first_or_404()
author = User.query.filter_by(id=post.user_id).first_or_404()
return render_template('post.html', post=post, author=author)
@app.route('/edit_profile', methods=['GET', 'POST'])
@login_required
def edit_profile():
form = EditProfileForm(current_user.username)
if form.validate_on_submit():
if form.username.data:
current_user.username = form.username.data
if form.about_me.data:
current_user.about_me = form.about_me.data
if form.display_name.data:
current_user.display_name = form.display_name.data
if form.affiliation.data:
current_user.affiliation = form.affiliation.data
if form.user_type.data:
current_user.user_type = form.user_type.data
db.session.commit()
flash('Your changes have been saved.')
return redirect(url_for('edit_profile'))
elif request.method == 'GET':
form.username.data = current_user.username
form.about_me.data = current_user.about_me
form.affiliation.data = current_user.affiliation
form.display_name.data = current_user.display_name
form.user_type.data = current_user.user_type
return render_template('edit_profile.html', title='Edit Profile',
form=form)
@app.route('/post/<post_id>/edit', methods=['GET', 'POST'])
@login_required
def edit_post(post_id):
post = Post.query.filter_by(id=post_id).first_or_404()
form = EditPostForm()
form.body.default = post.body
form.post_target_type.default = post.post_target_type
form.post_title.default = post.post_title
if form.validate_on_submit():
post.post_title = form.post_title.data
post.body = form.body.data
post.post_target_type = form.post_target_type.data
db.session.commit()
flash('Your changes have been saved.')
return redirect(url_for('edit_post', post_id=post_id))
elif request.method == 'GET':
form.post_target_type.data = post.post_target_type
form.post_title.data = post.post_title
form.body.data = post.body
return render_template('edit_post.html', title='Edit Profile',
form=form)
@app.route('/follow/<username>')
@login_required
def follow(username):
user = User.query.filter_by(username=username).first()
if user is None:
flash('User {} not found.'.format(username))
return redirect(url_for('index'))
if user == current_user:
flash('You cannot follow yourself!')
return redirect(url_for('user', username=username))
current_user.follow(user)
db.session.commit()
flash('You are following {}!'.format(username))
return redirect(url_for('user', username=username))
@app.route('/unfollow/<username>')
@login_required
def unfollow(username):
user = User.query.filter_by(username=username).first()
if user is None:
flash('User {} not found.'.format(username))
return redirect(url_for('index'))
if user == current_user:
flash('You cannot unfollow yourself!')
return redirect(url_for('user', username=username))
current_user.unfollow(user)
db.session.commit()
flash('You are not following {}.'.format(username))
return redirect(url_for('user', username=username))
| 40.953405 | 105 | 0.647033 | 0 | 0 | 0 | 0 | 10,866 | 0.950989 | 0 | 0 | 1,787 | 0.156398 |
d053e208c7d7b16075409089cf697fffe7f24a16 | 2,147 | py | Python | try_test.py | olvitar/python_training | 4c2ca819b1781fc785dc634f4e13e88fc0b45fc2 | [
"Apache-2.0"
]
| null | null | null | try_test.py | olvitar/python_training | 4c2ca819b1781fc785dc634f4e13e88fc0b45fc2 | [
"Apache-2.0"
]
| null | null | null | try_test.py | olvitar/python_training | 4c2ca819b1781fc785dc634f4e13e88fc0b45fc2 | [
"Apache-2.0"
]
| null | null | null | # -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
success = True
wd = webdriver.Firefox()
wait = WebDriverWait(wd, 22)
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
try:
wd.get("https://www.python.org/")
wd.find_element_by_css_selector("button").click()
wd.find_element_by_link_text("Success Stories").click()
wd.find_element_by_css_selector("button").click()
wd.find_element_by_link_text("About").click()
wd.find_element_by_css_selector("button").click()
wd.find_element_by_link_text("Docs").click()
wd.find_element_by_link_text("Audio/Visual Talks").click()
wd.find_element_by_css_selector("button").click()
wd.find_element_by_link_text("PyPI").click()
wd.find_element_by_link_text("Log in").click()
wd.find_element_by_id("username").click()
wd.find_element_by_id("username").clear()
wd.find_element_by_id("username").send_keys("oliver")
wd.find_element_by_id("password").click()
wd.find_element_by_id("password").clear()
wd.find_element_by_id("password").send_keys("Vbhjy_30")
wd.find_element_by_css_selector("input.button.button--primary").click()
wd.find_element_by_id("search").click()
wd.find_element_by_id("search").clear()
wd.find_element_by_id("search").send_keys("python")
wd.find_element_by_css_selector("button.search-form__button").click()
wd.find_element_by_xpath("//form[@id='classifiers']//button[.='Topic']").click()
wd.find_element_by_xpath("//form[@id='classifiers']//button[.='Topic']").click()
wd.find_element_by_id("search").click()
wd.find_element_by_id("search").clear()
wd.find_element_by_id("search").send_keys("pytest")
wd.find_element_by_css_selector("button.search-form__button").click()
wd.find_element_by_css_selector("button.horizontal-menu__link.dropdown__trigger").click()
wd.find_element_by_css_selector("button.dropdown__link").click()
print("Everything is Ok")
finally:
wd.quit()
if not success:
raise Exception("Test failed.")
| 40.509434 | 93 | 0.723335 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 567 | 0.264089 |
d0543092d21f71915cd4c279a74f105e00c18015 | 7,035 | py | Python | cogs/Reminders.py | noahkw/botw-bot | 8d8c9515a177c52270093fb64abf34d111535d16 | [
"MIT"
]
| 1 | 2020-11-29T23:00:27.000Z | 2020-11-29T23:00:27.000Z | cogs/Reminders.py | noahkw/botw-bot | 8d8c9515a177c52270093fb64abf34d111535d16 | [
"MIT"
]
| 18 | 2020-08-05T11:59:31.000Z | 2022-03-15T03:48:40.000Z | cogs/Reminders.py | noahkw/botw-bot | 8d8c9515a177c52270093fb64abf34d111535d16 | [
"MIT"
]
| null | null | null | import logging
import re
from datetime import timezone
import pendulum
from aioscheduler import TimedScheduler
from dateparser import parse
from discord.ext import commands
from discord.ext.menus import MenuPages
import db
from cogs import CustomCog, AinitMixin
from cogs.Logging import log_usage
from const import UNICODE_EMOJI
from menu import ReminderListSource, SimpleConfirm
from models import Reminder
from util import has_passed, auto_help, safe_send
logger = logging.getLogger(__name__)
def setup(bot):
bot.add_cog(Reminders(bot))
class ReminderConverter(commands.Converter):
async def convert(self, ctx, argument):
# match strings like 'in 1 hour to do the laundry'
r_to = re.search(r"(.*?) to (.*)", argument, re.DOTALL)
if r_to:
return r_to.group(1), r_to.group(2)
# match strings like '"28-05-20 at 18:00 KST" "Red Velvet comeback"'
# may be improved to also parse the forms '"longer string" singleword'
# and 'singleword "longer string"'
r_quotes = re.search(r'"(.*)" *"(.*)"', argument, re.DOTALL)
if r_quotes:
return r_quotes.group(1), r_quotes.group(2)
# match strings like 'tomorrow football'
tokens = argument.split()
return tokens[0], tokens[1]
def parse_date(date):
parsed_date = parse(date)
if parsed_date is None:
raise commands.BadArgument("Couldn't parse the date.")
if parsed_date.tzinfo is None:
parsed_date = parsed_date.astimezone(timezone.utc)
parsed_date = pendulum.parse(str(parsed_date))
# add 1s to account for processing time, results in nicer diffs
parsed_date = parsed_date.add(seconds=1)
if has_passed(parsed_date):
raise commands.BadArgument(
f"`{parsed_date.to_cookie_string()}` is in the past."
)
return parsed_date
class Reminders(CustomCog, AinitMixin):
def __init__(self, bot):
super().__init__(bot)
self.scheduler = TimedScheduler(prefer_utc=True)
self.scheduler.start()
Reminder.inject_bot(bot)
super(AinitMixin).__init__()
async def _ainit(self):
await self.bot.wait_until_ready()
async with self.bot.Session() as session:
reminders = await db.get_reminders(session)
for reminder in reminders:
if reminder.is_due():
await self.remind_user(reminder.reminder_id, late=True)
else:
self.scheduler.schedule(
self.remind_user(reminder.reminder_id), reminder.due
)
logger.info(f"# Initial reminders from db: {len(reminders)}")
def cog_unload(self):
self.scheduler._task.cancel()
@auto_help
@commands.group(
name="reminders",
aliases=["remindme", "remind"],
invoke_without_command=True,
brief="Set reminders in the future",
)
async def reminders_(self, ctx, *, args: ReminderConverter = None):
if args:
await ctx.invoke(self.add, args=args)
else:
await ctx.send_help(self.reminders_)
@reminders_.command(brief="Adds a new reminder")
@log_usage(command_name="remind")
async def add(self, ctx, *, args: ReminderConverter):
"""
Adds a new reminder.
Example usage:
`{prefix}remind in 3 hours to do the laundry`
`{prefix}remind 15-06-20 at 6pm KST to Irene & Seulgi debut`
`{prefix}remind in 6 minutes 30 seconds to eggs`
"""
when, what = args
parsed_date = parse_date(when)
now = pendulum.now("UTC")
diff = parsed_date.diff_for_humans(now, True)
async with self.bot.Session() as session:
reminder = Reminder(_user=ctx.author.id, due=parsed_date, content=what)
session.add(reminder)
await session.flush()
self.scheduler.schedule(self.remind_user(reminder.reminder_id), parsed_date)
await session.commit()
await ctx.send(
f"I'll remind you on `{parsed_date.to_cookie_string()}` (in {diff}): `{what}`."
)
@reminders_.command()
async def list(self, ctx):
"""
Lists your reminders
"""
async with self.bot.Session() as session:
reminders = await db.get_reminders(session, user_id=ctx.author.id)
if len(reminders) > 0:
pages = MenuPages(
source=ReminderListSource(reminders), clear_reactions_after=True
)
await pages.start(ctx)
else:
await ctx.send("You have 0 pending reminders!")
async def remind_user(self, reminder_id, late=False):
async with self.bot.Session() as session:
reminder = await db.get_reminder(session, reminder_id)
diff = reminder.created.diff_for_humans(reminder.due, True)
assert not reminder.done
user = reminder.user
if user and late:
await safe_send(
user,
f"{self.bot.custom_emoji['SHOUT']} You told me to remind you some time ago. "
f"Sorry for being late:\n{reminder.content}",
)
elif user:
message = await safe_send(
user,
f"{self.bot.custom_emoji['SHOUT']} You told me to remind you {diff} ago:\n{reminder.content}",
)
if message:
ctx = await self.bot.get_context(message)
ctx.author = user
confirm = await SimpleConfirm(
message, timeout=120.0, emoji=UNICODE_EMOJI["SNOOZE"]
).prompt(ctx)
if confirm:
try:
new_due = await self.prompt_snooze_time(reminder)
reminder.due = new_due
self.scheduler.schedule(
self.remind_user(reminder.reminder_id), new_due
)
await session.commit()
return
except commands.BadArgument as ba:
await ctx.send(ba)
reminder.done = True
await session.commit()
async def prompt_snooze_time(self, reminder):
user = reminder.user
message = await user.send(
"When do you want me to remind you again? (e.g.: `in 30 minutes`)"
)
channel = message.channel
answer = await self.bot.wait_for(
"message", check=lambda msg: msg.channel == channel and msg.author == user
)
parsed_date = parse_date(answer.content)
now = pendulum.now("UTC")
diff = parsed_date.diff_for_humans(now, True)
await channel.send(f"Reminding you again in {diff}.")
return parsed_date
| 33.341232 | 114 | 0.5828 | 5,908 | 0.839801 | 0 | 0 | 1,925 | 0.273632 | 5,193 | 0.738166 | 1,314 | 0.18678 |
d05487672c8369c2d9e228e3c2e3d6e6a8514f49 | 4,598 | py | Python | lambda/code/lambda_function.py | acloudfan/Amazon-Aurora-DAS-Setup | 9c5ca4ac3705e78e877fc51b9ba927a7d367d029 | [
"MIT-0"
]
| null | null | null | lambda/code/lambda_function.py | acloudfan/Amazon-Aurora-DAS-Setup | 9c5ca4ac3705e78e877fc51b9ba927a7d367d029 | [
"MIT-0"
]
| null | null | null | lambda/code/lambda_function.py | acloudfan/Amazon-Aurora-DAS-Setup | 9c5ca4ac3705e78e877fc51b9ba927a7d367d029 | [
"MIT-0"
]
| 2 | 2021-05-25T16:14:13.000Z | 2022-01-14T14:04:49.000Z | import json
import base64
import os
import boto3
import zlib
# Used for decryption of the received payload
import aws_encryption_sdk
from aws_encryption_sdk import CommitmentPolicy
from aws_encryption_sdk.internal.crypto import WrappingKey
from aws_encryption_sdk.key_providers.raw import RawMasterKeyProvider
from aws_encryption_sdk.identifiers import WrappingAlgorithm, EncryptionKeyType
from processor import heartbeat_processor
from processor import sqlevents_processor
# Controls the filtering of Heartbean events
FILTER_HEARTBEAT_EVENTS = os.getenv('FILTER_HEARTBEAT_EVENTS', "false").lower() == "true"
# Setup the session | clients
REGION_NAME= os.environ['AWS_REGION']
session = boto3.session.Session()
kms = session.client('kms', region_name=REGION_NAME)
# Create the encryption client
enc_client = aws_encryption_sdk.EncryptionSDKClient(commitment_policy=CommitmentPolicy.REQUIRE_ENCRYPT_ALLOW_DECRYPT)
# Represents the Master Key Provider
class MyRawMasterKeyProvider(RawMasterKeyProvider):
provider_id = "BC"
def __new__(cls, *args, **kwargs):
obj = super(RawMasterKeyProvider, cls).__new__(cls)
return obj
def __init__(self, plain_key):
RawMasterKeyProvider.__init__(self)
self.wrapping_key = WrappingKey(wrapping_algorithm=WrappingAlgorithm.AES_256_GCM_IV12_TAG16_NO_PADDING,
wrapping_key=plain_key, wrapping_key_type=EncryptionKeyType.SYMMETRIC)
def _get_raw_key(self, key_id):
return self.wrapping_key
# Decrypt the payload using the key and then decompress (zip to plaintext)
def decrypt_decompress(payload, key):
my_key_provider = MyRawMasterKeyProvider(key)
my_key_provider.add_master_key("DataKey")
decrypted_plaintext, header = enc_client.decrypt(
source=payload,
materials_manager=aws_encryption_sdk.materials_managers.default.DefaultCryptoMaterialsManager(master_key_provider=my_key_provider))
# print(decrypted)
return zlib.decompress(decrypted_plaintext, zlib.MAX_WBITS + 16)
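# How the pieces above fit together (summary of the flow implemented below):
#   1. KMS decrypts the per-record data key ("key" field) using the cluster's
#      'aws:rds:dbc-id' encryption context.
#   2. The plaintext data key seeds MyRawMasterKeyProvider, which the AWS
#      Encryption SDK uses to decrypt 'databaseActivityEvents'.
#   3. The decrypted payload is gzip-compressed JSON, hence the
#      zlib.decompress(..., MAX_WBITS + 16) above.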
# Lambda Handler function
def lambda_handler(event, context):
# Output is an array of transformed records
output = []
heartBeatEventRecords = heartbeat_processor.HeartBeatEventRecords()
sQLEventRecords = sqlevents_processor.SQLEventRecords()
for record in event['records']:
# Get the data from record - it is in base64 format
data = record['data']
payload_overall = base64.b64decode(data)
payload_overall = payload_overall.decode('utf-8')
# Parse the json payload
payload_overall_json=json.loads(payload_overall)
# Get the base64 decoded databaseActivityEvents array from the record
payload_decoded = base64.b64decode(payload_overall_json['databaseActivityEvents'])
# Decrypt the key
# RESOURCE_ID = Cluster ID of the RDS instance
RESOURCE_ID = os.environ['RESOURCE_ID']
# Decrypt
data_key_decoded = base64.b64decode(payload_overall_json['key'])
data_key_decrypt_result = kms.decrypt(CiphertextBlob=data_key_decoded, EncryptionContext={'aws:rds:dbc-id': RESOURCE_ID})
# Decrypt the data
# print(data_key_decrypt_result['Plaintext'])
data_decrypted_decompressed = decrypt_decompress(payload_decoded, data_key_decrypt_result['Plaintext'])
# Parse the JSON
data_decrypted_decompressed_json =json.loads(data_decrypted_decompressed)
if data_decrypted_decompressed_json['databaseActivityEventList'][0]['type'] == "heartbeat" :
# print(data_decrypted_decompressed_json)
heartBeatEventRecords.add(record['recordId'], data_decrypted_decompressed_json,record['approximateArrivalTimestamp'])
else:
sQLEventRecords.add(record['recordId'], data_decrypted_decompressed_json, record['approximateArrivalTimestamp'])
output_hb = heartBeatEventRecords.process(FILTER_HEARTBEAT_EVENTS)
output_sql = sQLEventRecords.process()
print('Total records processed {} records.'.format(len(output_hb)+len(output_sql)))
return {'records': output_hb + output_sql }
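# Illustrative helper (assumption: local testing only, not part of the deployed
# function): builds the Firehose-style event shape that lambda_handler() expects.
# Real records carry KMS-encrypted, gzip-compressed DAS payloads, so the
# placeholder strings below only document the structure.
def _example_firehose_event(record_id="rec-1"):
    payload = {
        "databaseActivityEvents": "<base64-encoded encrypted DAS payload>",
        "key": "<base64-encoded encrypted data key>",
    }
    data = base64.b64encode(json.dumps(payload).encode("utf-8")).decode("utf-8")
    return {
        "records": [
            {
                "recordId": record_id,
                "approximateArrivalTimestamp": 0,
                "data": data,
            }
        ]
    }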
| 38.316667 | 139 | 0.738582 | 566 | 0.123097 | 0 | 0 | 0 | 0 | 0 | 0 | 1,188 | 0.258373 |
d05800ed78f8e4d54ffdbd820c4452051fa6d0a1 | 195 | py | Python | location/admin.py | GDG2021hackathon/modoogym-backend-dj | 1af8c4132a8979bf333b725b73c3e5eab3e228b5 | [
"MIT"
]
| null | null | null | location/admin.py | GDG2021hackathon/modoogym-backend-dj | 1af8c4132a8979bf333b725b73c3e5eab3e228b5 | [
"MIT"
]
| null | null | null | location/admin.py | GDG2021hackathon/modoogym-backend-dj | 1af8c4132a8979bf333b725b73c3e5eab3e228b5 | [
"MIT"
]
| null | null | null | from django.contrib import admin
from .models import Location
class LocationAdmin(admin.ModelAdmin):
list_display = ["id", "city", "region"]
admin.site.register(Location, LocationAdmin)
| 17.727273 | 44 | 0.753846 | 82 | 0.420513 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 0.092308 |
d0592aa31ec2220c57276d8f6df3bc52f87e72e4 | 138 | py | Python | SimCam/__init__.py | AgainstEntropy/my-interpretable-ConvNeXt | ec12be73dcd6c0b0f73bd90b61b5fce33246bdbb | [
"MIT"
]
| null | null | null | SimCam/__init__.py | AgainstEntropy/my-interpretable-ConvNeXt | ec12be73dcd6c0b0f73bd90b61b5fce33246bdbb | [
"MIT"
]
| null | null | null | SimCam/__init__.py | AgainstEntropy/my-interpretable-ConvNeXt | ec12be73dcd6c0b0f73bd90b61b5fce33246bdbb | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
# @Date : 2022/4/13 12:07
# @Author : WangYihao
# @File : __init__.py.py
from SimCam.simcam import SimCam
| 19.714286 | 32 | 0.608696 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 100 | 0.724638 |
d059502a7ec59d16005b90bde454a7338ee03251 | 3,315 | py | Python | ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/iSCSI_Data_Out_template.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
]
| null | null | null | ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/iSCSI_Data_Out_template.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
]
| null | null | null | ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/iSCSI_Data_Out_template.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
]
| null | null | null | from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class iSCSI_Data_Out(Base):
__slots__ = ()
_SDM_NAME = 'iSCSI_Data_Out'
_SDM_ATT_MAP = {
'Opcode': 'iSCSI_Data_Out.header.Opcode',
'Flags': 'iSCSI_Data_Out.header.Flags',
'TotalAHSLength': 'iSCSI_Data_Out.header.TotalAHSLength',
'Unknown ': 'iSCSI_Data_Out.header.Unknown ',
'DataSegmentLength': 'iSCSI_Data_Out.header.DataSegmentLength',
'LUN': 'iSCSI_Data_Out.header.LUN',
'InitiatorTaskTag': 'iSCSI_Data_Out.header.InitiatorTaskTag',
'TargetTransferTag': 'iSCSI_Data_Out.header.TargetTransferTag',
'ExpStatSN': 'iSCSI_Data_Out.header.field0',
'DataSN': 'iSCSI_Data_Out.header.DataSN',
'BufferOffset': 'iSCSI_Data_Out.header.BufferOffset',
'HeaderDigest': 'iSCSI_Data_Out.header.HeaderDigest',
}
def __init__(self, parent):
super(iSCSI_Data_Out, self).__init__(parent)
@property
def Opcode(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Opcode']))
@property
def Flags(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Flags']))
@property
def TotalAHSLength(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TotalAHSLength']))
@property
def Unknown_(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Unknown ']))
@property
def DataSegmentLength(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DataSegmentLength']))
@property
def LUN(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LUN']))
@property
def InitiatorTaskTag(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['InitiatorTaskTag']))
@property
def TargetTransferTag(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TargetTransferTag']))
@property
def ExpStatSN(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ExpStatSN']))
@property
def DataSN(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DataSN']))
@property
def BufferOffset(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BufferOffset']))
@property
def HeaderDigest(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderDigest']))
def add(self):
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
| 37.670455 | 92 | 0.715837 | 3,232 | 0.974962 | 0 | 0 | 2,170 | 0.6546 | 0 | 0 | 724 | 0.218401 |
d05a3552b3ed442094ecdba600618970c4eeddf8 | 289 | py | Python | rated/migrations/0005_delete_profileapi.py | nyamzy/Rater | 934a6600afe4b47742621bbe0a47c1afc6325a21 | [
"Unlicense"
]
| null | null | null | rated/migrations/0005_delete_profileapi.py | nyamzy/Rater | 934a6600afe4b47742621bbe0a47c1afc6325a21 | [
"Unlicense"
]
| null | null | null | rated/migrations/0005_delete_profileapi.py | nyamzy/Rater | 934a6600afe4b47742621bbe0a47c1afc6325a21 | [
"Unlicense"
]
| null | null | null | # Generated by Django 4.0.2 on 2022-02-16 14:11
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('rated', '0004_profileapi'),
]
operations = [
migrations.DeleteModel(
name='ProfileAPI',
),
]
| 17 | 47 | 0.595156 | 204 | 0.705882 | 0 | 0 | 0 | 0 | 0 | 0 | 83 | 0.287197 |
d05d10f97cc5c0bdb332b3fd013760d9dc94d719 | 9,449 | py | Python | Code/Maskrcnn-keras/Experiments2/our_preprocessing.py | SZamboni/NightPedestrianDetection | fc492e0bd3f6f99070975d08a229cc6ef969f9e8 | [
"MIT"
]
| 3 | 2020-04-03T06:25:23.000Z | 2021-04-06T07:30:56.000Z | Code/Maskrcnn-keras/Experiments2/our_preprocessing.py | SZamboni/NightPedestrianDetection | fc492e0bd3f6f99070975d08a229cc6ef969f9e8 | [
"MIT"
]
| null | null | null | Code/Maskrcnn-keras/Experiments2/our_preprocessing.py | SZamboni/NightPedestrianDetection | fc492e0bd3f6f99070975d08a229cc6ef969f9e8 | [
"MIT"
]
| 1 | 2021-04-06T07:40:26.000Z | 2021-04-06T07:40:26.000Z | import cv2
import numpy as np
from skimage import exposure as ex
from skimage import data
from PIL import Image
import skfuzzy as fuzz
import math
import timeit
import time
'''
Histogram equalization with colour YCR_CB and histogram equalization only on Y
@img: the image to modify
@return: the image with the histogram equalized
'''
def hisEqulColor_YCRCB(img):
ycrcb=cv2.cvtColor(img,cv2.COLOR_RGB2YCR_CB)
channels=cv2.split(ycrcb)
cv2.equalizeHist(channels[0],channels[0])
cv2.merge(channels,ycrcb)
result = cv2.cvtColor(ycrcb,cv2.COLOR_YCR_CB2RGB)
return result
'''
Histogram equalization in HSV colour space, equalizing only the first channel
@img: the image to modify
@return: the image with the histogram equalized
'''
def hisEqulColor_HSV(img):
ycrcb=cv2.cvtColor(img,cv2.COLOR_RGB2HSV)
channels=cv2.split(ycrcb)
cv2.equalizeHist(channels[0],channels[0])
cv2.merge(channels,ycrcb)
result = cv2.cvtColor(ycrcb,cv2.COLOR_HSV2RGB)
return result
# Histrogram equalization from
# https://github.com/AndyHuang1995/Image-Contrast-Enhancement/blob/master/he.py
'''
Histogram equalization equalizing every colour
@img: the image to modify
@return: the image with the histogram equalized
'''
def hisEqulColor_RGB(img):
outImg = np.zeros((img.shape[0],img.shape[1],3))
for channel in range(img.shape[2]):
outImg[:, :, channel] = ex.equalize_hist(img[:, :, channel])*255
outImg[outImg>255] = 255
outImg[outImg<0] = 0
return outImg.astype(np.uint8)
'''
Gamma correction
@imgage: the image to modify
@gamma: the gamma value, 1.0 does nothing
@return: the image with the gamma corrected
'''
def adjust_gamma(image, gamma=1.0):
invGamma = 1.0 / gamma
table = np.array([((i / 255.0) ** invGamma) * 255
for i in np.arange(0, 256)]).astype("uint8")
return cv2.LUT(image, table)
# Adaptive gamma correction based on the reference.
# Reference:
# S. Huang, F. Cheng and Y. Chiu, "Efficient Contrast Enhancement Using Adaptive Gamma Correction With
# Weighting Distribution," in IEEE Transactions on Image Processing, vol. 22, no. 3, pp. 1032-1041,
# March 2013. doi: 10.1109/TIP.2012.2226047
# Revised from https://github.com/mss3331/AGCWD/blob/master/AGCWD.m
#from https://github.com/qyou/AGCWD/blob/master/agcwd.py
'''
Adaptive gamma correction with Weighting Distribution
@image: the image to modify
@w: the weight distribution
@return: the image with the gamma corrected
'''
def agcwd(image, w=0.5):
is_colorful = len(image.shape) >= 3
img = extract_value_channel(image) if is_colorful else image
img_pdf = get_pdf(img)
max_intensity = np.max(img_pdf)
min_intensity = np.min(img_pdf)
w_img_pdf = max_intensity * (((img_pdf - min_intensity) / (max_intensity - min_intensity)) ** w)
w_img_cdf = np.cumsum(w_img_pdf) / np.sum(w_img_pdf)
l_intensity = np.arange(0, 256)
l_intensity = np.array([255 * (e / 255) ** (1 - w_img_cdf[e]) for e in l_intensity], dtype=np.uint8)
enhanced_image = np.copy(img)
height, width = img.shape
for i in range(0, height):
for j in range(0, width):
intensity = enhanced_image[i, j]
enhanced_image[i, j] = l_intensity[intensity]
enhanced_image = set_value_channel(image, enhanced_image) if is_colorful else enhanced_image
return enhanced_image
def extract_value_channel(color_image):
color_image = color_image.astype(np.float32) / 255.
hsv = cv2.cvtColor(color_image, cv2.COLOR_BGR2HSV)
v = hsv[:, :, 2]
return np.uint8(v * 255)
def get_pdf(gray_image):
height, width = gray_image.shape
pixel_count = height * width
hist = cv2.calcHist([gray_image], [0], None, [256], [0, 256])
return hist / pixel_count
def set_value_channel(color_image, value_channel):
value_channel = value_channel.astype(np.float32) / 255
color_image = color_image.astype(np.float32) / 255.
color_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2HSV)
color_image[:, :, 2] = value_channel
color_image = np.array(cv2.cvtColor(color_image, cv2.COLOR_HSV2BGR) * 255, dtype=np.uint8)
return color_image
# Then we sould have from https://github.com/AndyHuang1995/Image-Contrast-Enhancement/blob/master/ying.py
# from https://www.programcreek.com/python/example/89353/cv2.createCLAHE,
# CLAHE (Contrast-limited adaptive histogram equalization)
'''
Function that apply CLAHE (Contrast-limited adaptive histogram equalization)
to every channel of the image
@imgage: the image to modify
@return: the image with the histrogram corrected
'''
def applyCLAHE(image, clip_limit=3):
# convert image to LAB color model
image_lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
# split the image into L, A, and B channels
l_channel, a_channel, b_channel = cv2.split(image_lab)
# apply CLAHE to lightness channel
clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=(8, 8))
cl = clahe.apply(l_channel)
# merge the CLAHE enhanced L channel with the original A and B channel
merged_channels = cv2.merge((cl, a_channel, b_channel))
# convert iamge from LAB color model back to RGB color model
final_image = cv2.cvtColor(merged_channels, cv2.COLOR_LAB2BGR)
return final_image
# RETINEX from https://github.com/dongb5/Retinex/blob/master/
def singleScaleRetinex(img, sigma):
retinex = np.log10(img) - np.log10(cv2.GaussianBlur(img, (0, 0), sigma))
return retinex
def multiScaleRetinex(img, sigma_list):
retinex = np.zeros_like(img)
for sigma in sigma_list:
retinex += singleScaleRetinex(img, sigma)
retinex = retinex / len(sigma_list)
return retinex
def colorRestoration(img, alpha, beta):
img_sum = np.sum(img, axis=2, keepdims=True)
color_restoration = beta * (np.log10(alpha * img) - np.log10(img_sum))
return color_restoration
def simplestColorBalance(img, low_clip, high_clip):
total = img.shape[0] * img.shape[1]
for i in range(img.shape[2]):
unique, counts = np.unique(img[:, :, i], return_counts=True)
current = 0
for u, c in zip(unique, counts):
if float(current) / total < low_clip:
low_val = u
if float(current) / total < high_clip:
high_val = u
current += c
img[:, :, i] = np.maximum(np.minimum(img[:, :, i], high_val), low_val)
return img
def MSRCR(img, sigma_list, G, b, alpha, beta, low_clip, high_clip):
img = np.float64(img) + 1.0
img_retinex = multiScaleRetinex(img, sigma_list)
img_color = colorRestoration(img, alpha, beta)
img_msrcr = G * (img_retinex * img_color + b)
for i in range(img_msrcr.shape[2]):
img_msrcr[:, :, i] = (img_msrcr[:, :, i] - np.min(img_msrcr[:, :, i])) / \
(np.max(img_msrcr[:, :, i]) - np.min(img_msrcr[:, :, i])) * \
255
img_msrcr = np.uint8(np.minimum(np.maximum(img_msrcr, 0), 255))
img_msrcr = simplestColorBalance(img_msrcr, low_clip, high_clip)
return img_msrcr
def automatedMSRCR(img, sigma_list):
img = np.float64(img) + 1.0
img_retinex = multiScaleRetinex(img, sigma_list)
for i in range(img_retinex.shape[2]):
unique, count = np.unique(np.int32(img_retinex[:, :, i] * 100), return_counts=True)
for u, c in zip(unique, count):
if u == 0:
zero_count = c
break
low_val = unique[0] / 100.0
high_val = unique[-1] / 100.0
for u, c in zip(unique, count):
if u < 0 and c < zero_count * 0.1:
low_val = u / 100.0
if u > 0 and c < zero_count * 0.1:
high_val = u / 100.0
break
img_retinex[:, :, i] = np.maximum(np.minimum(img_retinex[:, :, i], high_val), low_val)
img_retinex[:, :, i] = (img_retinex[:, :, i] - np.min(img_retinex[:, :, i])) / \
(np.max(img_retinex[:, :, i]) - np.min(img_retinex[:, :, i])) \
* 255
img_retinex = np.uint8(img_retinex)
return img_retinex
'''
Function that apply MSRCP (Multi Scale Retinex
@img: the image to modify
@sigma_list: the list of the sigma, by default [15,80,250]
@return: the image with the histrogram corrected
'''
def MSRCP(img, sigma_list = [15,80,250], low_clip = 0.01, high_clip = 0.99):
img = np.float64(img) + 1.0
intensity = np.sum(img, axis=2) / img.shape[2]
retinex = multiScaleRetinex(intensity, sigma_list)
intensity = np.expand_dims(intensity, 2)
retinex = np.expand_dims(retinex, 2)
intensity1 = simplestColorBalance(retinex, low_clip, high_clip)
intensity1 = (intensity1 - np.min(intensity1)) / \
(np.max(intensity1) - np.min(intensity1)) * \
255.0 + 1.0
img_msrcp = np.zeros_like(img)
for y in range(img_msrcp.shape[0]):
for x in range(img_msrcp.shape[1]):
B = np.max(img[y, x])
A = np.minimum(256.0 / B, intensity1[y, x, 0] / intensity[y, x, 0])
img_msrcp[y, x, 0] = A * img[y, x, 0]
img_msrcp[y, x, 1] = A * img[y, x, 1]
img_msrcp[y, x, 2] = A * img[y, x, 2]
img_msrcp = np.uint8(img_msrcp - 1.0)
return img_msrcp
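# Minimal usage sketch (the image path is an assumption for illustration; the
# functions above all operate on 8-bit colour images loaded with OpenCV).
# MSRCP loops over pixels in Python and can be slow on large frames.
if __name__ == "__main__":
    frame = cv2.imread("night_scene.jpg")
    if frame is not None:
        equalized = hisEqulColor_YCRCB(frame)
        gamma_up = adjust_gamma(frame, gamma=1.5)
        clahe_img = applyCLAHE(frame, clip_limit=3)
        retinex_img = MSRCP(frame)
        cv2.imwrite("night_scene_clahe.jpg", clahe_img)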
| 31.288079 | 105 | 0.650122 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,236 | 0.236639 |
d05de342ea54b26f257e91dab0c259cdcde355f4 | 1,812 | py | Python | bin/make_known_good_cice_masks.py | PRIMAVERA-H2020/pre-proc | 0c47636cbe32a13a9544f3e5ce9f4c778dc55078 | [
"BSD-3-Clause"
]
| null | null | null | bin/make_known_good_cice_masks.py | PRIMAVERA-H2020/pre-proc | 0c47636cbe32a13a9544f3e5ce9f4c778dc55078 | [
"BSD-3-Clause"
]
| null | null | null | bin/make_known_good_cice_masks.py | PRIMAVERA-H2020/pre-proc | 0c47636cbe32a13a9544f3e5ce9f4c778dc55078 | [
"BSD-3-Clause"
]
| null | null | null | #!/usr/bin/env python
"""
make_known_good_cice_masks.py
Copy known good CICE masks for use in fixing the HadGEM CICE masks.
"""
import os
import numpy as np
from netCDF4 import Dataset
OUTPUT_DIR = "/gws/nopw/j04/primavera1/masks/HadGEM3Ocean_fixes/cice_masks"
def main():
"""main entry"""
rootgrp = Dataset(os.path.join(OUTPUT_DIR, "primavera_cice_orca1_uv.nc"),
"w", format="NETCDF3_CLASSIC")
print(os.path.join(OUTPUT_DIR, "primavera_cice_orca1_uv.nc"))
mask = np.zeros((330, 360))
mask[-1, 180:] += 1
_i = rootgrp.createDimension('i', 360)
_j = rootgrp.createDimension('j', 330)
mask_variable = rootgrp.createVariable('mask', 'i4', ('j', 'i'))
mask_variable.units = '1'
mask_variable[:] = mask
rootgrp.close()
print(os.path.join(OUTPUT_DIR, "primavera_cice_orca025_t.nc"))
rootgrp = Dataset(os.path.join(OUTPUT_DIR, "primavera_cice_orca025_t.nc"),
"w", format="NETCDF3_CLASSIC")
mask = np.zeros((1205, 1440))
mask[-1, 720:] += 1
_i = rootgrp.createDimension('i', 1440)
_j = rootgrp.createDimension('j', 1205)
mask_variable = rootgrp.createVariable('mask', 'i4', ('j', 'i'))
mask_variable.units = '1'
mask_variable[:] = mask
rootgrp.close()
print(os.path.join(OUTPUT_DIR, "primavera_cice_orca12_t.nc"))
rootgrp = Dataset(os.path.join(OUTPUT_DIR, "primavera_cice_orca12_t.nc"),
"w", format="NETCDF3_CLASSIC")
mask = np.zeros((3604, 4320))
mask[-1, 2160:] += 1
_i = rootgrp.createDimension('i', 4320)
_j = rootgrp.createDimension('j', 3604)
mask_variable = rootgrp.createVariable('mask', 'i4', ('j', 'i'))
mask_variable.units = '1'
mask_variable[:] = mask
rootgrp.close()
if __name__ == "__main__":
main()
| 31.241379 | 78 | 0.642936 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 520 | 0.286976 |
d05e365d3bba6c56cbc2fec6fc272d72dc471554 | 1,071 | py | Python | plotmass/statistics.py | QUT-Motorsport/QUTMS_VehicleSim | 5c69a76beb889f3e1fb8a157751f5b6dc12d6d10 | [
"MIT"
]
| 2 | 2020-05-10T13:44:05.000Z | 2021-11-30T14:59:26.000Z | plotmass/statistics.py | QUT-Motorsport/QUTMS_VehicleSim | 5c69a76beb889f3e1fb8a157751f5b6dc12d6d10 | [
"MIT"
]
| 24 | 2020-01-12T14:12:26.000Z | 2020-07-22T02:34:53.000Z | plotmass/statistics.py | QUT-Motorsport/QUTMS_VehicleSim | 5c69a76beb889f3e1fb8a157751f5b6dc12d6d10 | [
"MIT"
]
| 2 | 2020-01-13T06:51:31.000Z | 2021-12-30T16:45:43.000Z | from datetime import timedelta
class Statistics:
def __init__(self, velocity, dist):
# Calculate time
self.lap_time = 0
dist_travelled = []
self.time = []
for i in range(len(velocity)):
dist_travelled.append(0)
self.time.append(0)
if i != 0:
dist_travelled[i] = dist[i] - dist[i - 1]
self.time[i] = dist_travelled[i]/velocity[i]
self.lap_time += self.time[i]
self.fastest_lap = str(timedelta(seconds=round(self.lap_time, 2)))[:-4]
# Calculate max and min speeds in km/h
self.max_speed = round(max(velocity) * 3.6, 2)
self.min_speed = round(min(velocity) * 3.6, 2)
def get_t(self):
return self.time
def get_time(self):
return self.get_t()
def get_fastest_lap(self):
return self.fastest_lap
def get_lap_time(self):
return self.lap_time
def get_max_speed(self):
return self.max_speed
def get_min_speed(self):
return self.min_speed | 25.5 | 79 | 0.577965 | 1,039 | 0.970121 | 0 | 0 | 0 | 0 | 0 | 0 | 54 | 0.05042 |
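# Minimal usage sketch (velocity in m/s and cumulative lap distance in m are
# illustrative values, not data from a real lap):
if __name__ == "__main__":
    demo = Statistics(velocity=[10.0, 12.5, 15.0], dist=[0.0, 5.0, 12.0])
    print(demo.get_fastest_lap(), demo.get_max_speed(), demo.get_min_speed())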
d05e5954805301cc10d8ab2d703ec21b5e037de7 | 756 | py | Python | config.py | raspberry9/tinypost | 6e4b4bf764e93f6d344fbdb9369f326f08146d00 | [
"MIT"
]
| null | null | null | config.py | raspberry9/tinypost | 6e4b4bf764e93f6d344fbdb9369f326f08146d00 | [
"MIT"
]
| null | null | null | config.py | raspberry9/tinypost | 6e4b4bf764e93f6d344fbdb9369f326f08146d00 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
import logging
import logging.config
import configparser
class Config(object):
def __init__(self, filename):
logging.config.fileConfig(filename)
config = configparser.RawConfigParser()
config.read(filename)
for option, value in config.items(self.name):
try:
_val = eval(value)
except:
_val = value
setattr(self, option, _val)
session_opts = {}
for option, value in config.items("session"):
key = "session." + option
session_opts[key] = value
self.session_opts = session_opts
host, port = self.bind.split(":")
self.host = host
self.port = int(port)
| 26.068966 | 53 | 0.539683 | 694 | 0.917989 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0.059524 |
d05e5b044a9120637eea4c01afc5076feed78586 | 2,817 | py | Python | database/database.py | Valzavator/YouTubeTrendingVideosAnalysis | 4baca01a351a20bec04331936cd9f6eafaea815d | [
"MIT"
]
| 2 | 2019-06-11T03:26:50.000Z | 2020-04-13T01:28:23.000Z | database/database.py | Valzavator/YouTubeTrendingVideosAnalysis | 4baca01a351a20bec04331936cd9f6eafaea815d | [
"MIT"
]
| 2 | 2020-01-08T13:11:49.000Z | 2020-01-08T13:11:54.000Z | database/database.py | Valzavator/YouTubeTrendingVideosAnalysis | 4baca01a351a20bec04331936cd9f6eafaea815d | [
"MIT"
]
| 1 | 2019-06-11T03:26:54.000Z | 2019-06-11T03:26:54.000Z | import os
import subprocess
from dotenv import load_dotenv
import pymongo
from pymongo import MongoClient
from pymongo.cursor import Cursor
from pymongo.errors import DuplicateKeyError, BulkWriteError
from util.args import Args
load_dotenv()
class Database:
def __init__(self, uri=Args.db_host()):
self.__client = MongoClient(uri)
self.__db = self.__client["videos_analysis"]
self.__videos_coll = self.__db["videos"]
self.__videos_coll.create_index([("county_code", pymongo.DESCENDING)])
self.__mongodump_path = os.getenv('MONGODUMP_PATH')
self.__mongorestore_path = os.getenv('MONGORESTORE_PATH')
def __del__(self):
self.close()
def save_one_video(self, video: dict) -> bool:
if video is None:
return False
try:
self.__videos_coll.insert_one(video)
return True
except DuplicateKeyError as e:
print(e)
return False
def save_many_videos(self, videos: list, ordered=False) -> int:
if videos is None or len(videos) == 0:
return 0
is_repeat = False
quantity_before = self.__videos_coll.count()
try:
self.__videos_coll.insert_many(videos, ordered=ordered)
except BulkWriteError as e:
if ordered:
raise e
is_repeat = True
if is_repeat:
return self.__videos_coll.count() - quantity_before
else:
return len(videos)
def get_all_videos(self) -> Cursor:
return self.__videos_coll.find()
def get_videos_by_country_code(self, country_code: str) -> Cursor:
return self.__videos_coll.find({'country_code': country_code})
def get_videos_by_country_codes(self, country_codes: list) -> Cursor:
return self.__videos_coll.find({'country_code': {'$in': country_codes}})
def remove_all_documents(self):
self.__videos_coll.remove()
def count(self):
return self.__videos_coll.count()
def get_all_country_codes(self) -> list:
return list(self.__videos_coll.distinct('country_code'))
def backup_database(self):
if not os.path.exists(Args.backup_db_dir()):
os.makedirs(Args.backup_db_dir())
cns_command = f'"{self.__mongodump_path}" --collection videos --db videos_analysis' \
f' --out "{os.path.abspath(Args.backup_db_dir())}"'
subprocess.check_output(cns_command)
def restore_database(self):
if not os.path.exists(Args.backup_db_dir()):
os.makedirs(Args.backup_db_dir())
cns_command = f'"{self.__mongorestore_path}" "{os.path.abspath(Args.backup_db_dir())}"'
subprocess.check_output(cns_command)
def close(self):
self.__client.close()
| 28.17 | 95 | 0.649627 | 2,569 | 0.911963 | 0 | 0 | 0 | 0 | 0 | 0 | 313 | 0.111111 |
d0629421490c20c90017965031c7298c1372c640 | 4,066 | py | Python | messaging_components/services/service_docker.py | fgiorgetti/qpid-dispatch-tests | 164c609d28db87692eed53d5361aa1ee5c97375c | [
"Apache-2.0"
]
| null | null | null | messaging_components/services/service_docker.py | fgiorgetti/qpid-dispatch-tests | 164c609d28db87692eed53d5361aa1ee5c97375c | [
"Apache-2.0"
]
| null | null | null | messaging_components/services/service_docker.py | fgiorgetti/qpid-dispatch-tests | 164c609d28db87692eed53d5361aa1ee5c97375c | [
"Apache-2.0"
]
| null | null | null | from enum import Enum
from typing import Union
from iqa_common.executor import Command, Execution, ExecutorAnsible, CommandAnsible, ExecutorContainer, \
CommandContainer, Executor
from iqa_common.utils.docker_util import DockerUtil
from messaging_abstract.component import Service, ServiceStatus
import logging
class ServiceDocker(Service):
"""
Implementation of a service represented by a docker container.
So startup and shutdown are done by managing current state of related
docker container name.
"""
_logger = logging.getLogger(__name__)
def __init__(self, name: str, executor: Union[ExecutorAnsible, ExecutorContainer]):
super().__init__(name, executor)
self.docker_host = executor.docker_host
self.docker_util = DockerUtil(docker_host=executor.docker_host)
class ServiceDockerState(Enum):
STARTED = ('start', 'started')
STOPPED = ('stop', 'stopped')
RESTARTED = ('restart', 'started')
def __init__(self, system_state, ansible_state):
self.system_state = system_state
self.ansible_state = ansible_state
def status(self) -> ServiceStatus:
"""
Returns the status based on status of container name.
:return: The status of this specific service
:rtype: ServiceStatus
"""
try:
container = self.docker_util.get_container(self.name)
if not container:
ServiceDocker._logger.debug("Service: %s - Status: UNKNOWN" % self.name)
return ServiceStatus.UNKNOWN
if container.status == 'running':
ServiceDocker._logger.debug("Service: %s - Status: RUNNING" % self.name)
return ServiceStatus.RUNNING
elif container.status == 'exited':
ServiceDocker._logger.debug("Service: %s - Status: STOPPED" % self.name)
return ServiceStatus.STOPPED
except Exception:
ServiceDocker._logger.exception('Error retrieving status of docker container')
return ServiceStatus.FAILED
return ServiceStatus.UNKNOWN
def start(self) -> Execution:
return self.executor.execute(self._create_command(self.ServiceDockerState.STARTED))
def stop(self) -> Execution:
return self.executor.execute(self._create_command(self.ServiceDockerState.STOPPED))
def restart(self) -> Execution:
return self.executor.execute(self._create_command(self.ServiceDockerState.RESTARTED))
def enable(self) -> Execution:
"""
Simply ignore it (not applicable to containers)
:return:
"""
return None
def disable(self) -> Execution:
"""
Simply ignore it (not applicable to containers)
:return:
"""
return None
def _create_command(self, service_state: ServiceDockerState):
"""
Creates a Command instance based on executor type and state
that is specific to each type of command.
:param service_state:
:return:
"""
if isinstance(self.executor, ExecutorAnsible):
state = service_state.ansible_state
restart = 'no'
if service_state == self.ServiceDockerState.RESTARTED:
restart = 'yes'
print('name=%s state=%s restart=%s docker_host=%s'
% (self.name, state, restart, self.docker_host))
docker_host_opt = 'docker_host=%s' % self.docker_host if self.docker_host else ''
return CommandAnsible('name=%s state=%s restart=%s %s'
% (self.name, state, restart, docker_host_opt),
ansible_module='docker_container',
stdout=True,
timeout=self.TIMEOUT)
elif isinstance(self.executor, ExecutorContainer):
state = service_state.system_state
return CommandContainer([], docker_command=state, stdout=True, timeout=self.TIMEOUT)
| 38.72381 | 105 | 0.632809 | 3,747 | 0.921545 | 0 | 0 | 0 | 0 | 0 | 0 | 1,020 | 0.250861 |
d063b8972e4afe0fab8307dbfa94ac49321f94ea | 4,836 | py | Python | seatsvotes/bootstrap/abstracts.py | ljwolf/seatsvotes | 6d44bba02016cc7ac24cebf6e0d70e1e9e801a5b | [
"MIT"
]
| null | null | null | seatsvotes/bootstrap/abstracts.py | ljwolf/seatsvotes | 6d44bba02016cc7ac24cebf6e0d70e1e9e801a5b | [
"MIT"
]
| null | null | null | seatsvotes/bootstrap/abstracts.py | ljwolf/seatsvotes | 6d44bba02016cc7ac24cebf6e0d70e1e9e801a5b | [
"MIT"
]
| null | null | null | import numpy as np
from ..mixins import Preprocessor, AlwaysPredictPlotter, AdvantageEstimator
from warnings import warn
class Bootstrap(Preprocessor, AlwaysPredictPlotter, AdvantageEstimator):
def __init__(self, elex_frame, covariate_columns=None,
weight_column=None,
share_column='vote_share',
year_column='year',
redistrict_column=None, district_id='district_id',
missing='ignore', uncontested='ignore'):
super().__init__(elex_frame,
covariates=covariate_columns,
weight_column=weight_column,
share_column=share_column,
year_column=year_column,
redistrict_column=redistrict_column,
district_id=district_id,
missing=missing,
uncontested=uncontested,
)
self._years = np.sort(self.long.year.unique())
@property
def years(self):
return self._years
def simulate_elections(self, n_sims=1000, predict=True,
t=-1, year=None, swing=0, target_v=None, fix=False, replace=True):
"""
Simulate elections according to a bootstrap technique.
Arguments
---------
n_sims : int
number of simulations to conduct
swing : float
arbitrary shift in vote means, will be added to the
empirical distribution of $\delta_{t}$.
target_v: float
target mean vote share to peg the simulations to.
Will ensure that the average of all
simulations shift towards this value, but no guarantees
about simulation expectation
can be made due to the structure of the bootstrap.
t : int
the target time offset to use for the counterfactual
simulations. Overridden by year.
year : int
the target year to use for the counterfactual simulations
predict : bool
flag denoting whether to use the predictive distribution
(i.e. add bootstrapped swings to
the voteshare in the previous year) or the counterfactual
distribution (i.e. add bootstrapped
swings to the voteshare in the current year).
fix : bool
flag denoting whether to force the average district vote to be
target_v exactly. If True, all elections will have exactly target_v
mean district vote share. If False, all elections will have approximately
target_v mean district vote share, with the grand mean vote share being target_v
replace : bool
flag denoting whether to resample swings with replacement or without replacement.
If the sampling occurs without replacement, then each swing is used exactly one time in a simulation.
If the sampling occurs with replacement, then each swing can be used more than one
time in a simulation, and some swings may not be used in a simulation.
Returns
---------
an (n_sims, n_districts) matrix of simulated vote shares.
"""
if fix:
raise Exception("Bootstrapped elections cannot be fixed in "
"mean to the target value.")
t = list(self.years).index(year) if year is not None else t
this_year = self.wide[t]
party_voteshares = np.average(this_year.vote_share,
weights=this_year.weight)
if predict is False:
self._GIGO("Prediction must be true if using bootstrap")
target_h = this_year.vote_share.values.flatten()
else:
target_h = this_year.vote_share__prev.values.flatten()
if swing is not None and target_v is not None:
raise ValueError("either swing or target_v, not both.")
elif target_v is not None:
swing = (target_v - party_voteshares)
obs_swings = (this_year.vote_share - this_year.vote_share__prev)
obs_swings = obs_swings.fillna(obs_swings.mean())
n_dists = len(target_h)
pweights = (this_year.weight / this_year.weight.sum()).values.flatten()
pweights /= pweights.sum()
sim_swings = np.random.choice(obs_swings + swing, (n_sims, n_dists),
replace=replace, p=pweights)
sim_h = target_h[None, :] + sim_swings
return np.clip(sim_h, 0, 1)
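# Illustrative usage sketch (the frame and column names are assumptions that
# follow the defaults accepted by __init__ above; not real election data):
#
#     model = Bootstrap(elex_frame,
#                       share_column="vote_share",
#                       year_column="year",
#                       district_id="district_id")
#     sims = model.simulate_elections(n_sims=500, year=2016, target_v=0.52)
#     # sims is an (n_sims, n_districts) array of simulated vote shares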
| 49.85567 | 121 | 0.575889 | 4,712 | 0.974359 | 0 | 0 | 57 | 0.011787 | 0 | 0 | 2,451 | 0.506824 |
d064480390cd469cea87e1fccaa98205621d8e9a | 147 | py | Python | Logging/log4.py | Anilkumar95/python-75-hackathon | 0cc9304e46ceace826090614b46d8048a068d106 | [
"MIT"
]
| null | null | null | Logging/log4.py | Anilkumar95/python-75-hackathon | 0cc9304e46ceace826090614b46d8048a068d106 | [
"MIT"
]
| null | null | null | Logging/log4.py | Anilkumar95/python-75-hackathon | 0cc9304e46ceace826090614b46d8048a068d106 | [
"MIT"
]
| 2 | 2019-01-27T16:59:48.000Z | 2019-01-29T13:07:40.000Z |
import logging
logging.warning('warning message')
logging.error('This is an error message')
logging.critical('This is a critical error message')
| 21 | 52 | 0.782313 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 77 | 0.52381 |
d064ab0067234e99ee2f70ae98cf7d5fece967e6 | 304 | py | Python | spellcheck.py | laiquzzama/laiquzzama | 42393808ecb0342392ab055d9d94220de8a15796 | [
"Apache-2.0"
]
| 3 | 2021-03-06T01:42:51.000Z | 2021-04-13T12:02:47.000Z | spellcheck.py | laiquzzama/laiquzzama | 42393808ecb0342392ab055d9d94220de8a15796 | [
"Apache-2.0"
]
| null | null | null | spellcheck.py | laiquzzama/laiquzzama | 42393808ecb0342392ab055d9d94220de8a15796 | [
"Apache-2.0"
]
| null | null | null | from textblob import TextBlob
a = str(input("enter your word to check spell"))
_b = TextBlob(a)
print (_b.correct())
# from textblob import Textblob
#mylst = ["firt","clor"]
#correct_list = []
#for word in mylst:
# correct_list.append(TextBlob())
#
#for word in correct_list:
# print (word.correct())
| 19 | 47 | 0.700658 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 208 | 0.684211 |
d064bc4db90fca2bed0f8cf38219eca21ad15605 | 1,657 | py | Python | lessons/cse-numpy/drums/drums-5.py | uiuc-cse/2014-01-30-cse | de30ff0afdbb2030c3a844b9cd138177f38d3b76 | [
"CC-BY-3.0"
]
| 1 | 2021-04-21T23:05:51.000Z | 2021-04-21T23:05:51.000Z | lessons/cse-numpy/drums/drums-5.py | gitter-badger/2014-01-30-cse | de30ff0afdbb2030c3a844b9cd138177f38d3b76 | [
"CC-BY-3.0"
]
| null | null | null | lessons/cse-numpy/drums/drums-5.py | gitter-badger/2014-01-30-cse | de30ff0afdbb2030c3a844b9cd138177f38d3b76 | [
"CC-BY-3.0"
]
| 2 | 2016-03-12T02:28:13.000Z | 2017-05-01T20:43:22.000Z | from __future__ import division
import numpy as np
import scipy as sp
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.special import jn, jn_zeros
import subprocess
def drumhead_height(n, k, distance, angle, t):
nth_zero = jn_zeros(n, k)
return np.cos(t)*np.cos(n*angle)*jn(n, distance*nth_zero[-1])
# Define polar and cartesian coordinates for the drum.
theta = np.r_[0:2*np.pi:50j]
radius = np.r_[0:1:50j]
x = np.array([r*np.cos(theta) for r in radius])
y = np.array([r*np.sin(theta) for r in radius])
radial_nodes = 2
zeros = 2
# Define the base plot.
fig = plt.figure(num=None,figsize=(16,16),dpi=120,facecolor='w',edgecolor='k')
ax = list()
# Loop over the desired angular nodes.
cnt = 0
pixcnt = 0
plt.ion()
for t in np.r_[0:2*np.pi:40j]:
cnt = 0
pixcnt += 1
for i in np.r_[0:radial_nodes+1:1]:
for j in np.r_[1:zeros+1:1]:
cnt += 1;
ax.append(fig.add_subplot(radial_nodes+1,zeros,cnt,projection='3d'))
z = np.array([drumhead_height(i, j, r, theta, t) for r in radius])
ax[-1].set_xlabel('R@%d,A@%d' % (i,j))
ax[-1].plot_surface(x,y,z,rstride=1,cstride=1,cmap=mpl.cm.Accent,linewidth=0,vmin=-1,vmax=1)
ax[-1].set_zlim(-1,1)
plt.savefig('./drum-modes-%d.png' % pixcnt, format='png')
# Collate pictures to an animated GIF.
import os
cwd = os.getcwd()
cmd = 'cd %s; ls drum-modes*.png | sort -k1.12n'%cwd
png_files = os.popen(cmd)
png_files_list = ' '.join(png_files.readlines()).replace('\n',' ')
os.popen('convert -delay 10 -loop 1 %s ./drum-animate.gif'%png_files_list)
| 31.865385 | 104 | 0.660229 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 298 | 0.179843 |
d065e2da402db36ecb6c887992ef35dec831f741 | 704 | py | Python | QB5/spiders/qb5.py | smithgoo/Scrapy_books | b556714510473f324a2952b739d79c0c78f47398 | [
"MIT"
]
| null | null | null | QB5/spiders/qb5.py | smithgoo/Scrapy_books | b556714510473f324a2952b739d79c0c78f47398 | [
"MIT"
]
| null | null | null | QB5/spiders/qb5.py | smithgoo/Scrapy_books | b556714510473f324a2952b739d79c0c78f47398 | [
"MIT"
]
| null | null | null | import scrapy
from bs4 import BeautifulSoup
import requests
from QB5.pipelines import dbHandle
from QB5.items import Qb5Item
class Qb5Spider(scrapy.Spider):
name = 'qb5'
allowed_domains = ['qb5.tw']
start_urls = ['https://qb5.tw']
def parse(self, response):
        soup = BeautifulSoup(response.text, 'html.parser')
        ###### Get the most recently updated entries
tlists = soup.find_all('div', attrs={'class': 'txt'})
# print(tlist)
item = Qb5Item()
for tlist in tlists:
xx = tlist.find_all('a')[0]
print(xx['href'])
item['url'] = xx['href']
item['name'] = xx.text
print('********************************')
yield item
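# To run this spider (assuming a standard Scrapy project layout):
#     scrapy crawl qb5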
| 28.16 | 61 | 0.536932 | 595 | 0.8241 | 472 | 0.65374 | 0 | 0 | 0 | 0 | 153 | 0.211911 |
d0688c7557a32c0ad0f636ac14f06a163a2f4570 | 36,514 | py | Python | BookDatabaseUtility.py | BitWorks/xbrlstudio | 231beb46c56c8086f9fcc8846955667d947709c2 | [
"MIT"
]
| null | null | null | BookDatabaseUtility.py | BitWorks/xbrlstudio | 231beb46c56c8086f9fcc8846955667d947709c2 | [
"MIT"
]
| null | null | null | BookDatabaseUtility.py | BitWorks/xbrlstudio | 231beb46c56c8086f9fcc8846955667d947709c2 | [
"MIT"
]
| null | null | null | """
:mod: 'BookDatabaseUtility'
~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. py:module:: BookDatabaseUtility
:copyright: Copyright BitWorks LLC, All rights reserved.
:license: MIT
:synopsis: SQLAlchemy ORM engine, metadata, and utility functions for working with dynamic sqlite databases
:description: Contains the following functions:
makeEntityTable - creates table 'entities' - columns = entity_cik, parent_cik, entity_name
makeFilingsTable - creates table 'filings####' - columns = entity_cik, q1, q2, q3, q4
getAllTables - returns a list of all SQLAlchemy Table objects
tableExists - determines whether a given table name exists in the database
getEntityTreeInfo - returns list of tuples, where each tuple is a row [(entity_cik, parent_cik, entity_name)]
getNameFromCik - uses a given cik to get an entity_name from the database
updateEntityParent - updates the parent cik of a given child cik; used when user alters entity tree view hierarchy
getEntityDict - returns a dict of the format {entity_name:entity_cik}, for all entities in database
getFilingTreeInfo - returns list of strings, where each string corresponds to a filing available for viewing
selectFromDatabase - given a cik and filing period, selects a Filing object from the database
existsInDatabase - determines whether a given filing exists in the database
manualExistsInDatabase - determines whether a given filing exists in the database, with input from user
addToEntitiesTable - updates 'entities' table to include a given entity, if not present
addToFilingsTable - updates a 'filings####' table to include a given filing, if not present
addToDatabase - adds a given fact file to the database in the form of a pickled Filing object
manualAddToDatabase - adds a given fact file to the database in the form of a pickled Filing object, with input from user
countEntityAndChildren - determines the breadth and depth of an entity tree in the database, used for status bar updates
removeEntityFromDatabase - removes a given entity (and all its children) from the database; currently an expensive function
removeFilingFromDatabase - removes a given filing item (and all its children) from the database; currently also expensive
updateEntityName - updates the name of an entity to that disclosed in the latest available filing
getLastFiling - returns the latest filing for a particular entity
renameEntityInDatabase(target_cik, new_entity_name) - manual replacement of the entity name with new_entity_name in the database
"""
try:
import pickle, sys, os, datetime, logging
database_utility_logger = logging.getLogger()
from sqlalchemy import (create_engine, Table, Column, Integer, String, PickleType)
from sqlalchemy.schema import MetaData
from sqlalchemy.pool import NullPool
# Tiered
# from . import (BookFilingUtility)
# Flat
import BookFilingUtility
except Exception as err:
database_utility_logger.error("{0}:BookDatabaseUtility import error:{1}".format(str(datetime.datetime.now()), str(err)))
def buildEngine(db_uri):
try:
global Engine
global Global_metadata
Engine = create_engine(os.path.join("sqlite:///{0}".format(db_uri)), poolclass = NullPool, echo = False)
Global_metadata = MetaData(bind = Engine, reflect = True)
except Exception as err:
database_utility_logger.error("{0}:buildEngine():{1}".format(str(datetime.datetime.now()), str(err)))
def makeEntityTable():
try:
global Global_metadata
ent = Table(
"entities",
Global_metadata,
Column("entity_cik", Integer, primary_key = True),
Column("parent_cik", Integer, nullable = True),
Column("entity_name", String(60))
)
Global_metadata.create_all(Engine)
except Exception as err:
database_utility_logger.error("{0}:makeEntityTable():{1}".format(str(datetime.datetime.now()), str(err)))
return
def makeFilingsTable(target_name):
try:
global Global_metadata
fil = Table(
target_name,
Global_metadata,
Column("entity_cik", Integer, primary_key = True),
Column("q1", PickleType, nullable = True),
Column("q2", PickleType, nullable = True),
Column("q3", PickleType, nullable = True),
Column("q4", PickleType, nullable = True)
)
Global_metadata.create_all(Engine)
except Exception as err:
database_utility_logger.error("{0}:makeFilingsTable():{1}".format(str(datetime.datetime.now()), str(err)))
return
def getAllTables():
try:
local_metadata = MetaData(bind = Engine, reflect = True)
tables = []
for table in local_metadata.sorted_tables:
tables.append(table)
return tables
except Exception as err:
database_utility_logger.error("{0}:getAllTables():{1}".format(str(datetime.datetime.now()), str(err)))
def tableExists(target_table_name):
try:
tables = getAllTables()
for table in tables:
if table.name == target_table_name:
return True
return False
except Exception as err:
database_utility_logger.error("{0}:tableExists():{1}".format(str(datetime.datetime.now()), str(err)))
def getEntityTreeInfo():
try:
connection = Engine.connect()
table_select = []
entity_list = []
tables = getAllTables()
for table in tables:
if table.name == "entities":
try:
select_stmt = table.select()
table_select = connection.execute(select_stmt).fetchall()
except Exception as err:
pass
for entry in table_select:
entity_list.append(entry)
connection.close()
return entity_list
except Exception as err:
database_utility_logger.error("{0}:getEntityTreeInfo():{1}".format(str(datetime.datetime.now()), str(err)))
def getNameFromCik(target_cik):
try:
connection = Engine.connect()
entity_name = None
tables = getAllTables()
for table in tables:
if table.name == "entities":
try:
select_stmt = table.select().where(table.columns.entity_cik == target_cik)
table_select = connection.execute(select_stmt).fetchall()
entity_name = table_select[0][2]
except Exception as err:
pass
connection.close()
return entity_name
except Exception as err:
database_utility_logger.error("{0}:getNameFromCik():{1}".format(str(datetime.datetime.now()), str(err)))
def updateEntityParent(target_child_cik, target_parent_cik):
try:
target_child_cik = int(target_child_cik)
except Exception as err:
pass
try:
target_parent_cik = int(target_parent_cik)
except Exception as err:
pass
try:
connection = Engine.connect()
tables = getAllTables()
for table in tables:
if table.name == "entities":
try:
update_stmt = table.update().where(table.columns.entity_cik == target_child_cik).values(parent_cik = target_parent_cik)
table_update = connection.execute(update_stmt)
except Exception as err:
pass
connection.close()
try:
if table_update.last_updated_params() is not None:
return_val = True
else:
return_val = False
except Exception as err:
return_val = False
return return_val
except Exception as err:
database_utility_logger.error("{0}:updateEntityParent() body:{1}".format(str(datetime.datetime.now()), str(err)))
def getEntityDict():
try:
connection = Engine.connect()
entity_dict = {} #key = entity_name, value = entity_cik
tables = getAllTables()
for table in tables:
if table.name == "entities":
try:
select_stmt = table.select()
table_select = connection.execute(select_stmt).fetchall()
for entry in table_select:
try:
entity_dict[entry[2]] = entry[0]
except Exception as err:
database_utility_logger.error("{0}:getEntityDict() inner:{1}".format(str(datetime.datetime.now()), str(err)))
except Exception as err:
database_utility_logger.error("{0}:getEntityDict() middle:{1}".format(str(datetime.datetime.now()), str(err)))
connection.close()
return entity_dict
except Exception as err:
database_utility_logger.error("{0}:getEntityDict() outer:{1}".format(str(datetime.datetime.now()), str(err)))
def getFilingTreeInfo(target_cik):
try:
target_cik = int(target_cik)
connection = Engine.connect()
filings = []
tables = getAllTables()
for table in tables:
if table.name.startswith("filings"):
try:
select_stmt = table.select().where(table.columns.entity_cik == target_cik)
table_select = connection.execute(select_stmt).fetchall()
if len(table_select) > 0:
if table_select[0][1] is not None:
filings.append(table.name[-4:] + "-Q1")
if table_select[0][2] is not None:
filings.append(table.name[-4:] + "-Q2")
if table_select[0][3] is not None:
filings.append(table.name[-4:] + "-Q3")
if table_select[0][4] is not None:
filings.append(table.name[-4:] + "-Q4")
except Exception as err:
database_utility_logger.error("{0}:getFilingTreeInfo() inner:{1}".format(str(datetime.datetime.now()), str(err)))
connection.close()
return filings
except Exception as err:
database_utility_logger.error("{0}:getFilingTreeInfo() outer:{1}".format(str(datetime.datetime.now()), str(err)))
def selectFromDatabase(target_cik, target_period):
try:
connection = Engine.connect()
target_cik = int(target_cik)
tables = getAllTables()
select_result = None
for table in tables:
if table.name == "filings{0}".format(target_period[2:6]):
if target_period[0:2] == "q1":
try:
select_stmt = table.select().where(table.columns.entity_cik == target_cik)
select_result = connection.execute(select_stmt).first() #SA RowProxy
if select_result is not None:
try:
if select_result.items()[1][1] is not None:
select_result = pickle.loads(select_result.items()[1][1]) #1st: col #; 2nd: 0 = key, 1 = val
else:
select_result = None
except Exception as err:
database_utility_logger.error("{0}:selectFromDatabase() q1 inner:{1}".format(str(datetime.datetime.now()), str(err)))
select_result = None
except Exception as err:
database_utility_logger.error("{0}:selectFromDatabase() q1 outer:{1}".format(str(datetime.datetime.now()), str(err)))
select_result = None
elif target_period[0:2] == "q2":
try:
select_stmt = table.select().where(table.columns.entity_cik == target_cik)
select_result = connection.execute(select_stmt).first()
if select_result is not None:
try:
if select_result.items()[2][1] is not None:
select_result = pickle.loads(select_result.items()[2][1])
else:
select_result = None
except Exception as err:
database_utility_logger.error("{0}:selectFromDatabase() q2 inner:{1}".format(str(datetime.datetime.now()), str(err)))
select_result = None
except Exception as err:
database_utility_logger.error("{0}:selectFromDatabase() q2 outer:{1}".format(str(datetime.datetime.now()), str(err)))
select_result = None
elif target_period[0:2] == "q3":
try:
select_stmt = table.select().where(table.columns.entity_cik == target_cik)
select_result = connection.execute(select_stmt).first()
if select_result is not None:
try:
if select_result.items()[3][1] is not None:
select_result = pickle.loads(select_result.items()[3][1])
else:
select_result = None
except Exception as err:
database_utility_logger.error("{0}:selectFromDatabase() q3 inner:{1}".format(str(datetime.datetime.now()), str(err)))
select_result = None
except Exception as err:
database_utility_logger.error("{0}:selectFromDatabase() q3 outer:{1}".format(str(datetime.datetime.now()), str(err)))
select_result = None
elif target_period[0:2] == "q4":
try:
select_stmt = table.select().where(table.columns.entity_cik == target_cik)
select_result = connection.execute(select_stmt).first()
if select_result is not None:
try:
if select_result.items()[4][1] is not None:
select_result = pickle.loads(select_result.items()[4][1])
else:
select_result = None
except Exception as err:
database_utility_logger.error("{0}:selectFromDatabase() q4 inner:{1}".format(str(datetime.datetime.now()), str(err)))
select_result = None
except Exception as err:
database_utility_logger.error("{0}:selectFromDatabase() q4 outer:{1}".format(str(datetime.datetime.now()), str(err)))
select_result = None
else:
select_result = None
connection.close()
return select_result
except Exception as err:
database_utility_logger.error("{0}:selectFromDatabase() outer:{1}".format(str(datetime.datetime.now()), str(err)))
def existsInDatabase(target_fact_uri, target_cik = None):
try:
return_vals = []
filing = BookFilingUtility.parseFactFile(target_fact_uri)
entity_cik_list, entity_parent_cik, entity_name, filing_period = BookFilingUtility.getFilingInfo(filing)
cell_list = []
if target_cik is not None:
cell = selectFromDatabase(target_cik, filing_period)
if cell is not None:
return_vals.append((target_cik, filing_period, cell))
else:
if len(entity_cik_list) >= 1:
for entity_cik in entity_cik_list:
cell = selectFromDatabase(entity_cik, filing_period)
cell_list.append((entity_cik, filing_period, cell))
for item in cell_list:
if item[2] is not None:
return_vals.append(item)
return return_vals
except Exception as err:
database_utility_logger.error("{0}:existsInDatabase():{1}".format(str(datetime.datetime.now()), str(err)))
def manualExistsInDatabase(manual_cik, manual_period):
try:
cell = selectFromDatabase(manual_cik, manual_period)
if cell is None:
return False
else:
return True
except Exception as err:
database_utility_logger.error("{0}:manualExistsInDatabase():{1}".format(str(datetime.datetime.now()), str(err)))
def addToEntitiesTable(target_entity_cik, target_parent_cik, target_entity_name):
try:
connection = Engine.connect()
tables = getAllTables()
present = False
for table in tables:
if table.name == "entities":
try:
select_stmt = table.select().where(table.columns.entity_cik == target_entity_cik)
select_result = connection.execute(select_stmt).first()
if select_result is not None:
present = True
else:
insert_stmt = table.insert().values(entity_cik = target_entity_cik,
parent_cik = target_parent_cik,
entity_name = target_entity_name)
insert_result = connection.execute(insert_stmt)
present = True
except Exception as err:
database_utility_logger.error("{0}:addToEntitiesTable() inner:{1}".format(str(datetime.datetime.now()), str(err)))
connection.close()
return present
except Exception as err:
database_utility_logger.error("{0}:addToEntitiesTable() outer:{1}".format(str(datetime.datetime.now()), str(err)))
def addToFilingsTable(target_table_name, target_entity_cik, target_quarter, target_filing):
try:
target_filing = pickle.dumps(target_filing)
connection = Engine.connect()
tables = getAllTables()
present = False
for table in tables:
if table.name == target_table_name:
try:
select_stmt = table.select().where(table.columns.entity_cik == target_entity_cik)
select_result = connection.execute(select_stmt).first()
except Exception as err:
database_utility_logger.error("{0}:addToFilingsTable() select:{1}".format(str(datetime.datetime.now()), str(err)))
if select_result is not None:
if target_quarter == "q1":
try:
update_stmt = table.update().where(table.columns.entity_cik == target_entity_cik).values(q1 = target_filing)
update_result = connection.execute(update_stmt)
present = True
except Exception as err:
database_utility_logger.error("{0}:addToFilingsTable() select_result == None q1:{1}".format(str(datetime.datetime.now()), str(err)))
elif target_quarter == "q2":
try:
update_stmt = table.update().where(table.columns.entity_cik == target_entity_cik).values(q2 = target_filing)
update_result = connection.execute(update_stmt)
present = True
except Exception as err:
database_utility_logger.error("{0}:addToFilingsTable() select_result == None q2:{1}".format(str(datetime.datetime.now()), str(err)))
elif target_quarter == "q3":
try:
update_stmt = table.update().where(table.columns.entity_cik == target_entity_cik).values(q3 = target_filing)
update_result = connection.execute(update_stmt)
present = True
except Exception as err:
database_utility_logger.error("{0}:addToFilingsTable() select_result == None q3:{1}".format(str(datetime.datetime.now()), str(err)))
elif target_quarter == "q4":
try:
update_stmt = table.update().where(table.columns.entity_cik == target_entity_cik).values(q4 = target_filing)
update_result = connection.execute(update_stmt)
present = True
except Exception as err:
database_utility_logger.error("{0}:addToFilingsTable() select_result == None q4:{1}".format(str(datetime.datetime.now()), str(err)))
else:
if target_quarter == "q1":
try:
insert_stmt = table.insert().values(entity_cik = target_entity_cik, q1 = target_filing)
insert_result = connection.execute(insert_stmt)
present = True
except Exception as err:
database_utility_logger.error("{0}:addToFilingsTable() select_result != None q1:{1}".format(str(datetime.datetime.now()), str(err)))
elif target_quarter == "q2":
try:
insert_stmt = table.insert().values(entity_cik = target_entity_cik, q2 = target_filing)
insert_result = connection.execute(insert_stmt)
present = True
except Exception as err:
database_utility_logger.error("{0}:addToFilingsTable() select_result != None q2:{1}".format(str(datetime.datetime.now()), str(err)))
elif target_quarter == "q3":
try:
insert_stmt = table.insert().values(entity_cik = target_entity_cik, q3 = target_filing)
insert_result = connection.execute(insert_stmt)
present = True
except Exception as err:
database_utility_logger.error("{0}:addToFilingsTable() select_result != None q3:{1}".format(str(datetime.datetime.now()), str(err)))
elif target_quarter == "q4":
try:
insert_stmt = table.insert().values(entity_cik = target_entity_cik, q4 = target_filing)
insert_result = connection.execute(insert_stmt)
present = True
except Exception as err:
database_utility_logger.error("{0}:addToFilingsTable() select_result != None q4:{1}".format(str(datetime.datetime.now()), str(err)))
        connection.close()
return present
except Exception as err:
database_utility_logger.error("{0}:addToFilingsTable() outer:{1}".format(str(datetime.datetime.now()), str(err)))
def addToDatabase(target_fact_uri, target_cik = None):
try:
filing = BookFilingUtility.parseFactFile(target_fact_uri)
target_cik_list, target_parent_cik, target_name, filing_period = BookFilingUtility.getFilingInfo(filing)
if target_cik is not None:
target_cik = int(target_cik)
else:
if len(target_cik_list) >= 1:
target_cik = int(target_cik_list[0])
filing_year = filing_period[2:6]
filing_quarter = filing_period[0:2]
filing_table_name = "filings" + filing_year
if target_cik == None:
return
tables = getAllTables()
filings_table_found = False
for table in tables:
if table.name == filing_table_name:
filings_table_found = True
if not filings_table_found:
makeFilingsTable(filing_table_name)
addToEntitiesTable(target_cik, target_parent_cik, target_name)
addToFilingsTable(filing_table_name, target_cik, filing_quarter, filing)
updateEntityName(target_cik)
except Exception as err:
database_utility_logger.error("{0}:addToDatabase():{1}".format(str(datetime.datetime.now()), str(err)))
return
def manualAddToDatabase(manual_cik, manual_name, manual_period, target_fact_uri):
try:
filing = BookFilingUtility.parseFactFile(target_fact_uri)
target_cik = int(manual_cik)
target_parent_cik = None
target_name = str(manual_name)
manual_period = str(manual_period)
filing_year = manual_period[2:6]
filing_quarter = manual_period[0:2]
filing_table_name = "filings" + filing_year
tables = getAllTables()
filings_table_found = False
for table in tables:
if table.name == filing_table_name:
filings_table_found = True
if not filings_table_found:
makeFilingsTable(filing_table_name)
addToEntitiesTable(target_cik, target_parent_cik, target_name)
addToFilingsTable(filing_table_name, target_cik, filing_quarter, filing)
updateEntityName(target_cik)
except Exception as err:
database_utility_logger.error("{0}:manualAddToDatabase():{1}".format(str(datetime.datetime.now()), str(err)))
return
def countEntityAndChildren(target_cik, count = 0):
try:
connection = Engine.connect()
target_cik = int(target_cik)
tables = getAllTables()
if len(tables) > 0:
for table in tables:
if table.exists() is True:
if table.name == "entities":
try:
entity_sel_stmt = table.select().where(table.columns.entity_cik == target_cik)
entity_sel_result = connection.execute(entity_sel_stmt).fetchall()
if entity_sel_result is not None:
count += len(entity_sel_result)
children_sel_stmt = table.select().where(table.columns.parent_cik == target_cik)
children_sel_result = connection.execute(children_sel_stmt).fetchall()
except Exception as err:
database_utility_logger.error("{0}:countEntityAndChildren() inner:{1}".format(str(datetime.datetime.now()), str(err)))
if children_sel_result is not None:
for result in children_sel_result:
count += countEntityAndChildren(result.entity_cik)
connection.close()
return count
except Exception as err:
database_utility_logger.error("{0}:countEntityAndChildren() outer:{1}".format(str(datetime.datetime.now()), str(err)))
def removeEntityFromDatabase(book_main_window, target_cik, call = 0, total_items = 0):
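    """Recursively delete an entity and its children from every table, updating the progress bar.

    Empty filings tables are dropped and the database is vacuumed once the last item is removed.
    """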
try:
call += 1
if call == 1:
total_items = countEntityAndChildren(target_cik)
if total_items != 0:
progress = int(100 * call / total_items)
else:
if total_items != 0:
progress = int(100 * call / total_items)
book_main_window.updateProgressBar(progress)
children_sel_result = None
connection = Engine.connect()
target_cik = int(target_cik)
tables = getAllTables()
if len(tables) > 0:
for table in tables:
if table.exists() is True:
if table.name == "entities":
try:
parent_del_stmt = table.delete().where(table.columns.entity_cik == target_cik)
parent_del_result = connection.execute(parent_del_stmt)
children_sel_stmt = table.select().where(table.columns.parent_cik == target_cik)
children_sel_result = connection.execute(children_sel_stmt).fetchall()
except Exception as err:
pass
else:
try:
generic_del_stmt = table.delete().where(table.columns.entity_cik == target_cik)
generic_del_result = connection.execute(generic_del_stmt)
except Exception as err:
pass
if children_sel_result is not None:
for result in children_sel_result:
call = removeEntityFromDatabase(book_main_window, result.entity_cik,
call = call, total_items = total_items)
if len(tables) > 0:
for table in tables:
if table.exists() is True:
try:
generic_sel_stmt = table.select()
generic_sel_result = connection.execute(generic_sel_stmt).first()
if generic_sel_result is None and table.name != "entities":
table.drop(bind = Engine)
except Exception as err:
pass
if call == total_items:
connection.execute("VACUUM")
book_main_window.resetProgressBar()
connection.close()
return call
except Exception as err:
database_utility_logger.error("{0}:removeEntityFromDatabase():{1}".format(str(datetime.datetime.now()), str(err)))
def removeFilingFromDatabase(book_main_window, target_cik, target_period, call = 0, total_items = 0):
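    """Remove one quarter (period like 'q12019') or a whole year ('2019') of filings for an entity."""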
try:
call += 1
total_items = 3
progress = int(100 * call / total_items)
book_main_window.updateProgressBar(progress)
connection = Engine.connect()
target_cik = int(target_cik)
target_period = str(target_period)
if len(target_period) == 6:
target_quarter = target_period[0:2]
target_year = target_period[2:6]
target_table_name = "filings" + target_year
elif len(target_period) == 4:
target_year = target_period
target_table_name = "filings" + target_year
tables = getAllTables()
if len(tables) > 0:
for table in tables:
if table.exists() is True:
if table.name == target_table_name:
try:
if len(target_period) == 6:
if target_quarter == "q1":
del_stmt = table.update().where(table.columns.entity_cik == target_cik).values(q1 = None)
elif target_quarter == "q2":
del_stmt = table.update().where(table.columns.entity_cik == target_cik).values(q2 = None)
elif target_quarter == "q3":
del_stmt = table.update().where(table.columns.entity_cik == target_cik).values(q3 = None)
elif target_quarter == "q4":
del_stmt = table.update().where(table.columns.entity_cik == target_cik).values(q4 = None)
elif len(target_period) == 4:
del_stmt = table.delete().where(table.columns.entity_cik == target_cik)
connection.execute(del_stmt)
except Exception as err:
database_utility_logger.error("{0}:removeFilingFromDatabase() delete:{1}".format(str(datetime.datetime.now()), str(err)))
for table in tables:
if table.exists() is True:
try:
generic_sel_stmt = table.select()
generic_sel_result = connection.execute(generic_sel_stmt).first()
if generic_sel_result is None and table.name != "entities":
table.drop(bind = Engine)
except Exception as err:
database_utility_logger.error("{0}:removeFilingFromDatabase() table_drop:{1}".format(str(datetime.datetime.now()), str(err)))
call += 1
progress = int(100 * call / total_items)
book_main_window.updateProgressBar(progress)
connection.execute("VACUUM")
call += 1
progress = int(100 * call / total_items)
book_main_window.updateProgressBar(progress)
book_main_window.resetProgressBar()
connection.close()
return True
except Exception as err:
database_utility_logger.error("{0}:removeFilingFromDatabase() outer:{1}".format(str(datetime.datetime.now()), str(err)))
def updateEntityName(target_cik):
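    """Refresh the entity_name column in the entities table from the entity's most recent filing."""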
try:
connection = Engine.connect()
last_filing = getLastFiling(target_cik)
target_entity_cik_list, entity_parent_cik, new_entity_name, filing_period = BookFilingUtility.getFilingInfo(last_filing)
tables = getAllTables()
for table in tables:
if table.name == "entities":
try:
update_stmt = table.update().where(table.columns.entity_cik == target_cik).values(entity_name = new_entity_name)
update_result = connection.execute(update_stmt)
except Exception as err:
database_utility_logger.error("{0}:updateEntityName() inner:{1}".format(str(datetime.datetime.now()), str(err)))
connection.close()
except Exception as err:
database_utility_logger.error("{0}:updateEntityName() outer:{1}".format(str(datetime.datetime.now()), str(err)))
return
def getLastFiling(target_cik):
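    """Return the most recent unpickled filing for an entity, scanning filings tables from newest to oldest."""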
try:
connection = Engine.connect()
tables = getAllTables()
select_result = None
target_cik = int(target_cik)
for table in reversed(tables):
if table.name.startswith("filings"):
try:
select_stmt = table.select().where(table.columns.entity_cik == target_cik)
select_result = connection.execute(select_stmt).first() #SA RowProxy
if select_result is not None: # entity is in table
try:
for col in reversed(select_result.items()):
if col[1] is not None: # latest filing
select_result = pickle.loads(col[1]) # [0 = key, 1 = val]
return select_result
else:
pass
except Exception as err:
database_utility_logger.error("{0}:getLastFiling() inner:{1}".format(str(datetime.datetime.now()), str(err)))
select_result = None
except Exception as err:
database_utility_logger.error("{0}:getLastFiling() middle:{1}".format(str(datetime.datetime.now()), str(err)))
select_result = None
connection.close()
return select_result
except Exception as err:
database_utility_logger.error("{0}:getLastFiling() outer:{1}".format(str(datetime.datetime.now()), str(err)))
def renameEntityInDatabase(target_cik, new_entity_name):
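    """Set a new display name for an entity in the entities table."""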
try:
target_cik = int(target_cik)
new_entity_name = str(new_entity_name)
connection = Engine.connect()
tables = getAllTables()
for table in tables:
if table.name == "entities":
try:
update_stmt = table.update().where(table.columns.entity_cik == target_cik).values(entity_name = new_entity_name)
update_result = connection.execute(update_stmt)
except Exception as err:
database_utility_logger.error("{0}:renameEntityInDatabase() inner:{1}".format(str(datetime.datetime.now()), str(err)))
connection.close()
except Exception as err:
database_utility_logger.error("{0}:renameEntityInDatabase() outer:{1}".format(str(datetime.datetime.now()), str(err)))
return
| 50.019178 | 160 | 0.572082 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,190 | 0.142137 |
d0693c16999250ca2301257521cd46471a60d89e | 58 | py | Python | hela/math/__init__.py | erikmunkby/hela | 915221386fbdb8d6f5783b97d44af036a5e5f7aa | [
"Apache-2.0"
]
| 1 | 2022-02-26T17:55:42.000Z | 2022-02-26T17:55:42.000Z | hela/math/__init__.py | erikmunkby/hela | 915221386fbdb8d6f5783b97d44af036a5e5f7aa | [
"Apache-2.0"
]
| null | null | null | hela/math/__init__.py | erikmunkby/hela | 915221386fbdb8d6f5783b97d44af036a5e5f7aa | [
"Apache-2.0"
]
| null | null | null | """
Module for math and statistics related functions.
"""
| 14.5 | 49 | 0.724138 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 57 | 0.982759 |
d06a5181661f5f73feeb7820ddebac2f55560f7e | 3,491 | py | Python | src/models/markov_chain.py | dballesteros7/master-thesis-2015 | 8c0bf9a6eef172fc8167a30780ae0666f8ea2d88 | [
"MIT"
]
| null | null | null | src/models/markov_chain.py | dballesteros7/master-thesis-2015 | 8c0bf9a6eef172fc8167a30780ae0666f8ea2d88 | [
"MIT"
]
| null | null | null | src/models/markov_chain.py | dballesteros7/master-thesis-2015 | 8c0bf9a6eef172fc8167a30780ae0666f8ea2d88 | [
"MIT"
]
| null | null | null | import itertools
import numpy as np
import constants
from utils import file
class MarkovChain:
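    """First-order Markov chain over item-to-item transitions.

    Counts are initialised with a pseudo count; when use_rejection is set,
    items already present in the partial set are never proposed.
    """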
def __init__(self, n_items: int, pseudo_count: int = 1,
use_rejection: bool = True):
self.n_items = n_items
self.counts = np.empty(n_items)
self.first_order_counts = np.empty((n_items, n_items))
self.counts.fill((n_items - 1) * pseudo_count)
self.first_order_counts.fill(pseudo_count)
self.use_rejection = use_rejection
np.fill_diagonal(self.first_order_counts, 0) # No self loops.
def train(self, ordered_sets: np.ndarray):
for ordered_set in ordered_sets:
for item, next_item in itertools.zip_longest(
ordered_set, ordered_set[1:]):
if next_item is not None:
self.counts[item] += 1
self.first_order_counts[item][next_item] += 1
def propose_set_item(self, to_complete):
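        """Rank all items by their likelihood of filling the '?' position, most likely first."""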
missing_pos = to_complete.index('?')
probs = np.zeros_like(self.first_order_counts)
for idx, row in enumerate(self.first_order_counts):
probs[idx, :] = self.first_order_counts[idx, :] / self.counts[idx]
if missing_pos == 0:
column = probs[:, int(to_complete[missing_pos + 1])]
row = np.ones_like(column)
elif missing_pos == len(to_complete) - 1:
row = probs[:, int(to_complete[missing_pos - 1])]
column = np.ones_like(row)
else:
column = probs[:, int(to_complete[missing_pos + 1])]
row = probs[:, int(to_complete[missing_pos - 1])]
likelihood = column*row
to_complete = [int(x) for x in to_complete if x != '?']
if self.use_rejection:
likelihood[to_complete] = 0.0
sorted_indexes = np.argsort(likelihood)
return sorted_indexes[::-1]
def train_and_evaluate(dataset_name: str, n_items: int):
for fold in range(1, constants.N_FOLDS + 1):
for use_rejection in (False, True):
model = MarkovChain(n_items, use_rejection=use_rejection)
loaded_data = file.load_set_data(
constants.TRAIN_DATA_PATH_TPL.format(
fold=fold, dataset=dataset_name))
model.train(loaded_data)
loaded_test_data = file.load_csv_test_data(
constants.PARTIAL_DATA_PATH_TPL.format(
fold=fold, dataset=dataset_name))
model_name = 'pseudo_markov' if use_rejection else 'markov'
target_path = constants.RANKING_MODEL_PATH_TPL.format(
dataset=dataset_name, fold=fold, model=model_name)
with open(target_path, 'w') as output_file:
for subset in loaded_test_data:
                    result = model.propose_set_item(subset)
# if subset.index('?') > 0:
# short_subset = subset[:subset.index('?')]
# short_subset = [int(item) for item in short_subset]
#
output_file.write(','.join(str(item) for item in result))
output_file.write('\n')
# else:
# output_file.write('-\n')
if __name__ == '__main__':
train_and_evaluate(constants.DATASET_NAME_TPL.format('100_no_singles'), 100)
#train_and_evaluate(constants.DATASET_NAME_TPL.format('50_no_singles'), 50)
| 42.573171 | 80 | 0.593813 | 1,793 | 0.513606 | 0 | 0 | 0 | 0 | 0 | 0 | 325 | 0.093097 |
d06ac96e9708483c7fba688eecfd660ccb68f5f6 | 31 | py | Python | Tasks_codes/task1_catdog.py | manasviaggarwal/tipr-second-assignment | 45869a71c9de1a1d66c3581a06854f330f38e14d | [
"MIT"
]
| null | null | null | Tasks_codes/task1_catdog.py | manasviaggarwal/tipr-second-assignment | 45869a71c9de1a1d66c3581a06854f330f38e14d | [
"MIT"
]
| null | null | null | Tasks_codes/task1_catdog.py | manasviaggarwal/tipr-second-assignment | 45869a71c9de1a1d66c3581a06854f330f38e14d | [
"MIT"
]
| null | null | null |
ACCURACY :::62.86377259982597 | 15.5 | 30 | 0.774194 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d06c3b1e34c6a4e7355555a0f272e78441d27ce6 | 11,611 | py | Python | lablog/python/ExperimentRepository.py | yonch/wireless | 5e5a081fcf3cd49d901f25db6c4c1fabbfc921d5 | [
"MIT"
]
| 29 | 2015-03-11T04:54:01.000Z | 2021-09-20T06:07:59.000Z | lablog/python/ExperimentRepository.py | darksidelemm/wireless | 5e5a081fcf3cd49d901f25db6c4c1fabbfc921d5 | [
"MIT"
]
| null | null | null | lablog/python/ExperimentRepository.py | darksidelemm/wireless | 5e5a081fcf3cd49d901f25db6c4c1fabbfc921d5 | [
"MIT"
]
| 16 | 2015-01-28T18:58:33.000Z | 2021-08-29T02:00:24.000Z |
import bsddb.db as bdb
import os.path
import cPickle
from base64 import b64encode, b64decode
from struct import pack
def toUniqueString(d):
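    """Serialise nested numbers, strings, lists, tuples and dicts into a canonical,
    length-prefixed string so that structurally equal values map to equal strings."""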
if isinstance(d, int) or isinstance(d, float) or isinstance(d, long):
s = 'n' + repr(d)
elif isinstance(d, str):
s = 's' + d
elif isinstance(d, list):
s = 'l' + reduce(lambda x,y: x+y, [toUniqueString(x) for x in d])
elif isinstance(d, tuple):
s = 't' + reduce(lambda x,y: x+y, [toUniqueString(x) for x in d])
elif isinstance(d, dict):
s = 'd' + reduce(lambda x,y: x+y, [toUniqueString(x) + toUniqueString(y) for x,y in sorted(d.items())])
else:
raise RuntimeError, "Unknown type"
return pack("<L", len(s)) + s
def toUniqueHash(d):
import hashlib
return hashlib.sha1(toUniqueString(d)).digest()
class BundleExistsError (Exception):
def __init__(self, bundle_id):
self.bundle_id = bundle_id
def __str__(self):
return "bundle ID: " + repr(bundle_id)
class ExperimentRepository(object):
"""
Stores required experiments and results. Users specify what experiments
they want to run, and how many samples they require. The repository
can then answer queries about what experiments are missing, and stores
new results.
Data structure:
experiments.db: a dictionary experimentID -> experimentSpec
requiredCount.db: a dictionary experimentID -> requiredCount
resultCount.db: a dictionary experimentID -> resultCount
results/<eId>.db: a record collection of strings, each string is a
serialized result.
    bundleIDs: a hash table bundleID -> True
"""
def __init__(self, repoDir, readOnly=True):
self.readOnly = readOnly
if readOnly:
envFlags = bdb.DB_INIT_MPOOL
self.dbFlags = bdb.DB_RDONLY
else:
envFlags = bdb.DB_CREATE | bdb.DB_INIT_TXN | bdb.DB_INIT_MPOOL | \
bdb.DB_INIT_LOCK | bdb.DB_INIT_LOG # | bdb.DB_RECOVER_FATAL
self.dbFlags = bdb.DB_CREATE | bdb.DB_READ_UNCOMMITTED | bdb.DB_AUTO_COMMIT
localDbFlags = self.dbFlags
self.env = bdb.DBEnv(0)
# set size of locking filesystem
self.env.set_lk_max_objects(100000)
self.env.set_lk_max_locks(100000)
# set size of logging manager
self.env.set_lg_regionmax(1 << 20)
# open the environment
#self.env.remove(repoDir)
self.env.open(repoDir, envFlags)
# Lazy syncing should improve the throughput
#self.env.set_flags(bdb.DB_TXN_NOSYNC, 1)
self._experimentDb = bdb.DB(self.env)
self._experimentDb.open("experiments.db",
dbtype=bdb.DB_HASH,
flags=localDbFlags)
self._requiredCountDb = bdb.DB(self.env)
self._requiredCountDb.open("requiredCount.db",
dbtype=bdb.DB_HASH,
flags=localDbFlags)
self._resultCountDb = bdb.DB(self.env)
self._resultCountDb.open("resultCount.db",
dbtype=bdb.DB_HASH,
flags=localDbFlags)
self._bundleIdsDb = bdb.DB(self.env)
self._bundleIdsDb.open("bundleIds.db",
dbtype=bdb.DB_HASH,
flags=localDbFlags)
# make the directory where results will be stored
try:
os.mkdir(os.path.join(repoDir, 'results'))
except:
# already exists? anyway, we'll get an error later
pass
if self.readOnly:
self.txn = None
else:
self.txn = self.env.txn_begin()
def close(self):
# try to commit if transaction is still open
if self.txn is not None:
try:
self.txn.commit()
except bdb.DBError:
# nothing to do in d'tor
pass
self._bundleIdsDb.close()
self._resultCountDb.close()
self._requiredCountDb.close()
self._experimentDb.close()
self.env.close()
def __del__(self):
self.close()
def _serialize(self, val):
return b64encode(cPickle.dumps(val, protocol=2))
def _deserialize(self, s):
return cPickle.loads(b64decode(s))
def _getExperimentIDs(self):
"""
Returns all the experiment IDs
"""
if self.txn is not None:
return self._experimentDb.keys(self.txn)
else:
return self._experimentDb.keys()
experimentIDs = property(_getExperimentIDs)
def _getBundleIDs(self):
"""
Returns all the bundle IDs
"""
return self._bundleIdsDb.keys(self.txn)
bundleIDs = property(_getBundleIDs)
def _getExperimentDbFilename(self, eID):
return "results/%s.db" % eID.encode('hex')
def _getExperimentDB(self, eID):
exprDb = bdb.DB(self.env)
exprDb.open(self._getExperimentDbFilename(eID),
dbtype=bdb.DB_RECNO,
flags=self.dbFlags)
#txn=self.txn)
return exprDb
def commit(self):
if self.txn is None:
raise RuntimeError, "No active transaction, are you in read-only mode?"
# Commit the transaction
self.txn.commit()
# Start a new transaction
self.txn = self.env.txn_begin()
def abort(self):
if self.txn is None:
raise RuntimeError, "No active transaction, are you in read-only mode?"
# Abort the transaction
self.txn.abort()
# Start a new transaction
self.txn = self.env.txn_begin()
def _hasExperimentHash(self, expHash):
return self._experimentDb.has_key(expHash, txn=self.txn)
def _assertWriteable(self):
if self.readOnly:
raise RuntimeError, "Opened in read only mode!"
def hasExperiment(self, experimentSpec):
expHash = toUniqueHash(experimentSpec)
return self._hasExperimentHash(expHash)
def getExperimentID(self, experimentSpec):
expHash = toUniqueHash(experimentSpec)
if not self._hasExperimentHash(expHash):
raise RuntimeError, "Experiment does not exist"
else:
return expHash
def addExperiment(self, experimentSpec, numRequired = 0):
self._assertWriteable()
expHash = toUniqueHash(experimentSpec)
if not self._experimentDb.has_key(expHash, txn=self.txn):
self._experimentDb.put(expHash,
self._serialize(experimentSpec),
txn=self.txn)
if numRequired > 0:
self.setRequiredCount(expHash, numRequired)
else:
self.require(expHash, numRequired)
return expHash
def deleteExperiment(self, eID):
"""
This deletes experiment and all related results. Bundles associated are
never deleted.
"""
self._assertWriteable()
try:
self._experimentDb.delete(eID, txn=self.txn)
except(bdb.DBNotFoundError):
pass # will do best effort
try:
self._requiredCountDb.delete(eID, txn=self.txn)
except(bdb.DBNotFoundError):
pass # will do best effort
try:
self._resultCountDb.delete(eID, txn=self.txn)
except(bdb.DBNotFoundError):
pass # will do best effort
experimentDbName = self._getExperimentDbFilename(eID)
self.env.dbremove(experimentDbName, txn=self.txn)
def getExperiment(self, eID):
return self._deserialize(self._experimentDb.get(eID, txn=self.txn))
def getRequiredCount(self, eID):
return int(self._requiredCountDb.get(eID, default=0, txn=self.txn))
def setRequiredCount(self, eID, numRequired):
self._requiredCountDb.put(eID, str(numRequired), txn=self.txn)
def require(self, eID, numRequired):
"""
Enlarges the number of required experiments, if it is smaller than
'numRequired'.
"""
if numRequired == 0:
return
curRequirement = self.getRequiredCount(eID)
if curRequirement < numRequired:
self.setRequiredCount(eID, numRequired)
def unrequireAll(self):
"""
Sets requirements for all experiments to 0
"""
for k in self._requiredCountDb.keys(self.txn):
self._requiredCountDb.delete(k, txn=self.txn)
def getResultCount(self, eID):
return int(self._resultCountDb.get(eID, default="0", txn=self.txn))
def getResults(self, eID, numResults):
exprDb = self._getExperimentDB(eID)
results = []
cursor = exprDb.cursor(txn=self.txn)
try:
for i in xrange(numResults):
k, data = cursor.get(flags=bdb.DB_NEXT)
results.append(data)
finally:
cursor.close()
exprDb.close()
if len(results) != numResults:
raise RuntimeError, "not enough results in files"
return results
def addResults(self, eID, resultList):
self._assertWriteable()
# remember number of results before insertion
numBefore = self.getResultCount(eID)
# add results
exprDb = self._getExperimentDB(eID)
try:
for res in resultList:
exprDb.append(res, txn=self.txn)
except:
exprDb.close()
self.abort()
raise
# update count of results
self._resultCountDb.put(eID, str(numBefore + len(resultList)), txn=self.txn)
exprDb.close()
def addBundle(self, bundleID, resultDict):
"""
adds the bundleID. raises if bundleID already exists
@param bundleID the ID of the bundle
@param resultDict: a dictionary eID -> List of results
"""
self._assertWriteable()
# Check bundle doesn't already exist
if self._bundleIdsDb.has_key(bundleID, txn=self.txn):
raise BundleExistsError(bundleID)
# Add the bundle ID into the bundle table
self._bundleIdsDb.put(bundleID, "", txn=self.txn)
# Add results to suitable experiments
for eID, resList in resultDict.items():
self.addResults(eID, resList)
def getPending(self):
"""
@return: a list of tuples (experimentID, numMissing) for
experiments with numMissing > 0.
"""
requiredList = self._requiredCountDb.items(self.txn)
pendingList = []
for eID, req in requiredList:
req = int(req)
present = int(self._resultCountDb.get(eID, default = "0", txn=self.txn))
if present < req:
pendingList.append( (eID, req-present ))
return pendingList
def dump(self, fname):
raise RuntimeError, "not implemented"
def load(self, fname):
raise RuntimeError, "not implemented"
| 32.432961 | 112 | 0.571872 | 10,779 | 0.928344 | 0 | 0 | 0 | 0 | 0 | 0 | 2,561 | 0.220567 |
d06c950496205dbbc1ed9eef4c8c7e1dcbe953e8 | 1,668 | py | Python | tests/pipeline/nodes/dabble/test_check_large_groups.py | ericleehy/PeekingDuck | 8cf1be842235fa60bac13bc466cac09747a780ea | [
"Apache-2.0"
]
| 1 | 2021-12-02T05:15:58.000Z | 2021-12-02T05:15:58.000Z | tests/pipeline/nodes/dabble/test_check_large_groups.py | ericleehy/PeekingDuck | 8cf1be842235fa60bac13bc466cac09747a780ea | [
"Apache-2.0"
]
| null | null | null | tests/pipeline/nodes/dabble/test_check_large_groups.py | ericleehy/PeekingDuck | 8cf1be842235fa60bac13bc466cac09747a780ea | [
"Apache-2.0"
]
| null | null | null | # Copyright 2022 AI Singapore
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from peekingduck.pipeline.nodes.dabble.check_large_groups import Node
@pytest.fixture
def check_large_groups():
node = Node(
{"input": ["obj_attrs"], "output": ["large_groups"], "group_size_threshold": 3}
)
return node
class TestCheckLargeGroups:
def test_no_obj_groups(self, check_large_groups):
array1 = []
input1 = {"obj_attrs": {"groups": array1}}
assert check_large_groups.run(input1)["large_groups"] == []
assert input1["obj_attrs"]["groups"] == array1
def test_no_large_groups(self, check_large_groups):
array1 = [0, 1, 2, 3, 4, 5]
input1 = {"obj_attrs": {"groups": array1}}
assert check_large_groups.run(input1)["large_groups"] == []
assert input1["obj_attrs"]["groups"] == array1
def test_multi_large_groups(self, check_large_groups):
array1 = [0, 1, 0, 3, 1, 0, 1, 2, 1, 0]
input1 = {"obj_attrs": {"groups": array1}}
assert check_large_groups.run(input1)["large_groups"] == [0, 1]
assert input1["obj_attrs"]["groups"] == array1
| 34.040816 | 87 | 0.678657 | 831 | 0.498201 | 0 | 0 | 168 | 0.100719 | 0 | 0 | 782 | 0.468825 |
d06e09e4639214f16deaafbd6112fa849f57cd73 | 2,684 | py | Python | src/seisspark/seisspark_context.py | kdeyev/SeisSpark | 528d22143acb72e78ed310091db07eb5d731ca09 | [
"ECL-2.0",
"Apache-2.0"
]
| 11 | 2017-08-16T02:32:37.000Z | 2020-12-25T07:18:57.000Z | src/seisspark/seisspark_context.py | kdeyev/SeisSpark | 528d22143acb72e78ed310091db07eb5d731ca09 | [
"ECL-2.0",
"Apache-2.0"
]
| 1 | 2018-10-15T14:44:17.000Z | 2018-10-15T14:44:17.000Z | src/seisspark/seisspark_context.py | kdeyev/SeisSpark | 528d22143acb72e78ed310091db07eb5d731ca09 | [
"ECL-2.0",
"Apache-2.0"
]
| 5 | 2018-05-16T02:36:38.000Z | 2020-06-15T07:46:50.000Z | # =============================================================================
# Copyright (c) 2021 SeisSpark (https://github.com/kdeyev/SeisSpark).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import os
from zipfile import ZipFile
import pyspark
from pyspark.sql import SparkSession
def zipdir(path: str, ziph: ZipFile) -> None:
# ziph is zipfile handle
for root, dirs, files in os.walk(path):
for file in files:
ziph.write(os.path.join(root, file), os.path.relpath(os.path.join(root, file), os.path.join(path, "..")))
class SeisSparkContext:
def __init__(self) -> None:
seisspark_home = os.environ["SEISSPARK_HOME"]
seisspark_zip_pile = "seisspark.zip"
if os.path.exists(seisspark_zip_pile):
os.remove(seisspark_zip_pile)
with ZipFile(seisspark_zip_pile, mode="a") as myzipfile:
zipdir(f"{seisspark_home}/src/su_data", myzipfile)
zipdir(f"{seisspark_home}/src/su_rdd", myzipfile)
zipdir(f"{seisspark_home}/src/seisspark", myzipfile)
zipdir(f"{seisspark_home}/src/seisspark_modules", myzipfile)
spark_conf = pyspark.SparkConf()
if "SPARK_MASTER_URL" in os.environ:
spark_conf.setMaster(os.environ["SPARK_MASTER_URL"])
# spark_conf.setAll([
# ('spark.master', ),
# ('spark.app.name', 'myApp'),
# ('spark.submit.deployMode', 'client'),
# ('spark.ui.showConsoleProgress', 'true'),
# ('spark.eventLog.enabled', 'false'),
# ('spark.logConf', 'false'),
# ('spark.driver.bindAddress', 'vps00'),
# ('spark.driver.host', 'vps00'),
# ])
spark_sess = SparkSession.builder.config(conf=spark_conf).getOrCreate()
spark_ctxt = spark_sess.sparkContext
spark_sess.read
spark_sess.readStream
spark_ctxt.setLogLevel("WARN")
spark_ctxt.addPyFile(seisspark_zip_pile)
self._spark_ctxt = spark_ctxt
@property
def context(self) -> pyspark.SparkContext:
return self._spark_ctxt
| 38.342857 | 117 | 0.616617 | 1,547 | 0.576379 | 0 | 0 | 88 | 0.032787 | 0 | 0 | 1,332 | 0.496274 |
d06f1cb2d99e6c91380d0f70f6e5f7c771735207 | 1,116 | py | Python | tests/parsers/notifications/test_Notification.py | Tberdy/python-amazon-mws-tools | 2925118ce113851a2d8db98ad7f99163154f4151 | [
"Unlicense"
]
| 9 | 2017-03-28T12:58:36.000Z | 2020-03-02T14:42:32.000Z | tests/parsers/notifications/test_Notification.py | Tberdy/python-amazon-mws-tools | 2925118ce113851a2d8db98ad7f99163154f4151 | [
"Unlicense"
]
| 5 | 2017-01-05T19:36:18.000Z | 2021-12-13T19:43:42.000Z | tests/parsers/notifications/test_Notification.py | Tberdy/python-amazon-mws-tools | 2925118ce113851a2d8db98ad7f99163154f4151 | [
"Unlicense"
]
| 5 | 2017-02-15T17:29:02.000Z | 2019-03-06T07:30:55.000Z | from unittest import TestCase
from unittest import TestSuite
from unittest import main
from unittest import makeSuite
from mwstools.parsers.notifications import Notification
class Dummy(object):
"""
Only used for test_notification_payload since there is not actually a payload to test.
"""
def __init__(self, *args, **kwargs):
pass
class TestNotification(TestCase):
body = """
<Notification>
<NotificationMetaData>
<Empty />
</NotificationMetaData>
<NotificationPayload>
<Emtpy />
</NotificationPayload>
</Notification>
"""
def setUp(self):
self.parser = Notification.load(self.body)
def test_notification_metadata(self):
self.assertIsNotNone(self.parser.notification_metadata)
def test_notification_payload(self):
self.assertIsNotNone(self.parser.notification_payload(Dummy))
__all__ = [
TestNotification
]
def suite():
s = TestSuite()
for a in __all__:
s.addTest(makeSuite(a))
return s
if __name__ == '__main__':
main(defaultTest='suite')
| 20.666667 | 90 | 0.669355 | 738 | 0.66129 | 0 | 0 | 0 | 0 | 0 | 0 | 337 | 0.301971 |
d06f2e4133f899f7d55993a62f6fac399373c048 | 1,025 | py | Python | sec_certs/config/configuration.py | J08nY/sec-certs | d25a4a7c830c587a45eb8e37d99f8794dec1a5eb | [
"MIT"
]
| 2 | 2021-03-24T11:56:15.000Z | 2021-04-12T12:22:16.000Z | sec_certs/config/configuration.py | J08nY/sec-certs | d25a4a7c830c587a45eb8e37d99f8794dec1a5eb | [
"MIT"
]
| 73 | 2021-04-12T14:04:04.000Z | 2022-03-31T15:40:26.000Z | sec_certs/config/configuration.py | J08nY/sec-certs | d25a4a7c830c587a45eb8e37d99f8794dec1a5eb | [
"MIT"
]
| 3 | 2021-03-26T16:15:49.000Z | 2021-05-10T07:26:23.000Z | import json
from pathlib import Path
from typing import Union
import jsonschema
import yaml
class Configuration(object):
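    """Loads YAML settings, validates them against settings-schema.json and exposes
    each top-level key as an attribute, unwrapping {'value': ...} entries on access."""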
def load(self, filepath: Union[str, Path]):
with Path(filepath).open("r") as file:
state = yaml.load(file, Loader=yaml.FullLoader)
script_dir = Path(__file__).parent
with (Path(script_dir) / "settings-schema.json").open("r") as file:
schema = json.loads(file.read())
try:
jsonschema.validate(state, schema)
except jsonschema.exceptions.ValidationError as e:
print(f"{e}\n\nIn file {filepath}")
for k, v in state.items():
setattr(self, k, v)
def __getattribute__(self, key):
res = object.__getattribute__(self, key)
if isinstance(res, dict) and "value" in res:
return res["value"]
return object.__getattribute__(self, key)
DEFAULT_CONFIG_PATH = Path(__file__).parent / "settings.yaml"
config = Configuration()
config.load(DEFAULT_CONFIG_PATH)
| 27.702703 | 75 | 0.643902 | 807 | 0.787317 | 0 | 0 | 0 | 0 | 0 | 0 | 85 | 0.082927 |
d0703322f54aad95ad1c141cfb0733e4dbc48655 | 25 | py | Python | src/masonite/events/__init__.py | cercos/masonite | f7f220efa7fae833683e9f07ce13c3795a87d3b8 | [
"MIT"
]
| 1,816 | 2018-02-14T01:59:51.000Z | 2022-03-31T17:09:20.000Z | src/masonite/events/__init__.py | cercos/masonite | f7f220efa7fae833683e9f07ce13c3795a87d3b8 | [
"MIT"
]
| 340 | 2018-02-11T00:27:26.000Z | 2022-03-21T12:00:24.000Z | src/masonite/events/__init__.py | cercos/masonite | f7f220efa7fae833683e9f07ce13c3795a87d3b8 | [
"MIT"
]
| 144 | 2018-03-18T00:08:16.000Z | 2022-02-26T01:51:58.000Z | from .Event import Event
| 12.5 | 24 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d0707615a365376fb262ae4ab58d6c156cbaf97a | 4,415 | py | Python | parlai/scripts/split_phrases.py | shigailowa/ParlAI | 5bb359cdacb8f2b92ba482273cdff20f0d147a72 | [
"MIT"
]
| null | null | null | parlai/scripts/split_phrases.py | shigailowa/ParlAI | 5bb359cdacb8f2b92ba482273cdff20f0d147a72 | [
"MIT"
]
| null | null | null | parlai/scripts/split_phrases.py | shigailowa/ParlAI | 5bb359cdacb8f2b92ba482273cdff20f0d147a72 | [
"MIT"
]
| null | null | null | import nltk
from nltk.chunk.regexp import ChunkString, ChunkRule, ChinkRule
from nltk.tree import Tree
from nltk.chunk import RegexpParser
from nltk.corpus import conll2000
from nltk.tag import NgramTagger
#class for Unigram Chunking
class UnigramChunker(nltk.ChunkParserI):
def __init__(self, train_sents):
train_data = [[(t,c) for w,t,c in nltk.chunk.tree2conlltags(sent)]
for sent in train_sents]
self.tagger = nltk.UnigramTagger(train_data)
def parse(self, sentence):
pos_tags = [pos for (word,pos) in sentence]
tagged_pos_tags = self.tagger.tag(pos_tags)
chunktags = [chunktag for (pos, chunktag) in tagged_pos_tags]
conlltags = [(word, pos, chunktag) for ((word,pos),chunktag)
in zip(sentence, chunktags)]
return nltk.chunk.conlltags2tree(conlltags)
#class for Bigram Chunking
class BigramChunker(nltk.ChunkParserI):
def __init__(self, train_sents):
train_data = [[(t,c) for w,t,c in nltk.chunk.tree2conlltags(sent)]
for sent in train_sents]
self.tagger = nltk.BigramTagger(train_data)
def parse(self, sentence):
pos_tags = [pos for (word,pos) in sentence]
tagged_pos_tags = self.tagger.tag(pos_tags)
chunktags = [chunktag for (pos, chunktag) in tagged_pos_tags]
conlltags = [(word, pos, chunktag) for ((word,pos),chunktag)
in zip(sentence, chunktags)]
return nltk.chunk.conlltags2tree(conlltags)
#class for Ngram Chunking
class NgramChunker(nltk.ChunkParserI):
def __init__(self, n, train_sents):
train_data = [[(t,c) for w,t,c in nltk.chunk.tree2conlltags(sent)]
for sent in train_sents]
self.tagger = nltk.NgramTagger(n, train_data)
def parse(self, sentence):
pos_tags = [pos for (word,pos) in sentence]
tagged_pos_tags = self.tagger.tag(pos_tags)
chunktags = [chunktag for (pos, chunktag) in tagged_pos_tags]
conlltags = [(word, pos, chunktag) for ((word,pos),chunktag)
in zip(sentence, chunktags)]
return nltk.chunk.conlltags2tree(conlltags)
#Rule-based chunking
def regexp_chunk():
#define rules here
grammar = r"""NP: {<DT|PDT|CD|PRP\$>?<JJ>*<N.*>+}
VP: {<V.*>+<TO>?<V.*>*}
PP: {<IN>+}
"""
cp = nltk.RegexpParser(grammar)
return(cp)
#train Unigram chunker on conll2000 dataset
def unigram_chunk():
train_sents = conll2000.chunked_sents('train.txt')
unigram_chunker = UnigramChunker(train_sents)
return(unigram_chunker)
#train Bigram chunker on conll2000 dataset
def bigram_chunk():
train_sents = conll2000.chunked_sents('train.txt')
bigram_chunker = BigramChunker(train_sents)
return(bigram_chunker)
#train Ngram chunker on conll2000 dataset
def ngram_chunk(n):
train_sents = conll2000.chunked_sents('train.txt')
ngram_chunker = NgramChunker(n, train_sents)
return(ngram_chunker)
#Call best performing chunker
def split_phrases(tagged_phrase):
bigram_chunker = bigram_chunk()
chunks = bigram_chunker.parse(tagged_phrase)
return(chunks)
"""
text = nltk.word_tokenize('My yellow dog loves eating breakfast and I like to watch netflix')
tags = nltk.pos_tag(text)
print(unigram_chunker.parse(tags))
"""
if __name__ == '__main__':
regexp_chunker = regexp_chunk()
unigram_chunker = ngram_chunk(1)
bigram_chunker = ngram_chunk(2)
trigram_chunker = ngram_chunk(3)
fourgram_chunker = ngram_chunk(4)
fivegram_chunker = ngram_chunk(5)
"""
phrase = "My yellow dog has been asking to eat the whole day because of hunger"
text = nltk.word_tokenize(phrase)
tags = nltk.pos_tag(text)
print(regexp_chunker.parse(tags))
print(unigram_chunker.parse(tags))
print(bigram_chunker.parse(tags))
"""
test_sents = conll2000.chunked_sents('test.txt')
print(regexp_chunker.evaluate(test_sents))
print(unigram_chunker.evaluate(test_sents))
print(bigram_chunker.evaluate(test_sents))
print(trigram_chunker.evaluate(test_sents))
print(fourgram_chunker.evaluate(test_sents))
print(fivegram_chunker.evaluate(test_sents))
"""
phrase = "play football and watch netflix"
text = nltk.word_tokenize(phrase)
tags = nltk.pos_tag(text)
chunks = split_phrases(tags)
print(chunks)
"""
"""
for chunk in chunks:
if type(chunk) is nltk.Tree:
for word,tag in chunk:
print(word)
else:
print(chunk[0])
""" | 29.433333 | 94 | 0.69966 | 1,890 | 0.428086 | 0 | 0 | 0 | 0 | 0 | 0 | 1,139 | 0.257984 |
d071b10778e993030b61af048f8e39a048b36969 | 3,014 | py | Python | tests/test_class/import_export_test.py | ThomasLandstra/pysave | acc3da93858f541ea4f3ffc6062d9689d28f5ee5 | [
"MIT"
]
| 1 | 2022-03-14T04:32:42.000Z | 2022-03-14T04:32:42.000Z | tests/test_class/import_export_test.py | ThomasLandstra/pysave | acc3da93858f541ea4f3ffc6062d9689d28f5ee5 | [
"MIT"
]
| 6 | 2021-10-11T07:20:40.000Z | 2022-03-17T01:31:33.000Z | tests/test_class/import_export_test.py | ThomasLandstra/pysave | acc3da93858f541ea4f3ffc6062d9689d28f5ee5 | [
"MIT"
]
| null | null | null | # Imports
from os.path import join, isfile
from os import remove, rmdir
from pysav import Save, does_save_exist, does_app_dir_exist, generate_environment_path
from utils.random_data import generate_dict
# Test
def test_answer():
"""Does Save work as expected"""
# Test Importing
# Create file to be imported
save = Save("python_test", "test_file")
save.data = generate_dict()
save.save_data()
# Create save to import into
save2 = Save("python_test", "test_file_two")
# Test merge mode 0 (Replace all data)
save2.import_data(save._save_file_path)
save2.save_data()
try:
assert save2.data == save.data
# Ensure that save is deleted in event of error
except AssertionError as excpt:
save.annihilate() # Delete file
save2.annihilate(True) # Delete file and folder
raise AssertionError from excpt
save.annihilate()
save2.annihilate(True)
# Test Exporting
save = Save("python_test", "test_file")
save.data = generate_dict()
save.export_data(generate_environment_path("DESKTOP"), True)
try:
assert does_save_exist("python_test", "test_file", environment="DESKTOP")
assert does_app_dir_exist("python_test", environment="DESKTOP")
remove(
join(
generate_environment_path("DESKTOP"),
save._app_name,
save._save_name + "." + save._extension,
)
)
rmdir(join(generate_environment_path("DESKTOP"), save._app_name))
assert not does_save_exist("python_test", "test_file", environment="DESKTOP")
assert not does_app_dir_exist("python_test", environment="DESKTOP")
save.export_data(generate_environment_path("DESKTOP"), False)
assert isfile(join(generate_environment_path("DESKTOP"), "test_file.json"))
assert not does_app_dir_exist("python_test", environment="DESKTOP")
remove(
join(
generate_environment_path("DESKTOP"),
save._save_name + "." + save._extension,
)
)
# Ensure that save is deleted in event of error
except AssertionError as excpt:
# Remove export if any
try:
remove(
join(
generate_environment_path("DESKTOP"),
save._app_name,
save._save_name + "." + save._extension,
)
)
except: pass
try:
rmdir(join(generate_environment_path("DESKTOP"), save._app_name))
except: pass
try:
remove(
join(
generate_environment_path("DESKTOP"),
save._save_name + "." + save._extension,
)
)
except: pass
# Delete file and folder
save.annihilate(True)
raise AssertionError from excpt
save.annihilate(True) # Delete file
| 29.54902 | 86 | 0.595886 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 680 | 0.225614 |
d0724f179ed3e0352058e826e5ed580723fd7c4d | 1,250 | py | Python | insights/parsers/tests/test_ansible_tower_settings.py | maxamillion/insights-core | aa11e07e68077df97b6c85219911f8382be6e2fa | [
"Apache-2.0"
]
| null | null | null | insights/parsers/tests/test_ansible_tower_settings.py | maxamillion/insights-core | aa11e07e68077df97b6c85219911f8382be6e2fa | [
"Apache-2.0"
]
| null | null | null | insights/parsers/tests/test_ansible_tower_settings.py | maxamillion/insights-core | aa11e07e68077df97b6c85219911f8382be6e2fa | [
"Apache-2.0"
]
| null | null | null | import doctest
import pytest
from insights.parsers import ansible_tower_settings, SkipException
from insights.tests import context_wrap
ANSIBLE_TOWER_CONFIG_CUSTOM = '''
AWX_CLEANUP_PATHS = False
LOGGING['handlers']['tower_warnings']['level'] = 'DEBUG'
'''.strip()
ANSIBLE_TOWER_CONFIG_CUSTOM_INVALID1 = '''
'''.strip()
ANSIBLE_TOWER_CONFIG_CUSTOM_INVALID2 = '''
AWX_CLEANUP_PATHS
'''.strip()
def test_ansible_tower_settings():
conf = ansible_tower_settings.AnsibleTowerSettings(context_wrap(ANSIBLE_TOWER_CONFIG_CUSTOM))
assert conf['AWX_CLEANUP_PATHS'] == 'False'
with pytest.raises(SkipException) as exc:
ansible_tower_settings.AnsibleTowerSettings(context_wrap(ANSIBLE_TOWER_CONFIG_CUSTOM_INVALID1))
assert 'No Valid Configuration' in str(exc)
with pytest.raises(SkipException) as exc:
ansible_tower_settings.AnsibleTowerSettings(context_wrap(ANSIBLE_TOWER_CONFIG_CUSTOM_INVALID2))
assert 'No Valid Configuration' in str(exc)
def test_ansible_tower_settings_documentation():
failed_count, tests = doctest.testmod(
ansible_tower_settings,
globs={'conf': ansible_tower_settings.AnsibleTowerSettings(context_wrap(ANSIBLE_TOWER_CONFIG_CUSTOM))}
)
assert failed_count == 0
| 32.051282 | 110 | 0.7848 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 202 | 0.1616 |
d073713261d4accea1d939cebd542842ecae613a | 1,320 | py | Python | app/utils/zones.py | Xerrors/Meco-Server | f2111bab7691c0b567d5c3b3f38b83fee152a689 | [
"MIT"
]
| 1 | 2021-07-28T11:24:02.000Z | 2021-07-28T11:24:02.000Z | app/utils/zones.py | Xerrors/Meco-Server | f2111bab7691c0b567d5c3b3f38b83fee152a689 | [
"MIT"
]
| null | null | null | app/utils/zones.py | Xerrors/Meco-Server | f2111bab7691c0b567d5c3b3f38b83fee152a689 | [
"MIT"
]
| null | null | null | import os
import json
from app.config import DATA_PATH
"""
_id: ID
date: 日期 eg "2020-02-06T15:24:59.942Z"
msg: 消息内容 eg "这是内容"
status: 状态 eg "😫" (a emoji)
"""
def get_zones():
with open(os.path.join(DATA_PATH, 'zone.json'), 'r') as f:
data = json.load(f)
return data['data']
def save_zone(data):
with open(os.path.join(DATA_PATH, 'zone.json'), 'w') as f:
json.dump({'data': data}, f)
def add_zone(msg:dict):
data = get_zones()
# 判断是否存在数据
if len(data) == 0:
_id = 0
else:
_id = data[-1]['id'] + 1
msg['id'] = _id
data.append(msg)
save_zone(data)
return data
def delete_zone(msg_id):
data = get_zones()
for i in range(len(data)):
if int(data[i]['id']) == int(msg_id):
del data[i]
break
save_zone(data)
return data
def update_zone(msg:dict):
data = get_zones()
for i in range(len(data)):
if int(data[i]['id']) == int(msg['id']):
data[i] = msg
break
save_zone(data)
return data
if __name__ == '__main__':
DATA_PATH = '../../data'
with open('../../../../Node/data/zoneMsg.json', 'r') as f:
data = json.load(f)
data = data['data']
for i in data[::-1]:
add_zone(i)
| 18.082192 | 62 | 0.524242 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 301 | 0.220513 |
d078c0acdf412550824a96d5fadcbd21aeb88416 | 2,534 | py | Python | fungal_automata/utils.py | ranyishere/fungal_automata_comap2021 | 1ef4f00a3e6f17413a60f6882dbee6f156aadfa0 | [
"MIT"
]
| null | null | null | fungal_automata/utils.py | ranyishere/fungal_automata_comap2021 | 1ef4f00a3e6f17413a60f6882dbee6f156aadfa0 | [
"MIT"
]
| null | null | null | fungal_automata/utils.py | ranyishere/fungal_automata_comap2021 | 1ef4f00a3e6f17413a60f6882dbee6f156aadfa0 | [
"MIT"
]
| null | null | null | import random
import pprint
import matplotlib.pyplot as plt
import numpy as np
from cells import *
pp = pprint.PrettyPrinter(indent=2)
random.seed(5)
def get_image_from_state(cells, time, debug=False):
"""
Generates an image from the cell states
"""
# print("time: ", time)
img = []
for rix, row in enumerate(cells):
img_row = []
for cix, col in enumerate(row):
img_row.append(col.color)
img.append(img_row)
if debug == True:
plt.imshow(np.array(img), origin='lower')
plt.show()
return img
def get_heatmap_of_temp(cells, optimal=31.5, debug=False):
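    """
    Generates a temperature heatmap from the cell states
    """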
img = []
optimal_pts = []
for rix, row in enumerate(cells):
img_row = []
for cix, col in enumerate(row):
# if rix == 2 and cix == 4:
# print("rix: {0} cix: {1}".format(rix, cix))
# print(col.color)
# if col.temperature <= optimal+0.1 and col.temperature >= optimal-0.1:
# print("col.temperature: ", col.temperature)
# optimal_pts.append([rix,cix])
img_row.append(col.temperature)
# img_row.append(col.color[3])
img.append(img_row)
# print("img: ", img)
if debug == True:
for opt in optimal_pts:
plt.plot(opt, marker='x')
heatmap = plt.imshow(np.array(img), origin='lower', cmap='hot')
plt.colorbar(heatmap)
plt.show()
print("showing")
return img
def get_heatmap_of_food(cells, debug=False):
img = []
for rix, row in enumerate(cells):
img_row = []
for cix, col in enumerate(row):
# if rix == 2 and cix == 4:
# print("rix: {0} cix: {1}".format(rix, cix))
# print(col.color)
img_row.append(col.color[3])
# img_row.append(col.color[3])
img.append(img_row)
if debug == True:
plt.imshow(np.array(img), origin='lower')
plt.show()
return img
def get_moistmap(cells, debug=False):
img = []
for rix, row in enumerate(cells):
img_row = []
for cix, col in enumerate(row):
# if rix == 2 and cix == 4:
# print("rix: {0} cix: {1}".format(rix, cix))
# print(col.color)
img_row.append(col.moisture)
# img_row.append(col.color[3])
img.append(img_row)
if debug == True:
plt.imshow(np.array(img), origin='lower', cmap='Blues')
plt.show()
return img
| 23.247706 | 83 | 0.54341 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 658 | 0.259669 |
d07c7ec019295c93900e320c5fcec0bc4db8705b | 415 | py | Python | src/server/event_test.py | cnlohr/bridgesim | ff33b63db813eedfc8155c9fecda4c8f1c06ab60 | [
"MIT"
]
| 4 | 2015-05-03T07:37:34.000Z | 2018-05-09T22:27:33.000Z | src/server/event_test.py | cnlohr/bridgesim | ff33b63db813eedfc8155c9fecda4c8f1c06ab60 | [
"MIT"
]
| 1 | 2016-08-07T16:56:38.000Z | 2016-08-07T16:56:38.000Z | src/server/event_test.py | cnlohr/bridgesim | ff33b63db813eedfc8155c9fecda4c8f1c06ab60 | [
"MIT"
]
| null | null | null | #! /usr/bin/python3
import time
from events import *
def test1(foo, *args):
print("foo: %s otherargs: %s time: %06.3f" % (foo, args, time.time() % 100))
q = QueueExecutor()
q.addEvent(test1, time.time() + 3, 1, 5, "foo", "bar", "baz")
q.addEvent(test1, time.time() + .5, .3, 20, "foo2", "bar")
print("Main thread asleep at %s" % (time.time(),))
time.sleep(6)
print("Main thread awake, terminating...")
q.stop() | 27.666667 | 78 | 0.621687 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 142 | 0.342169 |
d07d20e45fea750c32612fcddef24ffc98a05b67 | 1,845 | py | Python | gd/iter_utils.py | nekitdev/gd.py | b9d5e29c09f953f54b9b648fb677e987d9a8e103 | [
"MIT"
]
| 58 | 2020-09-30T16:51:22.000Z | 2022-02-13T17:27:48.000Z | gd/iter_utils.py | NeKitDS/gd.py | b9d5e29c09f953f54b9b648fb677e987d9a8e103 | [
"MIT"
]
| 30 | 2019-07-29T12:03:41.000Z | 2020-09-15T17:01:37.000Z | gd/iter_utils.py | NeKitDS/gd.py | b9d5e29c09f953f54b9b648fb677e987d9a8e103 | [
"MIT"
]
| 20 | 2019-12-06T03:16:57.000Z | 2020-09-16T17:45:27.000Z | from typing import Any, Callable, Dict, Iterable, Mapping, Tuple, TypeVar, Union, cast, overload
__all__ = ("extract_iterable_from_tuple", "is_iterable", "item_to_tuple", "mapping_merge")
KT = TypeVar("KT")
VT = TypeVar("VT")
T = TypeVar("T")
def mapping_merge(*mappings: Mapping[KT, VT], **arguments: VT) -> Dict[KT, VT]:
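    """Merge any number of mappings (later ones win), then apply keyword overrides, into a new dict."""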
final: Dict[KT, VT] = {}
for mapping in mappings:
final.update(mapping)
final.update(arguments) # type: ignore
return final
def is_iterable(maybe_iterable: Union[Iterable[T], T], use_iter: bool = True) -> bool:
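    """Check whether the argument is iterable, either by calling iter() on it or via an isinstance check."""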
if use_iter:
try:
iter(maybe_iterable) # type: ignore
return True
except TypeError: # "T" object is not iterable
return False
return isinstance(maybe_iterable, Iterable)
@overload # noqa
def item_to_tuple(item: Iterable[T]) -> Tuple[T, ...]: # noqa
...
@overload # noqa
def item_to_tuple(item: T) -> Tuple[T, ...]: # noqa
...
def item_to_tuple(item: Union[T, Iterable[T]]) -> Tuple[T, ...]: # noqa
if is_iterable(item):
return tuple(cast(Iterable[T], item))
return (cast(T, item),)
@overload # noqa
def extract_iterable_from_tuple( # noqa
tuple_to_extract: Tuple[Iterable[T]], check: Callable[[Any], bool]
) -> Iterable[T]:
...
@overload # noqa
def extract_iterable_from_tuple( # noqa
tuple_to_extract: Tuple[T, ...], check: Callable[[Any], bool]
) -> Iterable[T]:
...
def extract_iterable_from_tuple( # noqa
tuple_to_extract: Union[Tuple[Iterable[T]], Tuple[T, ...]],
check: Callable[[Any], bool] = is_iterable,
) -> Iterable[T]:
if len(tuple_to_extract) == 1:
maybe_return = tuple_to_extract[0]
if check(maybe_return):
return cast(Iterable[T], maybe_return)
return cast(Iterable[T], tuple_to_extract)
| 24.276316 | 96 | 0.635772 | 0 | 0 | 0 | 0 | 471 | 0.255285 | 0 | 0 | 199 | 0.107859 |
d07d7eac9f05f51f4abf2075d7c3883791a41eb9 | 937 | py | Python | spacetime/distort_ss.py | uhrwecker/GRDonuts | 3087aeb5c169251bdb711b425dcc3040ff962da7 | [
"MIT"
]
| null | null | null | spacetime/distort_ss.py | uhrwecker/GRDonuts | 3087aeb5c169251bdb711b425dcc3040ff962da7 | [
"MIT"
]
| 25 | 2020-03-26T11:16:58.000Z | 2020-09-10T18:31:52.000Z | spacetime/distort_ss.py | uhrwecker/GRDonuts | 3087aeb5c169251bdb711b425dcc3040ff962da7 | [
"MIT"
]
| null | null | null | import numpy as np
from spacetime.potential import Potential
class DistortedSchwarzschild(Potential):
def __init__(self, theta=np.pi/2, l=3.8, o=1, r_range=(2, 20),
num=10000, cont_without_eq=False, verbose=True):
super().__init__(r_range=r_range, num=num,
cont_without_eq=cont_without_eq, verbose=verbose)
self.theta = theta
self.l = l
self.o = o
def compute_w(self):
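        """Return the effective potential W over the radial grid for the configured theta, l and o."""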
exponent = 0.5 * self.o * ( 3*self.r**2*np.cos(self.theta)**2 - \
6*self.r*np.cos(self.theta)**2 + \
2*np.cos(self.theta) - self.r**2 -2*self.r)
oben = (self.r**2 - self.r*2) * np.sin(self.theta)**2
unten = np.exp(-2*exponent) * self.r**2 * np.sin(self.theta)**2 - \
self.l**2 * np.exp(2*exponent) * (1 - 2/self.r)
w = 0.5 * np.log(oben/unten)
return w
| 37.48 | 79 | 0.526147 | 874 | 0.932764 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d07df814cb6748757669097a641e731236e968f6 | 7,672 | py | Python | scripts/deploy.py | tallywiesenberg/algorand-tellorflex | 5fa673a637338e4dfa15b0cf4937a757890a5e14 | [
"MIT"
]
| null | null | null | scripts/deploy.py | tallywiesenberg/algorand-tellorflex | 5fa673a637338e4dfa15b0cf4937a757890a5e14 | [
"MIT"
]
| null | null | null | scripts/deploy.py | tallywiesenberg/algorand-tellorflex | 5fa673a637338e4dfa15b0cf4937a757890a5e14 | [
"MIT"
]
| null | null | null | from typing import Tuple, List
from algosdk.v2client.algod import AlgodClient
from algosdk.future import transaction
from algosdk.logic import get_application_address
from algosdk import account, encoding
from pyteal import compileTeal, Mode, Keccak256
from tellorflex.methods import report
from utils.account import Account
from tellorflex.contracts import approval_program, clear_state_program
from utils.helpers import add_standalone_account, fund_account
from utils.util import (
waitForTransaction,
fullyCompileContract,
getAppGlobalState,
)
APPROVAL_PROGRAM = b""
CLEAR_STATE_PROGRAM = b""
class Scripts:
def __init__(self, client, tipper, reporter, governance_address) -> None:
self.client = client
self.tipper = tipper
self.reporter = reporter
self.governance_address = governance_address.getAddress()
def get_contracts(self, client: AlgodClient) -> Tuple[bytes, bytes]:
"""Get the compiled TEAL contracts for the tellor contract.
Args:
client: An algod client that has the ability to compile TEAL programs.
Returns:
A tuple of 2 byte strings. The first is the approval program, and the
second is the clear state program.
"""
global APPROVAL_PROGRAM
global CLEAR_STATE_PROGRAM
if len(APPROVAL_PROGRAM) == 0:
APPROVAL_PROGRAM = fullyCompileContract(client, approval_program())
CLEAR_STATE_PROGRAM = fullyCompileContract(client, clear_state_program())
return APPROVAL_PROGRAM, CLEAR_STATE_PROGRAM
def deploy_tellor_flex(
self,
query_id: str,
query_data: str
) -> int:
"""Create a new tellor reporting contract.
Args:
client: An algod client.
sender: The account that will request data through the contract
governance_address: the account that can vote to dispute reports
query_id: the ID of the data requested to be put on chain
query_data: the in-depth specifications of the data requested
Returns:
            The ID of the newly created Tellor Flex app.
"""
approval, clear = self.get_contracts(self.client)
globalSchema = transaction.StateSchema(num_uints=7, num_byte_slices=5)
localSchema = transaction.StateSchema(num_uints=0, num_byte_slices=0)
app_args = [
encoding.decode_address(self.governance_address),
query_id.encode("utf-8"),
query_data.encode("utf-8"),
]
txn = transaction.ApplicationCreateTxn(
sender=self.tipper.getAddress(),
on_complete=transaction.OnComplete.NoOpOC,
approval_program=approval,
clear_program=clear,
global_schema=globalSchema,
local_schema=localSchema,
app_args=app_args,
sp=self.client.suggested_params(),
)
signedTxn = txn.sign(self.tipper.getPrivateKey())
self.client.send_transaction(signedTxn)
response = waitForTransaction(self.client, signedTxn.get_txid())
assert response.applicationIndex is not None and response.applicationIndex > 0
self.app_id = response.applicationIndex
self.app_address = get_application_address(self.app_id)
def stake(self) -> None:
"""Place a bid on an active auction.
Args:
client: An Algod client.
appID: The app ID of the auction.
reporter: The account staking to report.
"""
appAddr = get_application_address(self.app_id)
# appGlobalState = getAppGlobalState(client, appID)
# if any(appGlobalState[b"bid_account"]):
# # if "bid_account" is not the zero address
# prevBidLeader = encoding.encode_address(appGlobalState[b"bid_account"])
# else:
# prevBidLeader = None
stake_amount = 180*1000000 #200 dollars of ALGO
suggestedParams = self.client.suggested_params()
payTxn = transaction.PaymentTxn(
sender=self.reporter.getAddress(),
receiver=self.app_address,
amt=stake_amount,
sp=suggestedParams,
)
optInTx = transaction.ApplicationOptInTxn(
sender=self.reporter.getAddress(),
index=self.app_id,
sp=suggestedParams,
)
transaction.assign_group_id([payTxn, optInTx])
signedPayTxn = payTxn.sign(self.reporter.getPrivateKey())
signedAppCallTxn = optInTx.sign(self.reporter.getPrivateKey())
self.client.send_transactions([signedPayTxn, signedAppCallTxn])
waitForTransaction(self.client, optInTx.get_txid())
def closeAuction(self, client: AlgodClient, appID: int, closer: Account):
"""Close an auction.
This action can only happen before an auction has begun, in which case it is
cancelled, or after an auction has ended.
If called after the auction has ended and the auction was successful, the
NFT is transferred to the winning bidder and the auction proceeds are
transferred to the seller. If the auction was not successful, the NFT and
all funds are transferred to the seller.
Args:
client: An Algod client.
appID: The app ID of the auction.
closer: The account initiating the close transaction. This must be
either the seller or auction creator if you wish to close the
auction before it starts. Otherwise, this can be any account.
"""
appGlobalState = getAppGlobalState(client, appID)
nftID = appGlobalState[b"nft_id"]
accounts: List[str] = [encoding.encode_address(appGlobalState[b"seller"])]
if any(appGlobalState[b"bid_account"]):
# if "bid_account" is not the zero address
accounts.append(encoding.encode_address(appGlobalState[b"bid_account"]))
deleteTxn = transaction.ApplicationDeleteTxn(
sender=closer.getAddress(),
index=appID,
accounts=accounts,
foreign_assets=[nftID],
sp=client.suggested_params(),
)
signedDeleteTxn = deleteTxn.sign(closer.getPrivateKey())
client.send_transaction(signedDeleteTxn)
waitForTransaction(client, signedDeleteTxn.get_txid())
if __name__ == "__main__":
def setup():
algo_address = "http://localhost:4001"
algo_token = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
client = AlgodClient(algod_address=algo_address, algod_token=algo_token)
gov_address = Account.FromMnemonic("figure adapt crumble always cart twist scatter timber smooth artist gaze raise genre say scissors arena hidden poem mimic worry race burst yard about key")
tipper = Account.FromMnemonic("lava side salad unit door frozen clay skate project slogan choose poverty magic arrow pond swing alcohol bachelor witness monkey iron remind team abstract mom")
reporter = Account.FromMnemonic("gaze hockey eight fog scrub bind calm scrub change cannon recipe face shield smart member toward turkey pyramid item quote explain witness music ability weapon")
print("gov", gov_address.getAddress())
print("tipper", tipper.getAddress())
print("reporter", reporter.getAddress())
s = Scripts(client=client, tipper=tipper, reporter=reporter, governance_address=gov_address)
return s
s = setup()
app_id = s.deploy_tellor_flex(
query_id="hi",
query_data="hi",
)
s.stake() | 37.607843 | 202 | 0.669317 | 5,808 | 0.757039 | 0 | 0 | 0 | 0 | 0 | 0 | 2,758 | 0.359489 |
d07f63644facb997bb2d4b1393d95a337bd66e35 | 11,118 | py | Python | parse-vocab-list.py | kltm/kanji-textbook-table | e97630b47c4b00b734f0f29794331e7a0fd857d5 | [
"BSD-3-Clause"
]
| null | null | null | parse-vocab-list.py | kltm/kanji-textbook-table | e97630b47c4b00b734f0f29794331e7a0fd857d5 | [
"BSD-3-Clause"
]
| null | null | null | parse-vocab-list.py | kltm/kanji-textbook-table | e97630b47c4b00b734f0f29794331e7a0fd857d5 | [
"BSD-3-Clause"
]
| null | null | null | ####
#### Convert a TSV into a fully parsed JSON list blob that could be
#### used by a mustache (or other logicless) template.
####
#### Example usage to analyze the usual suspects:
#### python3 parse.py --help
####
#### Get report of current problems:
#### python3 parse-vocab-list.py --tsv ~/Downloads/UCSC中上級教科書_漢字・単語リスト\ -\ 単語リス ト\(4\).tsv --output /tmp/parsed-vocab-list.json
####
#### As part of a pipeline for vocab list:
#### python3 parse-vocab-list.py --tsv ~/Downloads/UCSC中上級教科書_漢字・単語リスト\ -\ 単語リス ト\(13\).tsv --output /tmp/parsed-vocab-list.json && python3 chapter-bin.py -v --input /tmp/parsed-vocab-list.json --output /tmp/chapters.json && python3 apply-to-chapters.py --input /tmp/chapters.json --template ./word-html-frame.template.html --output /tmp/chapter
####
#### As part of a pipeline for glossary:
#### python3 parse-vocab-list.py --tsv ~/Downloads/UCSC中上級教科書_漢字・単語リスト\ -\ 単語リス ト\(13\).tsv --output /tmp/parsed-vocab-list.json && python3 jalphabetical-bin.py --pattern vocab-list --input /tmp/parsed-vocab-list.json --output /tmp/jalphed-vocab-list.json && python3 apply-globally.py --input /tmp/jalphed-vocab-list.json --template ./manual-glossary.template.html --output /tmp/glossary.html
####
import sys
import argparse
import logging
import csv
import pystache
import json
import os
## Logger basic setup.
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger('parse')
LOGGER.setLevel(logging.WARNING)
def die_screaming(string):
""" Die and take our toys home. """
LOGGER.error(string)
sys.exit(1)
def main():
## Deal with incoming.
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-v', '--verbose', action='store_true',
help='More verbose output')
parser.add_argument('-t', '--tsv',
help='The TSV data file to read in')
parser.add_argument('-o', '--output',
help='The file to output to')
args = parser.parse_args()
## Up the verbosity level if we want.
if args.verbose:
LOGGER.setLevel(logging.INFO)
LOGGER.info('Verbose: on')
## Ensure arguments and read in what is necessary.
if not args.tsv:
die_screaming('need an input tsv argument')
LOGGER.info('Will use "' + args.tsv + '" as data')
if not args.output:
die_screaming('need an output file argument')
LOGGER.info('Will output to: ' + args.output)
## Setup some general metadata checking for the different formats.
required_total_columns = 10
required_columns = ["level", "chapter", "raw-japanese", "reading", "meaning"]
## Bring on all data in one sweep, formatting and adding
## appropriate parts to internal format so that we can simply
## output in any mustache template.
data_list = []
with open(args.tsv, 'r') as tsv_in:
tsv_in = csv.reader(tsv_in, delimiter='\t')
## Process data.
first_line_p = True
i = 0
for line in tsv_in:
i = i + 1
if first_line_p:
first_line_p = False
continue
else:
count = len(line)
if len(set(line)) == 1 and line[0] == "":
LOGGER.info("Skipping completely empty line: " + str(i))
continue
elif not count == required_total_columns:
die_screaming('malformed line: '+ str(i) +' '+ '\t'.join(line))
else:
# LOGGER.info("-------")
# LOGGER.info(type(line[3]))
# LOGGER.info(len(line[3]))
# LOGGER.info(line[3])
## Base parsing everything into a common object.
## Additional metadata that we'll want.
data_object = {}
data_object["row"] = str(i) # inserted
data_object["level"] = str(line[0]) # req
data_object["chapter"] = str(line[1]) # req
data_object["raw-japanese"] = str(line[2]) # req
data_object["raw-ruby"] = line[3] if (type(line[3]) is str and len(line[3]) > 0) else None # opt
data_object["reading"] = str(line[4]) # req
data_object["meaning"] = line[5] # req
data_object["section"] = line[6] if (type(line[6]) is str and len(line[6]) > 0) else None # opt
data_object["extra"] = True if (type(line[7]) is str and line[7] == '*') else None # opt
data_object["grammar-point"] = line[8] if (type(line[8]) is str and len(line[8]) > 0) else None # opt
data_object["notes"] = line[9] if (type(line[9]) is str and len(line[9]) > 0) else None # opt
## Basic error checking.
for required_entry in required_columns:
                        if not isinstance(data_object[required_entry], str) or not len(data_object[required_entry]) > 0:
die_screaming('malformed line with "'+required_entry+'" at '+ str(i) +': '+ '\t'.join(line))
## Make some other mappings for commonly used
## sections names.
section_names_alt = {#None: "",
"読み物 一": "R.1",
"会話 一": "D.1",
"読み物 二": "R.2",
"会話 二": "D.2",
"読み物 三": "R.3",
"会話 三": "D.3",
"読み物 四": "R.4",
"会話 四": "D.4"}
if data_object["section"] in section_names_alt.keys():
data_object["section-alt-en-short"] = section_names_alt[data_object["section"]]
## Transform the comma/pipe-separated data raw "Ruby"
## object into something usable, if extant.
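                    ## For example (hypothetical cell contents): a raw-ruby value of
                    ## "勉強|べんきょう, 時間|じかん" parses into
                    ## [{"kanji": "勉強", "reading": "べんきょう"}, {"kanji": "時間", "reading": "じかん"}].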
# LOGGER.info(data_object["raw-ruby"])
ruby = []
if data_object["raw-ruby"]:
try:
ruby_set_list_raw = data_object["raw-ruby"].split(",")
for ruby_set_raw in ruby_set_list_raw:
ruby_set_pre = ruby_set_raw.strip()
LOGGER.info("ruby_set_pre: " + ruby_set_pre)
ruby_set = ruby_set_pre.split("|")
ruby_kanji = ruby_set[0].strip()
ruby_reading = ruby_set[1].strip()
ruby.append({"kanji": ruby_kanji,
"reading": ruby_reading})
except:
die_screaming('error parsing ruby at '+ str(i) +': '+ '\t'.join(line))
data_object["ruby"] = ruby
## Now that we have the ruby parsed, create a new
## version of the "Japanese" ("raw-japanese")
## column with mustache renderable data hints.
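                    ## For illustration only (hypothetical data): raw-japanese "勉強する"
                    ## with ruby [{"kanji": "勉強", "reading": "べんきょう"}] should yield
                    ## rich-japanese [{"string": "勉強", "reading": "べんきょう", "has-ruby": True},
                    ##                {"string": "する", "has-ruby": False}].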
LOGGER.info('^^^')
j = data_object["raw-japanese"]
remaining_rubys = len(ruby)
ruby_parse_data = []
for r in ruby:
## Case when kanji not found in remaining
## japanese.
LOGGER.info("japanese: " + j)
LOGGER.info("kanji: " + r["kanji"])
LOGGER.info("reading: " + r["reading"])
if j.find(r["kanji"]) == -1:
LOGGER.info('malformed line at '+ str(i) +': '+ '\t'.join(line))
die_screaming('bad japanese/ruby at line '+ str(i))
else:
## Some numbers we'll want on hand.
jl = len(j) # the remaining length of the japanese
rl = len(r["kanji"]) # the length of the ruby
offset = j.find(r["kanji"]) # the offset of the kanji
LOGGER.info(str(jl))
LOGGER.info(str(rl))
LOGGER.info(str(offset))
## Get the pre-ruby string added, if
## extant.
if offset == 0:
pass
else:
pre_string = j[0:(offset)]
LOGGER.info('pre_string: ' + pre_string)
ruby_parse_data.append({"string": pre_string,
"has-ruby": False})
## Add the ruby string section.
ruby_string = j[offset:(offset+rl)]
LOGGER.info('ruby_string: ' + ruby_string)
ruby_parse_data.append({"string": ruby_string,
"reading":r["reading"],
"has-ruby": True})
## If this is the last ruby we're dealing
## with, we're done and add the rest of
## the string. Otherwise, "soft loop" on
## the shorter string and next ruby.
remaining_rubys = remaining_rubys - 1
if remaining_rubys == 0:
## Last one, add any remaining string.
if (offset+rl) < jl:
post_string = j[(offset+rl):jl]
LOGGER.info('post_string: ' + post_string)
ruby_parse_data.append({"string": post_string,
"has-ruby": False})
else:
j = j[(offset+rl):jl]
data_object["rich-japanese"] = ruby_parse_data
## Basic error checking.
for required_entry in required_columns:
                        if not isinstance(data_object[required_entry], str) or not len(data_object[required_entry]) > 0:
die_screaming('malformed line with "'+required_entry+'" at '+ str(i) +': '+ '\t'.join(line))
## Onto the pile.
data_list.append(data_object)
## Dump to given file.
#LOGGER.info(json.dumps(data_list, indent = 4))
with open(args.output, 'w') as output:
output.write(json.dumps(data_list, indent = 4))
## You saw it coming...
if __name__ == '__main__':
main()
| 48.977974 | 394 | 0.477154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,050 | 0.35828 |
d080168f53e03ca48aa7bb5ade9767788f28ed6f | 2,716 | py | Python | expressy/importer.py | timedata-org/expressy | 0aaeffa430c1ccfe649ee2bcdb69e7feb48ada95 | ["MIT"] | 1 | 2017-01-22T18:53:52.000Z | 2017-01-22T18:53:52.000Z | expressy/importer.py | timedata-org/expressy | 0aaeffa430c1ccfe649ee2bcdb69e7feb48ada95 | ["MIT"] | 10 | 2017-01-23T15:41:15.000Z | 2017-07-11T11:55:46.000Z | expressy/importer.py | timedata-org/expressy | 0aaeffa430c1ccfe649ee2bcdb69e7feb48ada95 | ["MIT"] | null | null | null |
import builtins, importlib
class Importer(object):
"""An Importer imports either a namespace or a symbol within a namespace.
It's like a more general version of importlib.import_module which handles
builtins and attributes within a module.
An Importer has a symbol_table that's always used to try to resolve
symbols before anything else. By default, symbol_table is the Python
built-in symbols as found in the module `builtins`:
ArithmeticError, AssertionError, ..., abs, all, ... zip
It also has a module_importer which imports Python modules or raises
an ImportError. By default this is just importlib.import_module.
"""
def __init__(self, symbol_table=vars(builtins),
module_importer=importlib.import_module):
"""Args:
symbol_table: a dictionary which maps symbols to values.
module_importer: a function that imports namespaces by path or
raises an ImportError otherwise.
"""
self.symbol_table = symbol_table
self.module_importer = module_importer
def getter(self, symbol):
"""Return a function that gets the value for symbol when called.
This function will return the new value when that value changes,
but will *not* reload a module when that module changes.
"""
try:
value = self.symbol_table[symbol]
return lambda: value
except KeyError:
pass
*body, last = symbol.split('.')
try:
imported = self.module_importer(symbol)
return lambda: imported
except ImportError:
if not (body and last):
raise # Can't recurse any more!
# Call getter recursively on the parent.
parent_name = '.'.join(body)
parent = self.getter(parent_name)
parent_value = parent()
def getter():
try:
return getattr(parent_value, last)
except AttributeError:
raise ImportError("No module named '%s'" % symbol, name=symbol)
return getter
def __call__(self, symbol):
"""Import the value for symbol, or raise an ImportError if it can't be
found.
"""
return self.getter(symbol)()
def make(self, typename, args=(), **kwds):
"""Make an object from its type.
Args:
typename: name of the class or other constructor for the object.
args: positional arguments to the constructor.
keyword arguments to the constructor.
"""
constructor = self(typename)
return constructor(*args, **kwds)
importer = Importer()
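# A minimal usage sketch (added for illustration, not part of the original module;
# the symbols below are ordinary standard-library names used purely as examples):
if __name__ == '__main__':
    # Builtins resolve straight from the default symbol_table.
    assert importer('len')('abc') == 3
    # Dotted names fall back to module import plus getattr on the parent.
    assert importer('math.sqrt')(9.0) == 3.0
    # make() resolves a constructor by name and calls it with the given args.
    od = importer.make('collections.OrderedDict', args=([('a', 1)],))
    assert list(od.items()) == [('a', 1)]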
| 33.121951 | 79 | 0.621134 | 2,662 | 0.980118 | 0 | 0 | 0 | 0 | 0 | 0 | 1,484 | 0.546392 |
d0805f0b04c5487557b620b5f784fdf929044314 | 13,190 | py | Python | maintenance_utils.py | lawrluor/matchstats | 0c8cd08403d5fa2772b3d5d9391a804866d15dce | ["BSD-3-Clause"] | 6 | 2015-06-22T16:51:03.000Z | 2017-12-05T22:18:39.000Z | maintenance_utils.py | lawrluor/matchstats | 0c8cd08403d5fa2772b3d5d9391a804866d15dce | ["BSD-3-Clause"] | 1 | 2021-06-01T21:44:48.000Z | 2021-06-01T21:44:48.000Z | maintenance_utils.py | lawrluor/matchstats | 0c8cd08403d5fa2772b3d5d9391a804866d15dce | ["BSD-3-Clause"] | null | null | null |
from app import app, db
from app.models import *
import datetime
import sys
sys.path.append('./sanitize')
from sanitize_utils import *
from trueskill import setup, Rating, quality_1vs1, rate_1vs1
from trueskill_functions import MU, SIGMA, CONS_MU, BETA, TAU, DRAW_PROBABILITY, populate_trueskills
from misc_utils import *
# Make some of these Class functions in app.models??
# Changes User's tag, given string new_tag. Also ensures that user's tag is changed in the Sets he has played
def change_tag(tag, new_tag):
user = User.query.filter(User.tag==tag).first()
if user is None:
print "User %s not found." % tag
return
print "ORIGINAL USER: ", print_ignore(user)
won_sets = user.get_won_sets()
for set in won_sets:
set.winner_tag = new_tag
print_ignore(set)
lost_sets = user.get_lost_sets()
for set in lost_sets:
set.loser_tag = new_tag
print_ignore(set)
user.tag = new_tag
db.session.commit()
print "UPDATED USER: ", print_ignore(user)
return user
# transfers the data the User represented by joined_tag has to User root_tag, while deleting the User represented by joined_tag
# currently doesn't actually link the Users or tag in any way before deletion
# currently doesn't change Matches
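# e.g. merge_user("Elicik", "elicik") folds the lowercase duplicate's Sets and Placements
# into the canonical "Elicik" User (tags are only an illustration; see capitalize_all_tags below for one caller)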
def merge_user(root_tag, joined_tag):
root_user = User.query.filter(User.tag==root_tag).first()
joined_user = User.query.filter(User.tag==joined_tag).first()
if root_user is None:
print "root_user not found"
return
elif joined_user is None:
print "joined_user " + joined_tag + " not found"
return
# transfer Set data by simply editing Sets to have the root_user as the winner/loser tag and id
joined_sets = joined_user.get_all_sets()
for set in joined_sets:
if set.winner_tag==joined_user.tag:
set.winner_tag = root_user.tag
set.winner_id = root_user.id
else:
set.loser_tag = root_user.tag
set.loser_id = root_user.id
# merge Placement in joined_user by setting Placement.user = root_user
# Placement object removed (from beginning of list, index 0) from joined_user.tournament_assocs upon changing identity of Placement.user, so start again from index 0
placements = joined_user.tournament_assocs
duplicate_found = False
for placement in placements:
# Screen for case in which both joined_user and root_user present in same tourney
# Example: http://bigbluees.challonge.com/NGP44, joined_user="elicik", root_user="Elicik"
duplicates = Placement.query.filter(and_(Placement.user_id==root_user.id, Placement.tournament_id==placement.tournament_id)).all()
if len(duplicates)>0:
duplicate_found = True
continue
# Reassign the placement to root_user
placement.user = root_user
print root_user.tournament_assocs[-1]
print '\n'
# If duplicate was found during iteration, don't delete the joined_user as it is a legit other User who happens to have the same tag after conversion.
if duplicate_found==True:
print "DUPLICATE " + root_user.tag + " WAS FOUND"
return root_user
else:
db.session.delete(joined_user)
db.session.commit()
return root_user
def capitalize_all_tags():
# Not used in sanitize because if people come with sponsor tags, they will be deleted anyways and real tag won't be the same
userlist = User.query.all()
for user in userlist:
if not user.tag[0].isalpha() or user.tag[0].isupper():
continue
else:
print "USER TAG", print_ignore(user.tag)
temp = user.tag
capitalize_first = lambda s: s[:1].upper() + s[1:] if s else ''
cap_tag = capitalize_first(temp)
print "CAP TAG", print_ignore(cap_tag)
root_user = User.query.filter(User.tag==cap_tag).first()
if root_user is not None:
print "ORIGINAL USER", print_ignore(user)
print "ROOT USER", print_ignore(root_user)
merge_user(root_user.tag, user.tag)
else:
# if can't find root tag to merge with, then root tag doesn't exist. Change the tag
change_tag(user.tag, cap_tag)
print '\n'
return
def remove_team(separator):
userlist = User.query.filter(User.tag.contains(separator)).all()
for user in userlist:
print "USER TAG", user.tag
sep_index = user.tag.rfind(separator)
# ensure you are removing from the front. checks against cases where separator is last char
if sep_index!=len(user.tag)-1:
new_tag = user.tag[sep_index+len(separator):]
new_tag = new_tag.strip()
print "NEW TAG", new_tag
else:
print "Not a team separator"
continue
if user.region is None:
sanitized_tag = check_and_sanitize_tag(new_tag)
else:
sanitized_tag = check_and_sanitize_tag(new_tag, user.region.region)
print "SANITIZED TAG", sanitized_tag
# Find User if tag not registered
if sanitized_tag==new_tag:
# this means user was not matched to sanitized tag, so query for user with tag==new_tag
root_user = User.query.filter(User.tag==new_tag).first()
if root_user is not None:
print "ROOT USER", print_ignore(root_user)
merge_user(root_user.tag, user.tag)
else:
# if still can't find root tag to merge with, then root tag doesn't exist. Change the tag
change_tag(user.tag, new_tag)
else:
# if found sanitized User, merge them
merge_user(sanitized_tag, user.tag)
print '\n'
def search_and_replace_user(tag_string):
userlist = User.query.filter(User.tag.contains(tag_string)).all()
for user in userlist:
print "USER TAG", user.tag
new_tag = user.tag.replace(tag_string, '')
new_tag = new_tag.strip()
print "NEW_TAG", new_tag
if user.region is None:
sanitized_tag = check_and_sanitize_tag(new_tag)
else:
sanitized_tag = check_and_sanitize_tag(new_tag, user.region.region)
print "SANITIZED TAG", sanitized_tag
# Find User if tag not registered
if sanitized_tag==new_tag:
# this means user was not matched to sanitized tag, so query for user with tag==new_tag
root_user = User.query.filter(User.tag==new_tag).first()
if root_user is not None:
print "ROOT USER", root_user
merge_user(root_user.tag, user.tag)
else:
# if still can't find root tag to merge with, then root tag doesn't exist. Change the tag
change_tag(user.tag, new_tag)
else:
# if found sanitized User, merge them
merge_user(sanitized_tag, user.tag)
print '\n'
# Given a User tag and region name, changes user.region and changes regional trueskill if region is valid, otherwise deletes it
def change_region(tag, region_name):
user = User.query.filter(User.tag==tag).first()
region = Region.query.filter(Region.region==region_name).first()
if not user and not region:
return "User or Region not found"
if user.region is not None and user.region.region==region_name:
return "Region %s is already region of %s" % (region_name, tag)
user.region = region
# Repopulate regional trueskill for new region, to the default. First call deletes obsolete region, second call repopulates regional trueskill with new region.
# If new region is None, deletes obsolete region and does nothing else
# If user.region was already None, does nothing
populate_trueskills(user)
populate_trueskills(user)
db.session.commit()
return user
def add_characters(tag, characters):
found_tag = check_and_sanitize_tag(tag)
print found_tag
user = User.query.filter(User.tag==found_tag).first()
print user
if user is None:
return "User not found"
if characters is not None and characters !=[]:
for character in characters:
user.add_character(character)
db.session.commit()
db.session.commit()
return user
def add_region(region_name):
  found_region = Region.query.filter(Region.region==region_name).first()
if found_region is not None:
print "Region " + region_name + " already exists"
return
else:
new_region = Region(region=region_name)
db.session.add(new_region)
db.session.commit()
print "New region " + region_name + " added"
return new_region
def delete_character(tag, character):
user = User.query.filter(User.tag==tag).first()
user.remove_character(character)
db.session.commit()
# Given parameter tournament name and a list of integers representing year, month, and day, queries for Tournament object and assigns a date for it.
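# e.g. change_date("NGP44", [2015, 8, 21]) sets that Tournament's date to 2015-08-21 (values are illustrative)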
def change_date(tournament_name, date_numbers):
tournament = Tournament.query.filter(Tournament.name==tournament_name).first()
if tournament is None:
return "Tournament not found"
# Create date object, index 0 = year, index 1 = month, index 2 = day
date = datetime.date(date_numbers[0], date_numbers[1], date_numbers[2])
tournament.date = date
db.session.commit()
return tournament
# given a Tournament object: if a Tournament with that name already exists (e.g. this tournament is a pool of a larger one), add its placements and sets to the existing Tournament object and return it, else simply return the original Tournament object
def check_tournament(tournament):
same_tournament = Tournament.query.filter(Tournament.name==tournament.name).first()
# if tournament already exists, only add matches to Tournament, else create tournament as usual
  if same_tournament is not None:
    # if tournament.type == "Pool"
    print "Tournament already exists"
    same_tournament.sets.append(tournament.sets)
    return same_tournament
  else:
    return tournament
# given a user tag, returns a simple dictionary mapping tournament name to placement for each tournament the User has attended
def get_tournament_name_and_placing(user_tag):
user = User.query.filter(User.tag==user_tag).first()
user_placements = {}
for tournament_placement in user.tournament_assocs:
tournament_name = tournament_placement.tournament.name
placement = convert_placement(tournament_placement.placement)
user_placements[tournament_name] = placement
print user_placements
return user_placements
# deletes a Set given tournament name, set winner, and set loser
def delete_set(tournament_name, winner_tag, loser_tag):
winner_user = User.query.filter(User.tag==winner_tag).first()
loser_user = User.query.filter(User.tag==loser_tag).first()
if winner_user is None:
print "winner_user not found"
return
elif loser_user is None:
print "loser_user not found"
return
found_set = Set.query.filter(and_(Set.loser_tag==loser_tag, Set.winner_tag==winner_tag, Set.tournament_name==tournament_name)).all()
if len(found_set)==1:
db.session.delete(found_set[0])
db.session.commit()
print "Set deleted"
return
elif len(found_set) < 1:
return "No set found"
elif len(found_set) > 1:
return "Multiple Sets found"
# reassigns Tournament Placement and Sets from one User to another
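# e.g. reassign_user("NGP44", "OldEntrant", "NewEntrant") moves the Placement and Sets recorded
# under OldEntrant in that tournament over to NewEntrant (names here are only an illustration)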
def reassign_user(tournament_name, user_tag, reassigned_tag):
# Query for objects
found_user = User.query.filter(User.tag==user_tag).first()
reassigned_user = User.query.filter(User.tag==reassigned_tag).first()
# find placement User has in Tournament, and replace found_user with reassigned_user
found_placement = Placement.query.filter(and_(Placement.tournament_name==tournament_name, Placement.user==found_user)).first()
if found_placement is not None:
found_placement.user=reassigned_user
else:
print "Placement not found"
# find Sets User played in Tournament, and replace the found_user with reassigned_user
found_sets = Set.query.filter(and_(Set.tournament_name==tournament_name), or_(Set.winner_tag==user_tag, Set.loser_tag==user_tag)).all()
print found_sets
if found_sets is not None:
for found_set in found_sets:
if found_set.winner_tag==user_tag:
found_set.winner_tag=reassigned_tag
elif found_set.loser_tag==user_tag:
found_set.loser_tag=reassigned_tag
else:
print "No Sets found"
db.session.commit()
return "Tournament entries reassigned"
def delete_tournament(header_name):
'''
Give TournamentHeader name, query for and delete TournamentHeader
'''
header = TournamentHeader.query.filter(TournamentHeader.name==header_name).first()
if header is None:
return "TournamentHeader not found"
else:
db.session.delete(header)
db.session.commit()
deleted_header = TournamentHeader.query.filter(TournamentHeader.name==header_name).first()
if deleted_header:
return "Failure"
else:
return "Successful deletion"
# Doesn't actually delete
def find_subtournament(tournament_name):
tournamentlist = Tournament.query.filter(Tournament.name==tournament_name).all()
for x in tournamentlist:
print x
return tournamentlist
# delete by index
def change_tournament_name(current_name, new_name):
  # Will only give first matching tournament; rename this one
tournament = Tournament.query.filter(Tournament.name==current_name).first()
placements = Placement.query.filter(Placement.tournament_id==tournament.id).all()
for placement in placements:
placement.tournament_name = new_name
sets = Set.query.filter(Set.tournament_id==tournament.id).all()
for set in sets:
set.tournament_name = new_name
tournament.name = new_name
  db.session.commit()
| 37.050562 | 208 | 0.731463 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,171 | 0.316224 |
d08121c2bb321f45165a5c878bb41be255cfeaf3 | 406,902 | py | Python | tools/tests/bourne.py | CoprHD/sds-controller | a575ec96928b1e9258313efe92c930bfe9d6753a | ["Apache-2.0"] | 91 | 2015-06-06T01:40:34.000Z | 2020-11-24T07:26:40.000Z | tools/tests/bourne.py | CoprHD/sds-controller | a575ec96928b1e9258313efe92c930bfe9d6753a | ["Apache-2.0"] | 3 | 2015-07-14T18:47:53.000Z | 2015-07-14T18:50:16.000Z | tools/tests/bourne.py | CoprHD/sds-controller | a575ec96928b1e9258313efe92c930bfe9d6753a | ["Apache-2.0"] | 71 | 2015-06-05T21:35:31.000Z | 2021-11-07T16:32:46.000Z |
# coding=utf-8
#
# Copyright (c) 2015 EMC Corporation
# All Rights Reserved
#
import httplib
import cjson
import argparse
import sys
import os
import time
import json
import uuid
import base64
import urllib
import requests
import email
from email.Utils import formatdate
import cookielib
import telnetlib
import xml.etree.ElementTree as ET
#import xml2obj as x2o
import copy
import hmac
import re
import hashlib
import cookielib
import binascii
import datetime
import socket
import zlib
import struct
from time import sleep
try:
# OpenSUSE CoprHD kits tend to display certificate warnings which aren't
# relevant to running sanity tests
requests.packages.urllib3.disable_warnings()
except AttributeError:
# Swallow error, likely ViPR devkit
pass
URI_SERVICES_BASE = ''
URI_CATALOG = URI_SERVICES_BASE + '/catalog'
URI_CATALOG_SERVICES = URI_CATALOG + '/services'
URI_CATALOG_SERVICE = URI_CATALOG_SERVICES + '/{0}'
URI_CATALOG_SERVICE_SEARCH = URI_CATALOG_SERVICES + '/search'
URI_CATALOG_SERVICE_SEARCH_NAME = URI_CATALOG_SERVICE_SEARCH + '?name={0}'
URI_CATALOG_CATEGORIES = URI_CATALOG + '/categories'
URI_CATALOG_CATEGORY = URI_CATALOG_CATEGORIES + '/{0}'
URI_CATALOG_CATEGORY_UPGRADE = URI_CATALOG_CATEGORIES + '/upgrade?tenantId={0}'
URI_CATALOG_ORDERS = URI_CATALOG + '/orders'
URI_CATALOG_ORDER = URI_CATALOG_ORDERS + '/{0}'
URI_CATALOG_VPOOL = URI_CATALOG + '/vpools'
URI_CATALOG_VPOOL_FILE = URI_CATALOG_VPOOL + '/file'
URI_CATALOG_VPOOL_BLOCK = URI_CATALOG_VPOOL + '/block'
URI_CATALOG_VPOOL_OBJECT = URI_CATALOG_VPOOL + '/object'
URI_VPOOLS = URI_SERVICES_BASE + '/{0}/vpools'
URI_VPOOLS_MATCH = URI_SERVICES_BASE + '/{0}/vpools/matching-pools'
URI_OBJ_VPOOL = URI_SERVICES_BASE + '/{0}/data-services-vpools'
URI_VPOOL_INSTANCE = URI_VPOOLS + '/{1}'
URI_OBJ_VPOOL_INSTANCE = URI_OBJ_VPOOL + '/{1}'
URI_VPOOL_ACLS = URI_VPOOL_INSTANCE + '/acl'
URI_VPOOL_UPDATE = URI_VPOOL_INSTANCE + '/assign-matched-pools'
URI_VPOOL_DEACTIVATE = URI_VPOOL_INSTANCE + '/deactivate'
URI_VPOOL_REFRESH = URI_VPOOL_INSTANCE + '/refresh-matched-pools'
URI_BLOCKVPOOLS_BULKGET = URI_SERVICES_BASE + '/block/vpools/bulk'
URI_FILEVPOOLS_BULKGET = URI_SERVICES_BASE + '/file/vpools/bulk'
URI_SMISPROVIDER_BULKGET = URI_SERVICES_BASE + '/vdc/smis-providers/bulk'
URI_BLOCKSNAPSHOT_BULKGET = URI_SERVICES_BASE + '/block/snapshots/bulk'
URI_FILESNAPSHOT_BULKGET = URI_SERVICES_BASE + '/file/snapshots/bulk'
URI_EXPORTGROUP_BULKGET = URI_SERVICES_BASE + '/block/exports/bulk'
URI_LOGOUT = URI_SERVICES_BASE + '/logout'
URI_MY_PASSWORD_CHANGE = URI_SERVICES_BASE + '/password'
URI_USER_PASSWORD_CHANGE = URI_MY_PASSWORD_CHANGE + '/reset/'
URI_USER_PASSWORD_GET = URI_SERVICES_BASE + '/config/properties'
URI_USER_PASSWORD_PATTERN = 'system_{0}_encpassword","value":"(.+?)"'
URI_TENANT = URI_SERVICES_BASE + '/tenant'
URI_TENANTS = URI_SERVICES_BASE + '/tenants/{0}'
URI_TENANTS_DEACTIVATE = URI_TENANTS + '/deactivate'
URI_TENANTS_ROLES = URI_TENANTS + '/role-assignments'
URI_TENANTS_SUBTENANT = URI_TENANTS + '/subtenants'
URI_TENANTS_BULKGET = URI_SERVICES_BASE + '/tenants/bulk'
URI_TENANTS_HOSTS = URI_TENANTS + '/hosts'
URI_TENANTS_CLUSTERS = URI_TENANTS + '/clusters'
URI_TENANTS_VCENTERS = URI_TENANTS + '/vcenters'
URI_NODEOBJ = '/nodeobj/?name={0}'
URI_PROJECTS = URI_TENANTS + '/projects'
URI_PROJECT = URI_SERVICES_BASE + '/projects/{0}'
URI_PROJECT_ACLS = URI_PROJECT + '/acl'
URI_PROJECTS_BULKGET = URI_SERVICES_BASE + '/projects/bulk'
URI_FILESYSTEMS_LIST = URI_SERVICES_BASE + '/file/filesystems'
URI_FILESYSTEM = URI_SERVICES_BASE + '/file/filesystems/{0}'
URI_FILESHARE_BULKGET = URI_FILESYSTEMS_LIST + '/bulk'
URI_FILESYSTEMS_EXPORTS = URI_FILESYSTEM + '/exports'
URI_FILESYSTEMS_EXPORTS_UPDATE = URI_FILESYSTEM + '/export'
URI_FILESYSTEMS_UNEXPORT = URI_FILESYSTEM + '/export'
URI_FILESYSTEMS_EXPAND = URI_FILESYSTEM + '/expand'
URI_FILESYSTEMS_SHARES = URI_FILESYSTEM + '/shares'
URI_FILESYSTEMS_UNSHARE = URI_FILESYSTEMS_SHARES + '/{1}'
URI_FILESYSTEMS_SHARES_ACL = URI_FILESYSTEMS_SHARES + '/{1}/acl'
URI_FILESYSTEMS_SHARES_ACL_SHOW = URI_FILESYSTEMS_SHARES + '/{1}/acl'
URI_FILESYSTEMS_SHARES_ACL_DELETE = URI_FILESYSTEMS_SHARES + '/{1}/acl'
URI_FILESYSTEM_SNAPSHOT = URI_FILESYSTEM + '/protection/snapshots'
URI_FILESYSTEMS_SEARCH = URI_FILESYSTEMS_LIST + '/search'
URI_FILESYSTEMS_SEARCH_PROJECT = URI_FILESYSTEMS_SEARCH + '?project={0}'
URI_FILESYSTEMS_SEARCH_PROJECT_NAME = URI_FILESYSTEMS_SEARCH_PROJECT + '&name={1}'
URI_FILESYSTEMS_SEARCH_NAME = URI_FILESYSTEMS_SEARCH + '?name={0}'
URI_FILESYSTEMS_SEARCH_TAG = URI_FILESYSTEMS_SEARCH + '?tag={0}'
URI_FILE_SNAPSHOTS = URI_SERVICES_BASE + '/file/snapshots'
URI_FILE_SNAPSHOT = URI_FILE_SNAPSHOTS + '/{0}'
URI_FILE_SNAPSHOT_EXPORTS = URI_FILE_SNAPSHOT + '/exports'
URI_FILE_SNAPSHOT_UNEXPORT = URI_FILE_SNAPSHOT + '/export'
URI_FILE_SNAPSHOT_RESTORE = URI_FILE_SNAPSHOT + '/restore'
URI_FILE_SNAPSHOT_SHARES = URI_FILE_SNAPSHOT + '/shares'
URI_FILE_SNAPSHOT_SHARES_ACL = URI_FILE_SNAPSHOT_SHARES + '/{1}/acl'
URI_FILE_SNAPSHOT_SHARES_ACL_SHOW = URI_FILE_SNAPSHOT_SHARES + '/{1}/acl'
URI_FILE_SNAPSHOT_SHARES_ACL_DELETE = URI_FILE_SNAPSHOT_SHARES + '/{1}/acl'
URI_FILE_SNAPSHOT_UNSHARE = URI_FILE_SNAPSHOT_SHARES + '/{1}'
URI_FILE_SNAPSHOT_TASKS = URI_FILE_SNAPSHOT + '/tasks/{1}'
URI_FILE_QUOTA_DIR_LIST = URI_FILESYSTEM + '/quota-directories'
URI_FILE_QUOTA_DIR_BASE = URI_SERVICES_BASE + '/file/quotadirectories'
URI_FILE_QUOTA_DIR = URI_FILE_QUOTA_DIR_BASE + '/{0}'
URI_FILE_QUOTA_DIR_DELETE = URI_FILE_QUOTA_DIR + '/deactivate'
URI_DR = URI_SERVICES_BASE + '/site'
URI_DR_GET = URI_DR + '/{0}'
URI_DR_GET_DETAILS = URI_DR + '/{0}' + '/details'
URI_DR_DELETE = URI_DR + '/{0}'
URI_DR_PAUSE = URI_DR + '/{0}' + '/pause'
URI_DR_RESUME = URI_DR + '/{0}' + '/resume'
URI_DR_SWITCHOVER = URI_DR + '/{0}/switchover'
URI_DR_FAILOVER = URI_DR + '/{0}/failover'
URI_VDC = URI_SERVICES_BASE + '/vdc'
URI_VDC_GET = URI_VDC + '/{0}'
URI_VDC_DISCONNECT_POST = URI_VDC + '/{0}/disconnect'
URI_VDC_RECONNECT_POST = URI_VDC + '/{0}/reconnect'
URI_VDC_SECRETKEY = URI_VDC + '/secret-key'
URI_VDC_CERTCHAIN = URI_VDC + '/keystore'
URI_TASK = URI_VDC + "/tasks"
URI_TASK_GET = URI_TASK + '/{0}'
URI_TASK_LIST = URI_TASK
URI_TASK_LIST_SYSTEM = URI_TASK + "?tenant=system"
URI_TASK_DELETE = URI_TASK_GET + '/delete'
URI_EVENT = URI_VDC + "/events"
URI_EVENT_GET = URI_EVENT + '/{0}'
URI_EVENT_LIST = URI_EVENT + '?tenant={0}'
URI_EVENT_DELETE = URI_EVENT_GET + "/deactivate"
URI_EVENT_APPROVE = URI_EVENT_GET + "/approve"
URI_EVENT_DECLINE = URI_EVENT_GET + "/decline"
URI_IPSEC = '/ipsec'
URI_IPSEC_STATUS = '/ipsec?status={0}'
URI_IPSEC_KEY = '/ipsec/key'
URI_VDCINFO = '/object/vdcs'
URI_VDCINFO_GET = URI_VDCINFO + '/vdc' + '/{0}'
URI_VDCINFO_INSERT = URI_VDCINFO_GET
URI_VDCINFO_LOCAL = URI_VDCINFO + '/vdc/local'
URI_VDCINFO_LIST = URI_VDCINFO + '/vdc/list'
URI_CONTROL = URI_SERVICES_BASE + '/control'
URI_RECOVERY = URI_CONTROL + '/cluster/recovery'
URI_DB_REPAIR = URI_CONTROL + '/cluster/dbrepair-status'
URI_BACKUP = URI_SERVICES_BASE + '/backupset'
URI_BACKUP_CREATE = URI_BACKUP + '/backup?tag={0}'
URI_BACKUP_DELETE = URI_BACKUP + '/backup?tag={0}'
URI_BACKUP_LIST = URI_BACKUP
URI_BACKUP_LIST_EXTERNAL = URI_BACKUP + '/external'
URI_BACKUP_DOWNLOAD = URI_BACKUP + '/download?tag={0}'
URI_BACKUP_UPLOAD = URI_BACKUP + '/backup/upload?tag={0}'
URI_BACKUP_QUERY_UPLOAD = URI_BACKUP + '/backup?tag={0}'
URI_BACKUP_QUERY_INFO = URI_BACKUP + '/backup/info?backupname={0}&isLocal={1}'
URI_BACKUP_PULL = URI_BACKUP + '/pull?file={0}'
URI_BACKUP_QUERY_PULL = URI_BACKUP + '/restore/status?backupname={0}&isLocal={1}'
URI_BACKUP_RESTORE = URI_BACKUP + '/restore?backupname={0}&isLocal={1}&password={2}'
URI_VOLUME_LIST = URI_SERVICES_BASE + '/block/volumes'
URI_VOLUME_BULKGET = URI_VOLUME_LIST + '/bulk'
URI_VOLUME = URI_VOLUME_LIST + '/{0}'
URI_VOLUME_EXPAND = URI_VOLUME + '/expand'
URI_VOLUMES_EXPORTS = URI_VOLUME + '/exports'
URI_VOLUMES_UNEXPORTS = URI_VOLUME + '/exports/{1},{2},{3}'
URI_VOLUMES_DEACTIVATE = '/block/volumes/deactivate'
URI_BLOCK_SNAPSHOTS_LIST = URI_VOLUME + '/protection/snapshots'
URI_BLOCK_SNAPSHOTS = URI_SERVICES_BASE + '/block/snapshots/{0}'
URI_BLOCK_SNAPSHOTS_EXPORTS = URI_BLOCK_SNAPSHOTS + '/exports'
URI_BLOCK_SNAPSHOTS_UNEXPORTS = URI_BLOCK_SNAPSHOTS + '/exports/{1},{2},{3}'
URI_BLOCK_SNAPSHOTS_RESTORE = URI_BLOCK_SNAPSHOTS + '/restore'
URI_BLOCK_SNAPSHOTS_EXPAND = URI_BLOCK_SNAPSHOTS + '/expand'
URI_BLOCK_SNAPSHOTS_ACTIVATE = URI_BLOCK_SNAPSHOTS + '/activate'
URI_BLOCK_SNAPSHOTS_EXPOSE = URI_BLOCK_SNAPSHOTS + '/expose'
URI_BLOCK_SNAPSHOTS_TASKS = URI_BLOCK_SNAPSHOTS + '/tasks/{1}'
URI_VOLUME_CHANGE_VPOOL = URI_VOLUME_LIST + '/vpool-change'
URI_VOLUME_CHANGE_VPOOL_MATCH = URI_VOLUME + '/vpool-change/vpool'
URI_VOLUMES_SEARCH = URI_VOLUME_LIST + '/search'
URI_VOLUMES_SEARCH_PROJECT = URI_VOLUMES_SEARCH + '?project={0}'
URI_VOLUMES_SEARCH_PROJECT_NAME = URI_VOLUMES_SEARCH_PROJECT + '&name={1}'
URI_VOLUMES_SEARCH_NAME = URI_VOLUMES_SEARCH + '?name={0}'
URI_VOLUMES_SEARCH_TAG = URI_VOLUMES_SEARCH + '?tag={0}'
URI_VOLUMES_SEARCH_WWN = URI_VOLUMES_SEARCH + '?wwn={0}'
URI_VOLUME_CHANGE_VARRAY = URI_VOLUME + '/varray'
URI_VOLUME_CONTINUOUS = URI_VOLUME + '/protection/continuous-copies'
URI_VOLUME_CHANGE_LINK = URI_VOLUME_CONTINUOUS
URI_VOLUME_FULL_COPY = URI_VOLUME_LIST + '/{0}/protection/full-copies'
URI_VOLUME_FULL_COPY_ACTIVATE = URI_VOLUME_LIST + '/{0}/protection/full-copies/{1}/activate'
URI_VOLUME_FULL_COPY_DETACH = URI_VOLUME_LIST + '/{0}/protection/full-copies/{1}/detach'
URI_VOLUME_FULL_COPY_CHECK_PROGRESS = URI_VOLUME_LIST + '/{0}/protection/full-copies/{1}/check-progress'
URI_FULL_COPY = URI_SERVICES_BASE + '/block/full-copies'
URI_FULL_COPY_RESTORE = URI_FULL_COPY + '/{0}/restore'
URI_FULL_COPY_RESYNC = URI_FULL_COPY + '/{0}/resynchronize'
URI_ADD_JOURNAL = URI_VOLUME_LIST + '/protection/addJournalCapacity'
URI_BLOCK_SNAPSHOT_SESSION = URI_SERVICES_BASE + '/block/snapshot-sessions/{0}'
URI_BLOCK_SNAPSHOT_SESSION_TASK = URI_BLOCK_SNAPSHOT_SESSION + '/tasks/{1}'
URI_BLOCK_SNAPSHOT_SESSION_CREATE = URI_VOLUME + '/protection/snapshot-sessions'
URI_BLOCK_SNAPSHOT_SESSION_DELETE = URI_BLOCK_SNAPSHOT_SESSION + '/deactivate'
URI_BLOCK_SNAPSHOT_SESSION_RESTORE = URI_BLOCK_SNAPSHOT_SESSION + '/restore'
URI_BLOCK_SNAPSHOT_SESSION_LINK_TARGETS = URI_BLOCK_SNAPSHOT_SESSION + '/link-targets'
URI_BLOCK_SNAPSHOT_SESSION_RELINK_TARGETS = URI_BLOCK_SNAPSHOT_SESSION + '/relink-targets'
URI_BLOCK_SNAPSHOT_SESSION_UNLINK_TARGETS = URI_BLOCK_SNAPSHOT_SESSION + '/unlink-targets'
URI_BLOCK_SNAPSHOT_SESSIONS_LIST = URI_BLOCK_SNAPSHOT_SESSION_CREATE
URI_UNMANAGED = URI_VDC + '/unmanaged'
URI_UNMANAGED_UNEXPORTED_VOLUMES = URI_UNMANAGED + '/volumes/ingest'
URI_UNMANAGED_VOLUMES_SEARCH = URI_UNMANAGED + "/volumes/search"
URI_UNMANAGED_VOLUMES_SEARCH_NAME= URI_UNMANAGED_VOLUMES_SEARCH + "?name={0}"
URI_UNMANAGED_EXPORTED_VOLUMES = URI_UNMANAGED + '/volumes/ingest-exported'
URI_UNMANAGED_TASK = URI_VDC + '/tasks/{0}'
URI_BLOCK_MIRRORS_BASE = URI_VOLUME + '/protection/continuous-copies'
URI_BLOCK_MIRRORS_LIST = URI_BLOCK_MIRRORS_BASE
URI_BLOCK_MIRRORS_READ = URI_BLOCK_MIRRORS_BASE + '/{1}'
URI_BLOCK_MIRRORS_ATTACH = URI_BLOCK_MIRRORS_BASE + '/start'
URI_BLOCK_MIRRORS_DETACH_ALL = URI_BLOCK_MIRRORS_BASE + '/stop'
#URI_BLOCK_MIRRORS_DETACH = URI_BLOCK_MIRRORS_BASE + '/{1}/stop'
URI_BLOCK_MIRRORS_PAUSE_ALL = URI_BLOCK_MIRRORS_BASE + '/pause'
#URI_BLOCK_MIRRORS_PAUSE = URI_BLOCK_MIRRORS_BASE + '/{1}/pause'
URI_BLOCK_MIRRORS_RESUME_ALL = URI_BLOCK_MIRRORS_BASE + '/resume'
URI_BLOCK_MIRRORS_DEACTIVATE = URI_BLOCK_MIRRORS_BASE + '/deactivate'
#URI_BLOCK_MIRRORS_RESUME = URI_BLOCK_MIRRORS_BASE + '/{1}/resume'
#URI_BLOCK_SNAPSHOTS_RESTORE = URI_BLOCK_SNAPSHOTS + '/restore'
URI_BLOCK_CONSISTENCY_GROUP_BASE = URI_SERVICES_BASE + '/block/consistency-groups'
URI_BLOCK_CONSISTENCY_GROUP_CREATE = URI_BLOCK_CONSISTENCY_GROUP_BASE
URI_BLOCK_CONSISTENCY_GROUP = URI_BLOCK_CONSISTENCY_GROUP_BASE + '/{0}'
URI_BLOCK_CONSISTENCY_GROUP_TASKS = URI_BLOCK_CONSISTENCY_GROUP + '/tasks/{1}'
URI_BLOCK_CONSISTENCY_GROUP_DELETE = URI_BLOCK_CONSISTENCY_GROUP + '/deactivate'
URI_BLOCK_CONSISTENCY_GROUP_BULK = URI_BLOCK_CONSISTENCY_GROUP_BASE + "/bulk"
URI_BLOCK_CONSISTENCY_GROUP_SNAPSHOT_BASE = URI_BLOCK_CONSISTENCY_GROUP + "/protection/snapshots"
URI_BLOCK_CONSISTENCY_GROUP_SNAPSHOT_CREATE = URI_BLOCK_CONSISTENCY_GROUP_SNAPSHOT_BASE
URI_BLOCK_CONSISTENCY_GROUP_SNAPSHOT_LIST = URI_BLOCK_CONSISTENCY_GROUP_SNAPSHOT_BASE
URI_BLOCK_CONSISTENCY_GROUP_SNAPSHOT = URI_BLOCK_CONSISTENCY_GROUP_SNAPSHOT_BASE + "/{1}"
URI_BLOCK_CONSISTENCY_GROUP_SNAPSHOT_TASKS = URI_BLOCK_CONSISTENCY_GROUP_SNAPSHOT + "/tasks/{2}"
URI_BLOCK_CONSISTENCY_GROUP_SNAPSHOT_ACTIVATE = URI_BLOCK_CONSISTENCY_GROUP_SNAPSHOT + "/activate"
URI_BLOCK_CONSISTENCY_GROUP_SNAPSHOT_DEACTIVATE = URI_BLOCK_CONSISTENCY_GROUP_SNAPSHOT + "/deactivate"
URI_BLOCK_CONSISTENCY_GROUP_SNAPSHOT_RESTORE = URI_BLOCK_CONSISTENCY_GROUP_SNAPSHOT + "/restore"
URI_BLOCK_CONSISTENCY_GROUP_SNAPSHOT_SESSION_BASE = URI_BLOCK_CONSISTENCY_GROUP + "/protection/snapshot-sessions"
URI_BLOCK_CONSISTENCY_GROUP_SNAPSHOT_SESSION_CREATE = URI_BLOCK_CONSISTENCY_GROUP_SNAPSHOT_SESSION_BASE
URI_BLOCK_CONSISTENCY_GROUP_SNAPSHOT_SESSION_LIST = URI_BLOCK_CONSISTENCY_GROUP_SNAPSHOT_SESSION_BASE
URI_BLOCK_CONSISTENCY_GROUP_PROTECTION_BASE = URI_BLOCK_CONSISTENCY_GROUP + "/protection/continuous-copies"
URI_BLOCK_CONSISTENCY_GROUP_SWAP = URI_BLOCK_CONSISTENCY_GROUP_PROTECTION_BASE + "/swap"
URI_BLOCK_CONSISTENCY_GROUP_ACCESS_MODE = URI_BLOCK_CONSISTENCY_GROUP_PROTECTION_BASE + "/accessmode"
URI_BLOCK_CONSISTENCY_GROUP_FAILOVER = URI_BLOCK_CONSISTENCY_GROUP_PROTECTION_BASE + "/failover"
URI_BLOCK_CONSISTENCY_GROUP_FAILOVER_CANCEL = URI_BLOCK_CONSISTENCY_GROUP_PROTECTION_BASE + "/failover-cancel"
#Object Platform ECS bucket definitions
URI_ECS_BUCKET_LIST = URI_SERVICES_BASE + '/object/buckets'
URI_ECS_BUCKET = URI_SERVICES_BASE + '/object/buckets/{0}'
URI_NETWORKSYSTEMS = URI_SERVICES_BASE + '/vdc/network-systems'
URI_NETWORKSYSTEM = URI_NETWORKSYSTEMS + '/{0}'
URI_NETWORKSYSTEM_DISCOVER = URI_NETWORKSYSTEMS + '/{0}/discover'
URI_NETWORKSYSTEM_FCENDPOINTS = URI_NETWORKSYSTEMS + '/{0}/fc-endpoints'
URI_NETWORKSYSTEM_FCENDPOINTS_FABRIC = URI_NETWORKSYSTEM_FCENDPOINTS + '?fabric-id={1}'
URI_NETWORKSYSTEM_VDCREFERENCES = URI_NETWORKSYSTEMS + '/san-references/{0},{1}'
URI_NETWORKSYSTEM_REGISTER = URI_NETWORKSYSTEMS + '/{0}/register'
URI_NETWORKSYSTEM_DEREGISTER = URI_NETWORKSYSTEMS + '/{0}/deregister'
URI_NETWORKSYSTEM_ALIASES = URI_NETWORKSYSTEM + '/san-aliases'
URI_NETWORKSYSTEM_ALIASES_FABRIC = URI_NETWORKSYSTEM_ALIASES + '?fabric-id={1}'
URI_NETWORKSYSTEM_ALIASES_REMOVE = URI_NETWORKSYSTEM_ALIASES + '/remove'
URI_NETWORKSYSTEM_ZONES = URI_NETWORKSYSTEM + '/san-fabrics/{1}/san-zones'
URI_NETWORKSYSTEM_ZONES_QUERY = URI_NETWORKSYSTEM_ZONES + '?zone-name={2}&exclude-members={3}&exclude-aliases={4}'
URI_NETWORKSYSTEM_ZONES_REMOVE = URI_NETWORKSYSTEM_ZONES + '/remove'
URI_NETWORKSYSTEM_ZONES_ACTIVATE = URI_NETWORKSYSTEM_ZONES + '/activate'
URI_DISCOVERED_STORAGEDEVICES = URI_SERVICES_BASE + '/vdc/storage-systems'
URI_DISCOVERED_STORAGEDEVICE = URI_DISCOVERED_STORAGEDEVICES + '/{0}'
URI_STORAGEDEVICES = URI_SERVICES_BASE + '/vdc/storage-systems'
URI_STORAGEDEVICE = URI_STORAGEDEVICES + '/{0}'
URI_STORAGEDEVICE_DISCOVERALL = URI_STORAGEDEVICES + '/discover'
URI_STORAGEDEVICE_DEREGISTER = URI_STORAGEDEVICE + '/deregister'
URI_STORAGESYSTEMS_BULKGET = URI_DISCOVERED_STORAGEDEVICES + '/bulk'
URI_DISCOVERED_STORAGEDEVICE_DISCOVER = URI_STORAGEDEVICE + '/discover'
URI_DISCOVERED_STORAGEDEVICE_NS = URI_DISCOVERED_STORAGEDEVICE_DISCOVER + '?namespace={1}'
URI_STORAGEPOOLS = URI_STORAGEDEVICE + '/storage-pools'
URI_STORAGEPOOL = URI_SERVICES_BASE + '/vdc/storage-pools/{0}'
URI_STORAGEPOOL_SHOW = URI_STORAGEPOOLS + '/{1}'
URI_STORAGEPOOL_REGISTER = URI_STORAGEPOOLS + '/{1}/register'
URI_STORAGEPOOL_DEREGISTER = URI_STORAGEPOOL + '/deregister'
URI_STORAGEPOOL_UPDATE = URI_STORAGEPOOL
URI_STORAGEPOOLS_BULKGET = URI_SERVICES_BASE + '/vdc/storage-pools/bulk'
URI_STORAGEPORTS = URI_STORAGEDEVICE + '/storage-ports'
URI_STORAGEPORT = URI_SERVICES_BASE + '/vdc/storage-ports/{0}'
URI_STORAGEPORT_SHOW = URI_STORAGEPORTS + '/{1}'
URI_STORAGEPORT_UPDATE = URI_STORAGEPORT
URI_STORAGEPORT_REGISTER = URI_STORAGEPORTS + '/{1}/register'
URI_STORAGEPORT_DEREGISTER = URI_STORAGEPORT + '/deregister'
URI_STORAGEPORTS_BULKGET = URI_SERVICES_BASE + '/vdc/storage-ports/bulk'
URI_VARRAYS = URI_SERVICES_BASE + '/vdc/varrays'
URI_VARRAY = URI_VARRAYS + '/{0}'
URI_VARRAY_PORTS = URI_VARRAY + '/storage-ports'
URI_VARRAY_ACLS = URI_VARRAY + '/acl'
URI_VARRAYS_BULKGET = URI_VARRAYS + '/bulk'
URI_NETWORKS = URI_SERVICES_BASE + '/vdc/networks'
URI_VARRAY_NETWORKS = URI_VARRAY + '/networks'
URI_NETWORK = URI_NETWORKS + '/{0}'
URI_NETWORK_ENDPOINTS = URI_NETWORK + '/endpoints'
URI_NETWORK_ASSIGN = URI_NETWORK + ''
URI_NETWORK_UNASSIGN = URI_NETWORK + ''
URI_NETWORKS_BULKGET = URI_NETWORKS + '/bulk'
URI_NETWORK_DEACTIVATE = URI_NETWORK + '/deactivate?force={1}'
URI_NETWORK_REGISTER = URI_NETWORK + '/register'
URI_NETWORK_DEREGISTER = URI_NETWORK + '/deregister'
URI_SMISPROVIDERS = URI_SERVICES_BASE + '/vdc/smis-providers'
URI_SMISPROVIDER = URI_SMISPROVIDERS + '/{0}'
URI_FILE_POLICIES = '/file/file-policies'
URI_FILE_POLICY_SHOW = URI_FILE_POLICIES + '/{0}'
URI_FILE_POLICY_DELETE = URI_FILE_POLICIES + '/{0}'
URI_FILE_POLICY_UPDATE = URI_FILE_POLICIES + '/{0}'
URI_FILE_POLICY_ASSIGN = URI_FILE_POLICIES + '/{0}/assign-policy'
URI_FILE_POLICY_UNASSIGN = URI_FILE_POLICIES + '/{0}/unassign-policy'
URI_STORAGEPROVIDERS = URI_SERVICES_BASE + '/vdc/storage-providers'
URI_STORAGEPROVIDER = URI_STORAGEPROVIDERS + '/{0}'
URI_STORAGETIER = URI_SERVICES_BASE + '/vdc/storage-tiers/{0}'
URI_STORAGETIERS = URI_SERVICES_BASE + '/vdc/storage-tiers'
URI_EXPORTGROUP_LIST = URI_SERVICES_BASE + '/block/exports'
URI_EXPORTGROUP_INSTANCE = URI_SERVICES_BASE + '/block/exports/{0}'
URI_EXPORTGROUP_VOLUMES = URI_SERVICES_BASE + '/block/exports/{0}/volumes'
URI_EXPORTGROUP_VOLUME_INSTANCE = URI_SERVICES_BASE + '/block/exports/{0}/volumes/{1}'
URI_EXPORTGROUP_VOLUMES_REMOVE = URI_SERVICES_BASE + '/block/exports/{0}/remove-volumes'
URI_EXPORTGROUP_INITS = URI_SERVICES_BASE + '/block/exports/{0}/initiators'
URI_EXPORTGROUP_INIT_DELETE = URI_SERVICES_BASE + '/block/exports/{0}/initiators/{1},{2}'
URI_EXPORTGROUP_INITS_REMOVE = URI_SERVICES_BASE + '/block/exports/{0}/remove-initiators'
URI_EXPORTGROUP_REALLOC = URI_SERVICES_BASE + '/block/exports/{0}/paths-adjustment-preview'
URI_EXPORTGROUP_REBALANCE = URI_SERVICES_BASE + '/block/exports/{0}/paths-adjustment'
URI_EXPORTGROUP_CHANGEPORTGROUP = URI_SERVICES_BASE + '/block/exports/{0}/change-port-group'
URI_EXPORTGROUP_SEARCH_PROJECT = URI_EXPORTGROUP_LIST + '/search?project={0}'
URI_HOSTS = URI_SERVICES_BASE + '/compute/hosts'
URI_HOST = URI_SERVICES_BASE + '/compute/hosts/{0}'
URI_HOST_DEACTIVATE = URI_HOST + '/deactivate?detach_storage={1}'
URI_HOSTS_BULKGET = URI_HOSTS + '/bulk'
URI_HOST_INITIATORS = URI_SERVICES_BASE + '/compute/hosts/{0}/initiators'
URI_HOST_IPINTERFACES = URI_SERVICES_BASE + '/compute/hosts/{0}/ip-interfaces'
URI_INITIATORS = URI_SERVICES_BASE + '/compute/initiators'
URI_INITIATOR = URI_SERVICES_BASE + '/compute/initiators/{0}'
URI_INITIATOR_REGISTER = URI_SERVICES_BASE + '/compute/initiators/{0}/register'
URI_INITIATOR_DEREGISTER = URI_SERVICES_BASE + '/compute/initiators/{0}/deregister'
URI_INITIATOR_ALIASGET = URI_SERVICES_BASE + "/compute/initiators/{0}/alias/{1}"
URI_INITIATOR_ALIASSET = URI_SERVICES_BASE + "/compute/initiators/{0}/alias"
URI_INITIATORS_BULKGET = URI_SERVICES_BASE + '/compute/initiators/bulk'
URI_IPINTERFACES = URI_SERVICES_BASE + '/compute/ip-interfaces'
URI_IPINTERFACE = URI_SERVICES_BASE + '/compute/ip-interfaces/{0}'
URI_IPINTERFACE_REGISTER = URI_SERVICES_BASE + '/compute/ip-interfaces/{0}/register'
URI_IPINTERFACE_DEREGISTER = URI_SERVICES_BASE + '/compute/ip-interfaces/{0}/deregister'
URI_IPINTERFACES_BULKGET = URI_SERVICES_BASE + '/compute/ip-interfaces/bulk'
URI_VCENTERS = URI_SERVICES_BASE + '/compute/vcenters'
URI_VCENTER = URI_SERVICES_BASE + '/compute/vcenters/{0}'
URI_VCENTER_DISCOVER = URI_VCENTER + '/discover'
URI_VCENTERS_BULKGET = URI_VCENTERS + '/bulk'
URI_VCENTER_DATACENTERS = URI_VCENTER + '/vcenter-data-centers'
URI_CLUSTERS = URI_SERVICES_BASE + '/compute/clusters'
URI_CLUSTER = URI_SERVICES_BASE + '/compute/clusters/{0}'
URI_CLUSTER_DEACTIVATE = URI_CLUSTER + '/deactivate?detach-storage={1}'
URI_CLUSTERS_BULKGET = URI_CLUSTERS + '/bulk'
URI_DATACENTERS = URI_SERVICES_BASE + '/compute/vcenter-data-centers'
URI_DATACENTER = URI_SERVICES_BASE + '/compute/vcenter-data-centers/{0}'
URI_DATACENTERS_BULKGET = URI_SERVICES_BASE + '/compute/vcenter-data-centers/bulk'
URI_DATA_STORE_LIST = URI_SERVICES_BASE + '/vdc/data-stores'
URI_DATA_STORE = URI_SERVICES_BASE + '/vdc/data-stores/{0}'
URI_DATA_STORE_BULKGET = URI_DATA_STORE_LIST + '/bulk'
URI_KEYPOOLS = URI_SERVICES_BASE + '/object/keypools'
URI_KEYPOOLS_INSTANCE = URI_KEYPOOLS + '/{0}'
URI_KEYPOOLS_ACCESSMODE_INSTANCE = URI_KEYPOOLS + '/access-mode' + '/{0}'
URI_KEYPOOLS_FILEACCESS_INSTANCE = URI_KEYPOOLS + '/fileaccess' + '/{0}'
URI_KEY_INSTANCE = URI_KEYPOOLS_INSTANCE + '/{1}'
URI_KEYS = URI_SERVICES_BASE + '/object/keypools'
URI_KEYS_INSTANCE = URI_KEYS + '/{0}'
URI_ATMOS_DEVICE_LIST = URI_SERVICES_BASE + '/object/atmos-importer'
URI_ATMOS_DEVICE_TASK = URI_SERVICES_BASE + '/object/atmos-importer/{0}/tasks/{1}'
URI_ATMOS_DEVICE = URI_SERVICES_BASE + '/object/atmos-importer/{0}'
URI_ATMOS_DEVICE_DELETE = URI_SERVICES_BASE + '/object/atmos-importer/{0}/deactivate'
URI_OBJECT_INGESTION_LIST = URI_SERVICES_BASE + '/object/ingestion'
URI_OBJECT_INGESTION = URI_SERVICES_BASE + '/object/ingestion/{0}'
URI_OBJECT_INGESTION_DELETE = URI_SERVICES_BASE + '/object/ingestion/{0}/deactivate'
URI_OBJECT_INGESTION_OP_STATUS = URI_SERVICES_BASE + '/object/ingestion/{0}/tasks/{1}'
URI_OBJECTTZ = URI_SERVICES_BASE + '/object/networks'
URI_OBJECTTZ_INSTANCE = URI_OBJECTTZ + '/{0}'
URI_OBJECTTZ_DELETE = URI_OBJECTTZ + '/{0}/deactivate'
URI_DISCOVERED_PROTECTION_SYSTEMS = URI_SERVICES_BASE + '/vdc/protection-systems'
URI_DISCOVERED_PROTECTION_SYSTEM = URI_DISCOVERED_PROTECTION_SYSTEMS + '/{0}'
URI_PROTECTION_SYSTEM = URI_SERVICES_BASE + '/vdc/protection-systems/{0}'
URI_PROTECTION_SYSTEMS = URI_SERVICES_BASE + '/vdc/protection-systems'
URI_PROTECTION_SYSTEM_DISCOVER = URI_PROTECTION_SYSTEM + '/discover'
URI_PROTECTION_SYSTEM_UPDATE = URI_PROTECTION_SYSTEM
URI_DISCOVERED_PROTECTION_SYSTEM_DISCOVER = URI_PROTECTION_SYSTEM + '/discover'
URI_DISCOVERED_PROTECTION_SYSTEM_NS = URI_DISCOVERED_PROTECTION_SYSTEM_DISCOVER + '?namespace={1}'
URI_PROTECTIONSET = URI_SERVICES_BASE + '/block/protection-sets/{0}'
URI_PROTECTIONSETS = URI_SERVICES_BASE + '/block/protection-sets'
URI_VDC_ROLES = URI_SERVICES_BASE + '/vdc/role-assignments'
URI_VDC_AUTHN_PROFILE = URI_SERVICES_BASE + '/vdc/admin/authnproviders'
URI_VDC_AUTHN_PROFILES = URI_SERVICES_BASE + '/vdc/admin/authnproviders/{0}'
URI_AUTO_TIER_POLICY = URI_SERVICES_BASE + '/vdc/auto-tier-policies/{0}'
URI_WORKFLOW_LIST = URI_SERVICES_BASE + '/vdc/workflows'
URI_WORKFLOW_RECENT = URI_WORKFLOW_LIST + '/recent'
URI_WORKFLOW_INSTANCE = URI_WORKFLOW_LIST + '/{0}'
URI_WORKFLOW_STEPS = URI_WORKFLOW_INSTANCE + '/steps'
URI_WORKFLOW_RESUME = URI_WORKFLOW_LIST + '/{0}/resume'
URI_WORKFLOW_ROLLBACK = URI_WORKFLOW_LIST + '/{0}/rollback'
URI_WORKFLOW_SUSPEND = URI_WORKFLOW_LIST + '/{0}/suspend/{1}'
URI_AUDIT_QUERY = URI_SERVICES_BASE + '/audit/logs/?time_bucket={0}&language={1}'
URI_MONITOR_QUERY = URI_SERVICES_BASE + '/monitoring/events/?time_bucket={0}'
URI_RESOURCE_DEACTIVATE = '{0}/deactivate'
URI_S3_SERVICE_BASE = ''
URI_S3_BUCKET_INSTANCE = URI_S3_SERVICE_BASE + '/{0}'
URI_S3_KEY_INSTANCE = URI_S3_SERVICE_BASE + '/{0}/{1}'
URI_S3_KEY_INSTANCE_ALTERNATE = URI_S3_SERVICE_BASE + '/{0}' #used when the bucket name is part of the Host header
URI_S3_PING = URI_S3_SERVICE_BASE + '/'
URI_S3_DATANODE = URI_S3_SERVICE_BASE + '/'
URI_ATMOS_SERVICE_BASE = '/rest'
URI_ATMOS_OBJECTS = URI_ATMOS_SERVICE_BASE + '/objects'
URI_ATMOS_OBJECTS_OID = URI_ATMOS_OBJECTS + '/{0}'
URI_ATMOS_NAMESPACE = URI_ATMOS_SERVICE_BASE + '/namespace'
URI_ATMOS_NAMESPACE_PATH = URI_ATMOS_NAMESPACE + '{0}'
URI_ATMOS_SUBTENANT_BASE = URI_ATMOS_SERVICE_BASE + '/subtenant'
URI_ATMOS_SUBTENANT_INSTANCE = URI_ATMOS_SUBTENANT_BASE + '/{0}'
URI_ATMOS_OBJECT_INSTANCE = URI_ATMOS_OBJECTS + '/{0}'
URI_ATMOS_NAMESPACE_INSTANCE = URI_ATMOS_NAMESPACE + '/{0}'
URI_SWIFT_SERVICE_BASE = '/v1'
URI_SWIFT_ACCOUNT_INSTANCE = URI_SWIFT_SERVICE_BASE + '/{0}'
URI_SWIFT_CONTAINER_INSTANCE = URI_SWIFT_SERVICE_BASE + '/{0}/{1}'
URI_SWIFT_KEY_INSTANCE = URI_SWIFT_SERVICE_BASE + '/{0}/{1}/{2}'
URI_NAMESPACE_COMMON = URI_SERVICES_BASE + '/object/namespaces'
URI_NAMESPACE_BASE = URI_NAMESPACE_COMMON + '/namespace'
URI_NAMESPACE_INSTANCE = URI_NAMESPACE_BASE + '/{0}'
URI_NAMESPACE_TENANT_BASE = URI_NAMESPACE_COMMON + '/tenant'
URI_NAMESPACE_TENANT_INSTANCE = URI_NAMESPACE_TENANT_BASE + '/{0}'
URI_NAMESPACE_RETENTION_BASE = URI_NAMESPACE_INSTANCE + '/retention'
URI_NAMESPACE_RETENTION_INSTANCE= URI_NAMESPACE_RETENTION_BASE + '/{1}'
URI_BUCKET_COMMON = '/object/bucket'
URI_BUCKET_INSTANCE = URI_BUCKET_COMMON + '/{0}'
URI_BUCKET_RETENTION = URI_BUCKET_INSTANCE + '/retention'
URI_BUCKET_UPDATE_OWNER = URI_BUCKET_INSTANCE + '/owner'
URI_SECRET_KEY = URI_SERVICES_BASE + '/object/secret-keys'
URI_SECRET_KEY_USER = URI_SERVICES_BASE + '/object/user-secret-keys/{0}'
URI_DELETE_SECRET_KEY_USER = URI_SERVICES_BASE + '/object/user-secret-keys/{0}/deactivate'
URI_WEBSTORAGE_USER = URI_SERVICES_BASE + '/object/users'
URI_WEBSTORAGE_USER_DEACTIVATE = URI_WEBSTORAGE_USER + '/deactivate'
URI_BASEURL_BASE = URI_SERVICES_BASE + '/object/baseurl'
URI_BASEURL_INSTANCE = URI_BASEURL_BASE + '/{0}'
URI_BASEURL_DEACTIVATE = URI_BASEURL_BASE + '/{0}/deactivate'
URI_PASSWORDGROUP = URI_SERVICES_BASE + '/object/user-password/{0}'
URI_PASSWORDGROUP_DEACTIVATE = URI_PASSWORDGROUP + '/deactivate'
URI_MIGRATIONS = URI_SERVICES_BASE + '/block/migrations'
URI_MIGRATION = URI_MIGRATIONS + '/{0}'
URI_ZONE = URI_SERVICES_BASE + '/zone/{0}'
URI_ZONES = URI_SERVICES_BASE + '/zone'
URI_ZONE_CAPACITY = URI_SERVICES_BASE + '/zone/capacity'
URI_CUSTOMCONFIGS = URI_SERVICES_BASE + '/config/controller'
URI_CUSTOMCONFIG = URI_CUSTOMCONFIGS + '/{0}'
URI_CUSTOMCONFIG_DELETE = URI_CUSTOMCONFIG + '/deactivate'
URI_REPLICATION_GROUP = URI_SERVICES_BASE + '/vdc/data-service/vpools/{0}'
URI_REPLICATION_GROUPS = URI_SERVICES_BASE + '/vdc/data-service/vpools'
URI_REPLICATION_EXTEND = URI_SERVICES_BASE + '/vdc/data-service/vpools/{0}/addvarrays'
URI_REPLICATION_COMPRESS = URI_SERVICES_BASE + '/vdc/data-service/vpools/{0}/removevarrays'
URI_VNAS_SERVERS = URI_SERVICES_BASE + '/vdc/vnas-servers'
URI_VNAS_SERVER = URI_SERVICES_BASE + '/vdc/vnas-servers/{0}'
URI_VNAS_SERVER_ASSIGN = URI_SERVICES_BASE + '/projects/{0}/assign-vnas-servers'
URI_VNAS_SERVER_UNASSIGN = URI_SERVICES_BASE + '/projects/{0}/unassign-vnas-servers'
URI_GEO_SERVICES_BASE = ''
URI_CHUNKINFO = URI_GEO_SERVICES_BASE + '/chunkinfo'
URI_CHUNKDATA = URI_GEO_SERVICES_BASE + '/chunkdata/{0}'
URI_OBJ_CERT = '/object-cert/keystore'
URI_OBJ_SECRET_KEY = '/object-cert/secret-key'
URI_COMPUTE_SYSTEMS = URI_SERVICES_BASE + '/vdc/compute-systems'
URI_COMPUTE_SYSTEM = URI_COMPUTE_SYSTEMS + '/{0}'
URI_COMPUTE_SYSTEM_COMPUTEELEMENTS = URI_COMPUTE_SYSTEM + '/compute-elements'
URI_COMPUTE_SYSTEM_DEREGISTER = URI_COMPUTE_SYSTEM + '/deregister'
URI_COMPUTE_SYSTEM_DISCOVER = URI_COMPUTE_SYSTEM + '/discover'
URI_COMPUTE_IMAGESERVERS = URI_SERVICES_BASE + '/compute/imageservers'
URI_COMPUTE_IMAGESERVER = URI_COMPUTE_IMAGESERVERS + '/{0}'
URI_COMPUTE_IMAGES = URI_SERVICES_BASE + '/compute/images'
URI_COMPUTE_IMAGE = URI_COMPUTE_IMAGES + '/{0}'
URI_COMPUTE_VIRTUAL_POOLS = URI_SERVICES_BASE + '/compute/vpools'
URI_COMPUTE_VIRTUAL_POOL = URI_COMPUTE_VIRTUAL_POOLS + '/{0}'
URI_COMPUTE_VIRTUAL_POOL_ASSIGN = URI_COMPUTE_VIRTUAL_POOL + '/assign-matched-elements'
OBJCTRL_INSECURE_PORT = '9010'
OBJCTRL_PORT = '4443'
S3_INSECURE_PORT = '9020'
S3_PORT = '9021'
ATMOS_INSECURE_PORT = '9022'
ATMOS_PORT = '9023'
SWIFT_INSECURE_PORT = '9024'
SWIFT_PORT = '9025'
GEO_PORT = '9096'
GEO_INSECURE_PORT = '9096'
URI_KICKSTART = URI_SERVICES_BASE + '/kickstart'
URI_WHOAMI = URI_SERVICES_BASE + '/user/whoami'
URI_OBJECT_PROPERTIES = URI_SERVICES_BASE + '/config/object/properties'
URI_PROXY_TOKEN = URI_SERVICES_BASE + '/proxytoken'
URI_STORAGEPORTGROUPS = URI_STORAGEDEVICE + '/storage-port-groups'
URI_STORAGEPORTGROUP = URI_STORAGEPORTGROUPS + '/{1}'
URI_STORAGEPORTGROUP_REGISTER = URI_STORAGEPORTGROUP + '/register'
URI_STORAGEPORTGROUP_DEREGISTER = URI_STORAGEPORTGROUP + '/deregister'
URI_STORAGEPORTGROUP_DELETE = URI_STORAGEPORTGROUP + '/deactivate'
PROD_NAME = 'storageos'
TENANT_PROVIDER = 'urn:storageos:TenantOrg:provider:'
API_SYNC_TIMEOUT = os.getenv('BOURNE_API_SYNC_TIMEOUT', 120000)
USE_SSL = os.getenv('BOURNE_USE_SSL', 1)
PORT = os.getenv('BOURNE_PORT', '4443')
BOURNE_DEBUG = os.getenv('BOURNE_DEBUG', 0)
FILE_ACCESS_MODE_HEADER = "x-emc-file-access-mode"
FILE_ACCESS_DURATION_HEADER = "x-emc-file-access-duration"
HOST_LIST_HEADER = "x-emc-file-access-host-list"
USER_HEADER = "x-emc-file-access-uid"
TOKEN_HEADER = "x-emc-file-access-token"
START_TOKEN_HEADER = "x-emc-file-access-start-token"
END_TOKEN_HEADER = "x-emc-file-access-end-token"
FILE_ACCESS_PRESERVE_DIR_STRUCTURE_HEADER = "x-emc-file-access-preserve-directory-structure"
SKIP_SECURITY = os.getenv('BOURNE_SECURITY_DISABLED', 0)
SWIFT_AUTH_TOKEN = 'X-Auth-Token'
SWIFT_AUTH_USER = 'X-Auth-User'
SWIFT_AUTH_KEY = 'X-Auth-Key'
SWIFT_DELETE_AT = 'X-Delete-At'
SWIFT_COPY_FROM = 'X-Copy-From'
SWIFT_DELETE_AFTER = 'X-Delete-After'
SWIFT_X_CONTAINER_READ = "X-Container-Read"
SWIFT_X_CONTAINER_WRITE = "X-Container-Write"
HTTP_OK = 200
HTTP_NO_CONTENT = 204
HTTP_NOT_FOUND=404
S3_XML_NS = 'http://s3.amazonaws.com/doc/2006-03-01/'
OPENSTACK_XML_NS = "http://docs.openstack.org/identity/api/v2.0"
SEC_REDIRECT = 302
SEC_TOKEN_FILE = os.getenv('BOURNE_TOKEN_FILE', 'token.txt')
SEC_AUTHTOKEN_HEADER = 'X-SDS-AUTH-TOKEN'
SEC_PROXYTOKEN_HEADER = 'X-SDS-AUTH-PROXY-TOKEN'
PROXY_USER_NAME = 'proxyuser'
PROXY_USER_PASSWORD = 'ChangeMe1!'
COOKIE_FILE = os.getenv('BOURNE_COOKIE_FILE', 'cookiejar')
# Number of seconds a request should wait for response.
# It only affects the connection process itself, not the downloading of the response body
REQUEST_TIMEOUT_SECONDS = 120
# Total time for server reconnection
MAX_WAIT_TIME_IN_SECONDS=480
CONTENT_TYPE_JSON='application/json'
CONTENT_TYPE_XML='application/xml'
CONTENT_TYPE_OCTET='application/octet-stream'
LB_GUI_PORT = '443'
LB_API_PORT = '4443'
APISVC_PORT = '8443'
_headers = {'Content-Type': 'application/json', 'ACCEPT': 'application/json,text/html,application/octet-stream'}
_ipaddr = None
_port = LB_API_PORT
class ServiceNotAvailableError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
# Define the exceptions
class LoginError(Exception):
def __init__(self, msg, code):
self.msg = msg
self.code = code
class Bourne:
_DEFAULT_HEADERS = { 'Content-Type': 'application/json',
'ACCEPT': 'application/json,application/xml,text/html,application/octet-stream' }
def __init__(self):
self._reset_headers()
def _reset_headers(self):
self._headers = copy.deepcopy(Bourne._DEFAULT_HEADERS)
# decorator to reset headers to default
# use this if an api changes _headers
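    # e.g. (illustrative) decorating an instance method defined later in this class:
    #   @resetHeaders
    #   def some_api_call(self, ...):
    #       ...   # _headers is restored to the defaults when the call returns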
def resetHeaders(func):
def inner_func(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
finally:
self._reset_headers()
return inner_func
# This method is responsible for setting the ip address and port that will be
# used to connect to the backend storageos services.
# Need to be a bit smarter when it comes to figuring out what port to set.
# There are 3 situations we need to test for:
# 1. the port parameter is 4443 and the BOURNE_PORT env. variable is not set
# 2. the port parameter is 4443 and the BOURNE_PORT env. variable is set
# 3. the port parameter is not 4443
# Cases #1 & #2 are mainly about connecting to the APISvc. If the BOURNE_PORT env.
# variable is set, then use the BOURNE_PORT variable. Case #3 is about
# connecting to something other than the APISvc. In that case, just use the supplied
# port parameter.
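    # Illustrative only (the hostname below is hypothetical):
    #   bourne = Bourne()
    #   bourne.connect('coprhd.example.com')        # cases 1/2: BOURNE_PORT if set, else 4443
    #   bourne.connect('coprhd.example.com', 8443)  # case 3: a non-4443 port is used as supplied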
def connect(self, ipaddr, port = LB_API_PORT):
self._ipaddr = ipaddr
if str(port) == LB_API_PORT:
if len(PORT) > 0:
self._port = PORT
else:
self._port = port
else:
self._port = port
def test_firewall(self, ipaddr):
testfirewall=os.getenv('TEST_APPLIANCE', 'yes')
if (testfirewall == 'yes'):
self.test_firewall_port(ipaddr, 9160, False)
self.test_firewall_port(ipaddr, 9083, True)
self.test_firewall_port(ipaddr, 9998, True)
# helper function to test that the provided port is open or closed.
# If the toBeOpen parameter is True, this function will throw an exception if that port is closed by firewall.
# If the toBeOpen parameter is False, this function will throw an exception if that port is open by firewall.
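    # For instance, test_firewall above expects test_firewall_port(ipaddr, 9160, False) to find the
    # port blocked by the firewall, and test_firewall_port(ipaddr, 9083, True) to find it reachable.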
def test_firewall_port(self, ipaddr, port, toBeOpen):
fwMessage='To disable firewall test, use the environment variable TEST_APPLIANCE=no: \n' + \
' TEST_APPLIANCE=no ./sanity <bourne_ip> security'
throwException=False
errorMessage=""
ipaddrLocal = re.sub('[\[\]]', '',ipaddr)
try:
timeout=5
session = telnetlib.Telnet(ipaddrLocal, port, timeout)
if (not (toBeOpen)):
errorMessage="The following port is open, but shoud be closed by firewall: " + str(port)
throwException=True
except:
if (toBeOpen):
errorMessage="The following port is closed, but shoud be open by firewall: " + str(port)
throwException=True
if(throwException):
print (errorMessage)
print (fwMessage)
raise Exception(errorMessage)
#
# This function handles the login request flow when a request is submitted directly to the APISvc. That means a request
# has been sent directly to: https://[host]:8443/path/to/svc?[params]. In this situation, the APISvc has a servlet
    # filter that examines the request's header for an authentication token. If the token is not found, the request is
    # redirected to the Authsvc for authorization/authentication. This function follows the steps required to properly
# authenticate the User so that their original APISvc request can continue.
#
# param url - the URL to submit to the backend storageos services.
# param user - username to log in with
# param password - the password to log in with
# param cookiejar - structure to store cookie information sent from the backend storageos services.
# return - the response structure after the User has been properly authenticated.
# exception - will be thrown if any errors occur during the login process.
#
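    # Rough shape of the exchange this method walks through when authentication is needed
    # (URLs are illustrative):
    #   GET <url>                        -> 302, Location header points at the authsvc
    #   GET <Location>                   -> 401, credentials are requested
    #   GET <Location> with basic auth   -> 302, the user is authenticated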
def _apisvc_login(self, url, user, password, cookiejar):
login_response = requests.get(url, headers=self._headers, verify=False, cookies=cookiejar, allow_redirects=False)
# If we get a redirect we are working with a non load-balanced environment. The original behaviour was to send
# the /login request to the APISvc. A servlet filter in front of that service would capture that /login request and
# redirect the request to the AuthSvc.
if(login_response.status_code == SEC_REDIRECT):
# Pull the requested location from the header
location = login_response.headers['Location']
# Header doesn't contain original request location, throw error
if(not location):
raise LoginError('The redirect location of the authentication service is not provided', login_response.status_code)
# Make the second request
# The expectation is that the second request will go to the authsvc, which will then prompt us for our credentials ( HTTP 401 )
login_response = requests.get(location, headers=self._headers, verify=False, cookies=cookiejar, allow_redirects=False)
if(not login_response.status_code == requests.codes['ok']):
if(not login_response.status_code == requests.codes['unauthorized']):
print 'ERROR: The authentication service failed to ask for credentials (401) to authenticate the user'
raise LoginError('The authentication service failed to reply with 401', login_response.status_code)
# We got the expected HTTP 401 response code, now provide the credentials
# Again, the request to /login should get redirected to the AuthSvc
login_response = requests.get(location, headers=self._headers, auth=(user,password), verify=False, cookies=cookiejar, allow_redirects=False)
if(not login_response.status_code == SEC_REDIRECT):
raise LoginError('The authentication service failed to authenticate the user', login_response.status_code)
# If we don't get a 302 ( redirect ) after the first request to /login
# then something is wrong. Report the HTTP response code to the User and throw an error.
# This will stop the execution.
else:
message = '''ERROR: The first request to login page does not redirect/forwarded to the authentication service.
ERROR (cont): Expecting HTTP 302 status code
ERROR (cont): Are you running against an appliance with security disabled?
ERROR (cont): Try with setting BOURNE_SECURITY_DISABLED set to 1.'''
raise LoginError(message, login_response.status_code)
return login_response
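    # Illustrative summary of the non load-balanced flow above (added for clarity, not
    # part of the original code): GET /login -> 302 redirect to the AuthSvc -> GET the
    # redirect location -> 401 challenge -> GET again with basic-auth credentials ->
    # 302, after which login() reads the auth token from the response headers.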
#
    # This function handles the login request flow when a request is submitted through a Load-Balancer. That means a request
    # has been sent to: https://[host]:4443/path/to/svc?[params]. In this situation, the Load-Balancer forwards the request off
    # to the APISvc. The APISvc has a security servlet filter that examines the request's headers for an authentication token.
    # If the token is not found, the request is redirected to the Authsvc ( via the Load-Balancer ) for authorization/authentication.
    # This function follows the steps required to properly authenticate the User so that their original APISvc request can continue.
#
# param url - the URL to submit to the backend storageos services.
# param user - username to log in with
# param password - the password to log in with
# param cookiejar - structure to store cookie information sent from the backend storageos services.
# return - the response structure after the User has been properly authenticated.
# exception - will be thrown if any errors occur during the login process.
#
def _lb_login(self, url, user, password, cookiejar):
login_response = requests.get(url, headers=self._headers, verify=False, cookies=cookiejar, allow_redirects=False)
# If we make a request and we get a 401, that means we are running within a Load-Balanced deployment.
# Reason we know this: because the load-balancer is configured to forward all requests to /login context
# directly to the AuthSvc. This bypasses the redirection in a non load-balanced configuration.
if(login_response.status_code == requests.codes['unauthorized']):
# Now provide the credentials
login_response = requests.get(url, headers=self._headers, auth=(user,password), verify=False, cookies=cookiejar, allow_redirects=False)
# If we don't get a 401 ( unauthorized ) after the first request to /login
# then something is wrong. Report the HTTP response code to the User and throw an error.
# This will stop the execution.
else:
message = '''ERROR: The first request to login page does not redirect/forwarded to the authentication service.
ERROR (cont): Expecting HTTP 401 status code
ERROR (cont): Are you running against an appliance with security disabled?
ERROR (cont): Try with setting BOURNE_SECURITY_DISABLED set to 1.'''
raise LoginError(message, login_response.status_code)
if(login_response.status_code != requests.codes['ok']):
raise LoginError(' Error: '+login_response.text, login_response.status_code)
return login_response
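    # Illustrative summary of the load-balanced flow above (added for clarity, not part
    # of the original code): the load-balancer sends /login straight to the AuthSvc, so
    # the first GET returns 401 immediately; a second GET with basic-auth credentials is
    # then expected to return 200, and login() reads the auth token from its headers.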
#
#
#
def login(self, user, password):
if SKIP_SECURITY == '1':
return
self._reset_headers()
scheme = 'https://'
ipaddr = self._ipaddr
port=PORT
cookiejar = cookielib.LWPCookieJar()
if USE_SSL == '0':
return
loginURL = scheme+ipaddr+':'+port+'/login'
startTime = time.time()
while True:
try:
if(port == APISVC_PORT):
login_response = self._apisvc_login(loginURL, user, password, cookiejar)
elif(port == LB_API_PORT):
login_response = self._lb_login(loginURL, user, password, cookiejar)
else:
print 'ERROR: Invalid port specified (port = ' + str(port) + ')'
raise Exception('Invalid port: ' + str(port) + '.')
break
except LoginError as e:
if(e.code <=500 or e.code >= 600):
print 'Login failed with non-retryable error'
raise
elif(time.time()>(startTime+MAX_WAIT_TIME_IN_SECONDS)):
print 'Login retry timed out'
raise
else:
print 'The login failed with code: ' + repr(e.code) +' Retrying . . .'
time.sleep(5)
authToken = login_response.headers[SEC_AUTHTOKEN_HEADER]
if (not authToken):
raise Exception('The token is not generated by authentication service')
newHeaders = self._headers
newHeaders[SEC_AUTHTOKEN_HEADER] = authToken
# Make the final call to get the page with the token
login_response = requests.get(loginURL, headers=newHeaders, verify=False, cookies=cookiejar, allow_redirects=False)
if(login_response.status_code != requests.codes['ok']):
raise Exception('Login failure code: ' + str(login_response.status_code) + ' Error: ' + login_response.text)
# Token file handling
if (os.path.exists(SEC_TOKEN_FILE)):
os.remove(SEC_TOKEN_FILE)
tokenFile = open(SEC_TOKEN_FILE , "w")
tokenFile.write(authToken)
tokenFile.close()
# Cookie handling
for cookie in login_response.cookies:
cookiejar.set_cookie(cookie)
if(os.path.exists(COOKIE_FILE)):
os.remove(COOKIE_FILE)
cookiejar.save(COOKIE_FILE, ignore_discard=True, ignore_expires=True);
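    # Note (added for clarity): a successful login() persists the auth token to
    # SEC_TOKEN_FILE and the session cookies to COOKIE_FILE; update_headers() and
    # __run_request() reload them on every subsequent call, so later requests can
    # authenticate without logging in again.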
def pretty_print_json(self, jsonObj):
print json.dumps(jsonObj, sort_keys=True, indent=4)
def pretty_print_xml(self, etNode):
#reader = Sax2.Reader()
#docNode = reader.fromString(ElementTree.tostring(etNode))
#tmpStream = StringIO()
#PrettyPrint(docNode, stream=tmpStream)
#print tmpStream.getvalue()
print ET.tostring(etNode)
    # req_timeout: Number of seconds a request should wait for a response. It only affects the connection process itself, not the downloading of the response body
def __run_request(self, method, uri, body, req_timeout, headers=None):
scheme = 'https://'
ipaddr = self._ipaddr
cookies=None
port = str(self._port)
if uri == URI_KICKSTART:
scheme = 'https://'
port = '6443'
elif USE_SSL == '0':
scheme = 'http://'
if port == OBJCTRL_PORT:
port = OBJCTRL_INSECURE_PORT
elif port == S3_PORT:
port = S3_INSECURE_PORT
elif port == ATMOS_PORT:
port = ATMOS_INSECURE_PORT
elif port == SWIFT_PORT:
port = SWIFT_INSECURE_PORT
elif port == GEO_PORT:
port = GEO_INSECURE_PORT
else:
port = '8080'
# HACK, to remove
if port == GEO_PORT:
scheme = 'http://'
url = scheme+ipaddr+':'+port+uri
cookiejar = cookielib.LWPCookieJar()
if SKIP_SECURITY != '1':
if (not os.path.exists(COOKIE_FILE)):
raise Exception(COOKIE_FILE + ' : Cookie not found : Please authenticate user')
if (not os.path.isfile(COOKIE_FILE)):
raise Exception(COOKIE_FILE + ' : Not a cookie file')
cookiejar.load(COOKIE_FILE, ignore_discard=True, ignore_expires=True)
newHeaders = copy.deepcopy(self._headers)
if(headers != None):
for hdr in headers.iterkeys():
newHeaders[hdr] = headers[hdr]
newHeaders = self.update_headers(newHeaders)
startTime = time.time()
response=None
while True:
try:
if method == 'POST':
if(BOURNE_DEBUG == '1'):
print 'POST: ' + url
if (body is not None):
try:
self.pretty_print_json(cjson.decode(body))
except:
try:
print 'Body: ' + body;
except:
print 'No Body'
else:
print 'No Body'
response = requests.post(url,data=body,headers=newHeaders, verify=False, cookies=cookiejar, timeout=req_timeout)
elif method == 'PUT':
if(BOURNE_DEBUG == '1'):
print 'PUT: ' + url
try:
self.pretty_print_json(cjson.decode(body))
except:
if (body):
print 'Body: ' + body;
response = requests.put(url,data=body,headers=newHeaders, verify=False, cookies=cookiejar, timeout=req_timeout)
elif method == 'DELETE':
if(BOURNE_DEBUG == '1'):
print 'DELETE: ' + url
response = requests.delete(url,headers=newHeaders,verify=False, cookies=cookiejar, timeout=req_timeout)
elif method == 'HEAD':
if(BOURNE_DEBUG == '1'):
print 'HEAD: ' + url
response = requests.head(url,headers=newHeaders,verify=False,cookies=cookiejar, timeout=req_timeout)
elif method == 'OPTIONS':
if(BOURNE_DEBUG == '1'):
print 'OPTIONS: ' + url
response = requests.options(url,headers=newHeaders,verify=False,cookies=cookiejar, timeout=req_timeout)
elif method == 'GET':
if(BOURNE_DEBUG == '1'):
print 'GET ' + url
print 'Headers', newHeaders
response = requests.get(url,headers=newHeaders,verify=False, cookies=cookiejar, timeout=req_timeout)
elif method == 'GET-stream':
if(BOURNE_DEBUG == '1'):
print 'GET ' + url
print 'Headers', newHeaders
response = requests.get(url,headers=newHeaders,verify=False, cookies=cookiejar, timeout=req_timeout, stream=True)
else:
raise Exception("Unsupported method:", method)
if BOURNE_DEBUG == '1':
try:
print 'Headers: ', newHeaders
print 'Response code ' + str(response.status_code)
print 'Response '
self.pretty_print_json(cjson.decode(response.text))
except:
print 'Exception printing debug output'
except requests.exceptions.Timeout:
# continue with retry
if ((startTime+MAX_WAIT_TIME_IN_SECONDS)<time.time()):
raise
else:
time.sleep(3)
continue
except:
raise
if response.status_code<=500 or response.status_code>=600 or (startTime + MAX_WAIT_TIME_IN_SECONDS) < time.time():
return response
else:
print 'The return status is ' + str(response.status_code) + ', retry'
time.sleep(3)
def __json_decode(self, rsp):
if (not rsp):
print 'empty rsp'
return ''
try:
return cjson.decode(str(rsp))
except:
raise Exception('Response JSON decode failure. RSP = ' + rsp)
def __xml_decode(self, rsp):
if not rsp:
return ''
try:
return ET.fromstring(str(rsp))
except:
raise Exception('Response XML decode failure. Response= '+rsp)
def update_headers(self, currentHeaders):
if SKIP_SECURITY == '1':
return currentHeaders
# Get the saved token
if (not os.path.exists(SEC_TOKEN_FILE)):
raise Exception(SEC_TOKEN_FILE + ' : Token file not found : Please authenticate user')
if (not os.path.isfile(SEC_TOKEN_FILE)):
raise Exception(SEC_TOKEN_FILE + ' : The token.txt is not a regular file')
tokenFile = open(SEC_TOKEN_FILE , "r")
authToken = tokenFile.read()
if (not authToken):
raise Exception('Failed to get the saved token from token.txt')
newHeaders = currentHeaders
newHeaders[SEC_AUTHTOKEN_HEADER] = authToken
return newHeaders
def __op_status(self, obj, opid):
status = obj['operationStatus']
if (opid in status):
return status.get(opid).get('status')
raise Exception('operation status not found for ' + opid + ' ' + str(status))
    # This method calls the REST API and returns the response received, if it is successful. It retries upon failure to connect to the server.
    # Retry Logic: when the request takes more than REQUEST_TIMEOUT_SECONDS, or when the node/service is down, a Timeout or ConnectionError is raised respectively. When these exceptions are caught, the method sleeps for REQUEST_TIMEOUT_SECONDS (if that time has not already elapsed) and retries until MAX_WAIT_TIME_IN_SECONDS is reached. If no response is received in this time, it raises the exception.
def api_check_success(self, method, uri, parms = None, qparms = None, content_type=CONTENT_TYPE_JSON, accept=CONTENT_TYPE_JSON, req_timeout=REQUEST_TIMEOUT_SECONDS):
message_timer=0
start_time = time.time()
while True:
try:
# get current time in seconds
request_time=time.time()
return self.__api_check_success(method, uri, parms, qparms, content_type, accept, REQUEST_TIMEOUT_SECONDS)
except (requests.ConnectionError, requests.Timeout, requests.exceptions.ConnectionError, requests.exceptions.Timeout, ServiceNotAvailableError) as e:
# get elapsed time by subtracting the original request time in seconds from current time in seconds
if time.time() < request_time + REQUEST_TIMEOUT_SECONDS:
time.sleep(REQUEST_TIMEOUT_SECONDS)
                # add the elapsed time to message_timer
message_timer += (time.time() - request_time)
if time.time() > start_time + MAX_WAIT_TIME_IN_SECONDS:
print("re-throwing the exception since we have gone through allocated time")
raise
else:
# only show messages every 30 seconds.
if message_timer > 30:
print("Connection error while making request: " + str(e)+". Retrying...")
# zero out the message timer
message_timer=0
continue
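    # Illustrative note on the retry loop above (added for clarity): a connection
    # failure sleeps for REQUEST_TIMEOUT_SECONDS (unless that much time has already
    # passed), prints a progress message at most every 30 seconds, and keeps retrying
    # until MAX_WAIT_TIME_IN_SECONDS has elapsed, at which point the last exception
    # is re-raised.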
# content_type: The MIME type of the body of the request. Ex:application/xml,application/json,application/octet-stream
# accept: Content types that are acceptable for response. Ex:application/xml,application/json,application/octet-stream
    # req_timeout: Number of seconds a request should wait for a response. It only affects the connection process itself, not the downloading of the response body
def __api_check_success(self, method, uri, parms = None, qparms = None, content_type=CONTENT_TYPE_JSON, accept=CONTENT_TYPE_JSON, req_timeout=REQUEST_TIMEOUT_SECONDS):
response = self.__api(method, uri, parms, qparms, content_type=content_type, accept=accept, req_timeout=req_timeout)
if (response.status_code != 200 and response.status_code != 202):
print response.status_code, response.reason
print response.text
raise Exception("Request is not successful: "+ method + " " + uri)
try:
if method == 'GET-stream':
return response
if accept == CONTENT_TYPE_JSON:
return self.__json_decode(response.text)
elif accept == CONTENT_TYPE_XML:
return self.__xml_decode(response.text)
else:
return response
except:
            raise Exception('Unable to decode response: ' + response.text)
def __api(self, method, uri, parms = None, qparms = None, content_type=CONTENT_TYPE_JSON, accept=CONTENT_TYPE_JSON, req_timeout = REQUEST_TIMEOUT_SECONDS, headers = None):
body = None
if (parms and content_type==CONTENT_TYPE_JSON):
body = cjson.encode(parms)
else:
body = parms
if (qparms):
if( '?' in uri ):
first = False
else:
uri += "?"
first = True
for qk in qparms.iterkeys():
if (not first):
uri += '&'
uri += qk
else:
first = False
uri += qk
if (qparms[qk] != None):
uri += '=' + qparms[qk]
if(content_type==None):
del self._headers['Content-Type']
else:
self._headers['Content-Type'] = content_type
self._headers['ACCEPT'] = accept
return self.__run_request(method, uri, body, req_timeout=req_timeout, headers=headers)
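    # Illustrative note on the query-string handling in __api() above (added for
    # clarity; the parameter names are hypothetical): an entry in qparms with a value
    # is appended as 'key=value', while an entry whose value is None is appended as a
    # bare 'key', e.g.
    #   qparms = {'project': puri}   ->  ...?project=<puri>
    #   qparms = {'verbose': None}   ->  ...?verbose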
    # The headers param explicitly sets the HTTP request headers for just the current
    # API call; when they conflict with the default headers, they take precedence.
def coreapi(self, method, uri, parms = None, qparms = None, user = None, content_type = CONTENT_TYPE_JSON, headers = None):
return self.__api(method, uri, parms, qparms, content_type=content_type, accept=content_type, req_timeout=120, headers=headers)
def api(self, method, uri, parms = None, qparms = None, content_type = CONTENT_TYPE_JSON, req_timeout = REQUEST_TIMEOUT_SECONDS):
response = self.__api(method, uri, parms, qparms, content_type=content_type, accept=content_type, req_timeout=req_timeout)
h = response.headers
ctype = h.get('content-type',"")
if ctype == CONTENT_TYPE_OCTET:
return response.content
try:
return self.__json_decode(response.text)
except:
return response.text
def api_check_error(self, method, uri, status_code, service_code, message, parms = None, qparms = None):
response = self.__api(method, uri, parms, qparms)
if (int(response.status_code) != status_code):
raise Exception("Unexpected HTTP status: expected %d, actual %d" % (status_code, response.status_code))
error = self.__json_decode(response.text)
print error
if (error["code"] != service_code):
raise Exception("Unexpected ServiceCode: Expected: %d. Actual: %d." % (service_code, error["code"]))
actualMessage = None
if ("details" in error):
actualMessage = error["details"]
elif ("description" in error):
actualMessage = error["description"]
if(actualMessage != message):
raise Exception("Unexpected ServiceCode detail: Expected: %s. Actual: %s." % (message, actualMessage))
def api_sync(self, id, op, showfn, ignore_error=False):
obj = showfn(id)
tmo = 0
while (self.__op_status(obj, op) == 'pending'):
time.sleep(1)
obj = showfn(id)
tmo += 1
if (tmo > API_SYNC_TIMEOUT):
break
if (self.__op_status(obj, op) == 'pending'):
raise Exception('Timed out waiting for request in pending state: ' + op)
if (self.__op_status(obj, op) == 'error' and not ignore_error):
raise Exception('There was an error encountered: ' + str(op))
return self.__op_status(obj, op)
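    # Illustrative note on api_sync() above (added for clarity): it re-fetches the
    # object once per second via the supplied show function, waits for
    # obj['operationStatus'][op]['status'] to leave 'pending' (or for API_SYNC_TIMEOUT
    # to expire), raises on a timeout or an 'error' status (unless ignore_error is
    # set), and returns the final status string.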
def api_sync_2(self, id, op, show_opfn, ignore_error=False):
tmo = 0
while True:
try:
obj_op = show_opfn(id, op)
if (obj_op['state'] != 'pending' and obj_op['state'] != 'queued'):
break
except requests.exceptions.ConnectionError:
print "ConnectionError received"
tmo += 3
if (tmo > API_SYNC_TIMEOUT):
break
time.sleep(3)
if (type(obj_op) is dict):
if (obj_op['state'] == 'pending'):
raise Exception('Timed out waiting for request in pending state: ' + op)
if (obj_op['state'] == 'queued'):
raise Exception('Timed out waiting for request in queued state: ' + op)
if (obj_op['state'] == 'error' and not ignore_error):
self.pretty_print_json(obj_op)
raise Exception('There was an error encountered:\n' + json.dumps(obj_op, sort_keys=True, indent=4))
if (obj_op['state'] == 'suspended_no_error'):
# Important to not change this format as sanity and other scripts rely on it in this order with commas
print 'Operation suspended, ' + obj_op['id'] + ", " + obj_op['workflow']['id']
return obj_op
#
# Handles the case where a URI requires two ID parameters, for example:
# /block/volumes/<pid>/protection/mirrors/<sid>
# pid = Primary ID
# sid = Secondary ID
#
def api_sync_3(self, pid, sid, op, show_opfn, ignore_error=False):
obj_op = show_opfn(pid, sid, op)
tmo = 0
while (obj_op['state'] == 'pending'):
time.sleep(3)
obj_op = show_opfn(pid, sid, op)
tmo += 3
if (tmo > API_SYNC_TIMEOUT):
break
if (type(obj_op) is dict):
print str(obj_op)
if (obj_op['state'] == 'pending'):
raise Exception('Timed out waiting for request in pending state: ' + op)
if (obj_op['state'] == 'error' and not ignore_error):
raise Exception('There was an error encountered:\n' + json.dumps(obj_op, sort_keys=True, indent=4))
return obj_op
#
    # Handles looping over a task object. If the task is suspended, we will loop waiting
    # for it to come out of suspended. It has a short trigger since some tests go from one
    # suspended state to another, and the state transitions happen too fast for this method
    # to detect. So don't wait for more than about 30 seconds. If we return the same suspended
    # state in the test case, the test will fail down the road anyway.
def api_sync_4(self, id, showfn, ignore_error=False):
obj_op = showfn(id)
tmo = 0
seen_pending = 0
while (obj_op['state'] == 'pending' or obj_op['state'] == 'suspended_no_error' or obj_op['state'] == 'queued'):
time.sleep(1)
if (obj_op['state'] == 'pending'):
seen_pending = 1;
tmo = 0;
if (obj_op['state'] == 'suspended_no_error' and seen_pending == 1):
break
obj_op = showfn(id)
tmo += 1
if (tmo > API_SYNC_TIMEOUT or tmo > 30):
break
if (type(obj_op) is dict):
print str(obj_op)
if (obj_op['state'] == 'pending'):
                raise Exception('Timed out waiting for request in pending state: ' + str(id))
if (obj_op['state'] == 'queued'):
                raise Exception('Timed out waiting for request in queued state: ' + str(id))
if (obj_op['state'] == 'error' and not ignore_error):
raise Exception('There was an error encountered:\n' + json.dumps(obj_op, sort_keys=True, indent=4))
return obj_op
def __is_uri(self, name):
try:
(urn, prod, trailer) = name.split(':', 2)
return (urn == 'urn' and prod == PROD_NAME)
except:
return False
#
    # Encode a list of 'key=value' strings as a map
#
def __encode_map(self, stringmap):
entry = dict()
for mapentry in stringmap:
(key, value) = mapentry.split('=', 1)
entry[key] = value
return entry
#
    # Encode a list of 'name=value' strings as a list of {'name': ..., 'value': ...} entries
#
def __encode_list(self,stringmap):
entry = list();
for mapentry in stringmap:
(name, value) = mapentry.split('=', 1)
entry.append({ 'name' : name,
'value' : value })
return entry
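    # Illustrative example of the two encoders above (added for clarity; the values
    # are hypothetical):
    #   __encode_map(['protocol=NFS', 'tier=gold'])  -> {'protocol': 'NFS', 'tier': 'gold'}
    #   __encode_list(['protocol=NFS'])              -> [{'name': 'protocol', 'value': 'NFS'}]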
def blockcos_bulkgetids(self):
ids = self.__blockcos_bulkget_ids()
# retrieve the first 10 ids only
chunk = self.get_ids_chunk(ids['id'], 0, 10)
return chunk
def blockcos_bulkpost(self, ids):
return self.__blockcos_bulkget_reps(ids)
def __blockcos_bulkget_ids(self):
return self.api('GET', URI_BLOCKVPOOLS_BULKGET)
def __blockcos_bulkget_reps(self, ids):
return self.api('POST', URI_BLOCKVPOOLS_BULKGET, ids)
def filecos_bulkgetids(self):
ids = self.__filecos_bulkget_ids()
# retrieve the first 10 ids only
chunk = self.get_ids_chunk(ids['id'], 0, 10)
return chunk
def filecos_bulkpost(self, ids):
return self.__filecos_bulkget_reps(ids)
def __filecos_bulkget_ids(self):
return self.api('GET', URI_FILEVPOOLS_BULKGET)
def __filecos_bulkget_reps(self, ids):
return self.api('POST', URI_FILEVPOOLS_BULKGET, ids)
def smisprovider_bulkgetids(self):
ids = self.__smisprovider_bulkget_ids()
# retrieve the first 10 ids only
chunk = self.get_ids_chunk(ids['id'], 0, 10)
return chunk
def smisprovider_bulkpost(self, ids):
return self.__smisprovider_bulkget_reps(ids)
def __smisprovider_bulkget_ids(self):
return self.api('GET', URI_SMISPROVIDER_BULKGET)
def __smisprovider_bulkget_reps(self, ids):
return self.api('POST', URI_SMISPROVIDER_BULKGET, ids)
def blocksnapshot_bulkgetids(self):
ids = self.__blocksnapshot_bulkget_ids()
# retrieve the first 10 ids only
chunk = self.get_ids_chunk(ids['id'], 0, 10)
return chunk
def blocksnapshot_bulkpost(self, ids):
return self.__blocksnapshot_bulkget_reps(ids)
def __blocksnapshot_bulkget_ids(self):
return self.api('GET', URI_BLOCKSNAPSHOT_BULKGET)
def __blocksnapshot_bulkget_reps(self, ids):
return self.api('POST', URI_BLOCKSNAPSHOT_BULKGET, ids)
def filesnapshot_bulkgetids(self):
ids = self.__filesnapshot_bulkget_ids()
# retrieve the first 10 ids only
chunk = self.get_ids_chunk(ids['id'], 0, 10)
return chunk
def filesnapshot_bulkpost(self, ids):
return self.__filesnapshot_bulkget_reps(ids)
def __filesnapshot_bulkget_ids(self):
return self.api('GET', URI_FILESNAPSHOT_BULKGET)
def __filesnapshot_bulkget_reps(self, ids):
return self.api('POST', URI_FILESNAPSHOT_BULKGET, ids)
def exportgroup_bulkgetids(self):
ids = self.__exportgroup_bulkget_ids()
# retrieve the first 10 ids only
chunk = self.get_ids_chunk(ids['id'], 0, 10)
return chunk
def exportgroup_bulkpost(self, ids):
return self.__exportgroup_bulkget_reps(ids)
def __exportgroup_bulkget_ids(self):
return self.api('GET', URI_EXPORTGROUP_BULKGET)
def __exportgroup_bulkget_reps(self, ids):
return self.api('POST', URI_EXPORTGROUP_BULKGET, ids)
def update_chunkinfo(self, primaryZone, message):
self._headers['x-emc-primaryzone'] = primaryZone
self._port = GEO_PORT
return self.coreapi('POST', URI_CHUNKINFO, message, None, content_type=CONTENT_TYPE_OCTET)
@resetHeaders
def send_chunkdata(self, chunkId, primaryZone, secondaryZone, repGroup, data):
if len(chunkId) != 36:
raise Exception('wrong chunkId format (must be uuid) ' + chunkId)
all_data = ""
for d in data:
d = self.getDataValueFromCli(d)
d = self._addChecksum(d, chunkId)
all_data += d
length = len(all_data)
self._headers['x-emc-primaryzone'] = primaryZone
self._headers['x-emc-secondaryzone'] = secondaryZone
self._headers['x-emc-dataservice-vpool'] = repGroup
self._headers['x-emc-chunklength'] = str(length)
# TODO: just REPO
self._headers['x-emc-chunk-datatype'] = "0"
self._port = GEO_PORT
return self.coreapi('POST', URI_CHUNKDATA.format(chunkId), all_data, None, content_type=CONTENT_TYPE_OCTET)
@resetHeaders
def delete_chunkdata(self, chunkId, repGroup):
self._headers['x-emc-dataservice-vpool'] = repGroup
self._port = GEO_PORT
return self.coreapi('DELETE', URI_CHUNKDATA.format(chunkId), None, None, content_type=CONTENT_TYPE_OCTET)
def repgroup_create(self, repGrpId, name, cos_list, isAllowAllNamespaces):
parms = dict()
parms['id'] = repGrpId
parms['name'] = name
parms['description'] = name
parms['isAllowAllNamespaces'] = isAllowAllNamespaces
parms['zone_mappings'] = []
for cos in cos_list.split(','):
pair = cos.split('.')
zone_uuid = self.vdcinfo_query(pair[0])
cos_uuid = self.neighborhood_query(pair[1])
parms['zone_mappings'].append({"name" : zone_uuid, "value" : cos_uuid})
return self.coreapi('POST', URI_REPLICATION_GROUPS, parms)
def repgroup_add(self, repGrpId, cos_list):
parms = dict()
parms['mappings'] = []
for cos in cos_list.split(','):
pair = cos.split('.')
zone_uuid = self.vdcinfo_query(pair[0])
cos_uuid = self.neighborhood_query(pair[1])
parms['mappings'].append({"name" : zone_uuid, "value" : cos_uuid})
return self.coreapi('PUT', URI_REPLICATION_EXTEND.format(repGrpId), parms)
def repgroup_remove(self, repGrpId, cos_list):
parms = dict()
parms['mappings'] = []
for cos in cos_list.split(','):
pair = cos.split('.')
zone_uuid = self.vdcinfo_query(pair[0])
cos_uuid = self.neighborhood_query(pair[1])
parms['mappings'].append({"name" : zone_uuid, "value" : cos_uuid})
return self.coreapi('PUT', URI_REPLICATION_COMPRESS.format(repGrpId), parms)
def repgroup_show(self, grpId):
o = self.api('GET', URI_REPLICATION_GROUP.format(grpId))
if (not o):
return None
else:
return o;
def repgroup_list(self):
o = self.api('GET', URI_REPLICATION_GROUPS)
if (not o):
return {};
else:
return o;
    # look up a replication group uuid from its name
def repgroup_query(self, name):
if (self.__is_uri(name)):
return name
rg_res = self.repgroup_list()
rg_list = rg_res['data_service_vpool']
for rg_iter in rg_list :
rg = self.repgroup_show(rg_iter['id'])
if (rg['name'] == name):
return rg['id']
return None
#
# Encode HashSet of VPOOL parameters as a list
#
def encode_cos(self,stringmap):
cos_params = {'vpool_param' : self.__encode_list(stringmap)}
return cos_params
def cos_list(self, type):
o = self.api('GET', URI_VPOOLS.format(type))
if (not o):
return {};
return o['virtualpool']
def cos_create(self, type, name, description, useMatchedPools,
protocols, numpaths, minpaths, pathsperinitiator, systemtype,
highavailability, haNhUri, haCosUri, activeProtectionAtHASite, metropoint, file_cos, provisionType,
mirrorCosUri, neighborhoods, expandable, sourceJournalSize, journalVarray, journalVpool, standbyJournalVarray,
standbyJournalVpool, rp_copy_mode, rp_rpo_value, rp_rpo_type, protectionCoS,
multiVolumeConsistency, max_snapshots, max_mirrors, thin_volume_preallocation_percentage,
long_term_retention, drive_type, system_type, srdf, auto_tiering_policy_name, host_io_limit_bandwidth, host_io_limit_iops,
auto_cross_connect, placement_policy, compressionEnabled, snapshot_schedule, replication_support,
filepolicy_at_project, filepolicy_at_fs):
if (type != 'block' and type != 'file' and type != "object" ):
raise Exception('wrong type for vpool: ' + str(type))
parms = dict()
if (name):
parms['name'] = name
if (description):
parms['description'] = description
if (useMatchedPools):
parms['use_matched_pools'] = useMatchedPools
if (protocols):
parms['protocols'] = protocols
if (drive_type):
parms['drive_type'] = drive_type
if (system_type):
parms['system_type'] = system_type
if (numpaths):
parms['num_paths'] = numpaths
if (minpaths):
parms['min_paths'] = minpaths
if (pathsperinitiator):
parms['paths_per_initiator'] = pathsperinitiator
if (systemtype):
parms['system_type'] = systemtype
if (compressionEnabled):
parms['compression_enabled'] = compressionEnabled
if (highavailability):
if (highavailability == 'vplex_local'):
parms['high_availability'] = {'type' : highavailability, 'autoCrossConnectExport' : auto_cross_connect}
else:
parms['high_availability'] = {'type' : highavailability, 'metroPoint' : metropoint, 'ha_varray_vpool' : {'varray' : haNhUri, 'vpool' : haCosUri, 'activeProtectionAtHASite' : activeProtectionAtHASite}, 'autoCrossConnectExport' : auto_cross_connect}
if (file_cos):
parms['file_vpool'] = file_cos
if (provisionType):
parms['provisioning_type'] = provisionType
if (expandable):
parms['expandable'] = expandable
if(multiVolumeConsistency):
parms['multi_volume_consistency'] = multiVolumeConsistency
if (thin_volume_preallocation_percentage):
parms['thin_volume_preallocation_percentage'] = thin_volume_preallocation_percentage;
if (auto_tiering_policy_name):
parms['auto_tiering_policy_name'] = auto_tiering_policy_name;
if (long_term_retention):
parms['long_term_retention'] = long_term_retention;
if (type == 'block' and placement_policy):
parms['placement_policy'] = placement_policy;
if (max_snapshots or max_mirrors or protectionCoS or srdf):
cos_protection_params = dict()
if (type == 'block'):
if (srdf):
cos_protection_srdf_params = dict()
copies = srdf.split(',')
copyEntries = []
for copy in copies:
copyParam = copy.split(":")
copy = dict()
copy['varray'] = self.neighborhood_query(copyParam[0])
copy['vpool'] = self.cos_query("block", copyParam[1])
try:
copy['remote_copy_mode'] = copyParam[2]
except:
pass
copyEntries.append(copy)
cos_protection_srdf_params['remote_copy_settings'] = copyEntries
cos_protection_params['remote_copies'] = cos_protection_srdf_params
if (max_mirrors):
cos_protection_mirror_params = dict()
cos_protection_mirror_params['max_native_continuous_copies'] = max_mirrors
if (mirrorCosUri):
cos_protection_mirror_params['protection_mirror_vpool'] = mirrorCosUri
cos_protection_params['continuous_copies'] = cos_protection_mirror_params
if (protectionCoS):
cos_protection_rp_params = dict()
if (sourceJournalSize or rp_copy_mode or rp_rpo_value or standbyJournalVarray or standbyJournalVpool or journalVarray or journalVpool):
sourcePolicy = dict();
if (sourceJournalSize):
sourcePolicy['journal_size'] = sourceJournalSize
if (rp_copy_mode):
sourcePolicy['remote_copy_mode'] = rp_copy_mode;
if (rp_rpo_value):
sourcePolicy['rpo_value'] = rp_rpo_value;
if (rp_rpo_type):
sourcePolicy['rpo_type'] = rp_rpo_type;
if (journalVarray):
sourcePolicy['journal_varray'] = self.neighborhood_query(journalVarray);
if (journalVpool):
sourcePolicy['journal_vpool'] = self.cos_query("block", journalVpool);
if (standbyJournalVarray):
sourcePolicy['standby_journal_varray'] = self.neighborhood_query(standbyJournalVarray);
if (standbyJournalVpool):
sourcePolicy['standby_journal_vpool'] = self.cos_query("block", standbyJournalVpool);
cos_protection_rp_params['source_policy'] = sourcePolicy
copies = protectionCoS.split(',')
copyEntries = []
for copy in copies:
copyParam = copy.split(":")
copy = dict()
copy['varray'] = self.neighborhood_query(copyParam[0])
copy['vpool'] = self.cos_query("block", copyParam[1])
try:
copyPolicy = dict()
copyPolicy['journal_size'] = copyParam[2]
copyPolicy['journal_varray'] = self.neighborhood_query(copyParam[3])
copyPolicy['journal_vpool'] = self.cos_query("block", copyParam[4])
copy['policy'] = copyPolicy
except:
pass
copyEntries.append(copy)
cos_protection_rp_params['copies'] = copyEntries
cos_protection_params['recoverpoint'] = cos_protection_rp_params
if (max_snapshots):
cos_protection_snapshot_params = dict()
cos_protection_snapshot_params['max_native_snapshots'] = max_snapshots
cos_protection_params['snapshots'] = cos_protection_snapshot_params
if(snapshot_schedule is not None):
cos_protection_params['schedule_snapshots'] = snapshot_schedule
if(replication_support is not None):
cos_protection_params['replication_supported'] = replication_support
if(filepolicy_at_project is not None):
cos_protection_params['allow_policy_at_project_level'] = filepolicy_at_project
if(filepolicy_at_fs is not None):
cos_protection_params['allow_policy_at_fs_level'] = filepolicy_at_fs
parms['protection'] = cos_protection_params
nhs = list()
if(neighborhoods):
for n in neighborhoods:
nhs.append(self.neighborhood_query(n))
parms['varrays'] = nhs
if (host_io_limit_bandwidth):
parms['host_io_limit_bandwidth'] = host_io_limit_bandwidth
if (host_io_limit_iops):
parms['host_io_limit_iops'] = host_io_limit_iops
if (type == 'object'):
del parms['protection']
print "VPOOL CREATE Params = ", parms
return self.api('POST', URI_VPOOLS.format(type), parms)
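    # Illustrative note on the copy-list formats parsed above (added for clarity; the
    # names and journal size are hypothetical): srdf is a comma-separated list of
    # 'varray:vpool[:copyMode]' entries, and protectionCoS is a comma-separated list of
    # 'varray:vpool[:journalSize:journalVarray:journalVpool]' entries, e.g.
    #   protectionCoS = 'tgtVarray:tgtVpool:10GB:tgtVarray:journalVpool'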
def cos_match(self, type, useMatchedPools,
protocols, numpaths, highavailability, haNhUri, haCosUri, activeProtectionAtHASite, metropoint, file_cos, provisionType,
mirrorCosUri, neighborhoods, expandable, sourceJournalSize, journalVarray, journalVpool, standbyJournalVarray,
standbyJournalVpool, rp_copy_mode, rp_rpo_value, rp_rpo_type, protectionCoS,
multiVolumeConsistency, max_snapshots, max_mirrors, thin_volume_preallocation_percentage, drive_type,
system_type, srdf, compressionEnabled):
if (type != 'block' and type != 'file' and type != "object" ):
raise Exception('wrong type for vpool: ' + str(type))
parms = dict()
if (useMatchedPools):
parms['use_matched_pools'] = useMatchedPools
if (protocols):
parms['protocols'] = protocols
if (drive_type):
parms['drive_type'] = drive_type
if (system_type):
parms['system_type'] = system_type
if (numpaths):
parms['num_paths'] = numpaths
if (compressionEnabled):
parms['compression_enabled'] = compressionEnabled
if (highavailability):
if (highavailability == 'vplex_local'):
parms['high_availability'] = {'type' : highavailability}
else:
parms['high_availability'] = {'type' : highavailability, 'metroPoint' : metropoint, 'ha_varray_vpool' : {'varray' : haNhUri, 'vpool' : haCosUri, 'activeProtectionAtHASite' : activeProtectionAtHASite}}
if (file_cos):
parms['file_vpool'] = file_cos
if (provisionType):
parms['provisioning_type'] = provisionType
if (expandable):
parms['expandable'] = expandable
if(multiVolumeConsistency):
parms['multi_volume_consistency'] = multiVolumeConsistency
if (thin_volume_preallocation_percentage):
parms['thin_volume_preallocation_percentage'] = thin_volume_preallocation_percentage;
if (max_snapshots or max_mirrors or protectionCoS or srdf):
cos_protection_params = dict()
if (type == 'block'):
if (srdf):
cos_protection_srdf_params = dict()
copies = srdf.split(',')
copyEntries = []
for copy in copies:
copyParam = copy.split(":")
copy = dict()
copy['varray'] = self.neighborhood_query(copyParam[0])
copy['vpool'] = self.cos_query("block", copyParam[1])
try:
copy['remote_copy_mode'] = copyParam[2]
except:
pass
copyEntries.append(copy)
cos_protection_srdf_params['remote_copy_settings'] = copyEntries
cos_protection_params['remote_copies'] = cos_protection_srdf_params
if (max_mirrors):
cos_protection_mirror_params = dict()
cos_protection_mirror_params['max_native_continuous_copies'] = max_mirrors
if (mirrorCosUri):
cos_protection_mirror_params['protection_mirror_vpool'] = mirrorCosUri
cos_protection_params['continuous_copies'] = cos_protection_mirror_params
if (protectionCoS):
cos_protection_rp_params = dict()
if (sourceJournalSize):
sourcePolicy = dict()
sourcePolicy['journal_size'] = sourceJournalSize
sourcePolicy['journal_varray'] = journalVarray
sourcePolicy['journal_vpool'] = journalVpool
sourcePolicy['standby_journal_varray'] = standbyJournalVarray
sourcePolicy['standby_journal_vpool'] = standbyJournalVpool
cos_protection_rp_params['source_policy'] = sourcePolicy
copies = protectionCoS.split(',')
copyEntries = []
for copy in copies:
copyParam = copy.split(":")
copy = dict()
copy['varray'] = self.neighborhood_query(copyParam[0])
copy['vpool'] = self.cos_query("block", copyParam[1])
try:
copyPolicy = dict()
copyPolicy['journal_size'] = copyParam[2]
copyPolicy['journal_varray'] = self.neighborhood_query(copyParam[3])
copyPolicy['journal_vpool'] = self.cos_query("block", copyParam[4])
copy['policy'] = copyPolicy
except:
pass
copyEntries.append(copy)
cos_protection_rp_params['copies'] = copyEntries
cos_protection_params['recoverpoint'] = cos_protection_rp_params
if (max_snapshots):
cos_protection_snapshot_params = dict()
cos_protection_snapshot_params['max_native_snapshots'] = max_snapshots
cos_protection_params['snapshots'] = cos_protection_snapshot_params
parms['protection'] = cos_protection_params
nhs = list()
if(neighborhoods):
for n in neighborhoods:
nhs.append(self.neighborhood_query(n))
parms['varrays'] = nhs
return self.api('POST', URI_VPOOLS_MATCH.format(type), parms)
#
    # Assign pools to a CoS or change the max snapshots/mirrors values
    # Note that a single call can make either pool assignments or snapshot/mirror changes, not both
#
def cos_update(self, pooladds, poolrems, type, cosuri, max_snapshots, max_mirrors, expandable, use_matched, host_io_limit_bandwidth, host_io_limit_iops, placement_policy):
params = dict()
if (pooladds or poolrems):
poolassignments = dict();
if (pooladds):
pool = dict()
pool['storage_pool'] = []
for id in pooladds:
pool['storage_pool'].append(id)
poolassignments['add'] = pool
if (poolrems):
pool = dict();
pool['storage_pool'] = []
for id in poolrems:
pool['storage_pool'].append(id)
poolassignments['remove'] = pool;
params['assigned_pool_changes'] = poolassignments
return self.api('PUT', URI_VPOOL_UPDATE.format(type, cosuri), params)
if (max_snapshots or max_mirrors):
vpool_protection_param = dict()
if (max_snapshots):
vpool_protection_snapshot_params = dict() #base class attribute
vpool_protection_snapshot_params['max_native_snapshots'] = max_snapshots
vpool_protection_param['snapshots'] = vpool_protection_snapshot_params
if(max_mirrors):
vpool_protection_mirror_params = dict()
vpool_protection_mirror_params['max_native_continuous_copies'] = max_mirrors
vpool_protection_param['continuous_copies'] = vpool_protection_mirror_params
params['protection'] = vpool_protection_param
if (expandable):
params['expandable'] = expandable
if (use_matched):
params['use_matched_pools'] = use_matched
if (host_io_limit_bandwidth):
params['host_io_limit_bandwidth'] = host_io_limit_bandwidth
if (host_io_limit_iops):
params['host_io_limit_iops'] = host_io_limit_iops
if (type == 'block' and placement_policy):
params['placement_policy'] = placement_policy;
return self.api('PUT', URI_VPOOL_INSTANCE.format(type, cosuri), params)
def objcos_create(self, name, description):
parms = dict()
if (name):
parms['name'] = name
if (description):
parms['description'] = description
parms['type'] = 'OBJ_AND_HDFS'
return self.api('POST', URI_OBJ_VPOOL.format('object'), parms)
def objcos_query(self, name):
if (self.__is_uri(name)):
return name
cos_res = self.objcos_list()
neighborhoods = self.neighborhood_list()
for nb in neighborhoods:
neighborhood = self.neighborhood_show(nb['id'])
if (neighborhood['name'] == name):
return neighborhood['id']
        raise Exception('bad vpool name ' + str(name))
def objcos_show(self, uri):
return self.neighborhood_show(uri)
def objcos_list(self):
return self.neighborhood_list()
def objcos_delete(self, uri):
return self.api('POST', URI_OBJ_VPOOL_INSTANCE.format('object', uri) + "/deactivate" )
def cos_delete(self, type, uri):
if(type=='object'):
return self.api('POST', URI_RESOURCE_DEACTIVATE.format(URI_OBJ_VPOOL_INSTANCE.format(type, uri)))
else:
return self.api('POST', URI_RESOURCE_DEACTIVATE.format(URI_VPOOL_INSTANCE.format(type, uri)))
def cos_name(self, type, uri):
cos = self.cos_show(type, uri)
return cos['name']
def cos_show(self, type, uri):
return self.api('GET', URI_VPOOL_INSTANCE.format(type, uri))
def cos_refresh(self, type, uri):
return self.api('GET', URI_VPOOL_REFRESH.format(type, uri))
def cos_query(self, type, name):
if (self.__is_uri(name)):
return name
cos_res = self.cos_list(type)
for cs in cos_res :
if(BOURNE_DEBUG=='1'):
print "found vpool = ",cs
try:
cos = self.cos_show(type, cs['id'])
if (cos['name'] == name):
return cos['id']
except:
continue
raise Exception('bad vpool name ' + name + ' of type: ' + type)
def cos_add_acl(self, uri, type, tenant):
tenant = self.__tenant_id_from_label(tenant)
self.cos_add_tenant_acl(uri, type, tenant)
def cos_add_tenant_acl(self, uri, type, tenant_id):
parms = {
'add':[{
'privilege': ['USE'],
'tenant': tenant_id,
}]
}
if(type=='object'):
response = self.__api('PUT', URI_VPOOL_ACLS.format(type, uri), parms)
else:
response = self.__api('PUT', URI_VPOOL_ACLS.format(type, uri), parms)
if (response.status_code != 200):
print "cos_add_acl failed with code: ", response.status_code
raise Exception('cos_add_acl: failed')
def tenant_create(self, name, domain, key, value):
parms = {
'name': name,
'user_mappings': [{
'domain': domain,
'attributes':[{
'key':key,
'value':[value]
}],
}]
}
print parms
uri = self.tenant_getid()
return self.api('POST', URI_TENANTS_SUBTENANT.format(uri), parms)
def tenant_deactivate(self, subtId):
uri = self.tenant_getid()
return self.api('POST', URI_TENANTS_DEACTIVATE.format(subtId))
def tenant_list(self, uri=None):
if (not uri):
uri = self.tenant_getid()
o = self.api('GET', URI_TENANTS_SUBTENANT.format(uri))
if (not o):
return {}
#print 'tenant_list (', uri, ') :', o
return o['subtenant']
def tenant_getid(self):
o = self.api('GET', URI_TENANT)
return o['id']
def tenant_name(self, uri):
t = self.tenant_show(uri)
return t['name']
def tenant_show(self, name):
uri = self.__tenant_id_from_label(name)
return self.api('GET', URI_TENANTS.format(uri))
def tenant_delete(self, uri):
return self.api('POST', URI_RESOURCE_DEACTIVATE.format(URI_TENANTS.format(uri)))
def tenant_query(self, label):
return self.__tenant_query(label)
def tenant_bulkgetids(self):
ids = self.__tenant_bulkget_ids()
# retrieve the first 10 ids only
chunk = self.get_ids_chunk(ids['id'], 0, 10)
return chunk
def tenant_bulkpost(self, ids):
return self.__tenant_bulkget_reps(ids)
def __tenant_bulkget_ids(self):
return self.api('GET', URI_TENANTS_BULKGET)
def __tenant_bulkget_reps(self, ids):
return self.api('POST', URI_TENANTS_BULKGET, ids)
def __tenant_query(self, label):
id = self.tenant_getid()
subtenants = self.tenant_list(id)
for tenant in subtenants:
if (tenant['name'] == label):
return tenant['id']
return None
def __tenant_id_from_label(self, tenant):
uri = None
if (not tenant):
uri = self.tenant_getid()
else:
if not self.__is_uri(tenant):
uri = self.__tenant_query(tenant)
else:
uri = tenant
if (not uri):
raise Exception('bad tenant name: ' + tenant)
return uri
def tenant_assign_admin(self, tenant, subject_id ):
uri = self.__tenant_id_from_label(tenant)
parms = {
'role_assignment_change': {
'add': [{
'role': ['TENANT_ADMIN'],
'subject_id': subject_id,
}]
}}
response = self.__api('PUT', URI_TENANTS_ROLES.format(uri), parms)
if (response.status_code != 200):
print "tenant_assign_admin failed with code: ", response.status_code
raise Exception('tenant_assign_admin: failed')
def tenant_add_attribute(self, tenant, domain, key, value):
uri = self.__tenant_id_from_label(tenant)
tenant = self.api('GET', URI_TENANTS.format(uri))
user_mappings = tenant['user_mappings']
for user_mapping in user_mappings:
if(domain == user_mapping['domain']):
for attribute in user_mapping['attributes']:
if (key == attribute['key'] and value in attribute['value']):
print "tenant contains attribute " + key + "=" + value + " already"
return
parms = {
'user_mapping_changes': {
'add': [{
'domain': domain,
'attributes':[{
'key':key,
'value':[value]
}],
}]}
}
self.api('PUT', URI_TENANTS.format(uri), parms)
def tenant_update_domain(self, tenantURI, domain, operation, key, value):
if( not operation in ['add', 'remove']):
raise Exception('type must be add or remove')
parms = {
'user_mapping_changes': {
operation: [{
'domain': domain,
'attributes':[{
'key':key,
'value':[value],
}],
}]}
}
self.api('PUT', URI_TENANTS.format(tenantURI), parms)
def tenant_add_group(self, tenant, domain, ingroup):
uri = self.__tenant_id_from_label(tenant)
tenant = self.api('GET', URI_TENANTS.format(uri))
user_mappings = tenant['user_mappings']
for user_mapping in user_mappings:
if(domain == user_mapping['domain']):
for group in user_mapping['groups']:
if (group == ingroup):
print "tenant contains group mapping " + group + " already"
return
parms = {
'user_mapping_changes': {
'add': [{
'domain': domain,
'groups': [ingroup],
}]}
}
print parms
self.api('PUT', URI_TENANTS.format(uri), parms)
def tenant_remove_group(self, tenant, domain, ingroup):
uri = self.__tenant_id_from_label(tenant)
parms = {
'user_mapping_changes': {
'remove': [{
'domain': domain,
'groups': [ingroup],
}]}
}
print parms
self.api('PUT', URI_TENANTS.format(uri), parms)
def tenant_update_namespace(self, tenant, namespace):
if( 'urn:storageos:' in tenant ):
print "URI passed in Tenant Namespace = ", tenant
uri = tenant
else:
uri = self.__tenant_id_from_label(tenant)
print "URI mapped in tenant namespace = ", uri
parms = {
'namespace' : namespace
}
self.api('PUT', URI_TENANTS.format(uri), parms)
def project_list(self, tenant):
uri = self.__tenant_id_from_label(tenant)
o = self.api('GET', URI_PROJECTS.format(uri), None)
if (not o):
return {}
return o['project']
def project_name(self, uri):
p = self.project_show(uri)
return p['name']
def project_show(self, uri):
return self.api('GET', URI_PROJECT.format(uri))
def project_create(self, label, tenant):
uri = self.__tenant_id_from_label(tenant)
self.project_create_with_tenant_id(label, uri)
def project_create_with_tenant_id(self, label, tenant_id):
parms = { 'name' : label, }
return self.api('POST', URI_PROJECTS.format(tenant_id), parms)
def tenant_project_query(self, tenant_id, proj_name):
projects = self.project_list(tenant_id)
for project in projects:
if (project['name'] == proj_name):
return project['id']
return None
def project_delete(self, uri):
return self.api('POST', URI_RESOURCE_DEACTIVATE.format(URI_PROJECT.format(uri)))
def project_query(self, name):
if (self.__is_uri(name)):
return name
label = name
tenants = self.tenant_list(self.tenant_getid())
ids = [self.tenant_getid()]
for tenant in tenants:
ids.append(tenant['id'])
        # Since we are using sysadmin as the user, check the root tenant and all
        # subtenants. Most likely the project lives in the most recently created
        # subtenant.
for tenant in ids:
projects = self.project_list(tenant)
for project in projects:
if (project['name'] == label):
return project['id']
raise Exception('bad project name: ', name)
def project_add_acl(self, name, user):
id = self.project_query(name)
parms = {
'add':[{
'privilege': ['ALL'],
'subject-id': user,
}]
}
response = self.__api('PUT', URI_PROJECT_ACLS.format(id), parms)
if (response.status_code != 200):
print "project_add_acl failed with code: ", response.status_code
raise Exception('project_add_acl: failed')
def project_bulkgetids(self):
ids = self.__project_bulkget_ids()
# retrieve the first 10 ids only
chunk = self.get_ids_chunk(ids['id'], 0, 10)
return chunk
def project_bulkpost(self, ids):
return self.__project_bulkget_reps(ids)
def __project_bulkget_ids(self):
return self.api('GET', URI_PROJECTS_BULKGET)
def __project_bulkget_reps(self, ids):
return self.api('POST', URI_PROJECTS_BULKGET, ids)
def authn_provider_create(self, mode, url, managerdn, managerpwd, sbase,
sfilter, groupattr, name, domain, whitelist, searchscope,
groupobjectclasses, groupmemberattributes):
whitelist_array=[]
whitelist_array = whitelist.split(',')
groupobjectclasses_array=[]
if (groupobjectclasses != None):
groupobjectclasses_array=groupobjectclasses.split(',')
groupmemberattributes_array=[]
if (groupmemberattributes != None):
groupmemberattributes_array=groupmemberattributes.split(',')
parms = { 'mode' : mode,
'server_urls' : [ url ],
'manager_dn' : managerdn,
'manager_password' : managerpwd,
'search_base' :sbase,
'search_filter' : sfilter,
'group_attribute' : groupattr,
'name' : name,
'group_whitelist_values' : whitelist_array,
'search_scope' : searchscope,
'group_object_class': groupobjectclasses_array,
'group_member_attribute': groupmemberattributes_array}
# skip these negative tests if security is disabled
if (SKIP_SECURITY != '1'):
response = self.__api('POST', URI_VDC_AUTHN_PROFILE, parms)
if (response.status_code != 400):
print "Failed to validate a profile without domains tag: ", response.status_code
raise Exception("Create a bad authentication provider failed")
parms['domains'] = [ ]
response = self.__api('POST', URI_VDC_AUTHN_PROFILE, parms)
if (response.status_code != 400):
print "Failed to validate a profile without domain tag: ", response.status_code
raise Exception("Create a bad authentication provider failed")
# test bad content type
response = self.__api('POST', URI_VDC_AUTHN_PROFILE, parms, None, 'bad')
if (response.status_code != 400):
print "Failed to test against bad content type: ", response.status_code
raise Exception("Could not test bad content type POST")
# test missing content type
response = self.__api('POST', URI_VDC_AUTHN_PROFILE, parms, None, None)
if (response.status_code != 415):
print "Failed to test against missing content type: ", response.status_code
raise Exception("Could not test missing content type POST")
# now create the real authn provider
parms['domains'] = [ domain ]
response = self.__api('POST', URI_VDC_AUTHN_PROFILE, parms)
if (response.status_code != 200):
print "Failed to create authentication provider: ", response.status_code
rawresponse = response.text
print rawresponse
alreadyExists = rawresponse.find("already exists") != -1 or rawresponse.find("Duplicate label") != -1
if (alreadyExists):
print "Domain configuration already exists. Ignoring and continuing with the tests..."
return response
raise Exception("Create authentication provider failed")
else:
return response
def authn_provider_show(self, uri):
return self.api('GET', URI_VDC_AUTHN_PROFILES.format(uri))
def authn_provider_list(self):
response = self.api('GET', URI_VDC_AUTHN_PROFILE)
o = response['authnprovider']
profiles = []
for pr in o:
profiles.append(pr.get('id'))
return profiles
def authn_provider_query(self, name):
uris = self.authn_provider_list()
for uri in uris:
profile = self.authn_provider_show(uri)
if (profile['name'] == name):
return profile['id'] # return the first that matches for now
raise Exception('bad authn provider name : ', name)
def authn_provider_update(self, newName):
parms = { 'name' : newName }
uris = self.authn_provider_list()
for uri in uris:
parmsDomains = { 'domain_changes' : { 'add' : [ ] }}
profile = self.authn_provider_show(uri)
savedProviderName = profile['name']
parmsDomains['domain_changes']['add'] = [ 'test.dummydomain.com' ]
response = self.__api('PUT', URI_VDC_AUTHN_PROFILES.format(uri), parmsDomains)
# Test that you can update with a domain that is not empty (and does not exist already)
if (response.status_code != 200):
rawresponse = response.text
print rawresponse
alreadyExists = rawresponse.find("Domain") != -1 and rawresponse.find("already exists in another authentication provider") != -1
if (alreadyExists):
print "Domain configuration already exists during update. Ignoring and continuing with the tests..."
else:
print "Failed to update a profile with domain tag: ", response.status_code
raise Exception("Failed to update the profile with domains tag")
# Update the actual cert passed in and the provider name
response = self.__api('PUT', URI_VDC_AUTHN_PROFILES.format(uri), parms)
if (response.status_code != 200):
print "Failed to update authentication provider: ", response.status_code
raise Exception("Update authentication provider failed")
else:
profile = self.authn_provider_show(uri)
if (profile['name'] == newName):
# Verify that after the provider update the number of provider entries stays the same
urisLatest = self.authn_provider_list()
if (len(uris) != len(urisLatest)):
raise Exception("After updating the provider the number of providers in the system is changed")
else:
# restore the original provider name to run the rest of the tests
parms['name'] = savedProviderName
response = self.__api('PUT', URI_VDC_AUTHN_PROFILES.format(uri), parms)
if (response.status_code != 200):
raise Exception("Failed to restore the original provider name")
return uri
else:
raise Exception("Update authentication provider: the update actually did not happen")
def fileshare_list(self, project):
puri = self.project_query(project)
puri = puri.strip()
results = self.fileshare_search(None, puri, None);
resources = results['resource']
fileshares = []
for resource in resources:
fileshares.append(resource['id'])
return fileshares
def fileshare_bulkget(self):
ids = self.__fileshare_bulkget_ids()
# retrieve the first 10 fileshares only
chunk = self.get_ids_chunk(ids['id'], 0, 10)
return self.__fileshare_bulkget_reps(chunk)
def __fileshare_bulkget_ids(self):
return self.api('GET', URI_FILESHARE_BULKGET)
def __fileshare_bulkget_reps(self, ids):
return self.api('POST', URI_FILESHARE_BULKGET, ids)
def fileshare_bulkgetids(self):
ids = self.__fileshare_bulkget_ids()
# retrieve the first 10 fileshares only
chunk = self.get_ids_chunk(ids['id'], 0, 10)
return chunk
def fileshare_bulkpost(self, ids):
return self.api('POST', URI_FILESHARE_BULKGET, ids)
def fileshare_show(self, uri):
return self.api('GET', URI_FILESYSTEM.format(uri))
def fileshare_show_task(self, fs, task):
uri_file_task = URI_FILESYSTEM + '/tasks/{1}'
return self.api('GET', uri_file_task.format(fs, task))
def fileshare_create(self, label, project, neighborhood, cos, size, protocols, protection):
parms = {
'name' : label,
'varray' : neighborhood,
'vpool' : cos,
'size' : size,
}
if (protocols):
parms['protocols'] = {'protocols' : protocols}
print 'parms: ' + str(parms)
o = self.api('POST', URI_FILESYSTEMS_LIST, parms, {'project': project})
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.fileshare_show_task)
return s
def fileshare_export(self, uri, endpoints, type, perm, rootuser, protocol, comments):
parms = {
'type' : type,
'permissions' : perm,
'root_user' : rootuser,
'protocol' : protocol,
'endpoints' : endpoints,
'comments' : comments,
}
o = self.api('POST', URI_FILESYSTEMS_EXPORTS.format(uri), parms)
print 'OOO: ' + str(o) + ' :OOO'
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.fileshare_show_task)
return s
def fileshare_export_update(self, uri, operation, securityflavor, user, roothosts, readonlyhosts, readwritehosts, subDir ):
exportRulesparam = dict()
exportRulesparam['secFlavor'] = securityflavor
if(roothosts):
exportRulesparam['rootHosts'] = roothosts
if(readonlyhosts):
exportRulesparam['readOnlyHosts'] = readonlyhosts
if(readwritehosts):
exportRulesparam['readWriteHosts'] = readwritehosts
if(user):
exportRulesparam['anon'] = user
exportRulerequest = {'exportRules':[exportRulesparam]}
if("add"== operation):
request = {'add': exportRulerequest}
elif("delete" == operation):
request = {'delete' : exportRulerequest}
else:
request = {'modify' : exportRulerequest}
o = self.api('PUT',URI_FILESYSTEMS_EXPORTS_UPDATE.format(uri), request)
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.fileshare_show_task)
return s
def fileshare_unexport(self, uri):
url = URI_FILESYSTEMS_UNEXPORT.format(uri)
o = self.api('DELETE', url)
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.fileshare_show_task)
return s
def fileshare_expand(self, uri, size):
url = URI_FILESYSTEMS_EXPAND.format(uri)
parms = {
'new_size' : size
}
o = self.api('POST', url, parms)
self.assert_is_dict(o)
print o
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.fileshare_show_task)
return s
def fileshare_share(self, uri, sharename, description):
parms = {
'name' : sharename,
'description' : description
}
o = self.api('POST', URI_FILESYSTEMS_SHARES.format(uri), parms)
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.fileshare_show_task)
return s
def fileshare_share_acl(self, uri, sharename, user, permission, domain, group, operation):
cifs_acl_param = dict()
cifs_acl_param['share_name'] = sharename
if(permission):
cifs_acl_param['permission'] = permission
if(user):
cifs_acl_param['user'] = user
if(domain):
cifs_acl_param['domain'] = domain
if(group):
cifs_acl_param['group'] = group
acl_cifs_request = {'acl':[cifs_acl_param]}
if("add"== operation):
request = {'add': acl_cifs_request}
elif("delete" == operation):
request = {'delete' : acl_cifs_request}
elif("modify" == operation):
request = {'modify' : acl_cifs_request}
o = self.api('PUT',URI_FILESYSTEMS_SHARES_ACL.format(uri,sharename),request)
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.fileshare_show_task)
return s
def fileshare_acl_show(self, uri, sharename):
return self.api('GET', URI_FILESYSTEMS_SHARES_ACL_SHOW.format(uri,sharename))
def fileshare_acl_delete(self, uri, sharename):
o = self.api('DELETE', URI_FILESYSTEMS_SHARES_ACL_DELETE.format(uri, sharename))
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.fileshare_show_task)
return s
def fileshare_unshare(self, uri, sharename):
o = self.api('DELETE', URI_FILESYSTEMS_UNSHARE.format(uri, sharename))
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.fileshare_show_task)
return s
def fileshare_delete(self, uri, forceDelete):
parms = {
'forceDelete' : forceDelete
}
print parms
o = self.api('POST', URI_RESOURCE_DEACTIVATE.format(URI_FILESYSTEM.format(uri)), parms)
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.fileshare_show_task)
return (o, s)
def fileshare_query(self, name):
if (self.__is_uri(name)):
return name
(pname, label) = name.rsplit('/', 1)
puri = self.project_query(pname)
puri = puri.strip()
results = self.fileshare_search(None, puri)
resources = results['resource']
for resource in resources:
if (resource['match'] == label):
return resource['id']
raise Exception('bad fileshare name : ' + name )
def fileshare_search(self, name, project=None, tag=None):
if (self.__is_uri(name)):
return name
if (name):
if (project):
return self.api('GET', URI_FILESYSTEMS_SEARCH_PROJECT_NAME.format(project,name))
else:
return self.api('GET', URI_FILESYSTEMS_SEARCH_NAME.format(name))
if (tag):
return self.api('GET', URI_FILESYSTEMS_SEARCH_TAG.format(tag))
if (project):
return self.api('GET', URI_FILESYSTEMS_SEARCH_PROJECT.format(project))
def fileshare_quota_task(self, id, task):
uri_quota_task = '/vdc/tasks/{0}'
return self.api('GET', uri_quota_task.format(task))
def fileshare_list_quota_dir(self, uri):
return self.api('GET', URI_FILE_QUOTA_DIR_LIST.format(uri))
def fileshare_quota_dir_query(self, fsUri, name):
if (self.__is_uri(name)):
return name
results = self.fileshare_list_quota_dir(fsUri)
resources = results['quota_dir']
for resource in resources:
if (resource['name'] == name):
return resource['id']
raise Exception('bad quota dir name : ' + name )
def fileshare_create_quota_dir(self, fsuri, label, size, oplocks, sec):
parms = {
'name' : label
}
if (size):
parms['size'] = size
if (oplocks):
parms['oplock'] = oplocks
if (sec):
parms['security_style'] = sec
o = self.api('POST', URI_FILE_QUOTA_DIR_LIST.format(fsuri), parms)
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['id'], self.fileshare_quota_task)
return s
def fileshare_update_quota_dir(self, uri, size, oplocks, sec):
parms = dict()
if (size):
parms['size'] = size
if (oplocks):
parms['oplock'] = oplocks
if (sec):
parms['security_style'] = sec
o = self.api('POST', URI_FILE_QUOTA_DIR.format(uri), parms)
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['id'], self.fileshare_quota_task)
return s
def fileshare_delete_quota_dir(self, uri, forceDelete):
parms = dict()
if (forceDelete):
parms['forceDelete'] = forceDelete
o = self.api('POST', URI_FILE_QUOTA_DIR_DELETE.format(uri), parms)
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['id'], self.fileshare_quota_task)
return s
def fileshare_show_quota_dir(self, uri):
return self.api('GET', URI_FILE_QUOTA_DIR.format(uri))
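    #
    # File snapshot APIs
    #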
def snapshot_create(self, fsuri, snaplabel):
parms = {
'name' : snaplabel,
}
o = self.api('POST', URI_FILESYSTEM_SNAPSHOT.format(fsuri), parms)
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.snapshot_show_task)
return s
    # Host-based snapshot export; given a distinct name so it does not shadow
    # the endpoint-based snapshot_export defined below.
    def snapshot_export_host(self, uri, host):
parms = {
'host' : host,
}
o = self.api('POST', URI_FILE_SNAPSHOT_EXPORTS.format(uri), parms)
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.snapshot_show_task)
return s
def snapshot_export(self, uri, endpoints, type, perm, rootuser, protocol):
parms = {
'type' : type,
'permissions' : perm,
'root_user' : rootuser,
'protocol' : protocol,
'endpoints' : endpoints
}
o = self.api('POST', URI_FILE_SNAPSHOT_EXPORTS.format(uri), parms)
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.snapshot_show_task)
return s
def snapshot_unexport(self, uri):
url = URI_FILE_SNAPSHOT_UNEXPORT.format(uri)
o = self.api('DELETE', url)
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.snapshot_show_task)
return s
def snapshot_share(self, uri, sharename, description, permission):
parms = {
'name' : sharename,
'description' : description,
'permission' : permission
}
o = self.api('POST', URI_FILE_SNAPSHOT_SHARES.format(uri), parms)
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.snapshot_show_task)
return s
def snapshot_share_acl(self, uri, sharename, user, permission, domain, group, operation):
cifs_acl_param = dict()
cifs_acl_param['share_name'] = sharename
if(permission):
cifs_acl_param['permission'] = permission
if(user):
cifs_acl_param['user'] = user
if(domain):
cifs_acl_param['domain'] = domain
if(group):
cifs_acl_param['group'] = group
acl_cifs_request = {'acl':[cifs_acl_param]}
if("add"== operation):
request = {'add': acl_cifs_request}
elif("delete" == operation):
request = {'delete' : acl_cifs_request}
elif("modify" == operation):
request = {'modify' : acl_cifs_request}
o = self.api('PUT',URI_FILE_SNAPSHOT_SHARES_ACL.format(uri,sharename),request)
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.snapshot_show_task)
return s
def snapshot_acl_show(self, uri, sharename):
return self.api('GET', URI_FILE_SNAPSHOT_SHARES_ACL_SHOW.format(uri,sharename))
def snapshot_acl_delete(self, uri, sharename):
o = self.api('DELETE', URI_FILE_SNAPSHOT_SHARES_ACL_DELETE.format(uri,sharename))
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.snapshot_show_task)
return s
def snapshot_unshare(self, uri, sharename):
o = self.api('DELETE', URI_FILE_SNAPSHOT_UNSHARE.format(uri, sharename))
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.snapshot_show_task)
return s
def snapshot_delete(self, uri):
o = self.api('POST', URI_RESOURCE_DEACTIVATE.format(URI_FILE_SNAPSHOT.format(uri)))
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.snapshot_show_task)
return s
def snapshot_restore(self, uri):
o = self.api('POST', URI_FILE_SNAPSHOT_RESTORE.format(uri))
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.snapshot_show_task)
return s
def snapshot_share_list(self, uri):
o = self.api('GET', URI_FILE_SNAPSHOT_SHARES.format(uri))
if (not o):
return {}
else:
return o
def snapshot_list(self, fsuri):
o = self.api('GET', URI_FILESYSTEM_SNAPSHOT.format(fsuri))
if (not o):
return {}
else:
return o['snapshot']
def snapshot_show(self, uri):
return self.api('GET', URI_FILE_SNAPSHOT.format(uri))
def snapshot_show_task(self, snap, task):
return self.api('GET', URI_FILE_SNAPSHOT_TASKS.format(snap,task))
def snapshot_query(self, name, fname):
if (self.__is_uri(name)):
return name
(sname, label) = name.rsplit('/', 1)
furi = self.fileshare_query(fname)
furi = furi.strip()
snaps = self.snapshot_list(furi)
for snap in snaps:
snapshot = self.snapshot_show(snap['id'])
if (snapshot['name'] == label):
return snapshot['id']
raise Exception('bad snapshot name : ' + name)
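    #
    # Network system APIs
    #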
def networksystem_create(self, label, type, devip, devport, username, password,
smis_ip, smis_port, smisuser, smispw, smisssl):
parms = { 'name' : label,
'system_type' : type,
'ip_address' : devip,
'port_number' : devport,
'user_name' : username,
'password' : password,
}
if(smis_ip):
parms['smis_provider_ip'] = smis_ip
if(smis_port):
parms['smis_port_number'] = smis_port
if (smisuser):
parms['smis_user_name'] = smisuser
if (smispw):
parms['smis_password'] = smispw
if (smisssl):
parms['smis_use_ssl'] = smisssl
o = self.api('POST', URI_NETWORKSYSTEMS, parms)
print 'OOO: ' + str(o) + ' :OOO'
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.networksystem_show_task)
return s
def networksystem_show_task(self, device, task):
uri_device_task = URI_NETWORKSYSTEM + '/tasks/{1}'
return self.api('GET', uri_device_task.format(device,task))
def networksystem_update(self, label, type, devip, devport, username, password,
uri, smis_ip, smis_port, smisuser, smispw, smisssl):
parms = { 'name' : label,
'system_type' : type,
'ip_address' : devip,
'port_number' : devport,
'user_name' : username,
'password' : password,
}
if(smis_ip):
parms['smis_provider_ip'] = smis_ip
if(smis_port):
parms['smis_port_number'] = smis_port
if (smisuser):
parms['smis_user_name'] = smisuser
if (smispw):
parms['smis_password'] = smispw
if (smisssl):
parms['smis_use_ssl'] = smisssl
return self.api('PUT', URI_NETWORKSYSTEM.format(uri), parms)
def networksystem_delete(self, uri):
return self.api('POST', URI_RESOURCE_DEACTIVATE.format(URI_NETWORKSYSTEM.format(uri)))
def networksystem_show(self, uri):
return self.api('GET', URI_NETWORKSYSTEM.format(uri))
def networksystem_discover(self, uri):
return self.api('POST', URI_NETWORKSYSTEM_DISCOVER.format(uri))
def networksystem_list_connections(self, uri, fabricId):
if (fabricId):
return self.api('GET', URI_NETWORKSYSTEM_FCENDPOINTS_FABRIC.format(uri, fabricId))
else:
return self.api('GET', URI_NETWORKSYSTEM_FCENDPOINTS.format(uri))
def networksystem_physical_inventory(self, uri):
return 'Function not supported'
def networksystem_zonereferences(self, initiator, target):
return self.api('GET', URI_NETWORKSYSTEM_VDCREFERENCES.format(initiator,target))
def networksystem_query(self, name):
if (self.__is_uri(name)):
return name
systems = self.networksystem_list()
for system in systems:
if (system['name'] == name):
return system['id']
print systems;
raise Exception('bad networksystem name: ' + name)
def networksystem_list(self):
o = self.api('GET', URI_NETWORKSYSTEMS)
if (not o):
return {};
systems = o['network_system'];
if(type(systems) != list):
return [systems];
return systems;
def networksystem_register(self, uri):
return self.api('POST', URI_NETWORKSYSTEM_REGISTER.format(uri))
def networksystem_deregister(self, uri):
return self.api('POST', URI_NETWORKSYSTEM_DEREGISTER.format(uri))
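    #
    # Port alias and SAN zone APIs
    #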
def portalias_list(self, uri, fabricId):
if (fabricId):
return self.api('GET', URI_NETWORKSYSTEM_ALIASES_FABRIC.format(uri, fabricId))
else:
return self.api('GET', URI_NETWORKSYSTEM_ALIASES.format(uri))
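    # 'aliases' is a '#'-separated list of "name,address" entries (one per alias).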
def portalias_create(self, uri, fabricId, aliases):
if (not aliases):
raise Exception( 'No aliases were provided')
else:
aliasesArr = aliases.split('#')
if ( len(aliasesArr) <= 0):
raise Exception( 'No aliases were provided')
else:
aliasesParam=[]
i=0
for alias in aliasesArr:
nameAddress = alias.split(',');
try:
name = nameAddress[0];
address = nameAddress[1];
aliasesParam.append({'name':name, 'address':address})
i+=1
except:
raise Exception('Name or address was not provided for an alias');
createParam = dict()
createParam['wwn_alias']=aliasesParam
if (fabricId):
createParam['fabric_id']=fabricId
print createParam
o = self.api('POST', URI_NETWORKSYSTEM_ALIASES.format(uri), createParam)
return self.api_sync_2(o['resource']['id'], o['op_id'], self.networksystem_show_task)
def portalias_delete(self, uri, fabricId, aliases):
if (not aliases):
raise Exception( 'No aliases were provided')
else:
aliasesArr = aliases.split('#')
if ( len(aliasesArr) <= 0):
raise Exception( 'No aliases were provided')
aliasesParam=[]
i=0
for alias in aliasesArr:
nameAddress = alias.split(',');
try:
name = nameAddress[0]
try:
address = nameAddress[1]
except:
address = '';
aliasesParam.append({'name':name, 'address':address})
i+=1
except:
raise Exception( 'name was not provided for an alias')
deleteParam = dict()
deleteParam['wwn_alias']=aliasesParam
if (fabricId):
deleteParam['fabric_id']=fabricId
print deleteParam
o = self.api('POST', URI_NETWORKSYSTEM_ALIASES_REMOVE.format(uri), deleteParam)
print o;
return self.api_sync_2(o['resource']['id'], o['op_id'], self.networksystem_show_task)
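    # Each alias entry is "name,new_address[,current_address[,new_name]]"; entries are '#'-separated.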
def portalias_update(self, uri, fabricId, aliases):
if (not aliases):
raise Exception( 'No aliases were provided')
else:
aliasesArr = aliases.split('#')
if ( len(aliasesArr) <= 0):
raise Exception( 'No aliases were provided')
else:
aliasesParam=[]
i=0
for alias in aliasesArr:
nameAddress = alias.split(',');
try:
name = nameAddress[0]
try:
newAddress = nameAddress[1]
except:
                            newAddress = ''
try:
address = nameAddress[2]
except:
address = '';
try:
newName = nameAddress[3]
except:
newName = ''
except:
raise Exception('name or new_address was not provided in an alias for updating')
aliasesParam.append({'name':name, 'address':address, 'new_address':newAddress, 'new_name':newName})
i+=1
updateParam = dict()
updateParam['wwn_alias_update']=aliasesParam
if (fabricId):
updateParam['fabric_id']=fabricId
print updateParam
o = self.api('PUT', URI_NETWORKSYSTEM_ALIASES.format(uri), updateParam)
print o;
return self.api_sync_2(o['resource']['id'], o['op_id'], self.networksystem_show_task)
def zone_list(self, uri, fabricId,zoneName, excludeMembers, excludeAliases):
if (fabricId):
if ( zoneName == None ):
zoneName = ""
return self.api('GET', URI_NETWORKSYSTEM_ZONES_QUERY.format(uri, fabricId, zoneName,excludeMembers, excludeAliases))
else:
raise Exception('fabricid was not provided')
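    # 'zones' is a '#'-separated list of "name,member1+member2+..." entries.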
def zone_create(self, uri, fabricId, zones):
if (not zones):
raise Exception( 'No zones were provided')
elif (not fabricId):
raise Exception( 'fabricid or vsan was not provided');
else:
zonesArr = zones.split('#')
if ( len(zonesArr) <= 0):
raise Exception( 'No zones were provided')
else:
zonesParam=[]
i=0
for zone in zonesArr:
zoneArr = zone.split(',');
try:
name = zoneArr[0];
members = zoneArr[1].split('+');
if ( len(members) <= 0):
raise Exception()
zonesParam.append({'name':name, 'members':members})
i+=1
except:
raise Exception('Name or members was not provided for a zone');
createParam = dict()
createParam['san_zone']=zonesParam
print createParam
o = self.api('POST', URI_NETWORKSYSTEM_ZONES.format(uri,fabricId), createParam)
return self.api_sync_2(o['resource']['id'], o['op_id'], self.networksystem_show_task)
def zone_delete(self, uri, fabricId, zones):
if (not zones):
raise Exception( 'No zones were provided')
elif (not fabricId):
raise Exception( 'fabricid or vsan was not provided');
else:
zonesArr = zones.split('#')
if ( len(zonesArr) <= 0):
raise Exception( 'No zones were provided')
else:
zonesParam=[]
i=0
for zone in zonesArr:
zoneArr = zone.split(',');
try:
name = zoneArr[0];
try:
members = zoneArr[1].split('+');
except:
members=[]
zonesParam.append({'name':name, 'members':members})
i+=1
except:
raise Exception('Name was not provided for a zone');
deleteParam = dict()
deleteParam['san_zone']=zonesParam
print deleteParam
o = self.api('POST', URI_NETWORKSYSTEM_ZONES_REMOVE.format(uri,fabricId), deleteParam)
return self.api_sync_2(o['resource']['id'], o['op_id'], self.networksystem_show_task)
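    # Each zone entry is "name[,add_members[,remove_members]]" with '+'-separated member lists; entries are '#'-separated.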
def zone_update(self, uri, fabricId, zones):
if (not zones):
raise Exception( 'No zones were provided')
elif (not fabricId):
raise Exception( 'fabricid or vsan was not provided');
else:
zonesArr = zones.split('#')
if ( len(zonesArr) <= 0):
raise Exception( 'No zones were provided')
else:
zonesParam=[]
i=0
for zone in zonesArr:
zoneArr = zone.split(',');
try:
name = zoneArr[0];
try:
addMembers = zoneArr[1].split('+');
except:
addMembers=[]
try:
removeMembers = zoneArr[2].split('+');
except:
removeMembers=[]
zonesParam.append({'name':name, 'add':addMembers, 'remove':removeMembers})
i+=1
except:
raise Exception('Name was not provided for a zone');
updateParam = dict()
updateParam['san_zone_update']=zonesParam
print updateParam
o = self.api('PUT', URI_NETWORKSYSTEM_ZONES.format(uri,fabricId), updateParam)
return self.api_sync_2(o['resource']['id'], o['op_id'], self.networksystem_show_task)
def zone_activate(self, uri, fabricId):
if (fabricId):
o = self.api('POST', URI_NETWORKSYSTEM_ZONES_ACTIVATE.format(uri,fabricId))
return self.api_sync_2(o['resource']['id'], o['op_id'], self.networksystem_show_task)
else:
raise Exception('fabricid was not provided')
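    #
    # Storage system APIs
    #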
def storagedevice_create(self, label, type, devip, devport, username, password,
serialno, smis_ip, smis_port, smisuser, smispw, smisssl, uri):
parms = { 'system_type' : type,
'ip_address' : devip,
'port_number' : devport,
'user_name' : username,
'password' : password,
'name' : label,
#'registration_mode' : registrationmode,
#'registration_status': registrationstatus,
}
if(serialno):
parms['serial_number'] = serialno
if(smis_ip):
parms['smis_provider_ip'] = smis_ip
if(smis_port):
parms['smis_port_number'] = smis_port
if (smisuser):
parms['smis_user_name'] = smisuser
if (smispw):
parms['smis_password'] = smispw
if (smisssl):
parms['smis_use_ssl'] = smisssl
o = self.api('POST', uri, parms)
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.storagedevice_show_task)
return s
def storagedevice_discover_all(self, ignore_error):
o = self.api('POST', URI_STORAGEDEVICE_DISCOVERALL);
for task in o['task'] :
s=self.api_sync_2(task['resource']['id'],task['op_id'],self.storagedevice_show_task, ignore_error)
# self.pretty_print_json(s)
# Check if all the storagedevices come up as 'COMPLETED'
refs = self.storagedevice_list()
count=0
while True:
all_complete=True
for ref in refs:
ss = self.storagedevice_show(ref['id'])
# self.pretty_print_json(ss)
disc_status = ss['job_discovery_status']
if (disc_status != 'COMPLETE'):
all_complete=False
if (all_complete):
return "discovery is completed"
else:
# Timeout after some time
if (count > 180):
return "Timed out waiting for disovery to complete"
else:
time.sleep(10)
count = count + 1
return "discovery is completed"
def storagedevice_discover_namespace(self, native_guid, namespace, ignore_error):
if (self.__is_uri(native_guid)):
            return native_guid
systems = self.storagedevice_list()
for system in systems:
try:
storage_system = self.show_element(system['id'], URI_DISCOVERED_STORAGEDEVICE)
if (storage_system['native_guid'] == native_guid):
o = self.api('POST', URI_DISCOVERED_STORAGEDEVICE_NS.format(system['id'], namespace));
s=self.api_sync_2(o['resource']['id'],o['op_id'],self.storagedevice_show_task, ignore_error)
return "discovery of namespace is completed"
except KeyError:
print 'no name key'
raise Exception('bad storagedevice name: ' + native_guid)
def storagedevice_show(self, uri):
return self.api('GET', URI_STORAGEDEVICE.format(uri))
def storagedevice_update(self, uri, max_resources):
parms = dict()
parms['max_resources'] = max_resources
return self.api('PUT', URI_STORAGEDEVICE.format(uri), parms)
def storagedevice_show_task(self, device, task):
uri_device_task = URI_STORAGEDEVICE + '/tasks/{1}'
return self.api('GET', uri_device_task.format(device,task))
def storagedevice_delete(self, uri):
return self.api('POST', URI_RESOURCE_DEACTIVATE.format(URI_STORAGEDEVICE.format(uri)))
def storagedevice_deregister(self, uri):
return self.api('POST', URI_STORAGEDEVICE_DEREGISTER.format(uri))
def register_element(self, provideruri, systemuri, resourceuri):
return self.api('POST', resourceuri.format(provideruri, systemuri))
def storagedevice_refresh(self, uri, resourceuri):
return self.api('POST', resourceuri.format(uri))
def storagedevice_query(self, name):
if (self.__is_uri(name)):
return name
systems = self.storagedevice_list()
for system in systems:
try:
if (system['name'] == name):
return system['id']
except KeyError:
print 'no name key'
raise Exception('bad storagedevice name: ' + name)
def storagedevice_querybyIp(self, ip):
systems = self.discovered_storagedevice_list()
print 'SYSTEMS: ' + str(systems) + ' :SYSTEMS'
for system in systems:
try:
storage_system = self.show_element(system['id'], URI_DISCOVERED_STORAGEDEVICE)
print("Storage_System: " + str(storage_system) + " :Storage_System" )
if (storage_system['ip_address'] == ip):
return system['id']
except KeyError:
print 'no ip key'
raise Exception('bad ip: ' + ip)
def storagedevice_querybynativeguid(self, native_guid):
systems = self.discovered_storagedevice_list()
print 'SYSTEMS: ' + str(systems) + ' :SYSTEMS'
for system in systems:
try:
storage_system = self.show_element(system['id'], URI_DISCOVERED_STORAGEDEVICE)
print("Storage_System: " + str(storage_system) + " :Storage_System" )
if (storage_system['native_guid'] == native_guid):
return system['id']
except KeyError:
print 'no native_guid key'
raise Exception('bad native_guid: ' + native_guid)
def storagedevice_list(self):
o = self.api('GET', URI_STORAGEDEVICES)
if (not o):
return {};
systems = o['storage_system'];
if(type(systems) != list):
return [systems];
return systems;
def discovered_storagedevice_list(self):
o = self.api('GET', URI_DISCOVERED_STORAGEDEVICES)
if (not o):
return {};
systems = o['storage_system'];
if(type(systems) != list):
return [systems];
return systems;
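    #
    # Bulk resource APIs (id lists and POST representations)
    #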
def storagesystem_bulkgetids(self):
ids = self.__storagesystem_bulkget_ids()
# retrieve the first 10 ids only
chunk = self.get_ids_chunk(ids['id'], 0, 10)
return chunk
def storagesystem_bulkpost(self, ids):
return self.__storagesystem_bulkget_reps(ids)
def __storagesystem_bulkget_ids(self):
return self.api('GET', URI_STORAGESYSTEMS_BULKGET)
def __storagesystem_bulkget_reps(self, ids):
return self.api('POST', URI_STORAGESYSTEMS_BULKGET, ids)
def storageport_bulkgetids(self):
ids = self.__storageport_bulkget_ids()
# retrieve the first 10 ids only
chunk = self.get_ids_chunk(ids['id'], 0, 10)
return chunk
def storageport_bulkpost(self, ids):
return self.__storageport_bulkget_reps(ids)
def __storageport_bulkget_ids(self):
return self.api('GET', URI_STORAGEPORTS_BULKGET)
def __storageport_bulkget_reps(self, ids):
return self.api('POST', URI_STORAGEPORTS_BULKGET, ids)
def storagepool_bulkgetids(self):
ids = self.__storagepool_bulkget_ids()
# retrieve the first 10 ids only
chunk = self.get_ids_chunk(ids['id'], 0, 10)
return chunk
def storagepool_bulkpost(self, ids):
return self.__storagepool_bulkget_reps(ids)
def __storagepool_bulkget_ids(self):
return self.api('GET', URI_STORAGEPOOLS_BULKGET)
def __storagepool_bulkget_reps(self, ids):
return self.api('POST', URI_STORAGEPOOLS_BULKGET, ids)
def transportzone_bulkgetids(self):
ids = self.__transportzone_bulkget_ids()
# retrieve the first 10 ids only
chunk = self.get_ids_chunk(ids['id'], 0, 10)
return chunk
def transportzone_bulkpost(self, ids):
return self.__transportzone_bulkget_reps(ids)
def __transportzone_bulkget_ids(self):
return self.api('GET', URI_NETWORKS_BULKGET)
def __transportzone_bulkget_reps(self, ids):
return self.api('POST', URI_NETWORKS_BULKGET, ids)
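    #
    # Storage pool APIs
    #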
def storagepool_update(self, pooluri, nhadds, nhrems, max_resources):
parms = dict()
nhassignments = dict();
parms['varray_assignment_changes'] = nhassignments
if (nhadds):
nh = dict();
nh['varrays'] = nhadds
nhassignments['add'] = nh;
if (nhrems):
nh = dict();
nh['varrays'] = nhrems
nhassignments['remove'] = nh;
if (max_resources):
parms['max_resources'] = max_resources
return self.api('PUT', URI_STORAGEPOOL_UPDATE.format(pooluri), parms)
def storagepool_register(self, systemuri, pooluri):
return self.api('POST', URI_STORAGEPOOL_REGISTER.format(systemuri, pooluri))
def storagepool_deregister(self, name):
#
# name = { pool_uri | concat(storagedevice, label) }
#
try:
(sdname, label) = name.split('/', 1)
except:
return name
sduri = self.storagedevice_query(sdname)
pools = self.storagepool_list(sduri)
for pool in pools:
pool = self.storagepool_show(sduri, pool['id'])
if (pool['native_guid'] == label):
return self.api('POST', URI_STORAGEPOOL_DEREGISTER.format(pool['id']))
raise Exception('bad storagepool name')
def storagepool_show(self, systemuri, pooluri):
return self.api('GET', URI_STORAGEPOOL_SHOW.format(systemuri, pooluri))
def storagepool_query(self, name):
#
# name = { pool_uri | concat(storagedevice, label) }
#
try:
(sdname, label) = name.split('/', 1)
except:
return name
sduri = self.storagedevice_query(sdname)
pools = self.storagepool_list(sduri)
for pool in pools:
pool = self.storagepool_show(sduri, pool['id'])
if (pool['native_guid'] == label):
return pool['id']
raise Exception('bad storagepool name')
def storagepool_list(self, uri):
o = self.api('GET', URI_STORAGEPOOLS.format(uri))
if (not o):
return {};
else:
return o['storage_pool']
def list_elements(self, uri, resourceuri):
o = self.api('GET', resourceuri.format(uri))
if (not o):
return {};
elif (isinstance(o['id'], str)):
return [o['id']]
else:
return o['id']
def list_poolsbycos(self, uri, resourceuri):
o = self.api('GET', resourceuri.format(uri))
return o
# to get list of storagesystems
def list_discovered_elements(self, resourceuri):
o = self.api('GET', resourceuri)
if (not o):
return {};
elif (isinstance(o['id'], str)):
return [o['id']]
else:
return o['id']
def show_element(self, uri, resourceuri):
return self.api('GET', resourceuri.format(uri))
#
#Disaster Recovery APIs
#
def dr_add_standby(self, name, description, vip, username, password):
parms = {
'name' : name,
'description' : description,
'vip' : vip,
'username' : username,
'password' : password
}
print "DR ADD STANDBY Params = ", parms
resp = self.api('POST', URI_DR, parms, {})
print "DR ADD STANDBY RESP = ", resp
self.assert_is_dict(resp)
return resp
def dr_list_standby(self):
resp = self.api('GET', URI_DR)
print "DR LIST STANDBY RESP = ",resp
self.assert_is_dict(resp)
return resp
def dr_get_standby(self,uuid):
resp = self.api('GET', URI_DR_GET.format(uuid))
print "DR GET STANDBY RESP = ",resp
self.assert_is_dict(resp)
return resp
def dr_get_standby_details(self,uuid):
resp = self.api('GET', URI_DR_GET_DETAILS.format(uuid))
print "DR GET STANDBY DETAILS RESP = ",resp
self.assert_is_dict(resp)
return resp
def dr_delete_standby(self,uuid):
resp = self.api('DELETE', URI_DR_DELETE.format(uuid))
print "DR DELETE STANDBY RESP = ",resp
return resp
def dr_switchover(self,uri):
resp = self.api('POST', URI_DR_SWITCHOVER.format(uri))
print "DR SWITCHOVER RESP = ",resp
def dr_pause_standby(self,uuid):
resp = self.api('POST', URI_DR_PAUSE.format(uuid))
print "DR PAUSE STANDBY RESP = ",resp
return resp
def dr_resume_standby(self,uuid):
resp = self.api('POST', URI_DR_RESUME.format(uuid))
print "DR RESUME STANDBY RESP = ",resp
self.assert_is_dict(resp)
return resp
def dr_failover(self,uuid):
resp = self.api('POST', URI_DR_FAILOVER.format(uuid))
print "DR FAILOVER RESP = ",resp
return resp
#
# IPsec APIs
#
def ipsec_rotate_key(self):
resp = self.api('POST', URI_IPSEC_KEY)
return resp
def ipsec_check(self):
resp = self.api('GET', URI_IPSEC)
return resp
def ipsec_change_status(self,status):
resp = self.api('POST', URI_IPSEC_STATUS.format(status))
return resp
#
#VDC APIs
#
def vdc_show(self, uri):
return self.api('GET', URI_VDC_GET.format(uri))
    def vdc_query(self, name):
        if (self.__is_uri(name)):
            return name
        vdclist = self.vdc_list()
        for eachvdc in vdclist:
            for detail in vdclist[eachvdc]:
                if (detail['name'] == name):
                    return detail['id']
        raise Exception('bad vdc name ' + name)
def vdc_show_task(self, vdc, task):
uri_vdc_task = URI_VDC_GET + '/tasks/{1}'
result = self.api('GET', uri_vdc_task.format(vdc, task))
if isinstance(result, str) or isinstance(result, basestring):
raise requests.exceptions.ConnectionError("unexpected error")
return result
def vdc_add(self, name, endpoint, key, certificate_chain, dataEndpoint=None, cmdEndpoint=None):
parms = {
'name' : name,
'api_endpoint' : endpoint,
'secret_key' : key,
'certificate_chain' : certificate_chain,
}
if dataEndpoint:
parms['geo_data_endpoint'] = dataEndpoint
if cmdEndpoint:
parms['geo_command_endpoint'] = cmdEndpoint
print "VDC ADD Params = ", parms
resp = self.api('POST', URI_VDC, parms, {})
print "VDC ADD RESP = ", resp
self.assert_is_dict(resp)
result = self.api_sync_2(resp['resource']['id'], resp['op_id'], self.vdc_show_task)
return result
def vdc_update(self, id, name, dataEndpoint=None, cmdEndpoint=None):
parms = {
'name' : name,
}
if dataEndpoint:
parms['geo_data_endpoint'] = dataEndpoint
if cmdEndpoint:
parms['geo_command_endpoint'] = cmdEndpoint
print "VDC UPDATE Params = ", parms
resp = self.api('PUT', URI_VDC_GET.format(id), parms, {})
print "VDC UPDATE RESP = ", resp
self.assert_is_dict(resp)
result = self.api_sync_2(resp['resource']['id'], resp['op_id'], self.vdc_show_task)
return result
def vdc_del(self, id):
print "VDC DEL id = ", id
resp = self.api('DELETE', URI_VDC_GET.format(id))
print "VDC DEL RESP = ", resp
self.assert_is_dict(resp)
result = self.api_sync_2(resp['resource']['id'], resp['op_id'], self.vdc_show_task)
return result
def vdc_disconnect(self, id):
print "VDC DIS id = ", id
resp = self.api('POST', URI_VDC_DISCONNECT_POST.format(id))
print "VDC DIS RESP = ", resp
self.assert_is_dict(resp)
result = self.api_sync_2(resp['resource']['id'], resp['op_id'], self.vdc_show_task)
return result
def vdc_reconnect(self, id):
print "VDC REC id = ", id
resp = self.api('POST', URI_VDC_RECONNECT_POST.format(id))
print "VDC REC RESP = ", resp
self.assert_is_dict(resp)
result = self.api_sync_2(resp['resource']['id'], resp['op_id'], self.vdc_show_task)
return result
def vdc_get_id(self, vdcname):
vdclist = self.vdc_list()
for eachvdc in vdclist:
for detail in vdclist[eachvdc]:
if(detail['name'] == vdcname):
return detail['id']
return None
def vdc_list(self):
resp = self.api('GET', URI_VDC)
return resp
def vdc_get_secret_key(self):
o = self.api('GET', URI_VDC_SECRETKEY)
return o['secret_key']
def vdc_get_certchain(self):
o = self.api('GET', URI_VDC_CERTCHAIN)
return o['chain']
def vdc_get_local(self):
vdclist = self.vdc_list()
for eachvdc in vdclist:
for detail in vdclist[eachvdc]:
vdc = self.vdc_show(detail['id'])
if vdc["local"] == True:
return detail['id']
return None
def obj_add_key_cert_pair(self, key, certificate_chain):
parms = {
'system_selfsigned' : False,
'key_and_certificate' : {
'private_key' : key,
'certificate_chain' : certificate_chain
}
}
resp = self.api('PUT', URI_OBJ_CERT, parms, {})
print "VDC ADD OBJ CERT = ", resp
return resp
def obj_generate_key_cert_pair(self, ips):
parms = {
'system_selfsigned' : True
}
if(ips != None):
parms = {
'system_selfsigned' : True,
'ip_addresses' : ips
}
resp = self.api('PUT', URI_OBJ_CERT, parms, {})
print "VDC ADD OBJ CERT = ", resp
return resp
def obj_get_certChain(self):
o = self.api('GET', URI_OBJ_CERT)
return o['chain']
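    #
    # Node recovery and backup APIs
    #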
    def trigger_node_recovery(self):
return self.api('POST', URI_RECOVERY)
def get_recovery_status(self):
return self.api('GET', URI_RECOVERY)
def create_backup(self,name):
return self.api('POST', URI_BACKUP_CREATE.format(name))
def delete_backup(self,name):
return self.api('DELETE', URI_BACKUP_DELETE.format(name))
def list_backup(self):
return self.api('GET', URI_BACKUP_LIST)
def list_external_backup(self):
return self.api('GET', URI_BACKUP_LIST_EXTERNAL)
def download_backup(self,name):
return self.api('GET', URI_BACKUP_DOWNLOAD.format(name), None, None, content_type=CONTENT_TYPE_OCTET)
def upload_backup(self,name):
return self.api('POST', URI_BACKUP_UPLOAD.format(name), None, None, content_type=CONTENT_TYPE_OCTET)
def query_upload_backup(self,name):
return self.api('GET', URI_BACKUP_QUERY_UPLOAD.format(name))
def query_backup_info(self,name,isLocal):
return self.api('GET', URI_BACKUP_QUERY_INFO.format(name, isLocal))
def pull_backup(self,name):
return self.api('POST', URI_BACKUP_PULL.format(name), None, None, content_type=CONTENT_TYPE_OCTET)
def query_pull_backup(self,name,isLocal):
return self.api('GET', URI_BACKUP_QUERY_PULL.format(name, isLocal))
def restore_backup(self,name,isLocal,password):
return self.api('POST', URI_BACKUP_RESTORE.format(name, isLocal, password))
def get_db_repair_status(self):
return self.api('GET', URI_DB_REPAIR)
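    #
    # Task APIs
    #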
def task_delete(self,uri):
return self.api('POST', URI_TASK_DELETE.format(uri))
def task_list(self):
return self.api('GET', URI_TASK_LIST)
def task_list_system(self):
return self.api('GET', URI_TASK_LIST_SYSTEM)
def task_show(self, uri):
return self.api('GET', URI_TASK_GET.format(uri))
def task_follow(self, uri):
return self.api_sync_4(uri, self.task_show)
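    #
    # Block volume APIs
    #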
def volume_list(self, project):
puri = self.project_query(project)
puri = puri.strip()
results = self.volume_search(None, puri)
return results['resource']
def volume_bulkget(self):
ids = self.__volume_bulkget_ids()
# retrieve the first 10 volumes only
chunk = self.get_ids_chunk(ids['id'], 0, 10)
return self.__volume_bulkget_reps(chunk)
def __volume_bulkget_ids(self):
return self.api('GET', URI_VOLUME_BULKGET)
def __volume_bulkget_reps(self, ids):
return self.api('POST', URI_VOLUME_BULKGET, ids)
def volume_bulkgetids(self):
ids = self.__volume_bulkget_ids()
# retrieve the first 10 volumes only
chunk = self.get_ids_chunk(ids['id'], 0, 10)
return chunk
def volume_bulkpost(self, ids):
return self.api('POST', URI_VOLUME_BULKGET, ids)
def get_ids_chunk(self, ids, start, end):
chunk = { 'id' : ids[start:end] }
#self.pretty_print_json(chunk)
return chunk
def volume_show(self, uri):
return self.api('GET', URI_VOLUME.format(uri))
def volume_show_task(self, vol, task):
uri_vol_task = URI_VOLUME + '/tasks/{1}'
return self.api('GET', uri_vol_task.format(vol, task))
def volume_exports(self, uri):
return self.api('GET', URI_VOLUMES_EXPORTS.format(uri))
def volume_create(self, label, project, neighborhood, cos, size, isThinVolume, count, protocols, protection, consistencyGroup, computeResource, portgroup):
parms = {
'name' : label,
'varray' : neighborhood,
'project' : project,
'vpool' : cos,
'size' : size,
'count' : count,
}
if (protocols):
parms['protocols'] = {'protocol' : protocols}
if (consistencyGroup != ''):
parms['consistency_group'] = consistencyGroup
if (computeResource):
parms['computeResource'] = computeResource
if (portgroup):
parms['port_group'] = portgroup
print "VOLUME CREATE Params = ", parms
resp = self.api('POST', URI_VOLUME_LIST, parms, {})
print "RESP = ", resp
self.assert_is_dict(resp)
tr_list = resp['task']
#print 'DEBUG : debug operation for volume : ' + o['resource']['id']
print tr_list
result = list()
for tr in tr_list:
s = self.api_sync_2(tr['resource']['id'], tr['op_id'], self.volume_show_task)
result.append(s)
return result
def volume_add_journal(self, copyName, project, neighborhood, cos, size, count, consistencyGroup):
parms = {
'name' : copyName,
'varray' : neighborhood,
'project' : project,
'vpool' : cos,
'size' : size,
'count' : count,
'consistency_group' : consistencyGroup,
}
print "ADD JOURNAL Params = ", parms
resp = self.api('POST', URI_ADD_JOURNAL, parms, {})
print "RESP = ", resp
self.assert_is_dict(resp)
tr_list = resp['task']
#print 'DEBUG : debug operation for volume : ' + o['resource']['id']
print tr_list
result = list()
for tr in tr_list:
s = self.api_sync_2(tr['resource']['id'], tr['op_id'], self.volume_show_task)
result.append(s)
return result
def volume_full_copy(self, label, sourceVolume, count, createInactive):
parms = {
'name' : label,
'count': count,
'create_inactive': createInactive
}
resp = self.api('POST', URI_VOLUME_FULL_COPY.format(sourceVolume), parms, {})
self.assert_is_dict(resp)
tr_list = resp['task']
#print 'DEBUG : debug operation for volume : ' + o['resource']['id']
print tr_list
result = list()
for tr in tr_list:
s = self.api_sync_2(tr['resource']['id'], tr['op_id'], self.volume_show_task)
result.append(s)
return result
def volume_full_copy_resync(self, fullCopyVolume):
resp = self.api('POST', URI_FULL_COPY_RESYNC.format(fullCopyVolume), {}, {})
self.assert_is_dict(resp)
tr_list = resp['task']
print tr_list
result = list()
for tr in tr_list:
s = self.api_sync_2(tr['resource']['id'], tr['op_id'], self.volume_show_task)
result.append(s)
return result
def volume_full_copy_restore(self, fullCopyVolume):
resp = self.api('POST', URI_FULL_COPY_RESTORE.format(fullCopyVolume), {}, {})
self.assert_is_dict(resp)
tr_list = resp['task']
print tr_list
result = list()
for tr in tr_list:
s = self.api_sync_2(tr['resource']['id'], tr['op_id'], self.volume_show_task)
result.append(s)
return result
def volume_activate(self, sourceVolume, fullCopyVolume):
resp = self.api('POST', URI_VOLUME_FULL_COPY_ACTIVATE.format(sourceVolume, fullCopyVolume), {}, {})
self.assert_is_dict(resp)
result = self.api_sync_2(resp['resource']['id'], resp['op_id'], self.volume_show_task)
return result
def volume_full_copy_check_progress(self, sourceVolume, fullCopyVolume):
resp = self.api('POST', URI_VOLUME_FULL_COPY_CHECK_PROGRESS.format(sourceVolume, fullCopyVolume), {}, {})
self.assert_is_dict(resp)
return resp
def volume_detach(self, sourceVolume, fullCopyVolume):
resp = self.api('POST', URI_VOLUME_FULL_COPY_DETACH.format(sourceVolume, fullCopyVolume), {}, {})
self.assert_is_dict(resp)
        result = None
        try:
            result = self.api_sync_2(resp['resource']['id'], resp['op_id'], self.volume_show_task)
        except:
            print resp
        return result
def volume_full_copies(self, uri):
return self.api('GET', URI_VOLUME_FULL_COPY.format(uri))
def volume_change_cos(self, uris, cos_uri, cg_uri, suspend):
dosuspend='false'
if (suspend):
dosuspend=suspend
ids = []
if (type(uris) is list):
for u in uris:
ids.append(u)
else:
ids.append(uris)
params = {}
params['volumes'] = ids
params['vpool'] = cos_uri
params['consistency_group'] = cg_uri
params['migration_suspend_before_commit'] = dosuspend
params['migration_suspend_before_delete_source'] = dosuspend
posturi = URI_VOLUME_CHANGE_VPOOL
resp = self.api('POST', posturi, params, {})
self.assert_is_dict(resp)
tr_list = resp['task']
result = list()
for tr in tr_list:
s = self.api_sync_2(tr['resource']['id'], tr['op_id'], self.volume_show_task)
result.append(s)
return result
def volume_change_cos_matches(self, uri):
tr = self.api('GET', URI_VOLUME_CHANGE_VPOOL_MATCH.format(uri))
return tr
def volume_change_nh(self, uri, nh_uri):
parms = {
'varray' : nh_uri,
}
tr = self.api('PUT', URI_VOLUME_CHANGE_VARRAY.format(uri), parms, {})
self.assert_is_dict(tr)
result = self.api_sync_2(tr['resource']['id'], tr['op_id'], self.volume_show_task)
return result
def volume_expand(self, uri, size):
parms = {
'new_size' : size,
}
tr = self.api('POST', URI_VOLUME_EXPAND.format(uri), parms)
self.assert_is_dict(tr)
result = self.api_sync_2(tr['resource']['id'], tr['op_id'], self.volume_show_task)
return result
def volume_change_link(self, uri, operation, copy_uri, type, am, pit):
copies_param = dict()
copy = dict()
copy_entries = []
copy['copyID'] = copy_uri
copy['type'] = type
if (pit):
copy['pointInTime'] = pit
if (am):
copy['accessMode'] = am
copy_entries.append(copy)
copies_param['copy'] = copy_entries
o = self.api('POST', URI_VOLUME_CHANGE_LINK.format(uri) + "/" + operation, copies_param)
self.assert_is_dict(o)
s = self.api_sync_2(o['task'][0]['resource']['id'], o['task'][0]['op_id'], self.volume_show_task)
m = s['message']
return (s, m)
def volume_verify(self, uri, field, value):
o = self.api('GET', URI_VOLUME.format(uri))
self.assert_is_dict(o)
foundValue = 'N/A';
if field == 'personality':
personality = 'N/A'
if 'protection' in o.keys():
if 'recoverpoint' in o['protection'].keys():
personality = o['protection']['recoverpoint']['personality'];
if 'srdf' in o['protection'].keys():
personality = o['protection']['srdf']['personality'];
if personality == value:
return;
if field in o.keys():
foundValue = o[field];
if o[field] == value:
return;
elif value == "none":
return;
        print 'ERROR: Volume field FAILED Verification: ' + field + ' IS: ' + foundValue + ', SHOULD BE: ' + value;
return -1;
def volume_delete(self, uri, wait, vipronly, force):
s = ""
m = ""
posturi = URI_RESOURCE_DEACTIVATE.format(URI_VOLUME.format(uri))
if (vipronly):
posturi = posturi + '?type=VIPR_ONLY'
if (force):
posturi = posturi + '&force=TRUE'
elif (force):
posturi = posturi + '?force=TRUE'
o = self.api('POST', posturi)
if (wait):
self.assert_is_dict(o)
try:
sync = self.api_sync_2(o['resource']['id'], o['op_id'], self.volume_show_task)
s = sync['state']
m = sync['message']
except:
print o
return (o, s, m)
def volume_multi_delete(self, uris, wait, vipronly, force):
params = {}
ids = []
if (type(uris) is list):
for u in uris:
ids.append(u)
else:
ids.append(uris)
params['id'] = ids
s = ""
m = ""
posturi = URI_VOLUMES_DEACTIVATE
if (vipronly):
posturi = posturi + '?type=VIPR_ONLY'
if (force):
posturi = posturi + '&force=TRUE'
elif (force):
posturi = posturi + '?force=TRUE'
o = self.api('POST', posturi, params)
if (wait):
self.assert_is_dict(o)
tr_list = o['task']
for tr in tr_list:
sync = self.api_sync_2(tr['resource']['id'], tr['op_id'], self.volume_show_task)
s = sync['state']
m = sync['message']
return (o, s, m)
def volume_query(self, name):
if (self.__is_uri(name)):
return name
if (len(name.split('/')) == 3):
(voluri, uri) = self.block_mirror_query(name)
if (uri == None):
return self.block_snapshot_query(name)
else:
return uri
(pname, label) = name.rsplit('/', 1)
puri = self.project_query(pname)
puri = puri.strip()
results = self.volume_search(None, puri)
resources = results['resource']
for resource in resources:
if (resource['match'] == label):
return resource['id']
raise Exception('bad volume name ' + name)
def volume_search(self, name, project=None, tag=None, wwn=None):
if (self.__is_uri(name)):
return name
if (wwn):
return self.api('GET', URI_VOLUMES_SEARCH_WWN.format(wwn))
if (tag):
return self.api('GET', URI_VOLUMES_SEARCH_TAG.format(tag))
if (name):
if (project):
return self.api('GET', URI_VOLUMES_SEARCH_PROJECT_NAME.format(project,name))
else:
return self.api('GET', URI_VOLUMES_SEARCH_NAME.format(name))
if (project):
return self.api('GET', URI_VOLUMES_SEARCH_PROJECT.format(project))
def volume_name(self, uri):
volume = self.volume_show(uri)
return volume['name']
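    #
    # Migration APIs
    #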
def migration_create(self, volume_uri, source_system_uri, target_system_uri, vpool_uri):
parms = {
'volume' : volume_uri,
'source_storage_system' : source_system_uri,
'target_storage_system' : target_system_uri,
}
if (vpool_uri):
parms['vpool'] = vpool_uri
resp = self.api('POST', URI_MIGRATIONS, parms, {})
self.assert_is_dict(resp)
print resp
s = self.api_sync_2(resp['resource']['id'], resp['op_id'], self.volume_show_task)
return s
def migration_query(self, name):
if (self.__is_uri(name)):
return name
migrations = self.migration_list()
for migration in migrations:
try:
if (migration['name'] == name):
return migration['id']
except KeyError:
print 'no name key'
raise Exception('Bad migration name: ' + name)
def migration_list(self):
migrationlist = self.api('GET', URI_MIGRATIONS)
if (not migrationlist):
return {}
migrations = migrationlist['block_migration']
if (type(migrations) != list):
return [migrations]
return migrations
def migration_show(self, uri):
return self.api('GET', URI_MIGRATION.format(uri))
#
# Block Mirror
#
def block_mirror_show_task(self, volume, mirror, task):
return self.api('GET', URI_BLOCK_MIRROR_TASKS.format(volume, mirror, task))
def block_mirror_show(self, volume, mirror):
return self.api('GET', URI_BLOCK_MIRRORS_READ.format(volume, mirror))
def block_mirror_list(self, volume):
vuri = self.volume_query(volume)
vuri = vuri.strip()
o = self.api('GET', URI_BLOCK_MIRRORS_LIST.format(vuri))
print "Mirror list response: " + str(o)
self.assert_is_dict(o)
blkmirrors = o['mirror']
ids = []
if (not o):
return {}
else :
if (type(blkmirrors) != list):
blkmirrors = [blkmirrors]
for blkmirror in blkmirrors:
ids.append(blkmirror.get('id'))
return ids
def block_mirror_query(self, name):
if (self.__is_uri(name)):
return name
(sname, label) = name.rsplit('/', 1)
furi = self.volume_query(sname)
furi = furi.strip()
return (furi, self.block_mirror_get_id_by_name(furi,label))
def block_mirror_get_id_by_name(self, volume, name):
vuri = self.volume_query(volume)
vuri = vuri.strip()
o = self.api('GET', URI_BLOCK_MIRRORS_LIST.format(vuri))
self.assert_is_dict(o)
blkmirrors = o['mirror']
ids = []
if (not o):
return {}
else :
if (type(blkmirrors) != list):
blkmirrors = [blkmirrors]
for blkmirror in blkmirrors:
print 'The requested name : ' + name
if(name == blkmirror.get('name')):
print 'The selected id : ' + blkmirror.get('id')
return blkmirror.get('id')
def block_mirror_pause_all(self, volume):
copies_param = dict()
copy = dict()
copy_entries = []
copy['type'] = "native"
copy_entries.append(copy)
copies_param['copy'] = copy_entries
o = self.api('POST', URI_BLOCK_MIRRORS_PAUSE_ALL.format(volume), copies_param)
self.assert_is_dict(o)
print "MIRROR_PAUSE_RESP: " + str(o)
s = self.api_sync_2(o['task'][0]['resource']['id'], o['task'][0]['op_id'], self.volume_show_task)
return (o, s['state'], s['message'])
def block_mirror_resume_all(self, volume):
copies_param = dict()
copy = dict()
copy_entries = []
copy['type'] = "native"
copy_entries.append(copy)
copies_param['copy'] = copy_entries
o = self.api('POST', URI_BLOCK_MIRRORS_RESUME_ALL.format(volume), copies_param)
self.assert_is_dict(o)
print "MIRROR_RESUME_RESP: " + str(o)
s = self.api_sync_2(o['task'][0]['resource']['id'], o['task'][0]['op_id'], self.volume_show_task)
return (o, s['state'], s['message'])
def block_mirror_deactivate(self, volume_uri, mirror_uri):
copies_param = dict()
copy = dict()
copy_entries = []
copy['type'] = "native"
copy['copyID'] = mirror_uri
copy_entries.append(copy)
copies_param['copy'] = copy_entries
resp= self.api('POST', URI_BLOCK_MIRRORS_DEACTIVATE.format(volume_uri), copies_param)
self.assert_is_dict(resp)
print "RESP = ", resp
tr_list = resp['task']
print tr_list
result = list()
for tr in tr_list:
s = self.api_sync_2(tr['resource']['id'], tr['op_id'], self.volume_show_task)
result.append(s)
return result
def block_mirror_attach(self, volume, label, count):
copies_param = dict()
copy = dict()
copy_entries = []
copy['name'] = label
copy['count'] = count
copy['type'] = "native"
copy_entries.append(copy)
copies_param['copy'] = copy_entries
resp = self.api('POST', URI_BLOCK_MIRRORS_ATTACH.format(volume), copies_param)
self.assert_is_dict(resp)
print "RESP = ", resp
tr_list = resp['task']
print tr_list
result = list()
for tr in tr_list:
s = self.api_sync_2(tr['resource']['id'], tr['op_id'], self.volume_show_task)
result.append(s)
return result
def block_mirror_detach_all(self, volume):
copies_param = dict()
copy = dict()
copy_entries = []
copy['type'] = "native"
copy_entries.append(copy)
copies_param['copy'] = copy_entries
resp = self.api('POST', URI_BLOCK_MIRRORS_DETACH_ALL.format(volume), copies_param)
self.assert_is_dict(resp)
print "RESP = ", resp
tr_list = resp['task']
print tr_list
result = list()
for tr in tr_list:
s = self.api_sync_2(tr['resource']['id'], tr['op_id'], self.volume_show_task)
result.append(s)
return result
#
# Block Consistency Groups
#
def block_consistency_group_create(self, project, label, arrayconsistency):
arrayconsistencyvalue = "false"
if (arrayconsistency):
arrayconsistencyvalue=arrayconsistency
parms = {
'name' : label,
'project' : project,
'array_consistency' : arrayconsistencyvalue,
}
return self.api('POST', URI_BLOCK_CONSISTENCY_GROUP_CREATE, parms)
def block_consistency_group_show_task(self, group, task):
return self.api('GET', URI_BLOCK_CONSISTENCY_GROUP_TASKS.format(group, task))
def block_consistency_group_show(self, group):
return self.api('GET', URI_BLOCK_CONSISTENCY_GROUP.format(group))
def block_consistency_group_query(self, name):
if (self.__is_uri(name)):
return name
return (self.block_consistency_group_get_id_by_name(name))
def block_consistency_group_get_id_by_name(self, name):
print 'The requested name : ' + name
resource = self.search('block_consistency_group', None, name, None, False)
for consistencyGroup in resource:
if (consistencyGroup.get('match') == name):
return consistencyGroup.get('id')
raise Exception('bad consistency group name')
def block_consistency_group_delete(self, group_uri, vipronly):
posturi = URI_BLOCK_CONSISTENCY_GROUP_DELETE.format(group_uri)
if (vipronly):
posturi = posturi + '?type=VIPR_ONLY'
o = self.api('POST', posturi);
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.block_consistency_group_show_task)
return (o, s)
def block_consistency_group_update(self, group, add, remove):
to_add = {}
to_remove = {}
if (add):
volumes = []
for vol in add.split(','):
volumes.append(self.volume_query(vol))
to_add['volume'] = volumes
if (remove):
volumes = []
for vol in remove.split(','):
volumes.append(self.volume_query(vol))
to_remove['volume'] = volumes
update_params = {}
update_params['add_volumes'] = to_add
update_params['remove_volumes'] = to_remove
o = self.api('PUT', URI_BLOCK_CONSISTENCY_GROUP.format(group), update_params)
self.assert_is_dict(o)
if ('op_id' in o):
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.block_consistency_group_show_task)
else:
s = o['details']
return s
def block_consistency_group_bulkgetids(self):
ids = self.__block_consistency_group_bulkget_ids()
# retrieve the first 10 ids only
chunk = self.get_ids_chunk(ids['id'], 0, 10)
return chunk
def block_consistency_group_bulkpost(self, ids):
return self.__block_consistency_group_bulkget_reps(ids)
def __block_consistency_group_bulkget_ids(self):
return self.api('GET', URI_BLOCK_CONSISTENCY_GROUP_BULK)
def __block_consistency_group_bulkget_reps(self, ids):
return self.api('POST', URI_BLOCK_CONSISTENCY_GROUP_BULK, ids)
def block_consistency_group_snapshot_create(self, group, label, createInactive):
parms = {
'name' : label,
'create_inactive' : createInactive,
}
o = self.api('POST', URI_BLOCK_CONSISTENCY_GROUP_SNAPSHOT_CREATE.format(group), parms)
self.assert_is_dict(o)
snapshots = o['task']
id = ''
task_id = ''
if (not o):
return {}
else :
if (type(snapshots) != list):
snapshots = [snapshots]
for snap in snapshots:
id = snap['resource']['id']
task_id = snap['op_id']
s = self.api_sync_2(id, task_id, self.block_consistency_group_show_task)
return (o, s)
def block_consistency_group_swap(self, group, copyType, targetVarray):
copies_param = dict()
copy = dict()
copy_entries = []
copy['type'] = copyType
copy['copyID'] = targetVarray
copy_entries.append(copy)
copies_param['copy'] = copy_entries
o = self.api('POST', URI_BLOCK_CONSISTENCY_GROUP_SWAP.format(group), copies_param )
self.assert_is_dict(o)
if ('task' in o):
tasks = []
for task in o['task']:
s = self.api_sync_2(task['resource']['id'], task['op_id'], self.block_consistency_group_show_task)
tasks.append(s)
s = tasks
else:
s = o['details']
return s
def block_consistency_group_accessmode(self, group, copyType, targetVarray, am):
copies_param = dict()
copy = dict()
copy_entries = []
copy['type'] = copyType
copy['copyID'] = targetVarray
copy['accessMode'] = am
copy_entries.append(copy)
copies_param['copy'] = copy_entries
o = self.api('POST', URI_BLOCK_CONSISTENCY_GROUP_ACCESS_MODE.format(group), copies_param )
self.assert_is_dict(o)
if ('task' in o):
tasks = []
for task in o['task']:
s = self.api_sync_2(task['resource']['id'], task['op_id'], self.block_consistency_group_show_task)
tasks.append(s)
s = tasks
else:
s = o['details']
return s
def block_consistency_group_failover(self, group, copyType, targetVarray, pit):
copies_param = dict()
copy = dict()
copy_entries = []
copy['type'] = copyType
copy['copyID'] = targetVarray
if (pit):
copy['pointInTime'] = pit
copy_entries.append(copy)
copies_param['copy'] = copy_entries
o = self.api('POST', URI_BLOCK_CONSISTENCY_GROUP_FAILOVER.format(group), copies_param )
self.assert_is_dict(o)
if ('task' in o):
tasks = []
for task in o['task']:
s = self.api_sync_2(task['resource']['id'], task['op_id'], self.block_consistency_group_show_task)
tasks.append(s)
s = tasks
else:
s = o['details']
return s
def block_consistency_group_failover_cancel(self, group, copyType, targetVarray):
copies_param = dict()
copy = dict()
copy_entries = []
copy['type'] = copyType
copy['copyID'] = targetVarray
copy_entries.append(copy)
copies_param['copy'] = copy_entries
o = self.api('POST', URI_BLOCK_CONSISTENCY_GROUP_FAILOVER_CANCEL.format(group), copies_param )
self.assert_is_dict(o)
if ('task' in o):
tasks = []
for task in o['task']:
s = self.api_sync_2(task['resource']['id'], task['op_id'], self.block_consistency_group_show_task)
tasks.append(s)
s = tasks
else:
s = o['details']
return s
def block_consistency_group_snapshot_show_task(self, group, snapshot, task):
return self.api('GET', URI_BLOCK_CONSISTENCY_GROUP_SNAPSHOT_TASKS.format(group, snapshot, task))
def block_consistency_group_snapshot_show(self, group, snapshot):
return self.api('GET', URI_BLOCK_CONSISTENCY_GROUP_SNAPSHOT.format(group, snapshot))
def block_consistency_group_snapshot_query(self, group, name):
if (self.__is_uri(name)):
return name
return (self.block_consistency_group_snapshot_get_id_by_name(group, name))
def block_consistency_group_snapshot_list(self, group):
groupId = self.block_consistency_group_query(group)
groupId = groupId.strip()
o = self.api('GET', URI_BLOCK_CONSISTENCY_GROUP_SNAPSHOT_LIST.format(groupId))
self.assert_is_dict(o)
snapshots = o['snapshot']
ids = []
if (not o):
return {}
else :
if (type(snapshots) != list):
snapshots = [snapshots]
for snapshot in snapshots:
ids.append(snapshot.get('id'))
return ids
def block_consistency_group_snapshot_get_id_by_name(self, group, name):
groupid = self.block_consistency_group_query(group)
groupid = groupid.strip()
o = self.api('GET', URI_BLOCK_CONSISTENCY_GROUP_SNAPSHOT_LIST.format(groupid))
self.assert_is_dict(o)
snapshots = o['snapshot']
ids = []
if (not o):
return {}
else :
if (type(snapshots) != list):
snapshots = [snapshots]
print 'The requested consistency group snapshot name : ' + name
for snapshot in snapshots:
if(name == snapshot.get('name')):
print 'The selected id : ' + snapshot.get('id')
return snapshot.get('id')
def block_consistency_group_snapshot_activate(self, group, snapshot):
o = self.api('POST', URI_BLOCK_CONSISTENCY_GROUP_SNAPSHOT_ACTIVATE.format(group, snapshot))
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.block_consistency_group_show_task)
return (o, s)
def block_consistency_group_snapshot_deactivate(self, group, snapshot):
o = self.api('POST', URI_BLOCK_CONSISTENCY_GROUP_SNAPSHOT_DEACTIVATE.format(group, snapshot))
self.assert_is_dict(o)
tasks = []
for task in o['task']:
s = self.api_sync_2(task['resource']['id'], task['op_id'], self.block_consistency_group_show_task)
tasks.append(s)
return tasks
def block_consistency_group_snapshot_restore(self, group, snapshot):
o = self.api('POST', URI_BLOCK_CONSISTENCY_GROUP_SNAPSHOT_RESTORE.format(group, snapshot))
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.block_consistency_group_show_task)
return (o, s)
#
# varray APIs
#
def neighborhood_list(self):
o = self.api('GET', URI_VARRAYS)
if (not o):
return {};
else:
return o['varray']
def neighborhood_storageports(self, uri):
o = self.api('GET', URI_VARRAY_PORTS.format(uri))
if (not o):
return {};
else:
return o['storage_port']
def neighborhood_name(self, uri):
n = self.neighborhood_show(uri)
return n['name']
def neighborhood_show(self, uri):
return self.api('GET', URI_VARRAY.format(uri))
def neighborhood_create(self, label, autoSanZoning, protectionType):
req = dict()
req['name'] = label
if (autoSanZoning):
req['auto_san_zoning'] = autoSanZoning
if (protectionType):
req['protection_type'] = protectionType
return self.api('POST', URI_VARRAYS, req)
def neighborhood_delete(self, uri):
return self.api('POST', URI_RESOURCE_DEACTIVATE.format(URI_VARRAY.format(uri)))
def neighborhood_query(self, name):
if (self.__is_uri(name)):
return name
neighborhoods = self.neighborhood_list()
for nb in neighborhoods:
neighborhood = self.neighborhood_show(nb['id'])
if (neighborhood['name'] == name):
return neighborhood['id']
raise Exception('bad varray name ' + name)
def neighborhood_search(self, initiator_port):
searchstr = URI_VARRAYS + '/search?initiator_port={0}'
return self.api('GET', searchstr.format(initiator_port))
def neighborhood_add_acl(self, uri, tenant):
id = self.__tenant_id_from_label(tenant)
self.neighborhood_add_aclInternal(uri, id)
def neighborhood_add_aclInternal(self, uri, tenantUri):
parms = {
'add':[{
'privilege': ['USE'],
'tenant': tenantUri,
}]
}
response = self.__api('PUT', URI_VARRAY_ACLS.format(uri), parms)
if (response.status_code != 200):
print "neighborhood_add_acl failed with code: ", response.status_code
raise Exception('neighborhood_add_acl: failed')
def neighborhood_bulkgetids(self):
ids = self.__neighborhood_bulkget_ids()
# retrieve the first 10 ids only
chunk = self.get_ids_chunk(ids['id'], 0, 10)
return chunk
def neighborhood_bulkpost(self, ids):
return self.__neighborhood_bulkget_reps(ids)
def __neighborhood_bulkget_ids(self):
return self.api('GET', URI_VARRAYS_BULKGET)
def __neighborhood_bulkget_reps(self, ids):
return self.api('POST', URI_VARRAYS_BULKGET, ids)
#
# Hosts
#
def host_bulkgetids(self):
ids = self.__host_bulkget_ids()
print "ids=", ids
# retrieve the first 10 ids only
chunk = self.get_ids_chunk(ids['id'], 0, 10)
return chunk
def host_bulkpost(self, ids):
return self.__host_bulkget_reps(ids)
def __host_bulkget_ids(self):
return self.api('GET', URI_HOSTS_BULKGET)
def __host_bulkget_reps(self, ids):
return self.api('POST', URI_HOSTS_BULKGET, ids)
#
# Clusters
#
def cluster_bulkgetids(self):
ids = self.__cluster_bulkget_ids()
# retrieve the first 10 ids only
chunk = self.get_ids_chunk(ids['id'], 0, 10)
return chunk
def cluster_bulkpost(self, ids):
return self.__cluster_bulkget_reps(ids)
def __cluster_bulkget_ids(self):
return self.api('GET', URI_CLUSTERS_BULKGET)
def __cluster_bulkget_reps(self, ids):
return self.api('POST', URI_CLUSTERS_BULKGET, ids)
#
# Vcenters
#
def vcenter_bulkgetids(self):
ids = self.__vcenter_bulkget_ids()
# retrieve the first 10 ids only
chunk = self.get_ids_chunk(ids['id'], 0, 10)
return chunk
def vcenter_bulkpost(self, ids):
return self.__vcenter_bulkget_reps(ids)
def __vcenter_bulkget_ids(self):
return self.api('GET', URI_VCENTERS_BULKGET)
def __vcenter_bulkget_reps(self, ids):
return self.api('POST', URI_VCENTERS_BULKGET, ids)
#
# Transport Zone
#
def transportzone_list(self, neighborhood):
o = self.api('GET', URI_VARRAY_NETWORKS.format(neighborhood))
if (not o):
return {}
else:
return o['network']
def transportzone_listall(self):
o = self.api('GET', URI_NETWORKS.format())
if (not o):
return {}
else:
return o['network']
def transportzone_show(self, uri):
return self.api('GET', URI_NETWORK.format(uri))
def transportzone_create(self, label, neighborhood, type):
parms = dict()
if (label):
parms['name'] = label
if (type):
parms['transport_type'] = type
return self.api('POST', URI_VARRAY_NETWORKS.format(neighborhood), parms)
def transportzone_create2(self, label, type, neighborhoods, endpoints):
parms = dict()
parms['name'] = label
parms['transport_type'] = type
nhs = []
eps = []
if (neighborhoods):
nhLbls = neighborhoods.split(',')
for nhLbl in nhLbls:
nhs.append(self.neighborhood_query(nhLbl))
parms['varrays'] = nhs
if (endpoints):
eps = endpoints.split(',')
parms['endpoints'] = eps
return self.api('POST', URI_NETWORKS, parms)
def transportzone_update(self, id, label, addNeighborhoods, remNeighborhoods, addEndpoints, remEndpoints):
parms = dict()
parms['name'] = label
nhChanges = {}
if (addNeighborhoods):
addNhs = []
nhLbls = addNeighborhoods.split(',')
for nhLbl in nhLbls:
addNhs.append(self.neighborhood_query(nhLbl))
nh = dict();
nh['varrays'] = addNhs
nhChanges['add'] = nh
print str(nhChanges)
if (remNeighborhoods):
remNhs = []
nhLbls = remNeighborhoods.split(',')
for nhLbl in nhLbls:
remNhs.append(self.neighborhood_query(nhLbl))
nh = dict()
nh['varrays']=remNhs
nhChanges['remove'] = nh
parms['varray_assignment_changes'] = nhChanges
epChanges = {}
if (addEndpoints):
addEps = addEndpoints.split(',')
epChanges['add'] = addEps
if (remEndpoints):
remEps = remEndpoints.split(',')
epChanges['remove'] = remEps
parms['endpoint_changes'] = epChanges
return self.api('PUT', URI_NETWORK.format(id), parms)
def transportzone_assign(self, id, neighborhood):
parms = {
'varrays' : [ neighborhood ]
}
return self.api('PUT', URI_NETWORK_ASSIGN.format(id), parms)
def transportzone_unassign(self, id):
parms = {
'varrays' : [ ]
}
return self.api('PUT', URI_NETWORK_UNASSIGN.format(id), parms)
def transportzone_delete(self, uri, force):
return self.api('POST', URI_NETWORK_DEACTIVATE.format(uri, force))
def transportzone_queryall(self, name):
if (self.__is_uri(name)):
return name
tzs = self.transportzone_listall()
#tzs = resp['network-info']
for tz in tzs:
if (tz['name'] == name):
return tz['id'];
raise Exception('bad transportzone name: ' + name)
def transportzone_query(self, name):
if (self.__is_uri(name)):
return name
try:
(tname, label) = name.rsplit('/', 1)
except:
label = name
return self.transportzone_queryall(label)
def transportzone_add(self, uri, endpoints):
parms = {
'endpoints' : endpoints,
'op' : 'add',
}
return self.api('PUT', URI_NETWORK_ENDPOINTS.format(uri), parms)
def transportzone_remove(self, uri, endpoints):
parms = {
'endpoints' : endpoints,
'op' : 'remove',
}
return self.api('PUT', URI_NETWORK_ENDPOINTS.format(uri), parms)
def transportzone_register(self, uri):
return self.api('POST', URI_NETWORK_REGISTER.format(uri))
def transportzone_deregister(self, uri):
return self.api('POST', URI_NETWORK_DEREGISTER.format(uri))
def storageport_update(self, spuri, tzone, addvarrays, rmvarrays):
parms = dict()
varrayassignments = dict();
if (addvarrays or rmvarrays):
parms['varray_assignment_changes'] = varrayassignments
if (addvarrays):
addsarray = []
adds = addvarrays.split(',')
for add in adds:
adduri = self.neighborhood_query(add)
addsarray.append(adduri)
addsdict = dict()
addsdict['varrays'] = addsarray
varrayassignments['add'] = addsdict
if (rmvarrays):
rmsarray = []
rms = rmvarrays.split(',')
for rm in rms:
rmuri = self.neighborhood_query(rm)
rmsarray.append(rmuri)
rmsdict = dict()
rmsdict['varrays'] = rmsarray
varrayassignments['remove'] = rmsdict
if (tzone):
tzuri = self.transportzone_query(tzone)
if (tzuri):
parms['network'] = tzuri
return self.api('PUT', URI_STORAGEPORT_UPDATE.format(spuri), parms)
def storageport_register(self, systemuri, spuri):
return self.api('POST', URI_STORAGEPORT_REGISTER.format(systemuri, spuri))
def storageport_deregister(self, name):
#
# name = { port_uri | concat(storagedevice, label) }
#
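        # Illustration (hypothetical values, not taken from any real array): the caller may pass
        # either a bare port URI, which is returned unchanged by the except branch below, or a
        # "storagedevice/port_native_guid" pair such as "vmax-1/SYMMETRIX+000194900123+PORT+FA-7E:0",
        # which is split on the first '/' and matched against the native_guid of the device's ports.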
try:
(sdname, label) = name.split('/', 1)
except:
return name
sduri = self.storagedevice_query(sdname)
ports = self.storageport_list(sduri)
for port in ports:
port = self.storageport_show(sduri, port['id'])
if (port['native_guid'] == label):
return self.api('POST', URI_STORAGEPORT_DEREGISTER.format(port['id']))
raise Exception('bad storageport name')
def storageport_delete(self, uri):
return self.api('POST', URI_RESOURCE_DEACTIVATE.format(URI_STORAGEPORT.format(uri)))
def storageport_show(self, systemuri, porturi):
return self.api('GET', URI_STORAGEPORT_SHOW.format(systemuri, porturi))
def storageport_get(self, porturi):
return self.api('GET', URI_STORAGEPORT.format(porturi))
def storageport_query(self, name):
#
# name = { port_uri | concat(storagedevice, port) }
#
try:
(sdname, port) = name.split('/', 1)
except:
return name
pguri = self.storagedevice_query(sdname)
ports = self.storageport_list(pguri)
for p in ports:
sport = self.storageport_show(pguri, p['id'])
if (sport['name'] == port):
return sport['id']
raise Exception('bad storageport name: ' + name)
def storageport_list(self, sduri):
o = self.api('GET', URI_STORAGEPORTS.format(sduri))
if (not o):
return {};
else:
return o['storage_port']
#
# SMI-S providers APIs
#
def smisprovider_list(self):
o = self.api('GET', URI_SMISPROVIDERS)
if (not o):
return {};
else:
return o
def smisprovider_show(self, uri):
return self.api('GET', URI_SMISPROVIDER.format(uri))
def smisprovider_show_task(self, uri, task):
uri_smisprovider_task = URI_SMISPROVIDER + '/tasks/{1}'
return self.api('GET', uri_smisprovider_task.format(uri, task))
def smisprovider_create(self, name, ipaddress, port, username, password, usessl):
req = dict()
req['name'] = name
req['ip_address'] = ipaddress
req['port_number'] = port
req['user_name'] = username
req['password'] = password
req['use_ssl'] = usessl
o = self.api('POST', URI_SMISPROVIDERS, req)
try:
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.smisprovider_show_task)
        except:
            print o
            raise
        return s
def smisprovider_delete(self, uri):
return self.api('POST', URI_RESOURCE_DEACTIVATE.format(URI_SMISPROVIDER.format(uri)))
def smisprovider_query(self, name):
if (self.__is_uri(name)):
return name
providers = self.smisprovider_list()
for provider in providers['smis_provider']:
smisprovider = self.smisprovider_show(provider['id'])
if (smisprovider['name'] == name):
return smisprovider['id']
raise Exception('bad smisprovider name ' + name)
#
# Storage providers APIs
#
def storageprovider_list(self):
o = self.api('GET', URI_STORAGEPROVIDERS)
if (not o):
return {};
else:
return o
def storageprovider_show(self, uri):
return self.api('GET', URI_STORAGEPROVIDER.format(uri))
def storageprovider_show_task(self, uri, task):
uri_storageprovider_task = URI_STORAGEPROVIDER + '/tasks/{1}'
return self.api('GET', uri_storageprovider_task.format(uri, task))
def storageprovider_create(self, name, ipaddress, port, username, password, usessl, interface, secondary_username, secondary_password, element_manager_url, sio_cli):
req = dict()
req['name'] = name
req['ip_address'] = ipaddress
req['port_number'] = port
req['user_name'] = username
req['password'] = password
req['use_ssl'] = usessl
req['interface_type'] = interface
req['sio_cli'] = sio_cli
req['secondary_username'] = secondary_username
req['secondary_password'] = secondary_password
req['element_manager_url'] = element_manager_url
o = self.api('POST', URI_STORAGEPROVIDERS, req)
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.storageprovider_show_task)
return s
def storageprovider_delete(self, uri):
return self.api('POST', URI_RESOURCE_DEACTIVATE.format(URI_STORAGEPROVIDER.format(uri)))
def storageprovider_query(self, name):
if (self.__is_uri(name)):
return name
providers = self.storageprovider_list()
for provider in providers['storage_provider']:
storageprovider = self.storageprovider_show(provider['id'])
if (storageprovider['name'] == name):
return storageprovider['id']
raise Exception('bad storageprovider name ' + name)
#
# export group apis
#
def export_group_show(self, uri):
return self.api('GET', URI_EXPORTGROUP_INSTANCE.format(uri))
def export_show_tasks(self, uri):
uri_exp_task = URI_EXPORTGROUP_INSTANCE + '/tasks'
return self.api('GET', uri_exp_task.format(uri))
def export_show_task(self, uri, task):
uri_exp_task = URI_EXPORTGROUP_INSTANCE + '/tasks/{1}'
return self.api('GET', uri_exp_task.format(uri, task))
def export_group_list(self, project):
        puri = self.project_query(project).strip()
results = self.api('GET', URI_EXPORTGROUP_SEARCH_PROJECT.format(puri))
resources = results['resource']
exportgroups = []
for resource in resources:
exportgroups.append(resource['id'])
return exportgroups
def export_group_create(self, name, project, neighborhood, type, volspec, initList, hostList, clusterList, pathParam):
projectURI = self.project_query(project).strip()
nhuri = self.neighborhood_query(neighborhood).strip()
parms = {
'name' : name,
'project' : projectURI,
'varray' : nhuri,
}
# Optionally add path parameters
if (pathParam['max_paths'] > 0):
print 'Path parameters', pathParam
parms['path_parameters'] = pathParam
if ('port_group' in pathParam):
print 'Path parameters', pathParam
parms['path_parameters'] = pathParam
# Build volume parameter, if specified
if (volspec):
vols = volspec.split(',')
volentry = []
for vol in vols:
volparam = vol.split('+')
volmap = dict()
if (len(volparam) > 0):
volmap['id'] = self.volume_query(volparam[0])
if (len(volparam) > 1):
volmap['lun'] = volparam[1]
volentry.append(volmap)
parms['volumes'] = volentry
# Build initiators parameter, if specified
if (initList):
inits = initList.split(',')
initEntry = []
for initLbl in inits:
initEntry.append(self.initiator_query(initLbl))
parms['initiators'] = initEntry
        # Build hosts parameter, if specified
if (hostList):
hosts = hostList.split(',')
hostEntry = []
for hostLbl in hosts:
hostEntry.append(self.host_query(hostLbl))
parms['hosts'] = hostEntry
# Build clusters parameter, if specified
if (clusterList):
clusters = clusterList.split(',')
clusterEntry = []
for clusterLbl in clusters:
clusterEntry.append(self.cluster_query(clusterLbl))
parms['clusters'] = clusterEntry
if (type):
parms['type'] = type
else:
parms['type'] = 'Initiator'
parms['project'] = projectURI
if(BOURNE_DEBUG == '1'):
print str(parms)
o = self.api('POST', URI_EXPORTGROUP_LIST, parms)
self.assert_is_dict(o)
try:
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.export_show_task)
        except:
            print o
            raise
        return (o, s)
def export_group_update(self, groupId, addVolspec, addInitList, addHostList, addClusterList, remVolList, remInitList, remHostList, remClusterList, pathParam):
parms = {}
# Optionally add path parameters
if (pathParam['max_paths'] > 0):
print 'Path parameters', pathParam
parms['path_parameters'] = pathParam
# Build volume change input, if specified
volChanges = {}
if (addVolspec):
vols = addVolspec.split(',')
volentry = []
for vol in vols:
volparam = vol.split('+')
volmap = dict()
if (len(volparam) > 0):
volmap['id'] = self.volume_query(volparam[0])
if (len(volparam) > 1):
volmap['lun'] = volparam[1]
volentry.append(volmap)
volChanges['add'] = volentry
if (remVolList):
vols = remVolList.split(',')
volEntry = []
for volLbl in vols:
volEntry.append(self.volume_query(volLbl))
volChanges['remove'] = volEntry
parms['volume_changes'] = volChanges
# Build initiator change input, if specified
initChanges = {}
if (addInitList):
inits = addInitList.split(',')
initEntry = []
for initLbl in inits:
initEntry.append(self.initiator_query(initLbl))
initChanges['add'] = initEntry
if (remInitList):
inits = remInitList.split(',')
initEntry = []
for initLbl in inits:
initEntry.append(self.initiator_query(initLbl))
initChanges['remove'] = initEntry
parms['initiator_changes'] = initChanges
# Build host change input, if specified
hostChanges = {}
if (addHostList):
hosts = addHostList.split(',')
hostEntry = []
for hostLbl in hosts:
hostEntry.append(self.host_query(hostLbl))
hostChanges['add'] = hostEntry
if (remHostList):
hosts = remHostList.split(',')
hostEntry = []
for hostLbl in hosts:
hostEntry.append(self.host_query(hostLbl))
hostChanges['remove'] = hostEntry
parms['host_changes'] = hostChanges
# Build cluster change input, if specified
clusterChanges = {}
if (addClusterList):
clusters = addClusterList.split(',')
clusterEntry = []
for clusterLbl in clusters:
clusterEntry.append(self.cluster_query(clusterLbl))
clusterChanges['add'] = clusterEntry
if (remClusterList):
clusters = remClusterList.split(',')
clusterEntry = []
for clusterLbl in clusters:
clusterEntry.append(self.cluster_query(clusterLbl))
clusterChanges['remove'] = clusterEntry
parms['cluster_changes'] = clusterChanges
if(BOURNE_DEBUG == '1'):
print str(parms)
o = self.api('PUT', URI_EXPORTGROUP_INSTANCE.format(groupId), parms)
self.assert_is_dict(o)
if(BOURNE_DEBUG == '1'):
print 'OOO: ' + str(o) + ' :OOO'
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.export_show_task)
return (o, s)
def export_group_query(self, groupId):
if (self.__is_uri(groupId)):
return groupId
(project, gname) = groupId.rsplit('/', 1)
puri = self.project_query(project)
names = self.export_group_list(puri)
for name in names:
export_group = self.export_group_show(name)
if (export_group['name'] == gname and export_group['inactive'] == False):
return export_group['id']
raise Exception('bad export group name')
def export_group_delete(self, groupId):
o = self.api('POST', URI_RESOURCE_DEACTIVATE.format(URI_EXPORTGROUP_INSTANCE.format(groupId)))
self.assert_is_dict(o)
try:
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.export_show_task)
except:
s = 'error'
return (o, s)
def export_group_add_volume(self, groupId, volspec):
volentry = []
vols = volspec.split(',')
for vol in vols:
volparam = vol.split('+')
volmap = dict()
if (len(volparam) > 0):
volmap['id'] = self.volume_query(volparam[0])
if (len(volparam) > 1):
volmap['lun'] = volparam[1]
volentry.append(volmap)
parms = dict()
parms['volume'] = volentry
o = self.api('POST', URI_EXPORTGROUP_VOLUMES.format(groupId), parms)
self.assert_is_dict(o)
if ('op_id' in o):
s = self.api_sync_2(groupId, o['op_id'], self.export_show_task)
else:
self.pretty_print_json(o)
s = 'error'
return (o, s)
def export_group_remove_volume(self, groupId, volspec):
volentry = []
vols = volspec.split(',')
for vol in vols:
voluri = self.volume_query(vol)
volentry.append(voluri)
parms = dict()
parms['volume'] = volentry
o = self.api('POST', URI_EXPORTGROUP_VOLUMES_REMOVE.format(groupId), parms)
self.assert_is_dict(o)
if ('op_id' in o):
s = self.api_sync_2(groupId, o['op_id'], self.export_show_task)
else:
self.pretty_print_json(o)
s = 'error'
return (o, s)
def export_group_add_initiator(self, groupId, initspec):
initkvarr = []
inits = initspec.split(',')
for init in inits:
parameters = init.split('+')
initkvdict = dict()
initkvdict['protocol'] = parameters[0]
initkvdict['initiator_node'] = parameters[1]
initkvdict['initiator_port'] = parameters[2]
initkvdict['hostname'] = parameters[3]
if (len(parameters) == 5):
initkvdict['clustername'] = parameters[4]
initkvarr.append(initkvdict)
parms = dict()
parms['initiator'] = initkvarr
o = self.api('POST', URI_EXPORTGROUP_INITS.format(groupId), parms)
self.assert_is_dict(o)
if ('op_id' in o):
s = self.api_sync_2(groupId, o['op_id'], self.export_show_task)
else:
s = 'error'
return (o, s)
def export_group_remove_initiator(self, groupId, initspec):
initkvarr = []
inits = initspec.split(',')
for init in inits:
(protocol, port) = init.split('+')
initkvdict = dict()
initkvdict['protocol'] = protocol
initkvdict['port'] = port
initkvarr.append(initkvdict)
parms = dict()
parms['initiator'] = initkvarr
o = self.api('POST', URI_EXPORTGROUP_INITS_REMOVE.format(groupId), parms)
self.assert_is_dict(o)
if ('op_id' in o):
s = self.api_sync_2(groupId, o['op_id'], self.export_show_task)
else:
s = 'error'
return (o, s)
def export_group_pathadj_preview(self, groupId, systemId, varrayId, useExisting, pathParam, hosts):
parms = {}
# Optionally add path parameters
if (pathParam['max_paths'] > 0):
print 'Path parameters', pathParam
parms['path_parameters'] = pathParam
if varrayId != "":
parms['virtual_array'] = varrayId
parms['storage_system'] = systemId
if useExisting:
parms['use_existing_paths'] = 'true'
if hosts:
parms['hosts'] = hosts;
if(BOURNE_DEBUG == '1'):
print str(parms)
o = self.api('POST', URI_EXPORTGROUP_REALLOC.format(groupId), parms)
return o
def export_group_pathadj(self, groupId, parms):
if(BOURNE_DEBUG == '1'):
print str(parms)
o = self.api('PUT', URI_EXPORTGROUP_REBALANCE.format(groupId), parms)
self.assert_is_dict(o)
if(BOURNE_DEBUG == '1'):
print 'OOO: ' + str(o) + ' :OOO'
try:
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.export_show_task)
        except:
            print o
            raise
        return (o, s)
#
# block snapshot
#
def block_snapshot_show(self, uri):
return self.api('GET', URI_BLOCK_SNAPSHOTS.format(uri))
def block_snapshot_show_task(self, snap, task):
return self.api('GET', URI_BLOCK_SNAPSHOTS_TASKS.format(snap, task))
def block_snapshot_query(self, name):
if (self.__is_uri(name)):
return name
(sname, label) = name.rsplit('/', 1)
furi = self.volume_query(sname)
furi = furi.strip()
uris = self.block_snapshot_list(furi)
for uri in uris:
snapshot = self.block_snapshot_show(uri)
if (snapshot['name'] == label):
return snapshot['id']
raise Exception('bad snapshot name')
def block_snapshot_create(self, volume, label, create_inactive, rp):
parms = {
'name' : label,
'create_inactive' : create_inactive,
'type' : rp
}
o = self.api('POST', URI_BLOCK_SNAPSHOTS_LIST.format(volume), parms)
self.assert_is_dict(o)
        id = ''
        task_id = ''
        if (not o):
            return {}
        else :
            snapshots = o['task']
            if (type(snapshots) != list):
snapshots = [snapshots]
for snap in snapshots:
id = snap['resource']['id']
task_id = snap['op_id']
s = self.api_sync_2(id, task_id, self.block_snapshot_show_task)
return (o, s['state'], s['message'])
def block_snapshot_activate(self, snapshot):
vuri = self.block_snapshot_query(snapshot)
vuri = vuri.strip()
o = self.api('POST', URI_BLOCK_SNAPSHOTS_ACTIVATE.format(vuri))
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.block_snapshot_show_task)
return (o, s['state'], s['message'])
def block_snapshot_delete(self, uri, vipronly):
posturi = URI_RESOURCE_DEACTIVATE.format(URI_BLOCK_SNAPSHOTS.format(uri))
if (vipronly):
posturi = posturi + '?type=VIPR_ONLY'
o = self.api('POST', posturi)
self.assert_is_dict(o)
tasks = []
for task in o['task']:
s = self.api_sync_2(task['resource']['id'], task['op_id'], self.block_snapshot_show_task)
tasks.append(s)
return tasks
def block_snapshot_list(self, volume):
vuri = self.volume_query(volume)
vuri = vuri.strip()
o = self.api('GET', URI_BLOCK_SNAPSHOTS_LIST.format(vuri))
self.assert_is_dict(o)
        ids = []
        if (not o):
            return {}
        else :
            snaps = o['snapshot']
            if (type(snaps) != list):
snaps = [snaps]
for snap in snaps:
ids.append(snap.get('id'))
return ids
def block_snapshot_restore(self, snapshot):
vuri = self.block_snapshot_query(snapshot)
vuri = vuri.strip()
o = self.api('POST', URI_BLOCK_SNAPSHOTS_RESTORE.format(vuri))
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.block_snapshot_show_task)
return (o, s['state'], s['message'])
def block_snapshot_expand(self, snapshot, size):
params = {
'new_size' : size,
}
suri = self.block_snapshot_query(snapshot)
suri = suri.strip()
o = self.api('POST', URI_BLOCK_SNAPSHOTS_EXPAND.format(suri), params)
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.block_snapshot_show_task)
return (o, s['state'], s['message'])
def block_snapshot_exports(self, snapshot):
vuri = self.block_snapshot_query(snapshot).strip()
return self.api('GET', URI_BLOCK_SNAPSHOTS_EXPORTS.format(vuri))
def block_snapshot_expose(self, snapshot):
vuri = self.block_snapshot_query(snapshot)
vuri = vuri.strip()
o = self.api('POST', URI_BLOCK_SNAPSHOTS_EXPOSE.format(vuri))
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.block_snapshot_show_task)
return (o, s['state'], s['message'])
#
# protection system APIs
#
def protectionsystem_show_task(self, protectionsystem, task):
uri_task = URI_PROTECTION_SYSTEM + '/tasks/{1}'
return self.api('GET', uri_task.format(protectionsystem,task))
def protectionsystem_discover(self, uri, ignore_error):
task = self.api('POST', URI_PROTECTION_SYSTEM_DISCOVER.format(uri));
s=self.api_sync_2(task['resource']['id'],task['op_id'],self.protectionsystem_show_task, ignore_error)
return "discovery is completed"
def protectionsystem_discover_namespace(self, native_guid, namespace, ignore_error):
if (self.__is_uri(native_guid)):
            return native_guid
systems = self.protectionsystem_list()
for system in systems:
try:
protection_system = self.show_element(system['id'], URI_PROTECTION_SYSTEM)
if (protection_system['native_guid'] == native_guid or protection_system['name'] == native_guid):
o = self.api('POST', URI_DISCOVERED_PROTECTION_SYSTEM_NS.format(system['id'], namespace));
s=self.api_sync_2(o['resource']['id'],o['op_id'],self.protectionsystem_show_task, ignore_error)
return "discovery of namespace is completed"
except KeyError:
print 'no name key'
raise Exception('bad protection system native_guid: ' + native_guid)
def protectionsystem_list(self):
o = self.api('GET', URI_PROTECTION_SYSTEMS)
if (not o):
return {};
systems = o['protection_system'];
if(type(systems) != list):
return [systems];
return systems;
def protectionsystem_show(self, uri):
return self.api('GET', URI_PROTECTION_SYSTEM.format(uri))
def protectionsystem_query(self, name):
if (self.__is_uri(name)):
return name
protectionsystems = self.protectionsystem_list()
for protection_system in protectionsystems:
protectionsystem = self.protectionsystem_show(protection_system['id'])
if (protectionsystem['name'] == name):
return protectionsystem['id']
raise Exception('bad protectionsystem name ' + name)
def protectionsystem_delete(self, uri):
return self.api('POST', URI_RESOURCE_DEACTIVATE.format(URI_PROTECTION_SYSTEM.format(uri)))
def protectionsystem_create(self, name, system_type, ip_address, port_number, user_name, password, registration_mode):
parms = dict()
if (name):
parms['name'] = name
if (system_type):
parms['system_type'] = system_type
if (ip_address):
parms['ip_address'] = ip_address
if (port_number):
parms['port_number'] = port_number
if (user_name):
parms['user_name'] = user_name
if (password):
parms['password'] = password
if (registration_mode):
parms['registration_mode'] = registration_mode
resp = self.api('POST', URI_PROTECTION_SYSTEMS, parms)
print resp
return self.api_sync_2(resp['resource']['id'], resp['op_id'], self.protectionsystem_show_task)
def protectionsystem_update(self, psuri, cluster, addvarrays, rmvarrays):
parms = dict()
if (addvarrays or rmvarrays):
if (cluster):
varrayassignment = dict();
varrayassignment['cluster_id'] = cluster
if (addvarrays):
addsarray = []
adds = addvarrays.split(',')
for add in adds:
adduri = self.neighborhood_query(add)
addsarray.append(adduri)
addsdict = dict()
addsdict['varrays'] = addsarray
varrayassignment['add'] = addsdict
if (rmvarrays):
rmsarray = []
rms = rmvarrays.split(',')
for rm in rms:
rmuri = self.neighborhood_query(rm)
rmsarray.append(rmuri)
rmsdict = dict()
rmsdict['varrays'] = rmsarray
varrayassignment['remove'] = rmsdict
vassignsarray = []
vassignsarray.append(varrayassignment);
parms['varray_assignment_changes'] = vassignsarray
return self.api('PUT', URI_PROTECTION_SYSTEM_UPDATE.format(psuri), parms)
#
# Search API's
#
def search(self, resource_type, scope, prefix, project, tag):
search_scope = {
}
if(scope):
search_scope["tenant"] = scope
if(project):
search_scope["project"] = project
if(tag):
search_scope["tag"] = prefix
elif(prefix):
search_scope["name"] = prefix
uri = ''
if resource_type == "authnprovider":
uri = URI_VDC_AUTHN_PROFILE
elif resource_type == "auto_tiering_policy":
uri = URI_SERVICES_BASE + '/vdc/auto-tier-policies'
elif resource_type == "fileshare":
uri = URI_FILESYSTEMS_LIST
elif resource_type == "volume":
uri = URI_VOLUME_LIST
elif resource_type == "project":
uri = URI_SERVICES_BASE + '/projects'
elif resource_type == "tenant":
uri = URI_SERVICES_BASE + '/tenants'
elif resource_type == "block_vpool":
uri = URI_SERVICES_BASE + '/block/vpools'
elif resource_type == 'file_vpool':
uri = URI_SERVICES_BASE + '/file/vpools'
elif resource_type == "varray":
uri = URI_VARRAYS
elif resource_type == "network_system":
uri = URI_NETWORKSYSTEMS
elif resource_type == "storage_system":
uri = URI_STORAGEDEVICES
elif resource_type == "protection_system":
uri = URI_PROTECTION_SYSTEMS
elif resource_type == "protectionset":
uri = URI_PROTECTIONSETS
elif resource_type == "smis_provider":
uri = URI_SMISPROVIDERS
elif resource_type == "storage_tier":
uri = URI_STORAGETIERS
elif resource_type == "network":
uri = URI_NETWORKS
elif resource_type == "storage_pool":
uri = URI_SERVICES_BASE + '/vdc/storage-pools'
elif resource_type == "storage_port":
uri = URI_SERVICES_BASE + '/vdc/storage-ports'
elif resource_type == "snapshot":
uri = URI_FILE_SNAPSHOTS
elif resource_type == "block_snapshot":
uri = URI_SERVICES_BASE + '/block/snapshots'
elif resource_type == "block_export":
uri = URI_SERVICES_BASE + '/block/exports'
elif resource_type == "block_consistency_group":
uri = URI_SERVICES_BASE + '/block/consistency-groups'
elif resource_type == "vcenter":
uri = URI_VCENTERS
elif resource_type == "datacenter":
uri = URI_DATACENTERS
elif resource_type == "host":
uri = URI_HOSTS
elif resource_type == "cluster":
uri = URI_CLUSTERS
elif resource_type == "ipinterface":
uri = URI_IPINTERFACES
elif resource_type == "initiator":
uri = URI_INITIATORS
else:
raise Exception('Unknown resource type ' + resource_type)
searchuri = uri + '/search'
results = self.api('GET', searchuri, None, search_scope)
return results['resource']
#
# Tag API's
#
def getTagURI(self, resource_type, id):
uri = ''
if resource_type == "authnprovider":
uri = URI_VDC_AUTHN_PROFILES.format(id)
elif resource_type == "auto_tiering_policy":
uri = URI_AUTO_TIER_POLICY.format(id)
elif resource_type == "fileshare":
uri = URI_FILESYSTEM.format(id)
elif resource_type == "volume":
uri = URI_VOLUME.format(id)
elif resource_type == "project":
uri = URI_PROJECT.format(id)
elif resource_type == "tenant":
uri = URI_TENANTS.format(id)
elif resource_type == "block_vpool":
uri = URI_SERVICES_BASE + '/block/vpools/{0}'.format(id)
elif resource_type == 'file_vpool':
uri = URI_SERVICES_BASE + '/file/vpools/{0}'.format(id)
elif resource_type == 'vpool':
uri = URI_SERVICES_BASE + '/object/data-services-vpools/{0}'.format(id)
elif resource_type == "varray":
uri = URI_VARRAY.format(id)
elif resource_type == "network_system":
uri = URI_NETWORKSYSTEM.format(id)
elif resource_type == "storage_system":
uri = URI_STORAGEDEVICE.format(id)
elif resource_type == "protection_system":
uri = URI_PROTECTION_SYSTEM.format(id)
elif resource_type == "protectionset":
uri = URI_PROTECTIONSET.format(id)
elif resource_type == "smis_provider":
uri = URI_SMISPROVIDER.format(id)
elif resource_type == "storage_tier":
uri = URI_STORAGETIER.format(id)
elif resource_type == "network":
uri = URI_NETWORK.format(id)
elif resource_type == "storage_pool":
uri = URI_STORAGEPOOL.format(id)
elif resource_type == "storage_port":
uri = URI_STORAGEPORT.format(id)
elif resource_type == "snapshot":
uri = URI_FILE_SNAPSHOT.format(id)
elif resource_type == "block_snapshot":
uri = URI_BLOCK_SNAPSHOTS.format(id)
elif resource_type == "block_export":
uri = URI_EXPORTGROUP_INSTANCE.format(id)
elif resource_type == "vcenter":
uri = URI_VCENTER.format(id)
elif resource_type == "datacenter":
uri = URI_DATACENTER.format(id)
elif resource_type == "host":
uri = URI_HOST.format(id)
elif resource_type == "cluster":
uri = URI_CLUSTER.format(id)
elif resource_type == "ipinterface":
uri = URI_IPINTERFACE.format(id)
elif resource_type == "initiator":
uri = URI_INITIATOR.format(id)
else:
raise Exception('Unknown resource type ' + resource_type)
return uri + '/tags'
def tag(self, resource_type, id, tags):
target = self.getTagURI(resource_type, id)
params = {
'add': tags
}
self.api('PUT', target, params)
def untag(self, resource_type, id, tags):
target = self.getTagURI(resource_type, id)
params = {
'remove': tags
}
self.api('PUT', target, params)
def datastore_create(self, type, label, cos, filecos, size, mountpoint):
if (type == 'commodity'):
params = dict()
params['nodes'] = []
params['nodes'].append({"nodeId":label, "name":label, "description":"Commodity Sanity Node", "virtual_array":cos})
o = self.api('POST', URI_DATA_STORE_LIST + "/" + type, params)
print ('data store creation result is %s' % o)
sync_out_list = []
for task in o['task']:
s = self.api_sync_2(task['resource']['id'], task['op_id'], self.datastore_show_task)
sync_out_list.append(s)
print "sync completed"
return (o, sync_out_list)
else:
params = {
'name' : label,
'virtual_array' : cos,
}
if (size):
params['size'] = size
if (mountpoint):
params['mount_point'] = mountpoint
if (filecos):
params['file_data_services_vpool'] = filecos
o = self.api('POST', URI_DATA_STORE_LIST + "/" + type, params)
print ('data store creation result is %s' % o)
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.datastore_show_task)
print "sync completed"
return (o, s)
def datastore_delete(self, uri, type):
print "uri is ", uri
o = self.api('POST', URI_RESOURCE_DEACTIVATE.format(URI_DATA_STORE.format(uri)), None)
r = self.waitfor_op_deletion(uri, type)
return (o, r)
def waitfor_op_deletion(self, id, type):
response = self.coreapi('GET', URI_DATA_STORE_LIST + "/" + type + "/" + id)
if(BOURNE_DEBUG == '1'):
print ('Datastore deletion response is %s' % response.text)
tmo = 0
while (response.text != 'invalid pool'):
time.sleep(3)
response = self.coreapi('GET', URI_DATA_STORE_LIST + "/" + type + "/" + id)
print ('response is %s' % response.text)
tmo += 3
if (tmo > API_SYNC_TIMEOUT):
break
if (response.text != 'invalid pool'):
raise Exception('Timed out waiting for deletion of data store: ' + id)
return response
def datastore_show(self, type, uri):
return self.api('GET', URI_DATA_STORE_LIST + "/" + type + "/" + uri)
def datastore_show_task(self, uri, task):
uri_object_task = URI_DATA_STORE + '/tasks/{1}'
return self.api('GET', uri_object_task.format(uri, task))
def datastore_list(self):
o = self.api('GET', URI_DATA_STORE_LIST)
if (not o):
return {};
else:
return o['data_store']
def datastore_query(self, type, label):
if (self.__is_uri(label)):
return label
o = self.api('GET', URI_DATA_STORE_LIST)
        ids = []
        if (not o):
            return ()
        else :
            pools = o['data_store']
for pool in pools:
try:
pool_details = self.datastore_show(type, pool['id'])
if (pool_details['name'] == label):
return pool_details.get('id')
except:
pass
raise Exception('bad pool name '+label)
def datastore_bulkget(self):
return self.api('GET', URI_DATA_STORE_BULKGET)
def datastore_bulkgetids(self):
ids = self.datastore_bulkget()
        # retrieve the first 10 ids only
chunk = self.get_ids_chunk(ids['id'], 0, 10)
return chunk
def datastore_bulkpost(self, ids):
return self.api('POST', URI_DATA_STORE_BULKGET, ids)
def atmosdevice_create(self, namespace, project, name, atmosip, tenant, tenantid, admin, password, token):
parms = {'name': name,
'namespace': namespace,
'ip': atmosip,
'tenant_name': tenant,
'tenant_id': tenantid,
'tenant_admin': admin,
'tenant_admin_password': password}
if (project != None):
project = self.project_query(project).strip()
parms['project'] = project
o = self.api('POST', URI_ATMOS_DEVICE_LIST, parms)
# only POST uses /object/atmos-importer
# GETs continue to use /vdc/data-stores
token = o['op_id']
s = self.api_sync_2(o['resource']['id'], token, self.atmosdevice_show_task)
return (o, s)
def atmosdevice_update(self, uri, atmosip, tenant, admin, password):
parms = {}
if(atmosip):
parms['ip'] = atmosip
if(tenant):
parms['tenant_name'] = tenant
if(admin):
parms['tenant_admin'] = admin
if(password):
parms['tenant_admin_password'] = password
token = 'cli-update-' + uri
response = self.coreapi('PUT', URI_ATMOS_DEVICE.format(uri), parms)
if (response.status_code != 200):
print "update atmos device failed with code: ", response.status_code
raise Exception('update atmos device failed')
return response.text
def atmosdevice_query(self, label):
if (self.__is_uri(label)):
return label
o = self.api('GET', URI_ATMOS_DEVICE_LIST)
        if (not o):
            return ()
        else:
            devices = o['atmos_device']
for device in devices:
try:
device_details = self.atmosdevice_show(device['id'])
if (device_details['name'] == label):
return device.get('id')
except:
pass
raise Exception('bad device name '+ label)
def atmosdevice_show(self, uri):
return self.api('GET', URI_ATMOS_DEVICE.format(uri))
def atmosdevice_list(self):
o = self.api('GET', URI_ATMOS_DEVICE_LIST)
        ids = []
        if (not o):
            return ()
        else:
            devices = o['atmos_device']
for device in devices:
ids.append(device.get('id'))
return ids
def atmosdevice_show_task(self, uri, task):
return self.api('GET', URI_ATMOS_DEVICE_TASK.format(uri, task))
def atmosdevice_delete(self, uri):
o = self.api('POST', URI_ATMOS_DEVICE_DELETE.format(uri), None)
token = o['op_id']
r = self.api_sync_2(uri, token, self.atmosdevice_show_task)
return (o, r)
def objectingestion_create(self, dataStoreName, fileshareId, keypoolName,
dataStoreDescription):
parms = {
'datastore_name' : dataStoreName,
'filesystem_device_info' : { 'fileshare_id': fileshareId },
'keypool_name' : keypoolName
}
if (dataStoreDescription):
parms['datastore_description'] = dataStoreDescription
return self.api('POST', URI_OBJECT_INGESTION_LIST, parms)
def objectingestion_op_status(self, objectingestionId, opId):
return self.api('GET', URI_OBJECT_INGESTION_OP_STATUS.format(objectingestionId, opId))
def objectingestion_list(self):
o = self.api('GET', URI_OBJECT_INGESTION_LIST)
if (not o):
return {};
else:
return o['object_ingestion']
def objectingestion_show(self, objectingestionId):
print self.api('GET', URI_OBJECT_INGESTION.format(objectingestionId))
def objectingestion_delete(self, objectingestionId):
print self.api('POST', URI_OBJECT_INGESTION_DELETE.format(objectingestionId))
def _s3_hmac_base64_sig(self, method, bucket, objname, uid, secret, content_type, parameters_to_sign=None):
'''
calculate the signature for S3 request
StringToSign = HTTP-Verb + "\n" +
* Content-MD5 + "\n" +
* Content-Type + "\n" +
* Date + "\n" +
* CanonicalizedAmzHeaders +
* CanonicalizedResource
'''
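        # Illustrative example (hypothetical values): a GET on bucket "b1" signed with
        # qparms {'acl': None}, no Content-MD5, content_type None and a Date header of
        # "Thu, 01 Jan 2015 00:00:00 GMT" produces the string
        #   "GET\n" + "\n" + "\n" + "Thu, 01 Jan 2015 00:00:00 GMT\n" + "/b1?acl"
        # which is then HMAC-SHA1 signed with the secret key and base64-encoded below.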
buf = ""
# HTTP-Verb
buf += method + "\n"
# Content-MD5, a new line is needed even if it does not exist
md5 = self._headers.get('Content-MD5')
if md5 != None:
buf += md5
buf += "\n"
#Content-Type, a new line is needed even if it does not exist
if content_type != None:
buf+=content_type
buf += "\n"
# Date, it should be removed if "x-amz-date" is set
if self._headers.get("x-amz-date") == None:
date = self._headers.get('Date')
if date != None:
buf += date
buf += "\n"
# CanonicalizedAmzHeaders, does not support multiple headers with same name
canonicalizedAmzHeaders = []
for header in self._headers.keys():
if header.startswith("x-amz-") or header.startswith("x-emc-"):
canonicalizedAmzHeaders.append(header)
canonicalizedAmzHeaders.sort()
for name in canonicalizedAmzHeaders:
buf +=name+":"+self._headers[name]+"\n"
#CanonicalizedResource represents the Amazon S3 resource targeted by the request.
buf += "/"
if bucket != None:
buf += bucket
if objname != None:
buf += "/" + urllib.quote(objname)
if parameters_to_sign !=None:
para_names = parameters_to_sign.keys()
para_names.sort()
separator = '?';
for name in para_names:
value = parameters_to_sign[name]
buf += separator
buf += name
if value != None and value != "":
buf += "=" + value
separator = '&'
if BOURNE_DEBUG == '1':
print 'message to sign with secret[%s]: %s\n' % (secret, buf)
macer = hmac.new(secret.encode('UTF-8'), buf, hashlib.sha1)
signature = base64.b64encode(macer.digest())
if BOURNE_DEBUG == '1':
print "calculated signature:"+signature
# The signature
self._headers['Authorization'] = 'AWS ' + uid + ':' + signature
def _set_auth_and_ns_header(self, method, namespace, bucket, objname, uid, secret, content_type = CONTENT_TYPE_XML, parameters_to_sign=None):
if self._headers.get("x-amz-date") == None:
self._headers['Date'] = formatdate()
if (uid):
self._s3_hmac_base64_sig(method, bucket, objname, uid, secret, content_type, parameters_to_sign)
else:
# anonymous still requires namespace
self._headers['x-emc-namespace'] = namespace
def _computeMD5(self, value):
m = hashlib.md5()
if value != None:
m.update(value)
return m.hexdigest()
def _checkETag(self, response, md5str):
responseEtag = response.headers['ETag'];
# strip enclosing quotes from returned ETag before matching with
# calculated MD5
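        # e.g. (illustrative) a response ETag of '"900150983cd24fb0d6963f7d28e17f72"'
        # (the MD5 of "abc") is stripped of its quotes before being compared against
        # the locally computed hexdigest.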
if (responseEtag.startswith("\"") and responseEtag.endswith("\"")):
responseEtag = responseEtag[1:-1]
if (responseEtag != md5str):
s = "data md5 mismatch! local md5: %s, response etag: %s" % \
(md5str, responseEtag)
print s
raise Exception(s)
@resetHeaders
def bucket_switch(self, namespace, bucket, mode, hosts, duration, token, user, uid, secret, preserveDirStructure = False):
self._headers[FILE_ACCESS_MODE_HEADER] = mode
print "switching bucket %s to file access mode %s, preserveDirStructure=%s" % (bucket, mode, preserveDirStructure)
self._headers[FILE_ACCESS_PRESERVE_DIR_STRUCTURE_HEADER] = str(preserveDirStructure)
if (user != ''):
self._headers[USER_HEADER] = user
if (hosts != ''):
self._headers[HOST_LIST_HEADER] = hosts
if (duration != ''):
self._headers[FILE_ACCESS_DURATION_HEADER] = duration
if (token != ''):
self._headers[TOKEN_HEADER] = token
else:
if (self._headers.has_key(TOKEN_HEADER)):
del self._headers[TOKEN_HEADER]
qparms = {'accessmode': None}
self._set_auth_and_ns_header('PUT', namespace,bucket, None, uid, secret, parameters_to_sign = qparms)
response = self.coreapi('PUT', URI_S3_BUCKET_INSTANCE.format(bucket), None, qparms , content_type=CONTENT_TYPE_XML)
return response
@resetHeaders
def bucket_fileaccesslist(self, namespace, bucket, uid, secret):
qparms = {'fileaccess':None}
self._set_auth_and_ns_header('GET', namespace, bucket, None, uid, secret, parameters_to_sign = qparms)
return self.coreapi('GET', URI_S3_BUCKET_INSTANCE.format(bucket), None, qparms, content_type=CONTENT_TYPE_XML)
@resetHeaders
def bucket_switchget(self, namespace, bucket, uid, secret):
qparms = {'accessmode':None}
self._set_auth_and_ns_header('GET', namespace, bucket, None, uid, secret, parameters_to_sign = qparms)
response = self.coreapi('GET', URI_S3_BUCKET_INSTANCE.format(bucket), None, qparms, content_type=CONTENT_TYPE_XML)
return response
# swift related operations --begin
def __swift_getkey(self, key):
return "{%s}%s" % (OPENSTACK_XML_NS, key)
#format a dict to XML, all leaf elements will be taken as attributes
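    # Sketch of the mapping (hypothetical input): {'auth': {'passwordCredentials': {'username': 'u1'}}}
    # becomes <auth xmlns="..."><passwordCredentials username="u1"/></auth> -- nested dicts turn into
    # child elements (in the OpenStack XML namespace) and scalar values into attributes on their parent.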
def swift_formatxml(self, parent, parms):
for key in parms.keys():
value = parms[key]
if type(value) == dict:
e = ET.Element(self.__swift_getkey(key))
if parent == None:
parent = e
else:
parent.append(e)
self.swift_formatxml(e, value)
else:
parent.set(key, value)
return parent
@resetHeaders
def swift_authenticate(self, uid, password):
self._headers[SWIFT_AUTH_USER] = uid
self._headers[SWIFT_AUTH_KEY] = password
response = self.coreapi('GET', '/auth/v1.0', None, None, None, content_type=CONTENT_TYPE_XML)
headers = response.headers
token = headers.get(SWIFT_AUTH_TOKEN)
if not token:
raise Exception('authentication failed')
if BOURNE_DEBUG == '1':
print '%s=%s' % (SWIFT_AUTH_TOKEN, token)
return token
@resetHeaders
def swift_authenticate_v2(self, uid, password, ctype=CONTENT_TYPE_JSON):
parms = dict()
parms['auth'] = dict()
parms['auth']['passwordCredentials'] = dict()
parms['auth']['passwordCredentials']['username'] = uid
parms['auth']['passwordCredentials']['password'] = password
if ctype == CONTENT_TYPE_XML:
parms = ET.tostring(self.swift_formatxml(None, parms), "UTF-8")
if BOURNE_DEBUG == '1':
print parms
# get unscoped token
response = self.api('POST', '/v2.0/tokens', parms, content_type=ctype)
unscoped_token = None
if ctype == CONTENT_TYPE_JSON:
unscoped_token = response["access"]["token"]["id"]
else:
unscoped_token = ET.fromstring(response).find(self.__swift_getkey("token")).get("id")
# use unscoped token to get tenants info
self._headers[SWIFT_AUTH_TOKEN] = unscoped_token
response = self.api('GET', '/v2.0/tenants', content_type=ctype)
tenantName = None
if ctype == CONTENT_TYPE_JSON:
tenantName = response["tenants"][0]["name"]
else:
tenantName = ET.fromstring(response).find(self.__swift_getkey("tenant")).get("name")
# use unscoped token plus tenantNmae to get scoped token info
parms = dict()
parms['auth'] = dict()
parms['auth']['tenantName'] = tenantName
parms['auth']['token'] = dict()
parms['auth']['token']['id'] = unscoped_token
if ctype == CONTENT_TYPE_XML:
parms = ET.tostring(self.swift_formatxml(None, parms), "UTF-8")
response = self.api("POST", "/v2.0/tokens", parms, content_type=ctype)
if ctype == CONTENT_TYPE_JSON:
scoped_token = response["access"]["token"]["id"]
else:
scoped_token = ET.fromstring(response).find(self.__swift_getkey("token")).get("id")
if BOURNE_DEBUG == '1':
print 'the scoped token is %s' % scoped_token
return scoped_token
@resetHeaders
def containers_list(self, namespace, project, token):
qparms = {"format" : "json"}
if project:
puri = self.project_query(project)
puri = puri.strip()
self._headers['x-emc-project-id'] = puri
self._headers[SWIFT_AUTH_TOKEN] = token
response = self.api("GET", URI_SWIFT_ACCOUNT_INSTANCE.format(namespace), None, qparms, content_type=CONTENT_TYPE_XML)
return response
@resetHeaders
def containers_meta(self, namespace, project, token):
if project:
puri = self.project_query(project)
puri = puri.strip()
self._headers['x-emc-project-id'] = puri
self._headers[SWIFT_AUTH_TOKEN] = token
response = self.coreapi("HEAD", URI_SWIFT_ACCOUNT_INSTANCE.format(namespace), None, None, content_type=CONTENT_TYPE_XML)
return response
@resetHeaders
def container_create(self, namespace, project, container, cos, x_container_read, x_container_write, metadata, token):
if project:
puri = self.project_query(project)
puri = puri.strip()
self._headers['x-emc-project-id'] = puri
if cos:
curi = self.cos_query("object", cos)
curi = curi.strip()
self._headers['x-emc-cos'] = curi
if metadata:
self._headers = dict(self._headers.items() + metadata.items())
self._headers[SWIFT_AUTH_TOKEN] = token
self._headers[SWIFT_X_CONTAINER_READ] = x_container_read
self._headers[SWIFT_X_CONTAINER_WRITE] = x_container_write
response = self.coreapi('PUT', URI_SWIFT_CONTAINER_INSTANCE.format(namespace, container), None, None, content_type=CONTENT_TYPE_XML)
if response.status_code != 201 and response.status_code != 202:
print "container create failed with code: ", response.status_code
raise Exception("failed to create container")
@resetHeaders
def container_delete(self, namespace, container, token):
self._headers[SWIFT_AUTH_TOKEN] = token
response = self.coreapi('DELETE', URI_SWIFT_CONTAINER_INSTANCE.format(namespace, container), None, None, content_type=CONTENT_TYPE_XML)
if response.status_code != 204:
if response.status_code != 409:
print "container_delete failed with code: ", response.status_code
else:
print "container is not empty, can not delete conflict"
raise Exception('failed to delete container')
@resetHeaders
def container_switchfileaccess(self, namespace, container, mode, hosts, duration, token, user, swift_token, preserveDirStructure = False):
self._headers[SWIFT_AUTH_TOKEN] = swift_token
self._headers[FILE_ACCESS_MODE_HEADER] = mode
print "switching container %s to file access mode %s, preserveDirStructure=%s" % (container, mode, preserveDirStructure)
self._headers[FILE_ACCESS_PRESERVE_DIR_STRUCTURE_HEADER] = str(preserveDirStructure)
if (duration != ''):
self._headers[FILE_ACCESS_DURATION_HEADER] = duration
if (hosts != ''):
self._headers[HOST_LIST_HEADER] = hosts
if (user != ''):
self._headers[USER_HEADER] = user
if (token != None):
self._headers[TOKEN_HEADER] = token
response = self.coreapi('PUT', URI_SWIFT_CONTAINER_INSTANCE.format(namespace, container), None, {'accessmode':None}, content_type=CONTENT_TYPE_XML)
h = response.headers
if (FILE_ACCESS_MODE_HEADER in h) :
print '%s=%s' % (FILE_ACCESS_MODE_HEADER, h[FILE_ACCESS_MODE_HEADER])
if (FILE_ACCESS_DURATION_HEADER in h) :
print '%s=%s' % (FILE_ACCESS_DURATION_HEADER, h[FILE_ACCESS_DURATION_HEADER])
if (HOST_LIST_HEADER in h):
print '%s=%s' % (HOST_LIST_HEADER, h[HOST_LIST_HEADER])
if (USER_HEADER in h):
print '%s=%s' % (USER_HEADER, h[USER_HEADER])
if (TOKEN_HEADER in h):
print '%s=%s' % (TOKEN_HEADER, h[TOKEN_HEADER])
if (START_TOKEN_HEADER in h):
print '%s=%s' % (START_TOKEN_HEADER, h[START_TOKEN_HEADER])
if (END_TOKEN_HEADER in h):
            print '%s=%s' % (END_TOKEN_HEADER, h[END_TOKEN_HEADER])
return response
@resetHeaders
def container_getfileaccess(self, namespace, container, swift_token):
self._headers[SWIFT_AUTH_TOKEN] = swift_token
response = self.coreapi('GET', URI_SWIFT_CONTAINER_INSTANCE.format(namespace, container), None,
{'fileaccess': None, 'format': 'json'},
None, CONTENT_TYPE_XML)
return response
@resetHeaders
def container_getaccessmode(self, namespace, container, swift_token):
self._headers[SWIFT_AUTH_TOKEN] = swift_token
response = self.coreapi('GET', URI_SWIFT_CONTAINER_INSTANCE.format(namespace, container), None, {'accessmode':None}, content_type=CONTENT_TYPE_XML)
h = response.headers
print '%s=%s' % (FILE_ACCESS_MODE_HEADER, h[FILE_ACCESS_MODE_HEADER])
if (FILE_ACCESS_DURATION_HEADER in h):
print '%s=%s' % (FILE_ACCESS_DURATION_HEADER, h[FILE_ACCESS_DURATION_HEADER])
if (HOST_LIST_HEADER in h):
print '%s=%s' % (HOST_LIST_HEADER, h[HOST_LIST_HEADER])
if (USER_HEADER in h):
print '%s=%s' % (USER_HEADER, h[USER_HEADER])
if (TOKEN_HEADER in h):
print '%s=%s' % (TOKEN_HEADER, h[TOKEN_HEADER])
return response
@resetHeaders
def container_metadata(self, namespace, container, metadata, token):
self._headers[SWIFT_AUTH_TOKEN] = token
if metadata:
self._headers = dict(self._headers.items() + metadata.items())
response = self.coreapi('POST', URI_SWIFT_CONTAINER_INSTANCE.format(namespace, container), None, None, content_type=CONTENT_TYPE_XML)
if response.status_code != 204:
print "container set/remove metadata failed with code: ", response.status_code
raise Exception("failed to set/remove container metadata")
@resetHeaders
def container_header(self, namespace, container, token):
self._headers[SWIFT_AUTH_TOKEN] = token
response = self.coreapi('HEAD', URI_SWIFT_CONTAINER_INSTANCE.format(namespace, container), None, None, content_type=CONTENT_TYPE_XML)
if response.status_code != 204:
print "container get metadata failed with code: ", response.status_code
raise Exception("failed to get container metadata")
headers = response.headers
for key in headers.keys():
if key.lower().startswith('x-container-meta-'):
print "%s: %s" % (key, headers[key])
return headers
@resetHeaders
def container_objs_list(self, namespace, container, params, token):
self._headers[SWIFT_AUTH_TOKEN] = token
qparms = self._build_list_params(params)
return self.api('GET', URI_SWIFT_CONTAINER_INSTANCE.format(namespace, container), None, qparms, content_type=CONTENT_TYPE_XML)
@resetHeaders
def container_object_create(self, namespace, container, key, value, headers, token, deleteAt, deleteAfter):
self._headers[SWIFT_AUTH_TOKEN] = token
if deleteAt != None:
self._headers[SWIFT_DELETE_AT] = deleteAt
if deleteAfter != None:
self._headers[SWIFT_DELETE_AFTER] = deleteAfter
if value is None:
print "empty object, setting Content-Length to 0"
self._headers['Content-Length'] = str(0)
if headers:
for header_name,header_value in headers.items():
self._headers[header_name] = header_value
print self._headers
response = self.coreapi('PUT', URI_SWIFT_KEY_INSTANCE.format(namespace, container, key), value, None, content_type=CONTENT_TYPE_OCTET)
# check response status
if (response.status_code != 201):
s = "container_object_create key:%s, container:%s, namespace:%s, token:%s failed with code:%d " % \
(key, container, namespace, token, response.status_code)
print s
print response.headers
print response.content
raise Exception(s)
return response.content
@resetHeaders
def container_object_show(self, namespace, container, key, range_str, token):
if range_str:
self._headers['Range'] = range_str
self._headers[SWIFT_AUTH_TOKEN] = token
response = self.__api('GET', URI_SWIFT_KEY_INSTANCE.format(namespace, container, key), None, None, content_type=CONTENT_TYPE_OCTET)
# check response status
if (response.status_code != 200 and response.status_code != 204 and response.status_code != 206 and response.status_code != 404):
s = "container_object_show key:%s, container:%s, namespace:%s, token:%s failed with code:%d " % \
(key, container, namespace, token, response.status_code)
print s
print response.headers
print response.content
raise Exception(s)
if response.status_code == 404:
return response.status_code
return response.content
@resetHeaders
def container_object_delete(self, namespace, container, key, token):
self._headers[SWIFT_AUTH_TOKEN] = token
response = self.coreapi('DELETE', URI_SWIFT_KEY_INSTANCE.format(namespace, container, key), None, None, content_type=CONTENT_TYPE_OCTET)
if (response.status_code != 204):
s = "container_object_delete failed with code: %d" % response.status_code
raise Exception(s)
@resetHeaders
def container_object_head(self, namespace, container, key, token):
self._headers[SWIFT_AUTH_TOKEN] = token
response = self.coreapi('HEAD', URI_SWIFT_KEY_INSTANCE.format(namespace, container, key), None, None, content_type=CONTENT_TYPE_OCTET)
if response.status_code != HTTP_OK:
print "object failed with code: ", response.status_code
raise Exception("failed to head key")
print response.headers
return response.headers
@resetHeaders
def container_object_post(self, namespace, container, key, headers, token):
self._headers[SWIFT_AUTH_TOKEN] = token
if headers:
for header_name,header_value in headers.items():
self._headers[header_name] = header_value
print self._headers
response = self.coreapi('POST', URI_SWIFT_KEY_INSTANCE.format(namespace, container, key), None, None, content_type=CONTENT_TYPE_OCTET)
if response.status_code != 202:
print "object failed with code: ", response.status_code
raise Exception("failed to head key")
print response
@resetHeaders
def container_object_copy(self, namespace, src_container, src_key, dst_container, dst_key, headers, token):
self._headers[SWIFT_AUTH_TOKEN] = token
copy_from_str = "%s/%s" % (src_container, src_key)
self._headers[SWIFT_COPY_FROM] = copy_from_str
if headers:
for header_name,header_value in headers.items():
self._headers[header_name] = header_value
print headers
response = self.coreapi('PUT', URI_SWIFT_KEY_INSTANCE.format(namespace, dst_container, dst_key), None, None, content_type=CONTENT_TYPE_OCTET)
if response.status_code != 201:
print "object failed with code: ", response.status_code
raise Exception("failed to head key")
return response.headers
@resetHeaders
def container_object_update(self, namespace, container, key, range_str, payload, token):
self._headers['Range'] = range_str
self._headers[SWIFT_AUTH_TOKEN] = token
response = self.coreapi('PUT', URI_SWIFT_KEY_INSTANCE.format(namespace, container, key), payload, None, content_type=CONTENT_TYPE_OCTET)
if response.status_code != 200:
print "container_object_update failed with code: ", response.status_code
raise Exception('failed to update key')
# swift related operation --end
@resetHeaders
def baseurl_create(self, baseUrl, name, namespaceInHost):
parms = {
'name': name,
'base_url': baseUrl,
'is_namespace_in_host': namespaceInHost
}
response = self.coreapi('POST', URI_BASEURL_BASE, parms)
if(response.status_code != HTTP_OK):
raise Exception('failed to create baseurl')
resp_parms = self.__json_decode(response.text)
return resp_parms['id']
@resetHeaders
def baseurl_get(self, baseUrlId):
response = self.coreapi('GET', URI_BASEURL_INSTANCE.format(baseUrlId))
if(response.status_code != HTTP_OK):
raise Exception('failed to get baseurl')
resp_parms = self.__json_decode(response.text)
baseurl_obj = resp_parms
if(baseUrlId != baseurl_obj['id']):
raise Exception('got baseurl with id ' + baseurl_obj['id'] + ' instead of one with id ' + baseUrlId)
return baseurl_obj
@resetHeaders
def baseurl_list(self):
response = self.coreapi('GET', URI_BASEURL_BASE)
if(response.status_code != HTTP_OK):
raise Exception('failed to get baseurl')
resp_parms = self.__json_decode(response.text)
return resp_parms['base_url']
@resetHeaders
def baseurl_delete(self, baseUrlId):
response = self.coreapi('POST', URI_BASEURL_DEACTIVATE.format(baseUrlId))
if response.status_code != HTTP_OK:
raise Exception('failed to delete baseurl')
@resetHeaders
def bucket_update_acl(self, namespace, bucket, uid, secret, bodyAclValue, headerCannedAclValue=None):
if (headerCannedAclValue):
self._headers['x-amz-acl'] = headerCannedAclValue
qparms = {'acl': None}
self._set_auth_and_ns_header('PUT', namespace, bucket, None, uid, secret, parameters_to_sign = qparms)
uri = URI_S3_BUCKET_INSTANCE.format(bucket)
response = self.coreapi('PUT', uri, bodyAclValue, qparms, None, CONTENT_TYPE_XML)
if response.status_code != HTTP_OK:
print "failure", response.status_code, response.text
raise Exception('failed to update bucket acl')
@resetHeaders
def bucket_get_acl(self, namespace, bucket, uid, secret):
qparms = {'acl': None}
self._set_auth_and_ns_header('GET', namespace, bucket, None, uid, secret, parameters_to_sign = qparms)
return self.api('GET', URI_S3_BUCKET_INSTANCE.format(bucket),
None, qparms, content_type=CONTENT_TYPE_XML)
@resetHeaders
def s3_ping(self, namespace):
qparms = {'ping': None}
self._headers['x-emc-namespace'] = namespace
self._set_auth_and_ns_header('GET', namespace, None, None, None, None, parameters_to_sign = qparms)
uri = URI_S3_PING
response = self.coreapi('GET', uri, qparms = qparms, content_type = CONTENT_TYPE_XML)
print "status", response.status_code
if response.status_code != HTTP_OK:
print "failure", response.status_code, response.text
raise Exception('ping failure')
return response.text
@resetHeaders
def s3_datanode(self, namespace, uid, secret):
qparms = {'endpoint': None}
self._headers['x-emc-namespace'] = namespace
self._set_auth_and_ns_header('GET', namespace, None, None, uid, secret, parameters_to_sign = qparms)
uri = URI_S3_DATANODE
response = self.coreapi('GET', uri, qparms = qparms, content_type = CONTENT_TYPE_XML)
print "status", response.status_code
if response.status_code != HTTP_OK:
print "failure", response.status_code, response.text
raise Exception('getting datanode failure')
else:
print "succeed"
print response.text
@resetHeaders
def bucket_create(self, namespace, bucket, uid, secret, rg=None, fileSystemEnabled = False, proj=None):
if(rg != None):
self._headers['x-emc-dataservice-vpool'] = rg
if(proj != None):
self._headers['x-emc-project-id'] = proj
if(fileSystemEnabled):
self._headers['x-emc-file-system-access-enabled'] = 'true'
self._set_auth_and_ns_header('PUT',namespace, bucket, None, uid, secret)
response = self.coreapi('PUT', URI_S3_BUCKET_INSTANCE.format(bucket), None, None, content_type=CONTENT_TYPE_XML)
h = response.headers
        if (h.get('location')):
return h['location']
else:
print "bucket_create failed with code: ", response.status_code
raise Exception('failed to create bucket')
@resetHeaders
def bucket_delete(self, namespace, bucket, uid, secret):
self._set_auth_and_ns_header('DELETE', namespace, bucket, None, uid, secret)
response = self.coreapi('DELETE', URI_S3_BUCKET_INSTANCE.format(bucket), None, None, content_type=CONTENT_TYPE_XML)
if (response.status_code != 204):
print "bucket_delete failed with code: ", response.status_code
raise Exception('failed to delete bucket')
@resetHeaders
def bucket_list(self, namespace, uid, secret):
self._set_auth_and_ns_header('GET', namespace, None, None, uid, secret)
return self.api('GET', URI_S3_SERVICE_BASE, None, None, content_type=CONTENT_TYPE_XML)
@resetHeaders
def bucket_head(self, namespace, bucket, uid, secret):
self._set_auth_and_ns_header('HEAD', namespace, bucket, None, uid, secret)
response = self.coreapi('HEAD', URI_S3_BUCKET_INSTANCE.format(bucket), None, None, content_type=CONTENT_TYPE_XML)
if response.status_code == HTTP_OK:
print " HEAD Bucket for " , bucket, " resp code:", response.status_code
else:
print "HEAD Bucket for ", bucket, "failed with resp code:", response.status_code
raise Exception('HEAD Bucket failed')
def _build_versioning_payload(self, status):
root = ET.Element('VersioningConfiguration')
root.set('xmlns', S3_XML_NS)
ET.SubElement(root, 'Status').text = status
return ET.tostring(root)
def _get_versioning_status(self, payload):
tree = ET.fromstring(payload)
return tree.findtext('./{' + S3_XML_NS + '}Status')
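# Illustrative sketch (not part of the test flow): _build_versioning_payload('Enabled')
# produces XML along the lines of
#   <VersioningConfiguration xmlns="..."><Status>Enabled</Status></VersioningConfiguration>
# and _get_versioning_status() reads the Status text back out of such a document.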
@resetHeaders
def bucket_versioning_get(self, namespace, bucket, uid, secret):
qparms = {'versioning':None}
self._set_auth_and_ns_header('GET',namespace, bucket, None, uid, secret, parameters_to_sign = qparms)
uri = URI_S3_BUCKET_INSTANCE.format(bucket)
response = self.coreapi('GET', uri, qparms = qparms, content_type = CONTENT_TYPE_XML)
if response.status_code != HTTP_OK:
print "failure", response.status_code, response.text
raise Exception('failed to get versioning status')
return self._get_versioning_status(response.text)
@resetHeaders
def bucket_versioning_put(self, namespace, bucket, status, uid, secret):
qparms = {'versioning':None}
self._set_auth_and_ns_header('PUT', namespace, bucket, None, uid, secret, parameters_to_sign = qparms)
uri = URI_S3_BUCKET_INSTANCE.format(bucket)
parms = self._build_versioning_payload(status)
response = self.coreapi('PUT', uri, parms, qparms,
None, CONTENT_TYPE_XML)
if response.status_code != HTTP_OK:
print "failure", response.status_code, response.text
raise Exception('failed to put versioning status')
def _build_lifecycle_payload(self, rules):
root = ET.Element('LifecycleConfiguration')
root.set('xmlns', S3_XML_NS)
json_rules = cjson.decode(rules)
for r in json_rules.get('rules'):
rule = ET.SubElement(root, 'Rule')
ET.SubElement(rule, 'ID').text = r.get('id')
ET.SubElement(rule, 'Prefix').text = r.get('prefix')
ET.SubElement(rule, 'Status').text = r.get('status')
e = r.get('expiration')
expiration = ET.SubElement(rule, 'Expiration')
if e.get('days'):
ET.SubElement(expiration, 'Days').text = str(e.get('days'))
if e.get('date'):
ET.SubElement(expiration, 'Date').text = e.get('date')
return ET.tostring(root)
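# Hedged example of the 'rules' argument expected by _build_lifecycle_payload /
# bucket_lifecycle_put: a JSON string (decoded with cjson above). Rule id, prefix
# and expiration below are illustrative values only:
#   '{"rules": [{"id": "expire-logs", "prefix": "logs/", "status": "Enabled",
#                "expiration": {"days": 30}}]}'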
@resetHeaders
def bucket_lifecycle_get(self, namespace, bucket, uid, secret):
qparms = {'lifecycle': None}
self._set_auth_and_ns_header('GET', namespace, bucket, None, uid, secret, parameters_to_sign = qparms)
uri = URI_S3_BUCKET_INSTANCE.format(bucket)
response = self.coreapi('GET', uri, qparms = qparms, content_type = CONTENT_TYPE_XML)
if response.status_code != HTTP_OK:
print "failure", response.status_code, response.text
raise Exception('failed to get lifecycle configuration')
return response.text
@resetHeaders
def bucket_lifecycle_del(self, namespace, bucket, uid, secret):
qparms = {'lifecycle': None}
self._set_auth_and_ns_header('DELETE', namespace, bucket, None, uid, secret, parameters_to_sign = qparms)
uri = URI_S3_BUCKET_INSTANCE.format(bucket)
response = self.coreapi('DELETE', uri, qparms = qparms, content_type = CONTENT_TYPE_XML)
if response.status_code != 204:
print "failure", response.status_code, response.text
raise Exception('failed to delete lifecycle configuration')
return response.text
@resetHeaders
def bucket_lifecycle_put(self, namespace, bucket, rules, uid, secret):
qparms = {'lifecycle': None}
self._set_auth_and_ns_header('PUT', namespace, bucket, None, uid, secret, parameters_to_sign = qparms)
uri = URI_S3_BUCKET_INSTANCE.format(bucket)
parms = self._build_lifecycle_payload(rules)
response = self.coreapi('PUT', uri, parms, qparms, None, CONTENT_TYPE_XML)
if response.status_code != HTTP_OK:
print "failure", response.status_code, response.text
raise Exception('failed to put lifecycle configuration')
def _build_cors_payload(self, rules):
root = ET.Element('CORSConfiguration')
root.set('xmlns', S3_XML_NS)
json_rules = cjson.decode(rules)
for r in json_rules.get('rules'):
rule = ET.SubElement(root, 'CORSRule')
origin = r.get('origin')
for o in origin:
ET.SubElement(rule, 'AllowedOrigin').text = o
method = r.get('method')
for m in method:
ET.SubElement(rule, 'AllowedMethod').text = m
header = r.get('header')
for h in header:
ET.SubElement(rule, 'AllowedHeader').text = h
return ET.tostring(root)
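# Hedged example of the 'rules' argument for _build_cors_payload / bucket_cors_put,
# again a JSON string; the origins, methods and headers below are illustrative:
#   '{"rules": [{"origin": ["http://example.com"], "method": ["GET", "PUT"],
#                "header": ["*"]}]}'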
@resetHeaders
def bucket_cors_get(self, namespace, bucket, uid, secret):
qparms = {'cors': None}
self._set_auth_and_ns_header('GET', namespace, bucket, None, uid, secret, parameters_to_sign = qparms)
uri = URI_S3_BUCKET_INSTANCE.format(bucket)
response = self.coreapi('GET', uri, qparms = qparms, content_type = CONTENT_TYPE_XML)
if response.status_code != HTTP_OK:
print "failure", response.status_code, response.text
raise Exception('failed to get CORS configuration')
return response.text
@resetHeaders
def bucket_cors_delete(self, namespace, bucket, uid, secret):
qparms = {'cors': None}
self._set_auth_and_ns_header('DELETE', namespace, bucket, None, uid, secret, parameters_to_sign = qparms)
uri = URI_S3_BUCKET_INSTANCE.format(bucket)
response = self.coreapi('DELETE', uri, qparms = qparms, content_type = CONTENT_TYPE_XML)
if response.status_code != HTTP_NO_CONTENT:
print "failure", response.status_code, response.text
raise Exception('failed to delete CORS configuration')
@resetHeaders
def bucket_cors_put(self, namespace, bucket, rules, uid, secret):
qparms = {'cors': None}
self._set_auth_and_ns_header('PUT', namespace, bucket, None, uid, secret, parameters_to_sign = qparms)
uri = URI_S3_BUCKET_INSTANCE.format(bucket)
parms = self._build_cors_payload(rules)
response = self.coreapi('PUT', uri, parms, qparms, None, CONTENT_TYPE_XML)
if response.status_code != HTTP_OK:
print "failure", response.status_code, response.text
raise Exception('failed to put CORS configuration')
def node_create(self, name):
return self.api('POST', URI_NODEOBJ.format(name))
def security_logout(self):
response = self.__api('GET', URI_LOGOUT)
if (response.status_code != 200):
print "logout failed with code: ", response.status_code
raise Exception('security logout: failed')
def security_add_tenant_role(self, tenant, objecttype, objectname, role):
uri = self.__tenant_id_from_label(tenant)
if( not objecttype in ['subject_id', 'group']):
raise Exception('type must be subject_id or group')
if( not role in ['TENANT_ADMIN','PROJECT_ADMIN', 'TENANT_APPROVER']):
raise Exception('role must be TENANT_ADMIN, PROJECT_ADMIN, or TENANT_APPROVER')
self.security_add_tenant_id_role(uri, objecttype, objectname, role)
def security_remove_tenant_role(self, tenant, objecttype, objectname, role):
uri = self.__tenant_id_from_label(tenant)
if( not objecttype in ['subject_id', 'group']):
raise Exception('type must be subject_id or group')
if( not role in ['TENANT_ADMIN','PROJECT_ADMIN', 'TENANT_APPROVER']):
raise Exception('role must be TENANT_ADMIN, PROJECT_ADMIN, or TENANT_APPROVER')
self.security_remove_tenant_id_role(uri, objecttype, objectname, role)
def security_add_tenant_id_role(self, tenant_id, objecttype, objectname, role):
parms = {
"add" : [ { "role" : [role], objecttype : objectname }]
}
print parms
response = self.__api('PUT', URI_TENANTS_ROLES.format(tenant_id), parms)
if (response.status_code != 200):
print "security assign role failed with code: ", response.status_code
raise Exception('security assign role: failed')
def security_remove_tenant_id_role(self, tenant_id, objecttype, objectname, role):
parms = {
"remove" : [ { "role" : [role], objecttype : objectname }]
}
print parms
response = self.__api('PUT', URI_TENANTS_ROLES.format(tenant_id), parms)
if (response.status_code != 200):
print "security assign role failed with code: ", response.status_code
raise Exception('security assign role: failed')
def _get_s3_key_uri(self, bucket, key, alternateFormat = False):
if(alternateFormat):
return URI_S3_KEY_INSTANCE_ALTERNATE.format(urllib.quote(key))
return URI_S3_KEY_INSTANCE.format(urllib.quote(bucket), urllib.quote(key))
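# Sketch of the two URI styles produced above (bucket/key names illustrative):
#   _get_s3_key_uri('mybucket', 'a/b.txt')        -> path-style URI that includes the bucket
#   _get_s3_key_uri('mybucket', 'a/b.txt', True)  -> key-only URI, used when the bucket name
#                                                    is carried in the Host header instead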
@resetHeaders
def bucket_key_create(self, namespace, bucket, key, value, uid, headers, secret, baseurl = None, bucketNameFormat = 1, namespaceFormat = 1):
value = self.getDataValueFromCli(value)
if headers:
for header_name,header_value in headers.items():
self._headers[header_name] = header_value
if(bucketNameFormat == 2): #set the bucket name in the Host header and not in the path
if(baseurl == None):
raise Exception('Base URL should be specified if the alternate format of URI needs to be used')
host = bucket + '.'
if(namespaceFormat == 2):
host = host + namespace + '.'
else:
self._headers['x-emc-namespace'] = namespace
host = host + baseurl
self._headers['Host'] = host
if value is None:
print "empty object, setting Content-Length to 0"
self._headers['Content-Length'] = str(0)
self._set_auth_and_ns_header('PUT', namespace, bucket, key, uid, secret, CONTENT_TYPE_OCTET)
print self._headers
md5str = self._computeMD5(value)
altUriFmt = False
if(bucketNameFormat == 2):
altUriFmt = True
response = self.coreapi('PUT', self._get_s3_key_uri(bucket, key, altUriFmt), value, None, content_type=CONTENT_TYPE_OCTET)
#TODO: server returns
if (response.status_code != 200 and response.status_code != 204 ):
print "bucket_key_create failed with code: ", response.status_code
raise Exception('failed to create key')
if BOURNE_DEBUG == '1':
print response.headers
self._checkETag(response, md5str)
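# Hedged usage sketch for the virtual-host style request (bucketNameFormat == 2);
# the hostname, namespace and credentials are illustrative:
#   bourne.bucket_key_create('ns1', 'mybucket', 'key1', 'hello', uid, None, secret,
#                            baseurl='data.example.com', bucketNameFormat=2)
# which sends the request with Host: mybucket.data.example.com and only the key in the path.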
@resetHeaders
def bucket_key_copy(self, namespace, bucket, key, destbucket, destkey, uid, secret):
self._headers['x-amz-copy-source'] = URI_S3_KEY_INSTANCE.format(urllib.quote_plus(bucket), urllib.quote_plus(key))
self._headers['x-amz-metadata-directive'] = 'COPY'
self._set_auth_and_ns_header('PUT', namespace, destbucket, destkey, uid, secret)
# TODO: rewire scripts to return etag so that we can validate
response = self.coreapi('PUT', self._get_s3_key_uri(destbucket, destkey), None, None, content_type=CONTENT_TYPE_XML)
#TODO: error may be embedded in 200
if (response.status_code != 200 and response.status_code != 204):
print "bucket_key_copy failed with code: ", response.status_code
raise Exception('failed to copy key')
@resetHeaders
def bucket_key_delete(self, namespace, bucket, key, version, uid, secret):
qparms = None
if version is not None:
qparms = {'versionId': version}
self._set_auth_and_ns_header('DELETE', namespace, bucket, key, uid, secret, 'text/plain', qparms)
response = self.coreapi('DELETE', self._get_s3_key_uri(bucket, key),
None, qparms, None, 'text/plain')
if (response.status_code != 204):
print "bucket_key_delete failed with code: ", response.status_code
raise Exception('failed to delete key')
@resetHeaders
def bucket_key_update(self, namespace, bucket, key, value, uid, secret, range):
self._headers['Range'] = range
self._set_auth_and_ns_header('PUT', namespace, bucket, key, uid, secret, CONTENT_TYPE_OCTET)
md5str = self._computeMD5(value)
response = self.coreapi('PUT', self._get_s3_key_uri(bucket, key), value, None, content_type=CONTENT_TYPE_OCTET)
#TODO: server returns
if (response.status_code != 200 and response.status_code != 204 ):
print "bucket_key_update failed with code: ", response.status_code
raise Exception('failed to update/append key')
return response
@resetHeaders
def bucket_key_update_acl(self, namespace, bucket, key, bodyAclValue, cannedAclValue, aclHeaders, uid, secret):
if (cannedAclValue):
self._headers["x-amz-acl"] = cannedAclValue
if (aclHeaders):
for acl in aclHeaders:
pair = acl.split(':')
self._headers[pair[0]] = pair[1]
qparms = {'acl':None}
self._set_auth_and_ns_header('PUT', namespace, bucket, key, uid, secret, CONTENT_TYPE_OCTET, parameters_to_sign = qparms)
if (bodyAclValue):
md5str = self._computeMD5(bodyAclValue)
response = self.coreapi('PUT', self._get_s3_key_uri(bucket, key) + '?acl', bodyAclValue, None, content_type=CONTENT_TYPE_OCTET)
if (response.status_code != 200 and response.status_code != 204 ):
print "bucket_key_update failed with code: ", response.status_code
raise Exception('failed to update ACL')
return response
@resetHeaders
def bucket_key_get_acl(self, namespace, bucket, key, uid, secret):
qparms = {'acl': None}
self._set_auth_and_ns_header('GET', namespace, bucket, key, uid, secret, parameters_to_sign = qparms)
return self.api('GET', self._get_s3_key_uri(bucket, key),
None, qparms, content_type=CONTENT_TYPE_XML)
# build qparms for list
def _build_list_params(self, params, qparms = None):
if qparms is None:
qparms = {}
for (key, value) in params.iteritems():
if value is not None:
qparms[key] = value
return qparms
@resetHeaders
def bucket_key_list(self, namespace, bucket, params, uid, secret):
self._set_auth_and_ns_header('GET', namespace, bucket, None, uid, secret)
qparms = self._build_list_params(params)
return self.api('GET', URI_S3_BUCKET_INSTANCE.format(bucket),
None, qparms, content_type=CONTENT_TYPE_XML)
@resetHeaders
def bucket_key_list_versions(self, namespace, bucket, params, uid, secret):
qparms = {}
qparms['versions'] = None
self._set_auth_and_ns_header('GET', namespace, bucket, None, uid, secret, parameters_to_sign = qparms)
qparms = self._build_list_params(params, qparms)
return self.api('GET', URI_S3_BUCKET_INSTANCE.format(bucket),
None, qparms, content_type=CONTENT_TYPE_XML)
@resetHeaders
def bucket_key_show(self, namespace, bucket, key, version, uid, secret, range=None):
return self._bucket_key_read('GET', namespace, bucket, key, version, uid, secret, range)
@resetHeaders
def bucket_key_head(self, namespace, bucket, key, version, uid, secret):
return self._bucket_key_read('HEAD', namespace, bucket, key, version, uid, secret)
@resetHeaders
def _bucket_key_read(self, method, namespace, bucket, key, version, uid, secret, range=None):
qparms = None
if version is not None:
qparms = {'versionId': version}
if range != None:
self._headers['Range'] = range
self._set_auth_and_ns_header(method, namespace, bucket, key, uid, secret, CONTENT_TYPE_OCTET, parameters_to_sign = qparms )
response = self.__api(method, self._get_s3_key_uri(bucket,key), None, qparms, content_type=CONTENT_TYPE_OCTET)
# check response status
if (response.status_code != 200 and response.status_code != 204 and response.status_code != 206 ):
s = "bucket_key_read (%s) key:%s, bucket:%s, namespace:%s, uid:%s, secret:%s, version:%s failed with code:%d " % \
(method, key, bucket, namespace, uid, secret, version, response.status_code)
print s
print response.headers
print response.content
raise Exception(s)
if method == 'GET':
if BOURNE_DEBUG == '1':
print response.headers
return response.content
else:
return response.headers
@resetHeaders
def bucket_key_options(self, namespace, bucket, key, origin, method, header, uid, secret):
self._set_auth_and_ns_header('OPTIONS', namespace, bucket, key, uid, secret)
self._headers['Origin'] = origin
self._headers['Access-Control-Request-Method'] = method
if header:
self._headers['Access-Control-Request-Headers'] = header
response = self.coreapi('OPTIONS', self._get_s3_key_uri(bucket, key), content_type=CONTENT_TYPE_XML)
if (response.status_code != HTTP_OK):
print "OPTIONS Object failed with status code ", response.status_code
raise Exception('OPTIONS Object failed')
print response.headers
@resetHeaders
def bucket_options(self, namespace, bucket, origin, method, header, uid, secret):
self._set_auth_and_ns_header('OPTIONS', namespace, bucket, None, uid, secret)
self._headers['Origin'] = origin
self._headers['Access-Control-Request-Method'] = method
if header:
self._headers['Access-Control-Request-Headers'] = header
response = self.coreapi('OPTIONS', URI_S3_BUCKET_INSTANCE.format(bucket), content_type=CONTENT_TYPE_XML)
if (response.status_code != HTTP_OK):
print "OPTIONS Bucket failed with status code ", response.status_code
raise Exception('OPTIONS Bucket failed')
print response.headers
# atmos related operations --begin
def atmos_hmac_base64_sig(self, method, content_type, uri, date, secret):
byteRangeStr = ""
custom_headers = {}
for header in self._headers.iterkeys():
if re.match('^x-emc-', header, re.IGNORECASE):
custom_headers[header.lower()] = self._headers[header]
if header == "Range":
byteRangeStr = self._headers[header]
if ('x-emc-signature' in custom_headers):
del custom_headers['x-emc-signature']
msg = method + '\n' + \
content_type + '\n' + \
byteRangeStr + '\n' + \
date + '\n' + \
uri.lower() + '\n'
sorted_headers = custom_headers.keys()
sorted_headers.sort()
for sorted_header in sorted_headers:
msg += sorted_header + ':' + custom_headers[sorted_header] + '\n'
msg = msg.rstrip()
if(BOURNE_DEBUG == '1'):
print 'message to sign:\n' + msg
key = base64.b64decode(secret)
macer = hmac.new(key, msg, hashlib.sha1)
if(BOURNE_DEBUG == '1'):
print "hmac string:"+base64.b64encode(macer.digest())
return base64.b64encode(macer.digest())
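# Sketch of the canonical string signed above (all values illustrative):
#   <verb>\n<content-type>\n<byte-range>\n<date>\n<lower-cased uri>\n
#   followed by the sorted x-emc-* headers as 'name:value' lines,
# then HMAC-SHA1'd with the base64-decoded shared secret and returned base64-encoded.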
def atmos_object_create(self, namespace, value, uid, secret):
uri = ""
if (namespace):
uri = URI_ATMOS_NAMESPACE_INSTANCE.format(namespace)
else:
uri = URI_ATMOS_OBJECTS
method = 'POST'
content_type = CONTENT_TYPE_OCTET
date = email.Utils.formatdate(timeval=None, localtime=False, usegmt=True)
length = str(0)
if value is not None:
length = str(len(value))
self._headers['Content-Length'] = length
self._headers['date'] = date
#_headers['x-emc-date'] = date
self._headers['x-emc-uid'] = uid
self._headers['x-emc-meta'] = 'color=red,city=seattle'
self._headers['x-emc-signature'] = self.atmos_hmac_base64_sig(method, content_type, uri, date, secret)
response = self.coreapi(method, uri, value, None, None, content_type)
#cleanup the global variable
del self._headers['Content-Length']
del self._headers['date']
del self._headers['x-emc-uid']
del self._headers['x-emc-signature']
if (response.status_code != 201):
print "atmos_object_create failed with code: ", response.status_code
if(BOURNE_DEBUG == '1'):
print 'response:\n' + response.content
raise Exception('failed to create object')
location = response.headers['location']
match = re.match(r"/rest/objects/(\w+)", location)
if (not match):
print "The location header doesn't contain a valid object id: ", location
raise Exception('failed to create object')
objectid = match.group(1)
if(BOURNE_DEBUG == '1'):
print 'object id:\n' + objectid
return objectid
def atmos_object_read(self, oid, namespace, uid, secret):
uri = ""
if (namespace):
uri = URI_ATMOS_NAMESPACE_INSTANCE.format(namespace)
elif (oid):
uri = URI_ATMOS_OBJECT_INSTANCE.format(oid)
else:
print "Neither object id or namespace is provided"
raise Exception('failed to read object')
method = 'GET'
content_type = CONTENT_TYPE_OCTET
date = email.Utils.formatdate(timeval=None, localtime=False, usegmt=True)
self._headers['x-emc-date'] = date
self._headers['date'] = date
self._headers['x-emc-uid'] = uid
self._headers['x-emc-signature'] = self.atmos_hmac_base64_sig(method, content_type, uri, date, secret)
response = self.coreapi(method, uri, None, None, None, content_type)
#cleanup the global variable
del self._headers['date']
del self._headers['x-emc-date']
del self._headers['x-emc-uid']
del self._headers['x-emc-signature']
if (response.status_code != 200):
print "atmos_object_read failed with code: ", response.status_code
raise Exception('failed to read object')
return response.content
def atmos_object_delete(self, oid, namespace, uid, secret):
uri = ""
if (namespace):
uri = URI_ATMOS_NAMESPACE_INSTANCE.format(namespace)
elif (oid):
uri = URI_ATMOS_OBJECT_INSTANCE.format(oid)
else:
print "Neither object id or namespace is provided"
raise Exception('failed to delete object')
method = 'DELETE'
content_type = CONTENT_TYPE_OCTET
date = email.Utils.formatdate(timeval=None, localtime=False, usegmt=True)
self._headers['x-emc-date'] = date
self._headers['date'] = date
self._headers['x-emc-uid'] = uid
self._headers['x-emc-signature'] = self.atmos_hmac_base64_sig(method, content_type, uri, date, secret)
response = self.coreapi(method, uri, None, None, None, content_type)
#cleanup the global variable
del self._headers['date']
del self._headers['x-emc-date']
del self._headers['x-emc-uid']
del self._headers['x-emc-signature']
if (response.status_code != 204):
print "atmos_object_read failed with code: ", response.status_code
raise Exception('failed to delete object')
# atmos related operation --end
def namespace_create(self, tenant, namespace, project, cos, rg, allowed_vpools_list):
myport=self._port
self._port=PORT
tenant_uri = self.__tenant_id_from_label(tenant)
project_uri = None
cos_uri = None
rg_uri = None
if(project != None):
project_uri = self.project_query(project)
self._port=myport
if(cos != None):
cos_uri = self.neighborhood_query(cos)
if (rg != None):
rg_uri = self.repgroup_query(rg)
return self.namespace_createInternal(tenant_uri, namespace, project_uri, cos_uri, rg_uri, allowed_vpools_list)
def namespace_createInternal(self, tenant_uri, namespace, project_uri, cos_uri, rg_uri, allowed_vpools_list):
allowedList =[]
allowedList.append(allowed_vpools_list)
parms = {
'namespace' : namespace,
'tenant' : tenant_uri,
'allowed_vpools_list' : allowedList
}
if ( project_uri != None ):
parms['default_object_project'] = project_uri
if ( cos_uri != None ):
parms['default_data_services_vpool'] = cos_uri
if ( rg_uri != None):
parms['default_data_services_rg'] = rg_uri
response = self.coreapi('POST', URI_NAMESPACE_BASE, parms)
if response.status_code != HTTP_OK:
print "failure:", response.text
raise Exception('failed to create namespace')
return self.__json_decode(response.text)
def namespace_update(self, tenant, namespace, project, repGroup, vpools_added_to_allowed_vpools_list, vpools_added_to_disallowed_vpools_list):
myport=self._port
self._port=PORT
tenant_uri = self.__tenant_id_from_label(tenant)
project_uri = self.project_query(project)
self._port = myport
rg_uri = self.repgroup_query(repGroup)
allowedList = []
allowedList.append(vpools_added_to_allowed_vpools_list)
disAllowedList = []
disAllowedList.append(vpools_added_to_disallowed_vpools_list)
parms = {
'tenant' : tenant_uri,
'default_object_project' : project_uri,
'default_data_services_vpool' : rg_uri,
'vpools_added_to_allowed_vpools_list' : allowedList,
'vpools_added_to_disallowed_vpools_list' : disAllowedList
}
response = self.coreapi('PUT', URI_NAMESPACE_INSTANCE.format(namespace), parms)
if response.status_code != HTTP_OK:
print "failure:", response, response.text
raise Exception('failed to update namespace')
return self.__json_decode(response.text)
def namespace_delete(self, namespace):
response = self.coreapi('POST', URI_RESOURCE_DEACTIVATE.format(URI_NAMESPACE_INSTANCE.format(namespace)))
if response.status_code != HTTP_OK:
print "failure:", response
raise Exception('failed to delete namespace')
def namespace_show(self, namespace):
response = self.coreapi('GET', URI_NAMESPACE_INSTANCE.format(namespace))
if response.status_code != HTTP_OK:
print "failure:", response
raise Exception('failed to get namespace')
return self.__json_decode(response.text)
def namespace_show_tenant(self, tenant):
myport=self._port
self._port=PORT
tenant_uri = self.__tenant_id_from_label(tenant)
self._port=myport
response = self.coreapi('GET', URI_NAMESPACE_TENANT_INSTANCE.format(tenant_uri))
if response.status_code != HTTP_OK:
print "failure:", response
raise Exception('failed to get namespace')
return self.__json_decode(response.text)
def namespace_list(self):
o = self.api('GET', URI_NAMESPACE_COMMON)
namespaces = o['namespace']
ids = []
if (type(namespaces) != list):
namespaces = [namespaces]
for namespace in namespaces:
ids.append(namespace.get('id'))
return ids
def checkAtmosResponse(self, response):
if (response.status_code != 200 and response.status_code != 204 and response.status_code != 201 and response.status_code != 206):
print "failed with code: ", response.status_code
raise Exception('failed operation ')
@resetHeaders
def subtenant_create(self, namespace, project, cos, uid, secret):
#self._headers['x-emc-cos'] = cos
#self._headers['x-emc-project-id'] = project
#self._headers['x-emc-namespace'] = namespace
self._headers['date'] = formatdate()
self._headers['x-emc-uid'] = uid
self._headers['x-emc-file-system-access-enabled'] = 'true'
self._headers['x-emc-signature'] = self.atmos_hmac_base64_sig('PUT', CONTENT_TYPE_OCTET, URI_ATMOS_SUBTENANT_BASE, self._headers['date'], secret)
#response = self.coreapi('PUT', URI_ATMOS_SUBTENANT_BASE, None, None, None, content_type = CONTENT_TYPE_OCTET, port=ATMOS_PORT)
response = self.coreapi('PUT', URI_ATMOS_SUBTENANT_BASE, None, None, None, content_type = CONTENT_TYPE_OCTET)
h = response.headers
if ('subtenantID' in h):
return h['subtenantID']
else:
print "subtenant_create failed with code: ", response.status_code
raise Exception('failed to create subtenant')
def subtenant_delete(self, namespace, subtenant, uid, secret):
self._headers['date'] = formatdate()
self._headers['x-emc-uid'] = uid
self._headers['x-emc-signature'] = self.atmos_hmac_base64_sig('DELETE', CONTENT_TYPE_OCTET, URI_ATMOS_SUBTENANT_INSTANCE.format(subtenant), self._headers['date'], secret)
print URI_ATMOS_SUBTENANT_INSTANCE.format(subtenant)
print self._headers['x-emc-signature']
print secret
#response = self.coreapi('DELETE', URI_ATMOS_SUBTENANT_INSTANCE.format(subtenant), None, None, None, content_type = CONTENT_TYPE_OCTET, port=ATMOS_PORT)
response = self.coreapi('DELETE', URI_ATMOS_SUBTENANT_INSTANCE.format(subtenant), content_type = CONTENT_TYPE_OCTET)
if (response.status_code != 200 and response.status_code != 204):
print "subtenant_delete failed with code: ", response.status_code
raise Exception('failed to delete subtenant' + subtenant)
# value starting with @ char is a file, e.g. @/etc/hosts
def getDataValueFromCli(self, value):
if value and value.find('@') == 0:
with open(value[1:], "rb") as f:
f.seek(0)
value = f.read()
return value
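# Example of the CLI convention handled above (path illustrative):
#   getDataValueFromCli('@/etc/hosts')  -> contents of /etc/hosts
#   getDataValueFromCli('plain text')   -> 'plain text', unchanged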
def _addChecksum(self, value, epoch):
# returns value in format <len, data, checksum>
size = len(value)
size = struct.pack('>i', size)
checksum = zlib.crc32(size + value) & 0xffffffff
checksum = struct.pack('>q', checksum)
epoch = struct.pack('>36s', epoch)
return size + value + checksum + epoch
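# Layout sketch of the record produced by _addChecksum (sizes in bytes):
#   [4-byte big-endian length][payload][8-byte big-endian CRC32 of length+payload][36-byte epoch]
# e.g. a 10-byte payload yields a 58-byte record; the epoch string is padded to 36 bytes.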
@resetHeaders
def atmos_key_create(self, namespace, project, subtenant, keypath, value, uid, secret, useracl=None, groupacl=None):
self._headers['date'] = formatdate()
self._headers['x-emc-uid'] = subtenant + '/' + uid
self._headers['x-emc-meta'] = 'color=red,city=seattle'
self._headers['x-emc-listable-meta'] = 'country=usa'
if (useracl):
self._headers['x-emc-useracl'] = useracl
if (groupacl):
self._headers['x-emc-groupacl'] = groupacl
if (keypath != ''):
uri = URI_ATMOS_NAMESPACE_PATH.format(keypath)
else :
uri = URI_ATMOS_OBJECTS
value = self.getDataValueFromCli(value)
content_type = 'application/octet-stream'
print self._headers
self._headers['x-emc-signature'] = self.atmos_hmac_base64_sig('POST', content_type, uri, self._headers['date'], secret)
response = self.coreapi('POST', uri, value, content_type = CONTENT_TYPE_OCTET)
self.checkAtmosResponse(response)
return response
@resetHeaders
def atmos_key_update(self, namespace, project, subtenant, keypath, value, uid, secret, byteRange=None, useracl=None, groupacl=None):
self._headers['date'] = formatdate()
self._headers['x-emc-uid'] = subtenant + '/' + uid
self._headers['x-emc-meta'] = 'color=red,city=seattle'
self._headers['x-emc-listable-meta'] = 'country=usa'
if (useracl):
self._headers['x-emc-useracl'] = useracl
if (groupacl):
self._headers['x-emc-groupacl'] = groupacl
if (keypath != ''):
uri = URI_ATMOS_NAMESPACE_PATH.format(keypath)
else :
uri = URI_ATMOS_OBJECTS
if byteRange != None:
self._headers["Range"] = byteRange
value = self.getDataValueFromCli(value)
content_type = 'application/octet-stream'
print self._headers
self._headers['x-emc-signature'] = self.atmos_hmac_base64_sig('PUT', content_type, uri, self._headers['date'], secret)
response = self.coreapi('PUT', uri, value, content_type = CONTENT_TYPE_OCTET)
self.checkAtmosResponse(response)
return response
@resetHeaders
def atmos_key_delete(self, namespace, project, subtenant, keypath, apiType, uid, secret):
self._headers['date'] = formatdate()
self._headers['x-emc-uid'] = subtenant + '/' + uid
if (apiType == 'N'):
uri = URI_ATMOS_NAMESPACE_PATH.format(keypath)
else :
uri = URI_ATMOS_OBJECTS_OID.format(keypath)
print uri
content_type = 'application/octet-stream'
self._headers['x-emc-signature'] = self.atmos_hmac_base64_sig('DELETE', content_type, uri, self._headers['date'], secret)
response = self.coreapi('DELETE', uri, None, content_type = CONTENT_TYPE_OCTET)
self.checkAtmosResponse(response)
return response
@resetHeaders
def atmos_key_show(self, namespace, project, subtenant, keypath, apiType, uid, secret, byteRange=None, includeMd=False):
self._headers['date'] = formatdate()
self._headers['x-emc-uid'] = subtenant + '/' + uid
if byteRange != None:
self._headers["Range"] = byteRange
if includeMd:
self._headers["x-emc-include-meta"] = "1"
if (apiType == 'N'):
uri = URI_ATMOS_NAMESPACE_PATH.format(keypath)
else :
uri = URI_ATMOS_OBJECTS_OID.format(keypath)
content_type = '*/*'
self._headers['x-emc-signature'] = self.atmos_hmac_base64_sig('GET', content_type, uri, self._headers['date'], secret)
response = self.coreapi('GET', uri, None, content_type = '*/*')
self.checkAtmosResponse(response)
return response
@resetHeaders
def atmos_key_rename(self, namespace, project, subtenant, keypath, target, apiType, uid, secret):
self._headers['date'] = formatdate()
self._headers['x-emc-uid'] = subtenant + '/' + uid
self._headers['x-emc-path'] = target
if (apiType == 'N'):
uri = URI_ATMOS_NAMESPACE_PATH.format(keypath)
else :
print 'no support for rename using object api'
return
print uri
content_type = '*/*'
self._headers['x-emc-signature'] = self.atmos_hmac_base64_sig('POST', content_type, uri+'?rename', self._headers['date'], secret)
response = self.coreapi('POST', uri, qparms = {'rename':None}, content_type = '*/*')
self.checkAtmosResponse(response)
return response
@resetHeaders
def atmos_key_getinfo(self, namespace, project, subtenant, keypath, apiType, uid, secret):
self._headers['date'] = formatdate()
self._headers['x-emc-uid'] = subtenant + '/' + uid
if (apiType == 'N'):
uri = URI_ATMOS_NAMESPACE_PATH.format(keypath)
else :
uri = URI_ATMOS_OBJECTS_OID.format(keypath)
print uri
content_type = '*/*'
self._headers['x-emc-signature'] = self.atmos_hmac_base64_sig('GET', content_type, uri+'?info', self._headers['date'], secret)
response = self.coreapi('GET', uri, qparms = {'info':None}, content_type = '*/*')
self.checkAtmosResponse(response)
return response
@resetHeaders
def atmos_listtags(self, namespace, project, subtenant, apiType, uid, secret):
self._headers['date'] = formatdate()
self._headers['x-emc-uid'] = subtenant + '/' + uid
if (apiType == 'N'):
uri = URI_ATMOS_NAMESPACE
else :
uri = URI_ATMOS_OBJECTS
print uri
content_type = '*/*'
self._headers['x-emc-signature'] = self.atmos_hmac_base64_sig('GET', content_type, uri+'?listabletags', self._headers['date'], secret)
response = self.coreapi('GET', uri, qparms = {'listabletags':None}, content_type = '*/*')
self.checkAtmosResponse(response)
return response
@resetHeaders
def atmos_listobjectsbytags(self, namespace, project, subtenant, tag, uid, secret):
self._headers['date'] = formatdate()
self._headers['x-emc-uid'] = subtenant + '/' + uid
if (tag != ''):
self._headers['x-emc-tags'] = tag
content_type = '*/*'
self._headers['x-emc-signature'] = self.atmos_hmac_base64_sig('GET', content_type, URI_ATMOS_OBJECTS, self._headers['date'], secret)
response = self.coreapi('GET', URI_ATMOS_OBJECTS, None, content_type = '*/*')
self.checkAtmosResponse(response)
return response
@resetHeaders
def atmos_key_getumd(self, namespace, project, subtenant, keypath, umdkey, apiType, uid, secret):
self._headers['date'] = formatdate()
self._headers['x-emc-uid'] = subtenant + '/' + uid
self._headers['x-emc-tags'] = umdkey
if (apiType == 'N'):
uri = URI_ATMOS_NAMESPACE_PATH.format(keypath)
else :
uri = URI_ATMOS_OBJECTS_OID.format(keypath)
print uri
content_type = '*/*'
self._headers['x-emc-signature'] = self.atmos_hmac_base64_sig('GET', content_type, uri+'?metadata/user', self._headers['date'], secret)
response = self.coreapi('GET', uri, qparms = {'metadata/user':None}, content_type = '*/*')
self.checkAtmosResponse(response)
return response
@resetHeaders
def atmos_key_setumd(self, namespace, project, subtenant, keypath, umd, tagType, apiType, uid, secret):
self._headers['date'] = formatdate()
self._headers['x-emc-uid'] = subtenant + '/' + uid
if (tagType == 'Y'):
self._headers['x-emc-listable-meta'] = umd
else :
self._headers['x-emc-meta'] = umd
if (apiType == 'N'):
uri = URI_ATMOS_NAMESPACE_PATH.format(keypath)
else :
uri = URI_ATMOS_OBJECTS_OID.format(keypath)
print uri
content_type = '*/*'
self._headers['x-emc-signature'] = self.atmos_hmac_base64_sig('POST', content_type, uri+'?metadata/user', self._headers['date'], secret)
response = self.coreapi('POST', uri, qparms = {'metadata/user':None}, content_type = '*/*')
self.checkAtmosResponse(response)
return response
@resetHeaders
def atmos_key_getacl(self, namespace, project, subtenant, keypath, apiType, uid, secret):
self._headers['date'] = formatdate()
self._headers['x-emc-uid'] = subtenant + '/' + uid
if (apiType == 'N'):
uri = URI_ATMOS_NAMESPACE_PATH.format(keypath)
else :
uri = URI_ATMOS_OBJECTS_OID.format(keypath)
print uri
content_type = '*/*'
self._headers['x-emc-signature'] = self.atmos_hmac_base64_sig('GET', content_type, uri+'?acl', self._headers['date'], secret)
response = self.coreapi('GET', uri, qparms = {'acl':None}, content_type = '*/*')
self.checkAtmosResponse(response)
return response
@resetHeaders
def atmos_key_setacl(self, namespace, project, subtenant, keypath, apiType, useracl, groupacl, uid, secret):
self._headers['date'] = formatdate()
self._headers['x-emc-uid'] = subtenant + '/' + uid
if (useracl):
self._headers['x-emc-useracl'] = useracl
if (groupacl):
self._headers['x-emc-groupacl'] = groupacl
if (apiType == 'N'):
uri = URI_ATMOS_NAMESPACE_PATH.format(keypath)
else :
uri = URI_ATMOS_OBJECTS_OID.format(keypath)
print uri
content_type = '*/*'
self._headers['x-emc-signature'] = self.atmos_hmac_base64_sig('POST', content_type, uri+'?acl', self._headers['date'], secret)
response = self.coreapi('POST', uri, qparms = {'acl':None}, content_type = '*/*')
self.checkAtmosResponse(response)
return response
def security_add_zone_role(self, objecttype, objectname, role):
if( not objecttype in ['subject_id', 'group']):
raise Exception('type must be subject_id or group')
if( not role in ['SYSTEM_MONITOR','SYSTEM_AUDITOR','SYSTEM_ADMIN','SECURITY_ADMIN','TENANT_ADMIN',]):
raise Exception('role must be SYSTEM_MONITOR, SYSTEM_AUDITOR, SYSTEM_ADMIN, SECURITY_ADMIN, or TENANT_ADMIN')
parms = {
"add" : [ { "role" : [role], objecttype : objectname }]
}
print parms
response = self.__api('PUT', URI_VDC_ROLES, parms)
if (response.status_code != 200):
print "security assign role failed with code: ", response.status_code
raise Exception('security assign role: failed')
def _mpu_parse_init_response(self, payload):
root = ET.fromstring(payload)
inittag = '{' + S3_XML_NS + '}InitiateMultipartUploadResult'
buckettag = '{' + S3_XML_NS + '}Bucket'
keytag = '{' + S3_XML_NS + '}Key'
uploadidtag = '{' + S3_XML_NS + '}UploadId'
if root.tag != inittag:
print "invalid response payload", payload
raise Exception('Invalid response, no InitiateMultipartUploadResult')
bucket = root.find(buckettag).text
key = root.find(keytag).text
uploadid = root.find(uploadidtag).text
return {"bucket" : bucket, "key" : key, "uploadId" : uploadid}
@resetHeaders
def bucket_initiate_mpu(self, namespace, bucket, key, uid, secret):
qparms = {'uploads':None}
self._set_auth_and_ns_header('POST', namespace, bucket, key, uid, secret, parameters_to_sign = qparms)
uri = self._get_s3_key_uri(bucket, key)
response = self.coreapi('POST', uri, None, qparms , content_type=CONTENT_TYPE_XML)
if response.status_code != HTTP_OK:
print "failure", response
raise Exception('failed to initiate mpu!')
return self._mpu_parse_init_response(response.text)
@resetHeaders
def bucket_upload_mpu(self, namespace, bucket, key, uid, secret, uploadId, partNum):
qparms = {'uploadId':uploadId, 'partNumber':str(partNum)}
self._set_auth_and_ns_header('PUT', namespace, bucket, key, uid, secret, CONTENT_TYPE_OCTET, parameters_to_sign = qparms)
uri = self._get_s3_key_uri(bucket, key)
value = str(uuid.uuid4())
for i in range(100):
value = value + str(uuid.uuid4())
md5str = self._computeMD5(value)
response = self.coreapi('PUT', uri, value, qparms, content_type=CONTENT_TYPE_OCTET)
if response.status_code != HTTP_OK:
print "failure", response
raise Exception('failed to upload a part for mpu!')
self._checkETag(response, md5str)
return response.headers['ETag']
@resetHeaders
def bucket_copy_part(self, namespace, bucket, key, srcbucket, srckey, uid, secret,
uploadId, partNum):
qparms = {'uploadId':uploadId, 'partNumber':str(partNum)}
self._headers['x-amz-copy-source'] = URI_S3_KEY_INSTANCE.format(urllib.quote_plus(srcbucket), urllib.quote_plus(srckey))
self._set_auth_and_ns_header('PUT', namespace, bucket, key, uid, secret, CONTENT_TYPE_OCTET, parameters_to_sign = qparms)
uri = self._get_s3_key_uri(bucket, key)
response = self.coreapi('PUT', uri, None, qparms, content_type=CONTENT_TYPE_OCTET)
if response.status_code != HTTP_OK:
print "failure", response
raise Exception('failed to copy a part for mpu!')
# parse and return etag from response
print "got response: %s" % response.text
tree = ET.fromstring(response.text)
etag = tree.findtext('./{' + S3_XML_NS + '}ETag')
return etag
def _build_complete_mpu_payload(self, etagdict):
root = ET.Element('CompleteMultipartUpload')
root.set('xmlns', S3_XML_NS)
# Note, the part list should be in ascending order
sorted_keys = sorted(etagdict.keys())
for key in sorted_keys:
partElem = ET.SubElement(root, 'Part')
ET.SubElement(partElem, 'PartNumber').text = str(key)
ET.SubElement(partElem, 'ETag').text = etagdict[key]
return ET.tostring(root)
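# Hedged sketch of the XML produced above for etagdict = {1: '"etag1"', 2: '"etag2"'}:
#   <CompleteMultipartUpload xmlns="...">
#     <Part><PartNumber>1</PartNumber><ETag>"etag1"</ETag></Part>
#     <Part><PartNumber>2</PartNumber><ETag>"etag2"</ETag></Part>
#   </CompleteMultipartUpload>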
def _parse_complete_mpu_response(self, response):
version = None
if 'x-amz-version-id' in response.headers:
version = response.headers['x-amz-version-id']
payload = response.text
root = ET.fromstring(payload)
completetag = '{' + S3_XML_NS + '}CompleteMultipartUploadResult'
uritag = '{' + S3_XML_NS + '}Location'
buckettag = '{' + S3_XML_NS + '}Bucket'
keytag = '{' + S3_XML_NS + '}Key'
etagtag = '{' + S3_XML_NS + '}ETag'
if root.tag != completetag:
print "invalid response", response
raise Exception('Invalid response, no CompleteMultipartUploadResult')
bucket = root.find(buckettag).text
key = root.find(keytag).text
uri = root.find(uritag).text
etag = root.find(etagtag).text
return {'version':version, 'etag':etag, 'uri':uri, 'key':key, 'bucket':bucket}
def _parse_list_mpu_parts_response(self, payload):
result = {}
root = ET.fromstring(payload)
listtag = '{' + S3_XML_NS + '}ListPartsResult'
buckettag = '{' + S3_XML_NS + '}Bucket'
keytag = '{' + S3_XML_NS + '}Key'
if root.tag != listtag:
print "invalid response payload", payload
raise Exception('Invalid response, no ListPartsResult')
result['bucket'] = root.find(buckettag).text
result['key'] = root.find(keytag).text
initiatortag = '{' + S3_XML_NS + '}Initiator'
idtag = '{' + S3_XML_NS + '}ID'
nametag = '{' + S3_XML_NS + '}DisplayName'
ownertag= '{' + S3_XML_NS + '}Owner'
initiator = root.find(initiatortag)
print "debug initiator = ",initiator
result['initiator'] = {'id':initiator.find(idtag).text, 'name':initiator.find(nametag).text}
owner = root.find(ownertag)
result['owner'] = {'id':owner.find(idtag).text, 'name':owner.find(nametag).text}
maxtag = '{' + S3_XML_NS + '}MaxParts'
markertag = '{' + S3_XML_NS + '}PartNumberMarker'
nexttag = '{' + S3_XML_NS + '}NextPartNumberMarker'
trunctag = '{' + S3_XML_NS + '}IsTruncated'
result['maxparts'] = root.find(maxtag).text
if None != root.find(markertag):
result['marker'] = root.find(markertag).text
result['truncated'] = root.find(trunctag).text
if None != root.find(nexttag):
result['nextmarker'] = root.find(nexttag).text
parttag = '{' + S3_XML_NS + '}Part'
etagtag = '{' + S3_XML_NS + '}ETag'
sizetag = '{' + S3_XML_NS + '}Size'
mtimetag = '{' + S3_XML_NS + '}LastModified'
partnumtag = '{' + S3_XML_NS + '}PartNumber'
index = 1
parts = []
for part in root.findall(parttag):
partdict = {}
partdict['num'] = part.find(partnumtag).text
partdict['etag'] = part.find(etagtag).text
partdict['mtime'] = part.find(mtimetag).text
partdict['size'] = part.find(sizetag).text
parts.append(partdict)
result['parts'] = parts
return result
def _parse_list_mpu_uploads_response(self, payload):
result = {}
root = ET.fromstring(payload)
list_tag = '{' + S3_XML_NS + '}ListMultipartUploadsResult'
bucket_tag = '{' + S3_XML_NS + '}Bucket'
keymarker_tag = '{' + S3_XML_NS + '}KeyMarker'
uploadidmarker_tag = '{' + S3_XML_NS + '}UploadIdMarker'
nextkeymarker_tag = '{' + S3_XML_NS + '}NextKeyMarker'
nextuploadidmarker_tag = '{' + S3_XML_NS + '}NextUploadIdMarker'
maxuploads_tag = '{' + S3_XML_NS + '}MaxUploads'
delimiter_tag = '{' + S3_XML_NS + '}Delimiter'
prefix_tag = '{' + S3_XML_NS + '}Prefix'
commonprefixes_tag = '{' + S3_XML_NS + '}CommonPrefixes'
istruncated_tag = '{' + S3_XML_NS + '}IsTruncated'
upload_tag = '{' + S3_XML_NS + '}Upload'
if root.tag != list_tag:
print "invalid response payload", payload
raise Exception('Invalid response, no ListMultipartUploadsResult')
result['bucket'] = root.find(bucket_tag).text
if None != root.find(keymarker_tag):
result['keymarker'] = root.find(keymarker_tag).text
if None != root.find(uploadidmarker_tag):
result['uploadidmarker'] = root.find(uploadidmarker_tag).text
if None != root.find(nextkeymarker_tag):
result['nextkeymarker'] = root.find(nextkeymarker_tag).text
if None != root.find(nextuploadidmarker_tag):
result['nextuploadidmarker'] = root.find(nextuploadidmarker_tag).text
if None != root.find(maxuploads_tag):
result['maxuploads'] = root.find(maxuploads_tag).text
if None != root.find(delimiter_tag):
result['delimiter'] = root.find(delimiter_tag).text
if None != root.find(prefix_tag):
result['prefix'] = root.find(prefix_tag).text
if None != root.find(istruncated_tag):
result['istruncated'] = root.find(istruncated_tag).text
uploads = []
for upload in root.findall(upload_tag):
uploaddict = {}
key_tag = '{' + S3_XML_NS + '}Key'
uploadid_tag = '{' + S3_XML_NS + '}UploadId'
initiator_tag = '{' + S3_XML_NS + '}Initiator'
id_tag = '{' + S3_XML_NS + '}ID'
name_tag = '{' + S3_XML_NS + '}DisplayName'
owner_tag= '{' + S3_XML_NS + '}Owner'
initated_tag = '{' + S3_XML_NS + '}Initiated'
initiator = upload.find(initiator_tag)
if None != initiator:
uploaddict['initiator'] = {'id':initiator.find(id_tag).text, 'name':initiator.find(name_tag).text}
owner = upload.find(owner_tag)
if None != owner:
uploaddict['owner'] = {'id':owner.find(id_tag).text, 'name':owner.find(name_tag).text}
uploaddict['key'] = upload.find(key_tag).text
uploaddict['uploadid'] = upload.find(uploadid_tag).text
uploads.append(uploaddict)
result['uploads'] = uploads
commonPrefixes = []
for prefix in root.findall(commonprefixes_tag):
commonPrefixes.append({'prefix':prefix.find(prefix_tag).text})
result['commonPrefixes'] = commonPrefixes
return result
@resetHeaders
def bucket_complete_mpu(self, namespace, bucket, key, uid, secret, uploadId, etagdict):
qparms = {'uploadId':uploadId}
self._set_auth_and_ns_header('POST', namespace, bucket, key, uid, secret, CONTENT_TYPE_XML, parameters_to_sign = qparms)
uri = self._get_s3_key_uri(bucket, key)
parms = self._build_complete_mpu_payload(etagdict)
response = self.coreapi('POST', uri, parms, qparms, content_type=CONTENT_TYPE_XML)
if response.status_code != HTTP_OK:
print "failure", response
raise Exception('failed to complete mpu!')
return self._parse_complete_mpu_response(response)
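# Hedged end-to-end usage sketch for the multipart helpers above; `bourne`, the
# namespace, bucket, key and credentials are illustrative:
#   mpu = bourne.bucket_initiate_mpu('ns1', 'mybucket', 'bigobj', uid, secret)
#   etags = {}
#   for part in (1, 2, 3):
#       etags[part] = bourne.bucket_upload_mpu('ns1', 'mybucket', 'bigobj', uid, secret,
#                                              mpu['uploadId'], part)
#   bourne.bucket_complete_mpu('ns1', 'mybucket', 'bigobj', uid, secret, mpu['uploadId'], etags)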
@resetHeaders
def bucket_abort_mpu(self, namespace, bucket, key, uid, secret, uploadId):
qparms = {'uploadId':uploadId}
self._set_auth_and_ns_header('DELETE', namespace, bucket, key, uid, secret, CONTENT_TYPE_XML, parameters_to_sign = qparms)
uri = self._get_s3_key_uri(bucket, key)
response = self.coreapi('DELETE', uri, None, qparms, content_type=CONTENT_TYPE_XML)
if response.status_code != HTTP_OK and response.status_code != HTTP_NO_CONTENT:
print "failure", response
raise Exception('failed to abort mpu!')
return response
@resetHeaders
def bucket_list_mpu_parts(self, namespace, bucket, key, uid, secret, uploadId, maxParts, partNumMarker):
qparms = {'uploadId':uploadId}
parameters_to_sign = {'uploadId':uploadId}
if None != maxParts:
qparms['max-parts'] = maxParts
if None != partNumMarker:
qparms['part-number-marker'] = partNumMarker
self._set_auth_and_ns_header('GET', namespace, bucket, key, uid, secret, CONTENT_TYPE_XML, parameters_to_sign)
uri = self._get_s3_key_uri(bucket, key)
response = self.coreapi('GET', uri, None, qparms, content_type=CONTENT_TYPE_XML)
if response.status_code != HTTP_OK:
print "failure", response
raise Exception('failed to list mpu parts!')
return self._parse_list_mpu_parts_response(response.text)
@resetHeaders
def bucket_list_mpu_uploads(self, namespace, bucket, uid, secret, maxUploads, keyMarker, uploadIdMarker, delimiter, prefix):
parameters_to_sign = {'uploads':None}
qparms = {'uploads':None}
if keyMarker != None:
qparms['key-marker'] = keyMarker
if uploadIdMarker != None:
qparms['upload-id-marker'] = uploadIdMarker
if maxUploads != None:
qparms['max-uploads'] = maxUploads
if delimiter != None:
qparms['delimiter'] = delimiter
if prefix != None:
qparms['prefix'] = prefix
self._set_auth_and_ns_header('GET', namespace, bucket, None, uid, secret, CONTENT_TYPE_XML, parameters_to_sign)
uri = URI_S3_BUCKET_INSTANCE.format(bucket)
response = self.coreapi('GET', uri, None, qparms, content_type=CONTENT_TYPE_XML)
if response.status_code != HTTP_OK:
print "failure", response
raise Exception('failed to list mpu uploads!')
return self._parse_list_mpu_uploads_response(response.text)
def objtz_list(self):
return self.api('GET', URI_OBJECTTZ)
def objtz_show(self, uri):
return self.api('GET', URI_OBJECTTZ_INSTANCE.format(uri))
def objtz_create(self, name, tz):
parms = dict()
if (name):
parms['name'] = name
if (tz):
parms['network'] = tz
return self.api('POST', URI_OBJECTTZ, parms)
def objtz_update(self, objtz, tz):
parms = dict()
if (tz):
parms['network'] = tz
return self.api('PUT', URI_OBJECTTZ_INSTANCE.format(objtz), parms)
def objtz_delete(self, objtz):
return self.api('POST', URI_OBJECTTZ_DELETE.format(objtz))
def passwordgroup_create(self, uid, password, groups, namespace):
parms = dict()
if password:
parms['password'] = password
if groups:
parms['groups_list'] = groups
if namespace:
parms['namespace'] = namespace
response = self.__api('PUT', URI_PASSWORDGROUP.format(uid), parms)
if response.status_code != HTTP_OK:
print "failure:", response
raise Exception("failed to update password/groups")
def passwordgroup_update(self, uid, password, groups):
parms = dict()
if password:
parms['password'] = password
if groups:
parms['groups_list'] = groups
response = self.__api('POST', URI_PASSWORDGROUP.format(uid), parms)
if response.status_code != HTTP_OK:
print "failure:", response
raise Exception("failed to update password/groups")
def passwordgroup_listgroup(self, uid):
response = self.__api('GET', URI_PASSWORDGROUP.format(uid))
if response.status_code == HTTP_OK:
content = None
try:
content = self.__json_decode(response.text)
except:
content = response.text
return content.get('groups_list')
elif response.status_code == HTTP_NOT_FOUND:
return None
else:
print "failure:", response
raise Exception("failed to list user groups")
def passwordgroup_remove(self, uid):
response = self.__api('POST', URI_PASSWORDGROUP_DEACTIVATE.format(uid))
if response.status_code != HTTP_NO_CONTENT:
print "failure:", response
raise Exception("failed to remove user record")
def secret_create_key_user(self, user, expiryForExistingKey):
uriToUse = URI_SECRET_KEY_USER.format(user)
parms = {
'existing_key_expiry_time_mins' : expiryForExistingKey
}
return self.api('POST', uriToUse, parms)
def secret_delete_key_user(self, user, secretKeyToDelete):
uriToUse = URI_DELETE_SECRET_KEY_USER.format(user)
parms = {
'secret_key': secretKeyToDelete
}
response = self.__api('POST', uriToUse, parms)
return response
def secret_show_key_user(self, user):
uriToUse = URI_SECRET_KEY_USER.format(user)
return self.api('GET', uriToUse)
def secret_create_key(self, expiryForExistingKey):
parms = {
'existing_key_expiry_time_mins' : expiryForExistingKey
}
return self.api('POST', URI_SECRET_KEY, parms)
def secret_delete_key(self, secretKeyToDelete):
response = None
parms = {
'secret_key': secretKeyToDelete
}
response = self.__api('POST', URI_SECRET_KEY + '/deactivate', parms)
if response.status_code != HTTP_OK:
print "failure:", response
raise Exception('failed to delete user secret key')
return response
def secret_show_key(self):
return self.api('GET', URI_SECRET_KEY)
def add_webstorage_user(self, uid, namespace):
parms = {
'user': uid,
'namespace': namespace
}
response = self.api('POST', URI_WEBSTORAGE_USER, parms)
return response
def remove_webstorage_user(self, uid):
parms = {
'user': uid
}
print "calling delete user api with uid: ", uid
response = self.__api('POST', URI_WEBSTORAGE_USER_DEACTIVATE, parms)
def list_webstorage_user(self):
o = self.api('GET', URI_WEBSTORAGE_USER)
if (not o):
return ()
users = o['users_list']
if (type(users) != list):
users = [users]
ids = []
for user in users:
ids.append(user)
return ids
def verify_user(self, expected_user):
response = self.api('GET', URI_WHOAMI)
user = response['common_name']
if(expected_user.lower() != user):
raise Exception(user + " logged in but " + expected_user + " was expected")
def set_object_props(self, properties):
params = self.to_object_props_params(properties)
response = self.api_check_success('PUT', URI_OBJECT_PROPERTIES, params, None, CONTENT_TYPE_JSON, CONTENT_TYPE_JSON)
return response
def to_object_props_params(self, props):
params = dict()
#properties = dict()
params['properties'] = props
return params
#params['properties'] = properties
#properties['entry'] = []
#for key, value in props.iteritems():
# entry = dict()
# entry['key'] = key
# entry['value'] = value
# properties['entry'].append(entry)
#return params
def get_object_props(self):
response = self.api_check_success('GET', URI_OBJECT_PROPERTIES)
return self.from_object_props_params(response)
def from_object_props_params(self, params):
return params['properties']
# props = dict()
# entry = params['properties']['entry']
# if(isinstance(entry, dict)):
# key = entry['key']
# value = entry['value']
# props[key] = value
# else:
# for entry in params['properties']['entry']:
# print type(entry)
# #print key
# #print value
# key = entry['key']
# value = entry['value']
# props[key] = value
# return props
def verify_user_roles(self, roles):
response = self.api('GET', URI_WHOAMI)
rolesRead = response['vdc_roles']
rolesRead.extend(response['home_tenant_roles'])
for role in roles.split(","):
if (role not in rolesRead):
raise Exception(role + " not found in list for this user, roles read " + str(rolesRead))
print "Roles verified"
def kickstart_get(self):
response = self.__api('GET', URI_KICKSTART)
if (response.status_code != 200):
print "Connect to kickstart failed", response.status_code
raise Exception('Connect to kickstart failed')
def test_proxy_token(self):
if SKIP_SECURITY == '1':
return
# as root
response = self.__api('GET', URI_PROXY_TOKEN)
if (response.status_code != 200):
print "Could not get proxy token", response.status_code
raise Exception('Get proxy token failed')
h = response.headers
proxytoken = h.get(SEC_PROXYTOKEN_HEADER, "")
print 'proxy token: ', proxytoken
if (SEC_PROXYTOKEN_HEADER in _headers):
del _headers[SEC_PROXYTOKEN_HEADER]
# as a non tenant_admin user
if (SEC_AUTHTOKEN_HEADER in _headers):
del _headers[SEC_AUTHTOKEN_HEADER]
self.login(PROXY_USER_NAME, PROXY_USER_PASSWORD)
id = self.tenant_getid()
response2 = self.__api('GET', URI_TENANTS.format(id))
if (response2.status_code != 403):
print "Get tenant/id should have failed for this user, but got this code instead: ", response2.status_code
raise Exception('Negative test for tenant/id failed.')
# add proxy token
self._headers[SEC_PROXYTOKEN_HEADER] = proxytoken
response3 = self.__api('GET', URI_TENANTS.format(id))
if (response3.status_code != 200):
print "Get tenant/id should have succeeded with proxy token but did not: ", response3.status_code
raise Exception('Proxy token test failed.')
self._headers[SEC_PROXYTOKEN_HEADER] = ""
print 'Proxy token test completed.'
def test_password_change(self, user, password):
if SKIP_SECURITY == '1':
return
parms = { 'password' : 'ChangeMe1!' }
response = self.__api('PUT', URI_MY_PASSWORD_CHANGE, parms)
if (response.status_code != 400):
print "Did not get 400 when trying to change the password of the currently logged in user with the same value", response.status_code
raise Exception('Password change for the logged in user with the same value failed to return an error')
# Test another user password change
parms = {
'username' : 'svcuser',
'password' : 'ChangeMe'
}
response = self.__api('PUT', URI_USER_PASSWORD_CHANGE, parms)
if (response.status_code != 400):
print "Did not get 400 when trying to change the password of the provided user with the same value", response.status_code
raise Exception('Password change for the provided user with the same value failed to return an error')
def audit_query(self, timeslot, language):
print 'Querying audit logs (timeslot: ' + timeslot + ' language: ' + language + ') ...';
response = self.__api('GET', URI_AUDIT_QUERY.format(timeslot, language))
if (response.status_code != 200):
print "Query audit logs failed", response.status_code
raise Exception('Query audit logs failed')
print 'Query audit logs succeeded. Result: ' + response.text
def test_formlogin(self, user, password):
if SKIP_SECURITY == '1':
return
scheme = 'https://'
ipaddr = self._ipaddr
port=PORT
cookiejar = cookielib.LWPCookieJar()
if USE_SSL == '0':
scheme = 'http://'
ipaddr = ipaddr
port = '8080'
if(port == APISVC_PORT):
authsvcPort = '7443'
elif(port == LB_API_PORT):
authsvcPort = LB_API_PORT
login_response = requests.get(scheme+ipaddr+':'+port+'/tenant?using-formlogin',
headers=_headers, verify=False, cookies=cookiejar, allow_redirects=False)
if(not login_response.status_code == SEC_REDIRECT):
raise Exception('The first response to /tenant?using-formlogin is not redirect (302)')
location = login_response.headers['Location']
if(not location):
raise Exception('The redirect location of the authentication service is not provided')
# Make the second request. Should get 200 with form login page
login_response = requests.get(location, headers=_headers, verify=False,
cookies=cookiejar, allow_redirects=False)
if(login_response.status_code != requests.codes['ok']):
raise Exception('Failed to get custom login page. Failure code: '
+ str(login_response.status_code) + ' Error: ' + login_response.text)
# Get the formlogin page
FORM_LOGIN_URI = scheme+ipaddr+':' + '4443' + '/formlogin?using-formlogin'
login_response = requests.get(FORM_LOGIN_URI, verify=False, allow_redirects=False, cookies=cookiejar)
if(login_response.status_code != requests.codes['ok']):
raise Exception('Failed to get custom login page. Failure code: '
+ str(login_response.status_code) + ' Error: ' + str(login_response.text) )
# Check /formlogin
login_response = requests.get(scheme+ipaddr+':'+authsvcPort+'/formlogin', verify=False, allow_redirects=False)
if(login_response.status_code != 200):
raise Exception('The request /formlogin should return 200, but is ' + str(login_response.status_code))
# Check the formlogin with some service but without the signature.
# The returned page should be the form login page
FORM_LOGIN_URI_NO_SIGNATURE=FORM_LOGIN_URI + '&service=https://www.fake.com:1234/someservice'
login_response = requests.get(FORM_LOGIN_URI_NO_SIGNATURE, verify=False, allow_redirects=False, cookies=cookiejar)
if(login_response.status_code != 200):
raise Exception('ERROR: The server failed to return 200 for the service request to /formlogin. The return code is: '
+ str(login_response.status_code))
theContent=login_response.content
match = re.search('action=".(.+?)"', theContent)
if match:
foundService = match.group(1)
if (not match):
raise Exception('ERROR: The server failed to return the service URI in the form login. The return code is: '
+ str(login_response.status_code))
POST_LOGIN_URI=scheme+ipaddr+':' + '4443' + foundService
parms = {
'username': 'root',
'password': 'ChangeMe1!'
}
newHeaders = dict(_headers)  # copy so the shared header dict is not mutated
newHeaders["Content-Type"] = "application/x-www-form-urlencoded"
response = requests.post(POST_LOGIN_URI, headers=newHeaders, data=parms, verify=False, allow_redirects=False, cookies=cookiejar)
print str(POST_LOGIN_URI)
if(response.status_code != 302):
raise Exception('ERROR: The server failed to return 302. The return code is: '
+ str(response.status_code))
# Check that the Location does not have the fake URL
location = response.headers['Location']
locationText = str(location)
fakeURLFound = locationText.find('www.fake.com') != -1
if (fakeURLFound):
raise Exception('Failed to get redirected to the original request site.')
def test_vulnerability(self, user, password):
if SKIP_SECURITY == '1':
return
scheme = 'https://'
ipaddr = self._ipaddr
port=PORT
cookiejar = cookielib.LWPCookieJar()
# cookiejar_new will contain authentication token from /login request
cookiejar_new = cookielib.LWPCookieJar()
if USE_SSL == '0':
scheme = 'http://'
ipaddr = ipaddr
port = '8080'
if(port == APISVC_PORT):
authsvcPort = '7443'
elif(port == LB_API_PORT):
authsvcPort = LB_API_PORT
# Variables
FAKE_URL='https://www.fake.com'
FAKE_URL_PATTERN='www.fake.com'
FORM_LOGIN_FAKE_URL=scheme+ipaddr+':'+port+'/formlogin?using-formlogin=true&service='+FAKE_URL+'/tenant';
LOGIN_FAKE_URL=scheme+ipaddr+':'+port+'/login?using-cookies=true&service='+FAKE_URL+'/tenant';
# Test the /login URL
login_response = requests.get(LOGIN_FAKE_URL, headers=_headers,
auth=(user,password), verify=False, cookies=cookiejar, allow_redirects=False)
if(login_response.status_code != SEC_REDIRECT):
print "test_vulnerability:login_response.status_code=" + str(login_response.status_code)
raise Exception('The response to GET request to the auth service /login with proper credentials is not redirect (302)')
location = login_response.headers['Location']
if(not location):
raise Exception('The redirect location of the GET request to the auth service /login is not provided')
for cookie in login_response.cookies:
cookiejar_new.set_cookie(cookie)
# The location should not contain fake URL
locationText = str(location)
fakeURLFound = locationText.find(FAKE_URL_PATTERN) != -1
if (fakeURLFound):
raise Exception('GET /login with proper user credentials failed to get redirected to the request site instead of the fake site.')
        # GET /formlogin. It should return the formlogin page, because we did not provide a token
        # but only user credentials. See CTRL-1830
login_response = requests.get(FORM_LOGIN_FAKE_URL, headers=_headers,
auth=(user,password), verify=False, cookies=cookiejar, allow_redirects=False)
if(login_response.status_code != requests.codes['ok']):
print "test_vulnerability:login_response.status_code=" + str(login_response.status_code)
raise Exception('The response to the GET request to the auth service /formlogin with proper credentials and without token is not OK')
formlogin_page = login_response.content
if(not formlogin_page):
raise Exception('The GET request to /formlogin without a token did not produce the formlogin page')
# Check that this is a real form login page by looking for FORMLOGIN_PATTERN
FORMLOGIN_PATTERN='<title>ViPR Login</title>'
formLoginCheck = formlogin_page.find(FORMLOGIN_PATTERN) != -1
if (not formLoginCheck):
raise Exception('GET /formlogin with proper token failed to return a proper form login page containing the pattern: ' + FORMLOGIN_PATTERN)
# Repeat the GET request with a token
login_response = requests.get(FORM_LOGIN_FAKE_URL, headers=_headers,
auth=(user,password), verify=False, cookies=cookiejar_new, allow_redirects=False)
if(login_response.status_code != SEC_REDIRECT):
print "test_vulnerability:login_response.status_code=" + str(login_response.status_code)
raise Exception('The GET request to /formlogin with proper token did not redirect (302)')
location = login_response.headers['Location']
if(not location):
raise Exception('The redirect location of the authentication service is not provided')
# The location should not contain fake URL
locationText = str(location)
fakeURLFound = locationText.find(FAKE_URL_PATTERN) != -1
if (fakeURLFound):
raise Exception('GET /formlogin with proper token failed to get redirected to the request site instead of the fake site.')
print "bourne.test_vulnerability finished OK"
def test_tenant_access_permissions(self, user, password):
if SKIP_SECURITY == '1':
return
ipaddr = self._ipaddr
port=PORT
cookiejar = cookielib.LWPCookieJar()
id = self.tenant_getid()
print "root tenant id: " + str(id)
subtenants = self.tenant_list(id)
for tenant in subtenants:
print "subtenant id: " + str(tenant['id'])
self.login(user, password)
# try root tenant
response = self.__api('GET', URI_PROJECTS.format(id))
print "response status to get projects for root tenant = " + str(response.status_code)
if (response.status_code != 200):
print "The access to the projects of the root tenant should result in 200 status code"
raise Exception('test_tenant_access_permissions: failed')
# try the first subtenant
response = self.__api('GET', URI_PROJECTS.format(tenant['id']))
print "response status to get projects for the first subtenant = " + str(response.status_code)
if (response.status_code != 403):
print "The access to the root tenant/{ROOT_TENANT_ID}/projects are not allowed for the user [email protected]"
print "The response status = " + str(response.status_code)
raise Exception('test_tenant_access_permissions: failed')
# try to get subtenants of the root subtenant. For user1 this operation should fail with error code 403
response = self.__api('GET', URI_TENANTS_SUBTENANT.format(id))
print "Response status to get subtenants of the root tenant = " + str(response.status_code)
if (response.status_code != 403):
print "The access to the tenants/{ROOT_TENANT_ID}/subtenants are not allowed for the user [email protected]"
print "The response status = " + str(response.status_code)
raise Exception('test_tenant_access_permissions: failed')
            # try to get subtenants of the subtenant. In V1 this operation is not supported
response = self.__api('GET', URI_TENANTS_SUBTENANT.format(tenant['id']))
print "Response status to get subtenants of the subtenant = " + str(response.status_code)
if (response.status_code != 405):
print "The access to the tenants/{SUB_TENANT_ID}/subtenants are not allowed"
print "The response status = " + str(response.status_code)
raise Exception('test_tenant_access_permissions: failed')
break
def test_tenant_duplicate_message(self, domain, subtenantName, deactivateTenant):
if SKIP_SECURITY == '1':
return
id = self.tenant_getid()
if deactivateTenant == "true":
            # We create the test subtenant only if this is the first test, where the tenant ID is expected.
response = self.tenant_create(subtenantName, domain, "50", subtenantName)
subtenants = self.tenant_list(id)
foundNewSubtenant=False
for tenant in subtenants:
subtName = tenant['name']
subtId = tenant['id']
if (subtName == subtenantName):
foundNewSubtenant=True
if(BOURNE_DEBUG == '1'):
print "test_tenant_duplicate_message: got the subtenant ID: "+ str(subtId)
if deactivateTenant == "true":
self.tenant_deactivate(subtId)
response = self.tenant_create(subtenantName, domain, "50", subtenantName)
if(BOURNE_DEBUG == '1'):
print "test_tenant_duplicate_message: the request to create a subtenant with duplicated mappings and name returns: "+ str(response)
# Here we should get an error which contains the tenant URI which has the same user mappings
rawresponse=str(response)
entryFound = rawresponse.find("A component/resource with the label " + subtenantName + " already exists") != -1
if deactivateTenant == "false":
if (not entryFound):
msg = 'Did not get the duplicate subtenant error while creating a subtenant with the same name and user mapping'
raise Exception(msg)
break
if(not foundNewSubtenant):
raise Exception('test_tenant_duplicate_message: failed to find the newly created subtenant')
def test_tenant_domain_update(self, user, password, domain, key, value):
if SKIP_SECURITY == '1':
return
ipaddr = self._ipaddr
port=PORT
cookiejar = cookielib.LWPCookieJar()
domainWithSpaces = " " + domain + " "
id = self.tenant_getid()
response = self.__api('GET', URI_TENANTS.format(id))
rawresponse = response.text
entryFound = rawresponse.find(domain) != -1
if (entryFound):
# remove the domain entry secureldap.com from root tenant
self.tenant_update_domain(id, domainWithSpaces, "remove", key, value)
# get the root tenant again to make sure that the domain secureldap.com is not there
response = self.__api('GET', URI_TENANTS.format(id))
rawresponse = response.text
entryFound = rawresponse.find(domain) != -1
if (entryFound):
raise Exception('Failed to remove domain entry secureldap.com in the root tenant.')
# add new domain with spaces before and after domain name
self.tenant_update_domain(id, domainWithSpaces, "add", key, value)
response = self.__api('GET', URI_TENANTS.format(id))
rawresponse = response.text
entryFound = rawresponse.find(domain) != -1
if (not entryFound):
raise Exception('Failed to add domain entry secureldap.com in the root tenant with spaces.')
def test_logout(self, user):
if SKIP_SECURITY == '1':
return
response = self.__api('GET', URI_LOGOUT)
if (response.status_code != 200):
print "logout failed with code: " + str(response.status_code)
raise Exception('security logout: failed')
rawresponse = response.text
logoutOK = rawresponse.find(user) != -1
if (not logoutOK):
raise Exception('Failed to logout the user: ' + user)
def monitor_query(self, timeslot, language):
print 'Querying monitor logs (timeslot: ' + timeslot + ' language: ' + language + ') ...';
response = self.__api('GET', URI_MONITOR_QUERY.format(timeslot))
if (response.status_code != 200):
print "Query monitor logs failed", response.status_code
raise Exception('Query monitor logs failed')
        print 'Query monitor logs succeeded. Result: ' + response.text
    # This routine will raise an exception if the obj passed
    # in is not a dictionary
def assert_is_dict(self, obj):
if (not type(obj) is dict):
raise Exception(obj)
def workflow_list(self):
return self.api('GET', URI_WORKFLOW_LIST)
def workflow_get(self, uri):
return self.api('GET', URI_WORKFLOW_INSTANCE.format(uri))
def workflow_show_task(self, uri, task):
workflow_task_uri = URI_WORKFLOW_INSTANCE + '/tasks/{1}'
return self.api('GET', workflow_task_uri.format(uri, task))
def workflow_resume(self, uri):
o = self.api('PUT', URI_WORKFLOW_RESUME.format(uri))
result = self.api_sync_2(o['resource']['id'], o['op_id'], self.workflow_show_task)
return result
def workflow_rollback(self, uri):
o = self.api('PUT', URI_WORKFLOW_ROLLBACK.format(uri))
result = self.api_sync_2(o['resource']['id'], o['op_id'], self.workflow_show_task)
return result
def workflow_suspend(self, uri, step):
        return self.api('PUT', URI_WORKFLOW_SUSPEND.format(uri, step), None)
def workflow_recent(self):
return self.api('GET', URI_WORKFLOW_RECENT)
def workflow_steps(self, uri):
return self.api('GET', URI_WORKFLOW_STEPS.format(uri))
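    # Illustrative workflow usage (comment only, not executed): suspend a running
    # workflow at a given step, then resume or roll it back. 'wf_uri' and 'step_uri'
    # are hypothetical identifiers obtained from workflow_recent()/workflow_steps().
    #   self.workflow_suspend(wf_uri, step_uri)
    #   result = self.workflow_resume(wf_uri)      # blocks until the resume task completes
    #   result = self.workflow_rollback(wf_uri)    # or roll the suspended workflow back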
#
# Compute Resources - Vcenter
#
def vcenter_create(self, label, tenant, ipaddress, devport,
username, password, osversion, usessl):
uri = self.__tenant_id_from_label(tenant)
parms = { 'name' : label,
'ip_address' : ipaddress,
'os_version' : osversion,
'port_number' : devport,
'user_name' : username,
'password' : password,
'use_ssl' : usessl
}
return self.api('POST', URI_TENANTS_VCENTERS.format(uri), parms)
def vcenter_list(self, tenant):
uri = self.__tenant_id_from_label(tenant)
o = self.api('GET', URI_TENANTS_VCENTERS.format(uri), None)
if (not o):
return {}
return o['vcenter']
def vcenter_query(self, name):
if (self.__is_uri(name)):
return name
label = name
tenants = self.tenant_list(self.tenant_getid())
        # since we are using sysadmin as the user, check all subtenants for now;
        # go in reverse order since we are most likely in the latest subtenant
for tenant in reversed(tenants):
#print tenant
vcenters = self.vcenter_list(tenant['id'])
for vcenter in vcenters:
if (vcenter['name'] == label):
return vcenter['id']
raise Exception('bad vcenter name: ' + name)
def vcenter_show(self, name):
uri = self.vcenter_query(name)
return self.api('GET', URI_VCENTER.format(uri))
def vcenter_show_task(self, vcenter, task):
uri_vcenter_task = URI_VCENTER + '/tasks/{1}'
return self.api('GET', uri_vcenter_task.format(vcenter, task))
def vcenter_discover(self, name):
uri = self.vcenter_query(name)
o = self.api('POST', URI_VCENTER_DISCOVER.format(uri))
self.assert_is_dict(o)
try:
sync = self.api_sync_2(o['resource']['id'], o['op_id'], self.vcenter_show_task)
s = sync['state']
m = sync['message']
except:
print o
return (o, s, m)
def vcenter_delete(self, name):
uri = self.vcenter_query(name)
return self.api('POST', URI_RESOURCE_DEACTIVATE.format(URI_VCENTER.format(uri)))
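    # Illustrative vCenter lifecycle (comment only, not executed); all labels and
    # credentials below are made-up examples:
    #   self.vcenter_create('vc1', 'Provider Tenant', '10.0.0.5', '443',
    #                       'admin', 'password', '6.0', 'true')
    #   (o, state, message) = self.vcenter_discover('vc1')   # synchronous discovery task
    #   self.vcenter_delete('vc1')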
#
# Compute Resources - Vcenter Data Center
#
def datacenter_create(self, label, vcenter):
uri = self.vcenter_query(vcenter)
parms = { 'name' : label
}
return self.api('POST', URI_VCENTER_DATACENTERS.format(uri), parms)
def datacenter_list(self, vcenter):
uri = self.vcenter_query(vcenter)
o = self.api('GET', URI_VCENTER_DATACENTERS.format(uri), None)
if (not o):
return {}
return o['vcenter_data_center']
def datacenter_query(self, name):
if (self.__is_uri(name)):
return name
(vcenter, label) = name.split('/', 1)
datacenters = self.datacenter_list(vcenter)
for datacenter in datacenters:
if (datacenter['name'] == label):
return datacenter['id']
raise Exception('bad datacenter name: ' + name)
def datacenter_show(self, name):
(vcenter, label) = name.split('/', 1)
vcenterUri = self.vcenter_query(vcenter)
uri = self.datacenter_query(name)
return self.api('GET', URI_DATACENTER.format(uri))
def datacenter_delete(self, name):
(vcenter, label) = name.split('/', 1)
vcenterUri = self.vcenter_query(vcenter)
uri = self.datacenter_query(name)
return self.api('POST', URI_RESOURCE_DEACTIVATE.format(URI_DATACENTER.format(uri)))
#
# Compute Resources - Cluster
#
def cluster_create(self, label, tenant, project, datacenter):
uri = self.__tenant_id_from_label(tenant)
parms = { 'name' : label
}
if(project):
parms['project'] = self.tenant_project_query(tenant, project)
if (not parms['project']):
raise Exception('Could not find project : ' + project + ' for tenant org ' + tenant)
if(datacenter):
parms['vcenter_data_center'] = self.datacenter_query(datacenter)
return self.api('POST', URI_TENANTS_CLUSTERS.format(uri), parms)
def cluster_list(self, tenant):
uri = self.__tenant_id_from_label(tenant)
o = self.api('GET', URI_TENANTS_CLUSTERS.format(uri), None)
if (not o):
return {}
return o['cluster']
def cluster_query(self, name):
if (self.__is_uri(name)):
return name
(tenantLbl, label) = name.split('/', 1)
tenant = self.tenant_query(tenantLbl)
clusters = self.cluster_list(tenant)
for cluster in clusters:
if (cluster['name'] == label):
return cluster['id']
raise Exception('bad cluster name: ' + name)
def cluster_show(self, name):
uri = self.cluster_query(name)
return self.api('GET', URI_CLUSTER.format(uri))
def cluster_delete(self, name, detachstorage):
uri = self.cluster_query(name)
o = self.api('POST', URI_CLUSTER_DEACTIVATE.format(uri, detachstorage))
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['id'], self.cluster_show_task)
return (o,s)
def cluster_show_task(self, uri, task):
uri_cluster_task = URI_CLUSTER + '/tasks/{1}'
return self.api('GET', uri_cluster_task.format(uri, task))
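    # Note: cluster_query() and datacenter_query() expect composite names of the form
    # 'tenant/cluster' and 'vcenter/datacenter' respectively. Illustrative (not executed,
    # names are examples):
    #   cluster_id = self.cluster_query('Provider Tenant/cluster1')
    #   dc_id = self.datacenter_query('vc1/datacenter1')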
# Service Catalog
def catalog_search(self, servicename, tenant, categoryName=None):
catalog_services = self.api('GET', URI_CATALOG_SERVICE_SEARCH_NAME.format(servicename))
for catalog_service in catalog_services['resource']:
service = self.catalog_service_query(catalog_service['id'])
category = self.catalog_category_query(service['catalog_category']['id'])
if category['tenant']['id'] == tenant and service['name'] == servicename and (categoryName is None or categoryName == category['name']):
return service
raise Exception('unable to find service ' + servicename + ' in tenant ' + tenant + ' in category ' + categoryName)
def catalog_category_query(self, id):
return self.api('GET', URI_CATALOG_CATEGORY.format(id))
def catalog_service_query(self, id):
return self.api('GET', URI_CATALOG_SERVICE.format(id))
def __catalog_poll(self, id):
executing = True
orderstatus = ""
while executing:
order = self.api('GET', URI_CATALOG_ORDER.format(id))
orderstatus = order['order_status']
if orderstatus != 'PENDING' and orderstatus != 'EXECUTING':
executing = False
else:
time.sleep(5)
        # The lines below are commented out because, even on error, we need to check whether the system is left in a proper state
#if orderstatus == 'ERROR':
# raise Exception('error during catalog order')
return order
def catalog_upgrade(self, tenant):
tenant = self.__tenant_id_from_label(tenant)
return self.api('POST', URI_CATALOG_CATEGORY_UPGRADE.format(tenant))
def catalog_order(self, servicename, tenant, parameters, category=None, failOnError=None):
tenant = self.__tenant_id_from_label(tenant)
self.catalog_upgrade(tenant)
service = self.catalog_search(servicename, tenant, category)
parms = { 'tenantId': tenant,
'catalog_service': service['id']
}
ordervalues = []
for parameter in parameters.split(','):
values = parameter.split('=')
ordervalues.append({ 'label':values[0],
'value': values[1]
})
parms['parameters'] = ordervalues
order = self.api('POST', URI_CATALOG_ORDERS, parms)
completedOrder = self.__catalog_poll(order['id'])
if (failOnError == "true" and completedOrder['order_status'] == 'ERROR'):
raise Exception('error during catalog order: ' + completedOrder['id'] + " " + completedOrder['message'])
return completedOrder
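    # The 'parameters' argument to catalog_order() is a flat comma-separated list of
    # label=value pairs. Illustrative only (service and field names are made up):
    #   self.catalog_order('CreateBlockVolume', 'Provider Tenant',
    #                      'name=vol1,size=1GB,virtualPool=vpool1', failOnError='true')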
#
# Compute Resources - Host
#
def host_create(self, label, tenant, type, hostname, devport,
username, password, osversion, usessl,
project, cluster, datacenter, discoverable):
uri = self.__tenant_id_from_label(tenant)
parms = { 'name' : label,
'type' : type,
'host_name' : hostname,
'os_version' : osversion,
'port_number' : devport,
'user_name' : username,
'password' : password,
'use_ssl' : usessl,
'discoverable' : discoverable
}
if(datacenter):
parms['vcenter_data_center'] = self.datacenter_query(datacenter)
if(cluster):
parms['cluster'] = self.cluster_query(cluster)
if(project):
parms['project'] = self.tenant_project_query(tenant, project)
return self.api('POST', URI_TENANTS_HOSTS.format(uri), parms)
def host_update(self, uri, cluster):
clusterURI = None
if (cluster):
if (cluster == 'null'):
clusterURI = "null"
else:
clusterURI = self.cluster_query(cluster)
parms = {
'cluster' : clusterURI
}
o = self.api('PUT', URI_HOST.format(uri), parms)
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['id'], self.host_show_task)
return (o,s)
def host_list(self, tenant):
uri = self.__tenant_id_from_label(tenant)
o = self.api('GET', URI_TENANTS_HOSTS.format(uri), None)
if (not o):
return {}
return o['host']
def host_query(self, name):
if (self.__is_uri(name)):
return name
label = name
tenants = self.tenant_list(self.tenant_getid())
        # since we are using sysadmin as the user, check all subtenants for now;
        # go in reverse order since we are most likely in the latest subtenant
for tenant in reversed(tenants):
if(BOURNE_DEBUG == '1'):
print tenant
hosts = self.host_list(tenant['id'])
for host in hosts:
host_detail = self.host_show(host['id'])
if (host['name'] == label and host_detail['inactive'] == False):
return host['id']
        # also check the root tenant as a last resort
hosts = self.host_list(self.tenant_getid())
for host in hosts:
host_detail = self.host_show(host['id'])
if (host['name'] == label and host_detail['inactive'] == False):
return host['id']
raise Exception('bad host name: ' + name)
def host_show(self, name):
uri = self.host_query(name)
return self.api('GET', URI_HOST.format(uri))
def host_delete(self, name, detachstorage):
uri = self.host_query(name)
o = self.api('POST', URI_HOST_DEACTIVATE.format(uri, detachstorage))
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['id'], self.host_show_task)
return (o,s)
def initiator_show_tasks(self, uri):
uri_initiator_task = URI_INITIATORS + '/tasks'
return self.api('GET', uri_initiator_task.format(uri))
def initiator_show_task(self, uri, task):
uri_initiator_task = URI_INITIATOR + '/tasks/{1}'
return self.api('GET', uri_initiator_task.format(uri, task))
def host_show_task(self, uri, task):
uri_host_task = URI_HOST + '/tasks/{1}'
return self.api('GET', uri_host_task.format(uri, task))
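    # Illustrative host registration flow (comment only, not executed); labels and
    # credentials are examples:
    #   self.host_create('host1', 'Provider Tenant', 'Other', 'host1.example.com', '22',
    #                    'root', 'password', None, 'false', None, None, None, 'false')
    #   host_uri = self.host_query('host1')
    #   (o, state) = self.host_update(host_uri, 'Provider Tenant/cluster1')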
#
# Actionable Events
#
def event_show(self, uri):
return self.api('GET', URI_EVENT_GET.format(uri))
def event_delete(self, uri):
return self.api('POST', URI_EVENT_DELETE.format(uri))
def event_show_task(self, event, task):
return self.api('GET', URI_TASK_GET.format(task))
def event_approve(self, uri):
o = self.api('POST', URI_EVENT_APPROVE.format(uri))
self.assert_is_dict(o)
try:
tr_list = o['task']
for tr in tr_list:
sync = self.api_sync_2(tr['resource']['id'], tr['id'], self.event_show_task)
s = sync['state']
m = sync['message']
except:
print o
return (o, s, m)
def event_decline(self, uri):
return self.api('POST', URI_EVENT_DECLINE.format(uri))
def event_list(self, tenant):
uri = self.__tenant_id_from_label(tenant)
o = self.api('GET', URI_EVENT_LIST.format(uri))
if (not o):
return {}
return o['event']
#
# Compute Resources - host initiator
#
def initiator_create(self, host, protocol, port, node):
uri = self.host_query(host)
parms = { 'protocol': protocol,
'initiator_port' : port,
'initiator_node' : node,
}
try:
o = self.api('POST', URI_HOST_INITIATORS.format(uri), parms)
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.initiator_show_task)
except:
print o, s
return (o, s)
def initiator_list(self, host):
uri = self.host_query(host)
o = self.api('GET', URI_HOST_INITIATORS.format(uri), None)
if (not o):
return {}
return o['initiator']
def initiator_query(self, name):
if (name.find('/') == -1 and self.__is_uri(name)):
return name
(host, label) = name.split('/', 1)
host_uri = self.host_query(host)
initiators = self.initiator_list(host_uri)
for initiator in initiators:
if (initiator['name'] == label):
return initiator['id']
raise Exception('bad initiator port: ' + name)
def initiator_show(self, name):
(host, label) = name.split('/', 1)
hostUri = self.host_query(host)
uri = self.initiator_query(name)
return self.api('GET', URI_INITIATOR.format(uri))
def initiator_delete(self, name):
(host, label) = name.split('/', 1)
hostUri = self.host_query(host)
uri = self.initiator_query(name)
o = self.api('POST', URI_RESOURCE_DEACTIVATE.format(URI_INITIATOR.format(uri)))
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.initiator_show_task)
return (o, s)
def initiator_register(self, name):
uri = self.initiator_query(name)
return self.api('POST', URI_INITIATOR_REGISTER.format(uri))
def initiator_deregister(self, name):
uri = self.initiator_query(name)
return self.api('POST', URI_INITIATOR_DEREGISTER.format(uri))
def initiator_aliasget(self, name, systemuri):
uri = self.initiator_query(name)
return self.api('GET', URI_INITIATOR_ALIASGET.format(uri,systemuri))
def initiator_aliasset(self, name, systemuri, alias):
uri = self.initiator_query(name)
params = {'system_uri': systemuri,
'initiator_alias': alias
}
return self.api('PUT', URI_INITIATOR_ALIASSET.format(uri), params)
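    # Note: initiator_query(), initiator_show() and initiator_delete() take composite
    # names of the form 'host/initiator_port'. Illustrative (not executed; the WWNs are
    # made-up examples):
    #   (o, state) = self.initiator_create('host1', 'FC', '10:00:00:00:00:00:00:01',
    #                                      '20:00:00:00:00:00:00:01')
    #   init_id = self.initiator_query('host1/10:00:00:00:00:00:00:01')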
#
# Compute Resources - host ipinterface
#
def ipinterface_create(self, host, protocol, ipaddress, netmask, prefix, scope):
uri = self.host_query(host)
parms = { 'protocol' : protocol,
'ip_address' : ipaddress,
'netmask' : netmask,
'prefix_length' : prefix,
'scope_id' : scope
}
return self.api('POST', URI_HOST_IPINTERFACES.format(uri), parms)
def ipinterface_list(self, host):
uri = self.host_query(host)
print uri
o = self.api('GET', URI_HOST_IPINTERFACES.format(uri), None)
if (not o):
return {}
return o['ip_interface']
def ipinterface_query(self, name):
if (self.__is_uri(name)):
return name
(host, label) = name.split('/', 1)
ipinterfaces = self.ipinterface_list(host)
for ipinterface in ipinterfaces:
if (ipinterface['name'] == label):
return ipinterface['id']
raise Exception('bad ipinterface ip address: ' + name)
def ipinterface_show(self, name):
(host, label) = name.split('/', 1)
hostUri = self.host_query(host)
uri = self.ipinterface_query(name)
return self.api('GET', URI_IPINTERFACE.format(uri))
def ipinterface_delete(self, name):
(host, label) = name.split('/', 1)
hostUri = self.host_query(host)
uri = self.ipinterface_query(name)
return self.api('POST', URI_RESOURCE_DEACTIVATE.format(URI_IPINTERFACE.format(uri)))
def ipinterface_register(self, name):
uri = self.ipinterface_query(name)
return self.api('POST', URI_IPINTERFACE_REGISTER.format(uri))
def ipinterface_deregister(self, name):
uri = self.ipinterface_query(name)
return self.api('POST', URI_IPINTERFACE_DEREGISTER.format(uri))
#
# Retention classes
#
def retention_class_list(self, namespace):
return self.api('GET', URI_NAMESPACE_RETENTION_BASE.format(namespace))
def retention_class_get(self, namespace, name):
return self.api('GET', URI_NAMESPACE_RETENTION_INSTANCE.format(namespace, name))
def retention_class_create(self, namespace, name, period):
params = {
'name': name,
'period': period
}
return self.api('POST', URI_NAMESPACE_RETENTION_BASE.format(namespace), params)
def retention_class_update(self, namespace, name, period):
params = {
'period': period
}
return self.api('PUT', URI_NAMESPACE_RETENTION_INSTANCE.format(namespace, name), params)
def set_bucket_retention(self, bucket, period):
params = {
'period': period
}
return self.api('PUT', URI_BUCKET_RETENTION.format(bucket), params)
def get_bucket_retention(self, bucket):
return self.api('GET', URI_BUCKET_RETENTION.format(bucket))
def _build_update_bucket_owner_payload(self, namespace, newowner):
root = ET.Element('object_bucket_update_owner')
ET.SubElement(root, 'namespace').text = namespace
ET.SubElement(root, 'new_owner').text = newowner
return ET.tostring(root)
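    # For reference, the XML payload built above has this shape (values illustrative):
    #   <object_bucket_update_owner>
    #     <namespace>ns1</namespace>
    #     <new_owner>newuser</new_owner>
    #   </object_bucket_update_owner>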
def bucket_update_owner(self, namespace, bucket, newowner):
uri = URI_BUCKET_UPDATE_OWNER.format(bucket)
parms = self._build_update_bucket_owner_payload(namespace, newowner)
response = self.coreapi('POST', uri, parms, content_type=CONTENT_TYPE_XML)
if response.status_code != HTTP_OK:
print "failure", response.status_code, response.text
raise Exception('failed to update bucket owner')
def vdcinfo_insert(self, name, secretkey, dataEndpoint, cmdEndpoint):
parms = {
'vdcName' : name,
'dataEndPoints' : dataEndpoint,
'cmdEndPoints' : cmdEndpoint,
'secretKeys' : secretkey,
}
while True:
print "VDC insert Params = ", parms
try:
resp = self.coreapi('PUT', URI_VDCINFO_INSERT.format(name), parms, {})
print resp.status_code
if (resp.status_code != 200):
raise Exception("vdcinfo_insert failed, will retry")
return resp
except:
time.sleep(10)
continue
def vdcinfo_show(self, uri):
return self.api('GET', URI_VDCINFO_GET.format(uri))
def vdcinfo_query(self, name):
print 'name' , name
if name.startswith('urn:storageos:VirtualDataCenterData'):
return name
vdcinfo = self.vdcinfo_show(name)
print vdcinfo
return vdcinfo['vdcId']
def vdcinfo_local(self):
return self.api('GET', URI_VDCINFO_LOCAL)
def vdcinfo_list(self):
return self.api('GET', URI_VDCINFO_LIST)
def vnas_list(self):
vnaslist = self.api('GET', URI_VNAS_SERVERS)
if('vnas_server' in vnaslist):
return vnaslist['vnas_server']
def vnas_query(self, name):
if name.startswith('urn:storageos:VirtualNAS'):
return name
vnasservers = self.vnas_list()
for vnas in vnasservers:
if('name' in vnas and vnas['name'] == name):
return vnas['id']
raise Exception('bad vnas name ' + name)
def vnas_show(self, name):
vnasid = self.vnas_query(name)
if(vnasid is not None):
return self.api('GET', URI_VNAS_SERVER.format(vnasid))
def assign_vnas(self, name, project):
vnasid = self.vnas_query(name)
projectURI = self.project_query(project)
params = dict()
vnaslist = []
if(projectURI is not None):
vnaslist.append(vnasid)
params['vnas_server'] = vnaslist
return self.api('PUT', URI_VNAS_SERVER_ASSIGN.format(projectURI), params)
def unassign_vnas(self, name, project):
vnasid = self.vnas_query(name)
projectURI = self.project_query(project)
params = dict()
vnaslist = []
if(projectURI is not None):
vnaslist.append(vnasid)
params['vnas_server'] = vnaslist
return self.api('PUT', URI_VNAS_SERVER_UNASSIGN.format(projectURI), params)
def unmanaged_volume_query(self, name):
if (self.__is_uri(name)):
return name
results = self.un_managed_volume_search(name)
resources = results['resource']
for resource in resources:
# Look for exact match
if (resource['match'] == name):
return resource['id']
# Look for exact "startsWith" match (as in VPlex)
if (resource['match'].startswith(name + " (")):
return resource['id']
raise Exception('bad volume name ' + name)
def un_managed_volume_search(self, name):
if (self.__is_uri(name)):
return name
if (name):
return self.api('GET', URI_UNMANAGED_VOLUMES_SEARCH_NAME.format(name))
def ingest_show_task(self, vol, task):
uri_ingest_task = URI_VDC + '/tasks/{1}'
return self.api('GET', uri_ingest_task.format(vol, task))
def ingest_exported_volumes(self, host, cluster, varray, vpool, project, volspec):
projectURI = self.project_query(project).strip()
varrayURI = self.neighborhood_query(varray).strip()
vpoolURI = self.cos_query("block", vpool).strip()
params = {
'project' : projectURI,
'varray' : varrayURI,
'vpool' : vpoolURI,
}
# Build volume parameter, if specified
if (volspec):
vols = volspec.split(',')
volentry = []
for vol in vols:
volentry.append(self.unmanaged_volume_query(vol))
params['unmanaged_volume_list'] = volentry
if (host):
hostURI = self.host_query(host)
params['host'] = hostURI
# Build cluster parameter, if specified
if (cluster):
clusterURI = self.cluster_query(cluster)
params['cluster'] = clusterURI
if(BOURNE_DEBUG == '1'):
            print str(params)
resp = self.api('POST', URI_UNMANAGED_EXPORTED_VOLUMES, params)
self.assert_is_dict(resp)
if('details' in resp):
print "Failed operation: "+ resp['details']
return resp;
tr_list = resp['task']
result = list()
for tr in tr_list:
s = self.api_sync_2(tr['resource']['id'], tr['id'], self.ingest_show_task)
result.append(s)
return result
def ingest_unexported_volumes(self, varray, vpool, project, volspec):
projectURI = self.project_query(project).strip()
varrayURI = self.neighborhood_query(varray).strip()
vpoolURI = self.cos_query("block", vpool).strip()
params = {
'project' : projectURI,
'varray' : varrayURI,
'vpool' : vpoolURI,
}
# Build volume parameter
if (volspec):
volentry = []
vols = volspec.split(',')
for vol in vols:
volentry.append(self.unmanaged_volume_query(vol))
params['unmanaged_volume_list'] = volentry
if(BOURNE_DEBUG == '1'):
print str(params)
resp = self.api('POST', URI_UNMANAGED_UNEXPORTED_VOLUMES, params)
self.assert_is_dict(resp)
if('details' in resp):
print "Failed operation: "+ resp['details']
return resp;
tr_list = resp['task']
result = list()
for tr in tr_list:
s = self.api_sync_2(tr['resource']['id'], tr['id'], self.ingest_show_task)
result.append(s)
return result
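    # The 'volspec' argument to the ingest helpers above is a comma-separated list of
    # unmanaged volume names (or URIs); each entry is resolved through
    # unmanaged_volume_query(). Illustrative (not executed, names are examples):
    #   results = self.ingest_unexported_volumes('varray1', 'vpool1', 'project1',
    #                                            'unmanaged_vol1,unmanaged_vol2')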
#
    # ECS Bucket operations
#
def ecs_bucket_show_task(self, bkt, task):
uri_bucket_task = URI_ECS_BUCKET + '/tasks/{1}'
return self.api('GET', uri_bucket_task.format(bkt, task))
def ecs_bucket_create(self, label, project, neighbourhood, cos,
soft_quota, hard_quota, owner):
params = {
'name' : label,
'varray' : neighbourhood,
'vpool' : cos,
'soft_quota' : soft_quota,
'hard_quota' : hard_quota,
'owner' : owner
}
print "ECS BUCKET CREATE Params = ", params
o = self.api('POST', URI_ECS_BUCKET_LIST, params, {'project': project})
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.ecs_bucket_show_task)
return s
# input param to be changed to label
def ecs_bucket_delete(self, uri):
params = {
'forceDelete' : 'false'
}
print "ECS bucket delete = ", URI_RESOURCE_DEACTIVATE.format(URI_ECS_BUCKET.format(uri), params)
o = self.api('POST', URI_RESOURCE_DEACTIVATE.format(URI_ECS_BUCKET.format(uri)), params)
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['op_id'], self.ecs_bucket_show_task)
return (o, s)
# Snapshot session operations
def block_snapshot_session_show_task(self, session_uri, op_id):
return self.api('GET', URI_BLOCK_SNAPSHOT_SESSION_TASK.format(session_uri, op_id))
def block_snapshot_session_query(self, source_session_name):
if (self.__is_uri(source_session_name)):
return source_session_name
(source_name, session_name) = source_session_name.rsplit('/', 1)
source_uri = self.volume_query(source_name)
source_uri = source_uri.strip()
session_uris = self.block_snapshot_session_list(source_uri)
for session_uri in session_uris:
session = self.block_snapshot_session_show(session_uri)
if (session['name'] == session_name):
return session['id']
raise Exception('Invalid snapshot session name')
def block_snapshot_session_list(self, source_name):
source_uri = self.volume_query(source_name)
source_uri = source_uri.strip()
sessions_list = self.api('GET', URI_BLOCK_SNAPSHOT_SESSIONS_LIST.format(source_uri))
self.assert_is_dict(sessions_list)
source_sessions = sessions_list['snapshot_session']
source_session_uris = []
if (type(source_sessions) != list):
source_sessions = [source_sessions]
for source_session in source_sessions:
source_session_uris.append(source_session.get('id'))
return source_session_uris
def block_snapshot_session_show(self, session_uri):
return self.api('GET', URI_BLOCK_SNAPSHOT_SESSION.format(session_uri))
def block_snapshot_session_create(self, source_uri, name, target_count, target_name, target_copymode):
params = dict()
params['name'] = name
if (target_count) :
target_params = dict()
params['new_linked_targets'] = target_params
target_params['count'] = target_count
target_params['target_name'] = target_name
if (target_copymode) :
target_params['copy_mode'] = target_copymode
tasklist = self.api('POST', URI_BLOCK_SNAPSHOT_SESSION_CREATE.format(source_uri), params)
self.assert_is_dict(tasklist)
tasks = tasklist['task']
session_uri = ''
task_opid = ''
if (type(tasks) != list):
tasks = [tasks]
for task in tasks:
session_uri = task['resource']['id']
task_opid = task['op_id']
# Creating multiple would be a group operation and if one is
# complete, then they are all complete.
task = self.api_sync_2(session_uri, task_opid, self.block_snapshot_session_show_task)
return (tasklist, task['state'], task['message'])
def block_snapshot_session_delete(self, session_uri, vipronly):
posturi = URI_BLOCK_SNAPSHOT_SESSION_DELETE.format(session_uri)
if (vipronly):
posturi = posturi + '?type=VIPR_ONLY'
tasklist = self.api('POST', posturi)
self.assert_is_dict(tasklist)
tasks = tasklist['task']
session_uri = ''
task_opid = ''
if (type(tasks) != list):
tasks = [tasks]
for task in tasks:
session_uri = task['resource']['id']
task_opid = task['op_id']
# Deleting multiple would be a group operation and if one is
# complete, then they are all complete.
task = self.api_sync_2(session_uri, task_opid, self.block_snapshot_session_show_task)
return (tasklist, task['state'], task['message'])
def block_snapshot_session_restore(self, session_uri):
task = self.api('POST', URI_BLOCK_SNAPSHOT_SESSION_RESTORE.format(session_uri))
task = self.api_sync_2(task['resource']['id'], task['op_id'], self.block_snapshot_session_show_task)
return task
def block_snapshot_session_link_targets(self, session_uri, count, name, copy_mode):
target_info = dict()
target_info['count'] = count
target_info['target_name'] = name
if (copy_mode):
target_info['copy_mode'] = copy_mode
params = dict()
params['new_linked_targets'] = target_info
tasklist = self.api('POST', URI_BLOCK_SNAPSHOT_SESSION_LINK_TARGETS.format(session_uri), params)
self.assert_is_dict(tasklist)
tasks = tasklist['task']
session_uri = ''
task_opid = ''
if (type(tasks) != list):
tasks = [tasks]
for task in tasks:
session_uri = task['resource']['id']
task_opid = task['op_id']
# Creating multiple would be a group operation and if one is
# complete, then they are all complete.
task = self.api_sync_2(session_uri, task_opid, self.block_snapshot_session_show_task)
return (tasklist, task['state'], task['message'])
def block_snapshot_session_unlink_target(self, session_uri, target_uri, delete_target):
target_info = dict()
target_info['id'] = target_uri
if (delete_target):
target_info['delete_target'] = delete_target
params = dict()
params['linked_targets'] = [target_info]
task = self.api('POST', URI_BLOCK_SNAPSHOT_SESSION_UNLINK_TARGETS.format(session_uri), params)
task = self.api_sync_2(task['resource']['id'], task['op_id'], self.block_snapshot_session_show_task)
return task
def block_snapshot_session_relink_target(self, session_uri, target_uri):
target_info = []
target_info.append(target_uri)
params = dict()
params['ids'] = target_info
tasklist = self.api('POST', URI_BLOCK_SNAPSHOT_SESSION_RELINK_TARGETS.format(session_uri), params)
self.assert_is_dict(tasklist)
tasks = tasklist['task']
session_uri = ''
task_opid = ''
if (type(tasks) != list):
tasks = [tasks]
for task in tasks:
session_uri = task['resource']['id']
task_opid = task['op_id']
# Creating multiple would be a group operation and if one is
# complete, then they are all complete.
task = self.api_sync_2(session_uri, task_opid, self.block_snapshot_session_show_task)
return (tasklist, task['state'], task['message'])
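    # Illustrative snapshot session lifecycle (comment only, not executed); names are
    # examples and the session is addressed as 'volume/session' via
    # block_snapshot_session_query():
    #   session_uri = self.block_snapshot_session_query('vol1/snapsession1')
    #   self.block_snapshot_session_link_targets(session_uri, 1, 'target1', 'copy')
    #   self.block_snapshot_session_restore(session_uri)
    #   self.block_snapshot_session_delete(session_uri, False)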
# Consistency group snapshot session operations
def block_consistency_group_snapshot_session_create(self, group_uri, name, target_count, target_name, target_copymode):
params = dict()
params['name'] = name
if (target_count) :
target_params = dict()
params['new_linked_targets'] = target_params
target_params['count'] = target_count
target_params['target_name'] = target_name
if (target_copymode) :
target_params['copy_mode'] = target_copymode
tasklist = self.api('POST', URI_BLOCK_CONSISTENCY_GROUP_SNAPSHOT_SESSION_CREATE.format(group_uri), params)
self.assert_is_dict(tasklist)
tasks = tasklist['task']
session_uri = ''
task_opid = ''
if (type(tasks) != list):
tasks = [tasks]
for task in tasks:
session_uri = task['resource']['id']
task_opid = task['op_id']
# Creating multiple would be a group operation and if one is
# complete, then they are all complete.
task = self.api_sync_2(session_uri, task_opid, self.block_snapshot_session_show_task)
return (tasklist, task['state'], task['message'])
def block_consistency_group_snapshot_session_list(self, group_uri):
group_uri = group_uri.strip()
sessions_list = self.api('GET', URI_BLOCK_CONSISTENCY_GROUP_SNAPSHOT_SESSION_LIST.format(group_uri))
self.assert_is_dict(sessions_list)
source_sessions = sessions_list['snapshot_session']
source_session_uris = []
if (type(source_sessions) != list):
source_sessions = [source_sessions]
for source_session in source_sessions:
source_session_uris.append(source_session.get('id'))
return source_session_uris
def block_consistency_group_snapshot_session_show(self, session_uri):
return self.api('GET', URI_BLOCK_SNAPSHOT_SESSION.format(session_uri))
def block_snapshot_session_consistency_group_query(self, group_session_name):
if (self.__is_uri(group_session_name)):
return group_session_name
(group_name, session_name) = group_session_name.rsplit('/', 1)
        group_uri = self.block_consistency_group_query(group_name)
        group_uri = group_uri.strip()
        session_uris = self.block_consistency_group_snapshot_session_list(group_uri)
for session_uri in session_uris:
session = self.block_snapshot_session_show(session_uri)
if (session['name'] == session_name):
return session['id']
raise Exception('Invalid snapshot session name')
def block_consistency_group_snapshot_session_query(self, name):
if (self.__is_uri(name)):
return name
(group_name, session_name) = name.rsplit('/', 1)
        session_id = self.block_consistency_group_snapshot_session_get_id_by_name(group_name, session_name)
        if (session_id):
            return session_id
        raise Exception('bad consistency group snapshot name: ' + name)
def block_consistency_group_snapshot_session_get_id_by_name(self, group, name):
groupid = self.block_consistency_group_query(group)
groupid = groupid.strip()
o = self.api('GET', URI_BLOCK_CONSISTENCY_GROUP_SNAPSHOT_SESSION_LIST.format(groupid))
self.assert_is_dict(o)
sessions = o['snapshot_session']
ids = []
if (not o):
return {}
else :
if (type(sessions) != list):
sessions = [sessions]
print 'The requested consistency group snapshot session name : ' + name
for session in sessions:
if(name == session.get('name')):
print 'The selected id : ' + session.get('id')
return session.get('id')
def block_consistency_group_snapshot_session_link_targets(self, session_uri, count, name, copy_mode):
target_info = dict()
target_info['count'] = count
target_info['target_name'] = name
if (copy_mode):
target_info['copy_mode'] = copy_mode
params = dict()
params['new_linked_targets'] = target_info
tasklist = self.api('POST', URI_BLOCK_SNAPSHOT_SESSION_LINK_TARGETS.format(session_uri), params)
self.assert_is_dict(tasklist)
tasks = tasklist['task']
session_uri = ''
task_opid = ''
if (type(tasks) != list):
tasks = [tasks]
for task in tasks:
session_uri = task['resource']['id']
task_opid = task['op_id']
# Creating multiple would be a group operation and if one is
# complete, then they are all complete.
task = self.api_sync_2(session_uri, task_opid, self.block_snapshot_session_show_task)
return (tasklist, task['state'], task['message'])
def block_consistency_group_snapshot_session_unlink_target(self, session_uri, target_uri, delete_target):
target_info = dict()
target_info['id'] = target_uri
if (delete_target):
target_info['delete_target'] = delete_target
params = dict()
params['linked_targets'] = [target_info]
tasklist = self.api('POST', URI_BLOCK_SNAPSHOT_SESSION_UNLINK_TARGETS.format(session_uri), params)
self.assert_is_dict(tasklist)
tasks = tasklist['task']
session_uri = ''
task_opid = ''
if (type(tasks) != list):
tasks = [tasks]
for task in tasks:
session_uri = task['resource']['id']
task_opid = task['op_id']
# Creating multiple would be a group operation and if one is
# complete, then they are all complete.
task = self.api_sync_2(session_uri, task_opid, self.block_snapshot_session_show_task)
return (tasklist, task['state'], task['message'])
def customconfig_query(self, name, scopetype, scope, isdefault):
if (self.__is_uri(name)):
return name
cclist = self.customconfig_list()
for ccrel in cclist:
try:
if (ccrel['name'] == name):
cc = self.api('GET', ccrel['link']['href'])
ccscope = cc['scope']
ccsysdflt = str(cc['system_default'])
print ccsysdflt
print isdefault
if ((ccscope['type'] == scopetype) and (ccscope['value'] == scope) and (ccsysdflt == isdefault)):
return ccrel['id']
except KeyError:
print 'No name key'
raise Exception('Bad custom configuration name: ' + name)
def customconfig_list(self):
cc_list = self.api('GET', URI_CUSTOMCONFIGS)
if (not cc_list):
return {}
ccs = cc_list['config']
if (type(ccs) != list):
return [ccs]
return ccs
def customconfig_show(self, uri):
return self.api('GET', URI_CUSTOMCONFIG.format(uri))
def customconfig_delete(self, uri):
return self.api('POST', URI_CUSTOMCONFIG_DELETE.format(uri))
def customconfig_create(self, type, value, scopetype, scope, register):
scope = {
'type' : scopetype,
'value' : scope,
}
parms = {
'config_type' : type,
'value' : value,
'scope' : scope,
'registered' : register,
}
return self.api('POST', URI_CUSTOMCONFIGS, parms, {})
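    # Illustrative custom configuration creation (comment only, not executed); the
    # config type, value and scope below are made-up examples:
    #   self.customconfig_create('SanZoneName', 'myprefix_{host_name}',
    #                            'global', 'global', 'true')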
#
# ComputeSystem Resources - ComputeSystem
#
# APIs for computeSystem (UCS)
# List all compute systems GET /vdc/compute-systems
def computesystem_list(self):
o = self.api('GET', URI_COMPUTE_SYSTEMS)
if (not o):
return {};
else:
return o
# Fetch/query compute systems by name/label
def computesystem_query(self, name):
if (self.__is_uri(name)):
return name
computesystems = self.computesystem_list()
for system in computesystems['compute_system']:
computesystem = self.computesystem_show(system['id'])
if (computesystem['name'] == name):
return computesystem['id']
raise Exception('bad compute system name ' + name)
# Return service profile template id, for the given serviceprofile template name from the given computeSystem name
def computesystem_getSPTid(self, name, sptname):
if (self.__is_uri(name)):
return name
computesystems = self.computesystem_list()
for system in computesystems['compute_system']:
computesystem = self.computesystem_show(system['id'])
if (computesystem['name'] == name):
serviceprofiletemplates = computesystem['service_profile_templates']
for spt in serviceprofiletemplates:
if (spt['name'] == sptname):
return spt['id']
raise Exception('Bad compute system name ' + name + '. Or bad service profile template name ' + sptname)
# Return compute element id, for the given compute element name from the given computeSystem name
def computesystem_get_computeelement_id(self, name, cename):
if (self.__is_uri(cename)):
return cename
print cename
computesystems = self.computesystem_list()
for system in computesystems['compute_system']:
computesystem = self.computesystem_show(system['id'])
if (computesystem['name'] == name):
#Get all computeElements for this compute system
computeelements = self.api('GET', URI_COMPUTE_SYSTEM_COMPUTEELEMENTS.format(computesystem['id']))
for computeElement in computeelements['compute_element']:
if (computeElement['name'] == cename):
return computeElement['id']
raise Exception('Bad compute system name ' + name + '. Or bad compute element name ' + cename)
# Show details of given computesystem uri GET /vdc/compute-systems/{0}
def computesystem_show(self, uri):
return self.api('GET', URI_COMPUTE_SYSTEM.format(uri))
# Get task for a given computesystem uri and task uri
def computesystem_show_task(self, uri, task):
uri_computesystem_task = URI_COMPUTE_SYSTEM + '/tasks/{1}'
return self.api('GET', uri_computesystem_task.format(uri, task))
# Create a compute system POST /vdc/compute-systems
def computesystem_create(self, label, computeip, computeport, user,
password, type, usessl, osinstallnetwork, compute_image_server):
parms = { 'name' : label,
'ip_address' : computeip,
'port_number' : computeport,
'user_name' : user,
'password' : password,
'system_type' : type,
'use_ssl' : usessl,
'os_install_network' : osinstallnetwork
}
if (compute_image_server):
if (compute_image_server == 'null'):
computeImageServerURI = "null"
else:
computeImageServerURI = self.computeimageserver_query(compute_image_server)
parms['compute_image_server'] = computeImageServerURI
return self.api('POST', URI_COMPUTE_SYSTEMS, parms)
# update a compute system PUT /vdc/compute-systems/{0} with the specified imageserver
def computesystem_update(self, uri, compute_image_server):
computeImageServerURI = None
if (compute_image_server):
if (compute_image_server == 'null'):
computeImageServerURI = "null"
else:
computeImageServerURI = self.computeimageserver_query(compute_image_server)
parms = {
'compute_image_server' : computeImageServerURI
}
o = self.api('PUT', URI_COMPUTE_SYSTEM.format(uri), parms)
self.assert_is_dict(o)
s = self.api_sync_2(o['resource']['id'], o['id'], self.computesystem_show_task)
return (o,s)
# Delete compute system
def computesystem_delete(self, uri):
self.computesystem_deregister(uri)
return self.api('POST', URI_RESOURCE_DEACTIVATE.format(URI_COMPUTE_SYSTEM.format(uri)))
# Deregister a compute system
def computesystem_deregister(self, uri):
return self.api('POST', URI_COMPUTE_SYSTEM_DEREGISTER.format(uri))
def computesystem_discover(self, name):
id = self.computesystem_query(name)
return self.api('POST', URI_COMPUTE_SYSTEM_DISCOVER.format(id))
#
# Compute Image Server Resources - ComputeImageServer
# APIs for Compute Image Server
#
# List compute image servers
def computeimageserver_list(self):
o = self.api('GET', URI_COMPUTE_IMAGESERVERS)
if (not o):
return {};
else:
return o
# Get specified image server by name
def computeimageserver_query(self, name):
if (self.__is_uri(name)):
return name
computeimageservers = self.computeimageserver_list()
for imageserver in computeimageservers['compute_imageserver']:
computeimageserver = self.computeimageserver_show(imageserver['id'])
if (computeimageserver['name'] == name):
return computeimageserver['id']
raise Exception('bad compute image server name ' + name)
# Show details of compute image server
def computeimageserver_show(self, uri):
return self.api('GET', URI_COMPUTE_IMAGESERVER.format(uri))
# Get task for a given compute imageserver uri and task uri
def computeimageserver_show_task(self, uri, task):
uri_computeimageserver_task = URI_COMPUTE_IMAGESERVER + '/tasks/{1}'
return self.api('GET', uri_computeimageserver_task.format(uri, task))
# create a compute image server
def computeimageserver_create(self, label, imageserver_ip, imageserver_secondip, imageserver_user,
imageserver_password, tftpBootDir, osinstall_timeout, ssh_timeout, imageimport_timeout):
parms = { 'name' : label,
'imageserver_ip' : imageserver_ip,
'imageserver_secondip' : imageserver_secondip,
'imageserver_user' : imageserver_user,
'imageserver_password' : imageserver_password,
'tftpBootDir' : tftpBootDir,
'osinstall_timeout' : osinstall_timeout,
'ssh_timeout' : ssh_timeout,
'imageimport_timeout' : imageimport_timeout
}
return self.api('POST', URI_COMPUTE_IMAGESERVERS, parms)
# update a compute image server
def computeimageserver_update(self, uri, imageserver_ip, imageserver_secondip):
parms = {}
        if (imageserver_ip):
            parms['imageserver_ip'] = imageserver_ip
        if (imageserver_secondip):
            parms['imageserver_secondip'] = imageserver_secondip
o = self.api('PUT', URI_COMPUTE_IMAGESERVER.format(uri), parms)
if (not o):
return {}
return o
# delete compute image server
def computeimageserver_delete(self, uri):
return self.api('POST', URI_RESOURCE_DEACTIVATE.format(URI_COMPUTE_IMAGESERVER.format(uri)))
#
# Compute Image Resources - ComputeImage
# APIs for Compute Image
#
# List all computeImages
def computeimage_list(self):
o = self.api('GET', URI_COMPUTE_IMAGES)
if (not o):
return {};
else:
return o
# Fetch a compute image
def computeimage_query(self, name):
if (self.__is_uri(name)):
return name
computeimages = self.computeimage_list()
for image in computeimages['compute_image']:
computeimage = self.computeimage_show(image['id'])
if (computeimage['name'] == name):
return computeimage['id']
raise Exception('bad compute image name ' + name)
# show details of compute image
def computeimage_show(self, uri):
return self.api('GET', URI_COMPUTE_IMAGE.format(uri))
# Get task for a given compute image uri and task uri
def computeimage_show_task(self, uri, task):
uri_computeimage_task = URI_COMPUTE_IMAGE + '/tasks/{1}'
return self.api('GET', uri_computeimage_task.format(uri, task))
# Create a compute image
def computeimage_create(self, label, image_url):
parms = { 'name' : label,
'image_url' : image_url
}
return self.api('POST', URI_COMPUTE_IMAGES, parms)
# delete a compute image
def computeimage_delete(self, uri):
return self.api('POST', URI_RESOURCE_DEACTIVATE.format(URI_COMPUTE_IMAGE.format(uri)))
#
#compute virtual pool APIs
#
# Create a compute virtual pool
def computevirtualpool_create(self, name, computesysname, systemtype, usematchedpools, varray, template, templatetype):
#get varray details
varray_list = []
varrayURI = self.neighborhood_query(varray)
varray_list.append(varrayURI)
varraydictlist = { 'varray' : varray_list }
        # get the service profile template for the given compute system
sptIDs = []
templatename=template
if (templatetype == 'Initial'):
templatename=templatename+" (Initial Template)"
else:
templatename=templatename+" (Updating Template)"
templateURI = self.computesystem_getSPTid(computesysname, templatename)
sptIDs.append(templateURI)
sptdictList = { 'service_profile_template': sptIDs }
params = {
'name': name,
'varrays':varray_list,
'service_profile_templates':sptIDs,
'system_type':systemtype,
'use_matched_elements':usematchedpools
}
print params
return self.api('POST', URI_COMPUTE_VIRTUAL_POOLS, params)
# Assign compute elements to the compute virtual pool
def computevirtualpool_assign(self, uri, vpoolparams):
return self.api('PUT', URI_COMPUTE_VIRTUAL_POOL_ASSIGN.format(uri), vpoolparams)
# Fetch a compute virtual pool
def computevirtualpool_query(self, name):
if (self.__is_uri(name)):
return name
computevpools = self.computevirtualpool_list()
for cvp in computevpools['computevirtualpool']:
#computeimage = self.computeimage_show(image['id'])
if (cvp['name'] == name):
return cvp['id']
raise Exception('bad compute virtual pool name ' + name)
# list all compute virtual pools
def computevirtualpool_list(self):
o = self.api('GET', URI_COMPUTE_VIRTUAL_POOLS)
if (not o):
return {};
else:
return o
# shows the filepolicy
def filepolicy_show(self, uri):
return self.api('GET', URI_FILE_POLICY_SHOW.format(uri))
# lists all filepolicies
def filepolicy_list(self):
o = self.api('GET', URI_FILE_POLICIES)
if (not o):
return {};
returnlst = o['file_policy'];
if(type(returnlst) != list):
return [returnlst];
return returnlst;
# queries filepolicy
def filepolicy_query(self, name):
if (self.__is_uri(name)):
return name
filepolicies = self.filepolicy_list()
for filepolicy in filepolicies:
try:
if (filepolicy['name'] == name):
return filepolicy['id']
except KeyError:
print 'no name key'
raise Exception('bad filepolicy name: ' + name)
# deletes the filepolicy
def filepolicy_delete(self, uri):
return self.api('DELETE', URI_FILE_POLICY_DELETE.format(uri))
# creates the filepolicy
def filepolicy_create_pol(self, name, policy_type, apply_at, description, policyscheduleweek, policyschedulemonth, snapshotnamepattern, snapshotexpiretype, snapshotexpirevalue, policyschedulefrequency, policyschedulerepeat, policyscheduletime, replicationconfiguration, replicationtype, replicationcopymode, priority, num_worker_threads, is_access_to_tenants):
create_request = {}
policy_schedule = {}
snapshot_params = {}
replication_params = {}
snapshot_expire_params = {}
create_request['policy_type'] = policy_type
create_request['policy_name'] = name
create_request['policy_description'] = description
create_request['apply_at'] = apply_at
policy_schedule['schedule_frequency'] = policyschedulefrequency
policy_schedule['schedule_repeat'] = policyschedulerepeat
policy_schedule['schedule_time'] = policyscheduletime
policy_schedule['schedule_day_of_week'] = policyscheduleweek
policy_schedule['schedule_day_of_month'] = policyschedulemonth
if(policy_type =='file_snapshot'):
snapshot_expire_params['expire_type'] = snapshotexpiretype
snapshot_expire_params['expire_value'] = snapshotexpirevalue
snapshot_params['snapshot_name_pattern'] = snapshotnamepattern
snapshot_params['snapshot_expire_params'] = snapshot_expire_params
snapshot_params['policy_schedule'] = policy_schedule
create_request['snapshot_params'] = snapshot_params
elif(policy_type =='file_replication'):
create_request['is_access_to_tenants'] = is_access_to_tenants
replication_params['replication_type'] = replicationtype
replication_params['replication_copy_mode'] = replicationcopymode
replication_params['replicate_configuration'] = replicationconfiguration
replication_params['policy_schedule'] = policy_schedule
create_request['priority'] = priority
create_request['num_worker_threads'] = num_worker_threads
create_request['replication_params'] = replication_params
return self.api('POST', URI_FILE_POLICIES, create_request)
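    # Illustrative snapshot-policy creation (comment only, not executed); all schedule,
    # naming and expiry values are made-up examples:
    #   self.filepolicy_create_pol('daily_snap', 'file_snapshot', 'file_system',
    #                              'daily snapshots', None, None, 'snap_%Y-%m-%d',
    #                              'hours', '24', 'days', '1', '02:00',
    #                              None, None, None, None, None, 'false')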
    # assigns the filepolicy to a vpool
def filepolicy_vpool_assign(self, name, apply_on_target_site, assign_to_vpools, source_varray, target_varrays) :
assign_request = {}
assign_request['apply_on_target_site'] = apply_on_target_site
vpool_assign_param = {}
assign_request_vpools = []
if( assign_to_vpools is not None):
uri = self.cos_query("file", assign_to_vpools).strip()
assign_request_vpools.append(uri)
vpool_assign_param['assign_to_vpools'] = assign_request_vpools
assign_request['vpool_assign_param'] = vpool_assign_param
if (source_varray is not None and target_varrays is not None):
file_replication_topologies = []
file_replication_topology = {}
assign_target_varrays = []
src_varray_uri = self.neighborhood_query(source_varray).strip()
file_replication_topology['source_varray']= src_varray_uri
uri = self.neighborhood_query(target_varrays).strip()
assign_target_varrays.append(uri)
file_replication_topology['target_varrays']= assign_target_varrays
file_replication_topologies.append(file_replication_topology)
assign_request['file_replication_topologies']= file_replication_topologies
filepolicy = self.filepolicy_query(name)
return self.api('POST', URI_FILE_POLICY_ASSIGN.format(filepolicy), assign_request)
# assigns the filepolicy to project
def filepolicy_project_assign(self, name, apply_on_target_site, project_assign_vpool, assign_to_projects, source_varray, target_varrays):
assign_request = {}
assign_request['apply_on_target_site'] = apply_on_target_site
project_assign_param = {}
assign_request_projects = []
assign_request_project_vpools = []
if( project_assign_vpool is not None and assign_to_projects is not None):
uri = self.project_query(assign_to_projects).strip()
assign_request_projects.append(uri)
vpooluri = self.cos_query("file", project_assign_vpool).strip()
            project_assign_param['vpool'] = vpooluri
project_assign_param['assign_to_projects'] = assign_request_projects
assign_request['project_assign_param'] = project_assign_param
if (source_varray is not None and target_varrays is not None):
file_replication_topologies = []
file_replication_topology = {}
assign_target_varrays = []
src_varray_uri = self.neighborhood_query(source_varray).strip()
file_replication_topology['source_varray']= src_varray_uri
uri = self.neighborhood_query(target_varrays).strip()
assign_target_varrays.append(uri)
file_replication_topology['target_varrays']= assign_target_varrays
file_replication_topologies.append(file_replication_topology)
assign_request['file_replication_topologies']= file_replication_topologies
filepolicy = self.filepolicy_query(name)
return self.api('POST', URI_FILE_POLICY_ASSIGN.format(filepolicy), assign_request)
# unassigns the filepolicy from vpool
def filepolicy_vpool_unassign(self, name, unassign_from_vpools):
parms={}
unassign_request_vpools = []
if( unassign_from_vpools is not None):
uri = self.cos_query("file", unassign_from_vpools).strip()
unassign_request_vpools.append(uri)
parms['unassign_from'] = unassign_request_vpools
filepolicy = self.filepolicy_query(name)
return self.api('POST', URI_FILE_POLICY_UNASSIGN.format(filepolicy), parms)
# unassigns the filepolicy from project
def filepolicy_project_unassign(self, name, unassign_from_projects):
parms={}
unassign_request_projects = []
if( unassign_from_projects is not None):
uri = self.project_query(unassign_from_projects).strip()
unassign_request_projects.append(uri)
parms['unassign_from'] = unassign_request_projects
filepolicy = self.filepolicy_query(name)
return self.api('POST', URI_FILE_POLICY_UNASSIGN.format(filepolicy), parms)
def storageportgroup_register(self, systemuri, pguri):
return self.api('POST', URI_STORAGEPORTGROUP_REGISTER.format(systemuri, pguri))
def storageportgroup_deregister(self, systemuri, pguri):
return self.api('POST', URI_STORAGEPORTGROUP_DEREGISTER.format(systemuri, pguri))
def storageportgroup_show(self, systemuri, portgroupuri):
return self.api('GET', URI_STORAGEPORTGROUP.format(systemuri, portgroupuri))
def storageportgroup_query(self, name):
#
# name = { portgroup_uri | concat(storagedevice, portgroup) }
#
try:
(sdname, pgname) = name.split('/', 1)
except:
return name
sduri = self.storagedevice_query(sdname)
portgroups = self.storageportgroup_list(sduri)
for pg in portgroups:
portgroup = self.storageportgroup_show(sduri, pg['id'])
if (portgroup['name'] == pgname):
return portgroup['id']
raise Exception('bad storageportgroup name: ' + name)
def storageportgroup_list(self, sduri):
o = self.api('GET', URI_STORAGEPORTGROUPS.format(sduri))
if (not o):
            return []
else:
return o['storage_port_group']
def storageportgroup_delete(self, systemuri, pguri):
o = self.api('POST', URI_STORAGEPORTGROUP_DELETE.format(systemuri, pguri))
self.assert_is_dict(o)
s = self.api_sync_4(o['id'], self.task_show)
return (o, s)
def storageportgroup_create(self, systemuri, name, ports):
params = dict()
params['name'] = name
addports = list()
for port in ports:
print port
porturi = self.storageport_query_by_portname(port, systemuri)
print porturi
addports.append(porturi)
params['storage_ports'] = addports
o = self.api('POST', URI_STORAGEPORTGROUPS.format(systemuri), params)
self.assert_is_dict(o)
s = self.api_sync_4(o['id'], self.task_show)
return (o, s)
def storageport_query_by_portname(self, name, systemuri):
ports = self.storageport_list(systemuri)
for p in ports:
sport = self.storageport_show(systemuri, p['id'])
if (sport['port_name'] == name):
return sport['id']
raise Exception('bad storageport name: ' + name)
def export_group_changeportgroup(self, groupId, portgroupId, wait):
params = dict()
params['new_port_group'] = portgroupId
if wait:
params['wait_before_remove_paths'] = 'true'
else:
params['wait_before_remove_paths'] = 'false'
if(BOURNE_DEBUG == '1'):
            print str(params)
o = self.api('PUT', URI_EXPORTGROUP_CHANGEPORTGROUP.format(groupId), params)
self.assert_is_dict(o)
if(BOURNE_DEBUG == '1'):
print 'OOO: ' + str(o) + ' :OOO'
        try:
            s = self.api_sync_2(o['resource']['id'], o['op_id'], self.export_show_task)
        except:
            # dump the raw response before propagating; otherwise 's' would be
            # undefined when the final return statement runs
            print o
            raise
        return (o, s)
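# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Shows how the file-policy and port-group helpers above might be combined.
# `bourne` is assumed to be an already-authenticated instance of this API
# client class (the class statement appears earlier in the file); the policy,
# vpool, varray, array and port names below are placeholders, not real resources.
def _example_filepolicy_and_portgroup_workflow(bourne, export_group_uri):
    # Assign a file policy to a vpool with a single replication topology,
    # then remove that assignment again.
    bourne.filepolicy_vpool_assign('daily-replication',   # policy name
                                   'false',               # apply_on_target_site
                                   'vpool-file-gold',     # assign_to_vpools
                                   'varray-site-a',       # source_varray
                                   'varray-site-b')       # target_varrays
    bourne.filepolicy_vpool_unassign('daily-replication', 'vpool-file-gold')
    # Build a storage port group from two named front-end ports and switch an
    # existing export group over to it without waiting before path removal.
    systemuri = bourne.storagedevice_query('vmax-001')
    bourne.storageportgroup_create(systemuri, 'pg-demo', ['FA-1D:04', 'FA-2D:04'])
    pguri = bourne.storageportgroup_query('vmax-001/pg-demo')
    bourne.export_group_changeportgroup(export_group_uri, pguri, False)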
| 41.213613 | 375 | 0.608569 | 370,642 | 0.910883 | 0 | 0 | 50,679 | 0.124548 | 0 | 0 | 73,587 | 0.180846 |
d08144c8fccf523fe00afd797e29a4fc88443666 | 851 | py | Python | configuration.py | stoberblog/sunspec-modbus | 2ce7cc6e92cb480cce4e488c8ffd716ec053ec01 | ["MIT"] | 22 | 2018-03-01T16:13:48.000Z | 2022-02-27T07:59:24.000Z | configuration.py | msgis/sunspec-modbus | 2ce7cc6e92cb480cce4e488c8ffd716ec053ec01 | ["MIT"] | null | null | null | configuration.py | msgis/sunspec-modbus | 2ce7cc6e92cb480cce4e488c8ffd716ec053ec01 | ["MIT"] | 7 | 2019-03-02T17:10:29.000Z | 2021-06-19T00:26:05.000Z |
# -*- coding: utf-8 -*-
"""
@author: stoberblog
@detail: This is a configuration file for the Solar Modbus project.
"""
# MODBUS DETAILS
INVERTER_IP = "192.168.1.29"
MODBUS_PORT = 7502
METER_ADDR = 240
MODBUS_TIMEOUT = 30 #seconds to wait before failure
# METER INSTALLED
METER_INSTALLED = True
# DATABASE
DATABASE_TYPE = "mariadb" # Current options: mariadb
DATABASE_ADDR = "127.0.0.1"
DATABASE_USER = "sUser"
DATABASE_PASSWD = "sPasswd"
DATABASE_DB = "solarMB"
#SCHEDULER
SCHED_INTERVAL = 1 # Minutes between recollecting new data
# DATA
EPOCH_INVERTER = False # False = Use computer time, True = get time off inverter (scheduler will still use computer time)
POW_THERESHOLD = 10 # Watt threshold
LOG_LEVEL = "ERROR" # Levels: NONE, FATAL, ERROR, NOTICE, DEBUG
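# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# A minimal example of how the connection settings above might be consumed.
# The project's actual Modbus client code lives elsewhere, so this only does a
# plain TCP reachability check with the standard library; _inverter_reachable
# is a hypothetical helper, not part of the sunspec-modbus codebase.
import socket

def _inverter_reachable():
    """Return True if a TCP connection to the inverter's Modbus port succeeds."""
    try:
        sock = socket.create_connection((INVERTER_IP, MODBUS_PORT),
                                        timeout=MODBUS_TIMEOUT)
        sock.close()
        return True
    except socket.error:
        return False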
| 26.59375 | 129 | 0.673325 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 529 | 0.621622 |
d0814ab2d80cbf44bcd4c19447312d5ef89e098c | 2,533 | py | Python | tests/test_plain.py | Nafi-Amaan-Hossain/willpyre | a506d32765088c8e59c46672946891e61dce87f2 | ["BSD-3-Clause"] | 1 | 2021-08-16T08:18:22.000Z | 2021-08-16T08:18:22.000Z | tests/test_plain.py | Nafi-Amaan-Hossain/willpyre | a506d32765088c8e59c46672946891e61dce87f2 | ["BSD-3-Clause"] | null | null | null | tests/test_plain.py | Nafi-Amaan-Hossain/willpyre | a506d32765088c8e59c46672946891e61dce87f2 | ["BSD-3-Clause"] | null | null | null |
from async_asgi_testclient import TestClient
from myapp import main
import pytest
@pytest.mark.asyncio
async def test_willpyre_app():
async with TestClient(main) as client:
resp = await client.get("/")
assert resp.status_code == 200
assert resp.text == "index page"
@pytest.mark.asyncio
async def test_willpyre_post():
async with TestClient(main) as client:
resp = await client.post("/login/", data="a=anything")
assert resp.status_code == 200
assert resp.text == "anything"
@pytest.mark.asyncio
async def test_willpyre_get():
async with TestClient(main) as client:
resp = await client.get("/login/?user=admin")
assert resp.status_code == 200
assert resp.text == "Welcome admin"
@pytest.mark.asyncio
async def test_trailing_slash():
async with TestClient(main) as client:
resp = await client.get("/login")
assert resp.status_code == 200
assert resp.text == "Welcome ordinary user"
@pytest.mark.asyncio
async def test_url_vars():
async with TestClient(main) as client:
resp = await client.get("/api/hello")
assert resp.status_code == 200
assert resp.text == "You requested the variable hello"
@pytest.mark.asyncio
async def test_url_many():
async with TestClient(main) as client:
resp = await client.get("/static/foo/bar/baz")
assert resp.status_code == 200
assert resp.text == "foobarbaz"
@pytest.mark.asyncio
async def test_utils():
async with TestClient(main) as client:
resp = await client.get("/json")
assert resp.json() == {'a': 'b'}
assert resp.headers["Content-Type"] == "application/json"
@pytest.mark.asyncio
async def test_response404():
async with TestClient(main) as client:
resp = await client.get("/non-exhistent")
assert resp.text == "Not found"
assert resp.status_code == 404
@pytest.mark.asyncio
async def test_response405():
async with TestClient(main) as client:
resp = await client.open("/login", method="NO_SUCH_METHOD")
assert resp.text == "Method not allowed"
assert resp.status_code == 405
@pytest.mark.asyncio
async def test_put():
async with TestClient(main) as client:
resp = await client.put("/others")
assert resp.text == "others"
@pytest.mark.asyncio
async def test_patch():
async with TestClient(main) as client:
resp = await client.patch("/others")
assert resp.text == "others"
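# --- Hedged addition (for illustration; not in the original test module) ---
# Optional entry point so the suite can be launched with `python test_plain.py`;
# it simply delegates to pytest. The async tests above still require the
# pytest-asyncio plugin and the async-asgi-testclient package to be installed.
if __name__ == "__main__":
    import sys
    sys.exit(pytest.main([__file__]))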
| 25.585859 | 67 | 0.660087 | 0 | 0 | 0 | 0 | 2,418 | 0.954599 | 2,187 | 0.863403 | 340 | 0.134228 |
d0819d8d58bd2ac25c22fc55e6bd0a929bf9571f | 122 | py | Python | rangefinder/bosch/requests/constants/__init__.py | tothcs1105/BOSCH-GLM-rangefinder | 419179bd63be97060d91cb87b075da47610dfbfb | ["MIT"] | 1 | 2020-08-03T15:31:13.000Z | 2020-08-03T15:31:13.000Z | rangefinder/bosch/requests/constants/__init__.py | tothcs1105/BOSCH-GLM-rangefinder | 419179bd63be97060d91cb87b075da47610dfbfb | ["MIT"] | null | null | null | rangefinder/bosch/requests/constants/__init__.py | tothcs1105/BOSCH-GLM-rangefinder | 419179bd63be97060d91cb87b075da47610dfbfb | ["MIT"] | null | null | null |
from .backlight_mode import BacklightMode
from .angle_unit import AngleUnit
from .measurement_unit import MeasurementUnit
| 30.5 | 45 | 0.877049 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d084098c117233a70fe62aa538d6aad7c2570d51 | 577 | py | Python | operations/plus.py | Ootmann/einmaleins | aeb986cf40b7638d14ad69b68fc12b1445cf3c9a | ["MIT"] | null | null | null | operations/plus.py | Ootmann/einmaleins | aeb986cf40b7638d14ad69b68fc12b1445cf3c9a | ["MIT"] | null | null | null | operations/plus.py | Ootmann/einmaleins | aeb986cf40b7638d14ad69b68fc12b1445cf3c9a | ["MIT"] | null | null | null |
import random
from operations import abstract_operation
class Plus(abstract_operation.AbstractOperation):
a = 0
b = 0
def __init__(self):
super().__init__("+", "Addieren")
def get_question(self):
return "{} + {}".format(str(self.a), str(self.b))
def solve(self):
return int(self.a + self.b)
def update(self):
r1n = random.randint(0, 1000)
r2n = random.randint(0, 1000)
r12 = [max(r1n, r2n) - min(r1n, r2n), min(r1n, r2n)]
random.shuffle(r12)
self.a = r12[0]
self.b = r12[1]
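# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Small demo of the Plus operation: update() draws two operands whose sum stays
# within 0..1000, get_question() renders the exercise and solve() returns the
# expected answer. Run from the project root (e.g. `python -m operations.plus`)
# so that the `operations` package import above resolves.
if __name__ == "__main__":
    demo = Plus()
    demo.update()
    print("{} = {}".format(demo.get_question(), demo.solve()))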
| 22.192308 | 60 | 0.573657 | 517 | 0.896014 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.038128 |